diff --git a/.github/.kodiak.toml b/.github/.kodiak.toml
new file mode 100644
index 0000000..ded81e4
--- /dev/null
+++ b/.github/.kodiak.toml
@@ -0,0 +1,18 @@
+version = 1
+
+[merge]
+method = "squash" # default: "merge"
+delete_branch_on_merge = true # default: false
+optimistic_updates = true # default: true
+prioritize_ready_to_merge = true # default: false
+
+[merge.message]
+title = "pull_request_title" # default: "github_default"
+body = "github_default" # default: "github_default"
+strip_html_comments = true # default: false
+
+[update]
+always = true # default: false
+
+[approve]
+auto_approve_usernames = ["1gtm", "tamalsaha"]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..22c217f
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,101 @@
+name: CI
+
+on:
+ pull_request:
+ branches:
+ - "*"
+ push:
+ branches:
+ - master
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build
+ runs-on: ubuntu-24.04
+ steps:
+ - name: Set up Go 1.23
+ uses: actions/setup-go@v1
+ with:
+ go-version: '1.23'
+ id: go
+
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v1
+
+ - name: Prepare Host
+ run: |
+ sudo apt-get -qq update || true
+ sudo apt-get install -y bzr
+ # install yq
+ curl -fsSL -o yqq https://github.com/mikefarah/yq/releases/download/3.3.0/yq_linux_amd64
+ chmod +x yqq
+ sudo mv yqq /usr/local/bin/yqq
+ pipx install yq
+ # install kubectl
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.1/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ sudo mv ./kubectl /usr/local/bin/kubectl
+
+ - name: Run checks
+ run: |
+ ./hack/scripts/update-chart-dependencies.sh
+ make ci
+
+ kubernetes:
+ name: Kubernetes
+ runs-on: ubuntu-24.04
+ needs: build
+ strategy:
+ matrix:
+ k8s: [v1.26.15, v1.27.16, v1.28.9, v1.29.7, v1.30.3, v1.31.0]
+ steps:
+ - uses: actions/checkout@v1
+
+ - name: Install yq
+ run: |
+ curl -fsSL -o yqq https://github.com/mikefarah/yq/releases/download/3.3.0/yq_linux_amd64
+ chmod +x yqq
+ sudo mv yqq /usr/local/bin/yqq
+
+ - name: Create Kubernetes ${{ matrix.k8s }} cluster
+ id: kind
+ uses: engineerd/setup-kind@v0.5.0
+ with:
+ version: v0.25.0
+ image: kindest/node:${{ matrix.k8s }}
+
+ - name: Prepare cluster for testing
+ id: local-path
+ env:
+ USERNAME: 1gtm
+ REGISTRY_SECRET: regcred
+ run: |
+ echo "waiting for nodes to be ready ..."
+ kubectl wait --for=condition=Ready nodes --all --timeout=5m
+ kubectl get nodes
+ echo
+ echo "install helm 3"
+ pushd /usr/local/bin && sudo curl -fsSLO https://github.com/x-helm/helm/releases/latest/download/helm && sudo chmod +x helm && popd
+
+ - name: Issue License
+ env:
+ BYTEBUILDERS_LICENSE_TOKEN: ${{ secrets.BYTEBUILDERS_LICENSE_TOKEN }}
+ run: |
+ export KUBECONFIG="${HOME}/.kube/config"
+ CLUSTER_UID=$(kubectl get ns kube-system -o=jsonpath='{.metadata.uid}')
+ LICENSE_KEY=$(curl -X POST -d "name=1gtm&email=1gtm@appscode.com&product=kubevault-enterprise&cluster=${CLUSTER_UID}&tos=true&token=${BYTEBUILDERS_LICENSE_TOKEN}" https://license-issuer.appscode.com/issue-license)
+ echo "${LICENSE_KEY}" > /tmp/license.txt
+ # ref: https://github.com/mikefarah/yq/issues/230#issuecomment-487458629
+ # yqq w -i ./charts/kubevault-operator/ci/ci-values.yaml license --tag '!!str' -- "${LICENSE_KEY}"
+ # yqq w -i ./charts/kubevault-webhook-server/ci/ci-values.yaml license --tag '!!str' -- "${LICENSE_KEY}"
+
+ - name: Test charts
+ run: |
+ export KUBECONFIG="${HOME}/.kube/config"
+ ./hack/scripts/update-chart-dependencies.sh
+ ./hack/scripts/ct.sh
diff --git a/.github/workflows/cve-report.yml b/.github/workflows/cve-report.yml
new file mode 100644
index 0000000..052078b
--- /dev/null
+++ b/.github/workflows/cve-report.yml
@@ -0,0 +1,73 @@
+name: cve-report
+
+on:
+ schedule:
+ - cron: '0 17 * * *'
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ report:
+ name: Report
+ runs-on: ubuntu-24.04
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Prepare git
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ run: |
+ set -x
+ git config --global user.name "1gtm"
+ git config --global user.email "1gtm@appscode.com"
+ git config --global \
+ url."https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com".insteadOf \
+ "https://github.com"
+ # git remote set-url origin https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git
+
+ - name: Install trivy
+ run: |
+ # wget https://github.com/aquasecurity/trivy/releases/download/v0.18.3/trivy_0.18.3_Linux-64bit.deb
+ # sudo dpkg -i trivy_0.18.3_Linux-64bit.deb
+ sudo apt-get install -y --no-install-recommends wget apt-transport-https gnupg lsb-release
+ wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add -
+ echo deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main | sudo tee -a /etc/apt/sources.list.d/trivy.list
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends trivy
+
+ - name: Install image packer
+ run: |
+ cd /tmp
+ curl -fsSL -O https://github.com/kmodules/image-packer/releases/latest/download/image-packer-linux-amd64.tar.gz
+ tar -xvf image-packer-linux-amd64.tar.gz
+ chmod +x image-packer-linux-amd64
+ sudo mv image-packer-linux-amd64 /usr/local/bin/image-packer
+
+ - name: Generate report
+ run: |
+ image-packer generate-cve-report \
+ --output-dir=catalog \
+ --src=catalog/imagelist.yaml
+ git add catalog/README.md || true
+ git commit -s -a -m "Update cve report $(date --rfc-3339=date)"
+
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v6
+ with:
+ token: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ title: Update cve report
+ branch: update-cve-report
+ delete-branch: true
+ add-paths: |
+ catalog/README.md
+ labels: |
+ automerge
diff --git a/.github/workflows/publish-oci.yml b/.github/workflows/publish-oci.yml
new file mode 100644
index 0000000..573ce58
--- /dev/null
+++ b/.github/workflows/publish-oci.yml
@@ -0,0 +1,60 @@
+name: OCI
+
+on:
+ push:
+ tags:
+ - "*.*"
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}-oci
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build
+ runs-on: ubuntu-24.04
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v1
+
+ - name: Set up QEMU
+ id: qemu
+ uses: docker/setup-qemu-action@v1
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Log in to the GitHub Container registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: 1gtm
+ password: ${{ secrets.LGTM_GITHUB_TOKEN }}
+
+ - name: Install Helm 3
+ run: |
+ pushd /usr/local/bin && sudo curl -fsSLO https://github.com/x-helm/helm/releases/latest/download/helm && sudo chmod +x helm && popd
+
+ - name: Clone charts repository
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ CHART_REPOSITORY: github.com/appscode/charts
+ run: |
+ url="https://${GITHUB_USER}:${GITHUB_TOKEN}@${CHART_REPOSITORY}.git"
+ cd $RUNNER_WORKSPACE
+ git clone $url
+ cd $(basename $CHART_REPOSITORY)
+ git config user.name "${GITHUB_USER}"
+ git config user.email "${GITHUB_USER}@appscode.com"
+
+ - name: Publish OCI charts
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ CHART_REPOSITORY: github.com/appscode/charts
+ run: |
+ export REGISTRY_0=oci://ghcr.io/appscode-charts
+ ./hack/scripts/update-chart-dependencies.sh
+ $RUNNER_WORKSPACE/$(basename $CHART_REPOSITORY)/hack/scripts/publish-oci-charts.sh $(pwd)
diff --git a/.github/workflows/release-tracker.yml b/.github/workflows/release-tracker.yml
new file mode 100644
index 0000000..e23a985
--- /dev/null
+++ b/.github/workflows/release-tracker.yml
@@ -0,0 +1,41 @@
+name: release-tracker
+
+on:
+ pull_request:
+ types: [closed]
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ runs-on: ubuntu-24.04
+
+ steps:
+ - uses: actions/checkout@v1
+
+ - name: Prepare git
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ run: |
+ git config --global user.name "${GITHUB_USER}"
+ git config --global user.email "${GITHUB_USER}@appscode.com"
+ git remote set-url origin https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git
+
+ - name: Install GitHub CLI
+ run: |
+ curl -fsSL https://github.com/github/hub/raw/master/script/get | bash -s 2.14.1
+ sudo mv bin/hub /usr/local/bin
+
+ - name: Update release tracker
+ if: |
+ github.event.action == 'closed' &&
+ github.event.pull_request.merged == true
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ run: |
+ ./hack/scripts/update-release-tracker.sh
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..8be4101
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,55 @@
+name: Release
+
+on:
+ push:
+ tags:
+ - "*.*"
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}-release
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build
+ runs-on: ubuntu-24.04
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v1
+
+ - name: Install GitHub CLI
+ run: |
+ curl -fsSL https://github.com/github/hub/raw/master/script/get | bash -s 2.14.1
+ sudo mv bin/hub /usr/local/bin
+
+ - name: Install Helm 3
+ run: |
+ pushd /usr/local/bin && sudo curl -fsSLO https://github.com/x-helm/helm/releases/latest/download/helm && sudo chmod +x helm && popd
+
+ - name: Clone charts repository
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ CHART_REPOSITORY: ${{ secrets.CHART_REPOSITORY }}
+ run: |
+ url="https://${GITHUB_USER}:${GITHUB_TOKEN}@${CHART_REPOSITORY}.git"
+ cd $RUNNER_WORKSPACE
+ git clone $url
+ cd $(basename $CHART_REPOSITORY)
+ git config user.name "${GITHUB_USER}"
+ git config user.email "${GITHUB_USER}@appscode.com"
+
+ - name: Package
+ env:
+ GITHUB_USER: 1gtm
+ GITHUB_TOKEN: ${{ secrets.LGTM_GITHUB_TOKEN }}
+ CHART_REPOSITORY: ${{ secrets.CHART_REPOSITORY }}
+ run: |
+ ./hack/scripts/update-chart-dependencies.sh
+ cd $RUNNER_WORKSPACE/$(basename $CHART_REPOSITORY)
+ GIT_TAG=${GITHUB_REF#"refs/tags/"}
+ if [[ $GIT_TAG =~ "-alpha." ]] || [[ $GIT_TAG =~ "-beta." ]]; then
+ export REPO_DIR=testing
+ fi
+ ./hack/scripts/open-pr.sh $GITHUB_WORKSPACE
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..12a18b4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,41 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+/bin
+/.go
+
+/.crds
+/.idea
+/.markdownlint.json
+/.vscode
+/apiserver.local.config
+/coverage.txt
+/dist
+/hack/config/.env
+/test/e2e/junit.xml
+/test/e2e/report.xml
+
+.terraform
+*.tfstate*
+
+/charts/kubevault/charts
+/charts/kubevault-opscenter/charts
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
new file mode 100644
index 0000000..dfc1179
--- /dev/null
+++ b/DEVELOPMENT.md
@@ -0,0 +1,9 @@
+## catalog
+
+## Generate image scripts
+
+The image scripts are generated using [image-packer](https://github.com/kmodules/image-packer):
+
+```bash
+./hack/scripts/update-catalog.sh
+```
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..dc16ee1
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,5 @@
+## License
+
+Source code in this repository, as well as the binaries, Docker images, and charts produced by the build process, is licensed under the AppsCode Community License 1.0.0. You may obtain a copy of the License at
+
+ - [AppsCode-Community-1.0.0](https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..a086c27
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,467 @@
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SHELL=/bin/bash -o pipefail
+
+GO_PKG := go.virtual-secrets.dev
+REPO := $(notdir $(shell pwd))
+BIN := installer
+
+# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
+CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
+# https://github.com/appscodelabs/gengo-builder
+CODE_GENERATOR_IMAGE ?= ghcr.io/appscode/gengo:release-1.29
+API_GROUPS ?= installer:v1alpha1
+
+# This version-strategy uses git tags to set the version string
+git_branch := $(shell git rev-parse --abbrev-ref HEAD)
+git_tag := $(shell git describe --exact-match --abbrev=0 2>/dev/null || echo "")
+commit_hash := $(shell git rev-parse --verify HEAD)
+commit_timestamp := $(shell date --date="@$$(git show -s --format=%ct)" --utc +%FT%T)
+
+VERSION := $(shell git describe --tags --always --dirty)
+version_strategy := commit_hash
+ifdef git_tag
+ VERSION := $(git_tag)
+ version_strategy := tag
+else
+ ifeq (,$(findstring $(git_branch),master HEAD))
+ ifneq (,$(patsubst release-%,,$(git_branch)))
+ VERSION := $(git_branch)
+ version_strategy := branch
+ endif
+ endif
+endif
+
+###
+### These variables should not need tweaking.
+###
+
+SRC_PKGS := apis tests # directories which hold app source (not vendored)
+SRC_DIRS := $(SRC_PKGS)
+
+DOCKER_PLATFORMS := linux/amd64 linux/arm linux/arm64
+BIN_PLATFORMS := $(DOCKER_PLATFORMS)
+
+# Used internally. Users should pass GOOS and/or GOARCH.
+OS := $(if $(GOOS),$(GOOS),$(shell go env GOOS))
+ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH))
+
+BASEIMAGE_PROD ?= gcr.io/distroless/static-debian12
+BASEIMAGE_DBG ?= debian:12
+
+GO_VERSION ?= 1.23
+BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION)
+CHART_TEST_IMAGE ?= quay.io/helmpack/chart-testing:v3.11.0
+
+OUTBIN = bin/$(OS)_$(ARCH)/$(BIN)
+ifeq ($(OS),windows)
+ OUTBIN = bin/$(OS)_$(ARCH)/$(BIN).exe
+endif
+
+# Directories that we need created to build/test.
+BUILD_DIRS := bin/$(OS)_$(ARCH) \
+ .go/bin/$(OS)_$(ARCH) \
+ .go/cache \
+ hack/config \
+ $(HOME)/.credentials \
+ $(HOME)/.kube \
+ $(HOME)/.minikube
+
+DOCKER_REPO_ROOT := /go/src/$(GO_PKG)/$(REPO)
+
+# If you want to build all binaries, see the 'all-build' rule.
+# If you want to build all containers, see the 'all-container' rule.
+# If you want to build AND push all containers, see the 'all-push' rule.
+all: fmt build
+
+# For the following OS/ARCH expansions, we transform OS/ARCH into OS_ARCH
+# because make pattern rules don't match with embedded '/' characters.
+
+build-%:
+ @$(MAKE) build \
+ --no-print-directory \
+ GOOS=$(firstword $(subst _, ,$*)) \
+ GOARCH=$(lastword $(subst _, ,$*))
+
+all-build: $(addprefix build-, $(subst /,_, $(BIN_PLATFORMS)))
+
+version:
+ @echo version=$(VERSION)
+ @echo version_strategy=$(version_strategy)
+ @echo git_tag=$(git_tag)
+ @echo git_branch=$(git_branch)
+ @echo commit_hash=$(commit_hash)
+ @echo commit_timestamp=$(commit_timestamp)
+
+.PHONY: clientset
+clientset:
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ /go/src/k8s.io/code-generator/generate-groups.sh \
+ "deepcopy" \
+ $(GO_PKG)/$(REPO)/client \
+ $(GO_PKG)/$(REPO)/apis \
+ "$(API_GROUPS)" \
+ --go-header-file "./hack/license/go.txt"
+
+# Generate openapi schema
+.PHONY: openapi
+openapi: $(addprefix openapi-, $(subst :,_, $(API_GROUPS)))
+ @echo "Generating api/openapi-spec/swagger.json"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ --env GO111MODULE=on \
+ --env GOFLAGS="-mod=vendor" \
+ $(BUILD_IMAGE) \
+ go run hack/gencrd/main.go
+
+openapi-%:
+ @echo "Generating openapi schema for $(subst _,/,$*)"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ openapi-gen \
+ --v 1 --logtostderr \
+ --go-header-file "./hack/license/go.txt" \
+ --input-dirs "$(GO_PKG)/$(REPO)/apis/$(subst _,/,$*),k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/util/intstr,k8s.io/apimachinery/pkg/version,k8s.io/api/core/v1,k8s.io/api/apps/v1,k8s.io/api/rbac/v1" \
+ --output-package "$(GO_PKG)/$(REPO)/apis/$(subst _,/,$*)" \
+ --report-filename /tmp/violation_exceptions.list
+
+# Generate CRD manifests
+.PHONY: gen-crds
+gen-crds:
+ @echo "Generating CRD manifests"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ controller-gen \
+ $(CRD_OPTIONS) \
+ paths="./apis/..." \
+ output:crd:artifacts:config=.crds
+
+crds_to_patch := installer.virtual-secrets.dev_kubevaultoperators.yaml
+
+.PHONY: patch-crds
+patch-crds: $(addprefix patch-crd-, $(crds_to_patch))
+patch-crd-%: $(BUILD_DIRS)
+ @echo "patching $*"
+ @kubectl patch -f .crds/$* -p "$$(cat hack/crd-patch.json)" --type=json --local=true -o yaml > bin/$*
+ @mv bin/$* .crds/$*
+
+.PHONY: label-crds
+label-crds: $(BUILD_DIRS)
+ @for f in .crds/*.yaml; do \
+ echo "applying app.kubernetes.io/name=kubevault label to $$f"; \
+ kubectl label --overwrite -f $$f --local=true -o yaml app.kubernetes.io/name=kubevault > bin/crd.yaml; \
+ mv bin/crd.yaml $$f; \
+ done
+
+.PHONY: gen-crd-protos
+gen-crd-protos: $(addprefix gen-crd-protos-, $(subst :,_, $(API_GROUPS)))
+
+gen-crd-protos-%:
+ @echo "Generating protobuf for $(subst _,/,$*)"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ go-to-protobuf \
+ --go-header-file "./hack/license/go.txt" \
+ --proto-import=$(DOCKER_REPO_ROOT)/vendor \
+ --proto-import=$(DOCKER_REPO_ROOT)/third_party/protobuf \
+ --apimachinery-packages=-k8s.io/apimachinery/pkg/api/resource,-k8s.io/apimachinery/pkg/apis/meta/v1,-k8s.io/apimachinery/pkg/apis/meta/v1beta1,-k8s.io/apimachinery/pkg/runtime,-k8s.io/apimachinery/pkg/runtime/schema,-k8s.io/apimachinery/pkg/util/intstr \
+ --packages=-k8s.io/api/core/v1,go.virtual-secrets.dev/installer/apis/$(subst _,/,$*)
+
+.PHONY: gen-bindata
+gen-bindata:
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src/.crds \
+ -v /tmp:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ go-bindata -ignore=\\.go -ignore=\\.DS_Store -mode=0644 -modtime=1573722179 -o bindata.go -pkg .crds ./...
+
+.PHONY: gen-values-schema
+gen-values-schema: $(BUILD_DIRS)
+ @for dir in charts/*/; do \
+ dir=$${dir%*/}; \
+ dir=$${dir##*/}; \
+ crd_file=.crds/installer.virtual-secrets.dev_$$(echo $$dir | tr -d '-')s.yaml; \
+ if [ ! -f $${crd_file} ]; then \
+ continue; \
+ fi; \
+ yq -y --indentless '.spec.versions[0].schema.openAPIV3Schema.properties.spec | del(.description)' $${crd_file} > charts/$${dir}/values.openapiv3_schema.yaml; \
+ done
+
+.PHONY: gen-chart-doc
+gen-chart-doc: $(shell find $$(pwd)/charts -maxdepth 1 -mindepth 1 -type d -printf 'gen-chart-doc-%f ')
+
+gen-chart-doc-%:
+ @echo "Generate $* chart docs"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ chart-doc-gen -d ./charts/$*/doc.yaml -v ./charts/$*/values.yaml > ./charts/$*/README.md
+
+.PHONY: manifests
+manifests: gen-crds gen-values-schema gen-chart-doc
+
+.PHONY: gen
+gen: clientset manifests
+
+CHART_REGISTRY ?= appscode
+CHART_REGISTRY_URL ?= https://charts.appscode.com/stable/
+CHART_VERSION ?=
+APP_VERSION ?= $(CHART_VERSION)
+
+.PHONY: update-charts
+update-charts: $(shell find $$(pwd)/charts -maxdepth 1 -mindepth 1 -type d -printf 'chart-%f ')
+
+chart-%:
+ @$(MAKE) contents-$* gen-chart-doc-$* --no-print-directory
+
+contents-%:
+ @yq -y --indentless -i '.repository.name="$(CHART_REGISTRY)"' ./charts/$*/doc.yaml
+ @yq -y --indentless -i '.repository.url="$(CHART_REGISTRY_URL)"' ./charts/$*/doc.yaml
+ @if [ -n "$(CHART_VERSION)" ]; then \
+ yq -y --indentless -i '.version="$(CHART_VERSION)"' ./charts/$*/Chart.yaml; \
+ yq -y --indentless -i '.dependencies |= map(select(.name == "$*").version="$(CHART_VERSION)")' ./charts/kubevault/Chart.yaml; \
+ yq -y --indentless -i '.dependencies |= map(select(.name == "$*").version="$(CHART_VERSION)")' ./charts/kubevault-opscenter/Chart.yaml; \
+ fi
+ @if [ ! -z "$(APP_VERSION)" ]; then \
+ yq -y --indentless -i '.appVersion="$(APP_VERSION)"' ./charts/$*/Chart.yaml; \
+ fi
+
+fmt: $(BUILD_DIRS)
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \
+ -v $$(pwd)/.go/cache:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ /bin/bash -c " \
+ set -eou pipefail; \
+ REPO_PKG=$(GO_PKG) \
+ ./hack/fmt.sh $(SRC_DIRS) \
+ "
+
+build: $(OUTBIN)
+
+.PHONY: .go/$(OUTBIN)
+$(OUTBIN): $(BUILD_DIRS)
+ @echo "making $(OUTBIN)"
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \
+ -v $$(pwd)/.go/cache:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ /bin/bash -c " \
+ ARCH=$(ARCH) \
+ OS=$(OS) \
+ VERSION=$(VERSION) \
+ version_strategy=$(version_strategy) \
+ git_branch=$(git_branch) \
+ git_tag=$(git_tag) \
+ commit_hash=$(commit_hash) \
+ commit_timestamp=$(commit_timestamp) \
+ ./hack/build.sh \
+ "
+ @echo
+
+.PHONY: test
+test: unit-tests
+
+unit-tests: $(BUILD_DIRS)
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \
+ -v $$(pwd)/.go/cache:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ /bin/bash -c " \
+ ARCH=$(ARCH) \
+ OS=$(OS) \
+ VERSION=$(VERSION) \
+ ./hack/test.sh $(SRC_PKGS) \
+ "
+
+CT_COMMAND ?= lint-and-install
+TEST_CHARTS ?=
+KUBE_NAMESPACE ?=
+
+ifeq ($(CT_COMMAND),lint-and-install)
+ ct_namespace = --namespace=$(KUBE_NAMESPACE)
+endif
+
+ifeq ($(strip $(TEST_CHARTS)),)
+ CT_ARGS = --all $(ct_namespace)
+else
+ CT_ARGS = --charts=$(TEST_CHARTS) $(ct_namespace)
+endif
+
+.PHONY: ct
+ct: $(BUILD_DIRS)
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src \
+ --net=host \
+ -v $(HOME)/.kube:/.kube \
+ -v $(HOME)/.minikube:$(HOME)/.minikube \
+ -v $(HOME)/.credentials:$(HOME)/.credentials \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \
+ -v $$(pwd)/.go/cache:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ --env KUBECONFIG=$(subst $(HOME),,$(KUBECONFIG)) \
+ $(CHART_TEST_IMAGE) \
+ /bin/sh -c " \
+ kubectl delete crds --selector=app.kubernetes.io/name=kubevault; \
+ ct $(CT_COMMAND) --debug --validate-maintainers=false $(CT_ARGS) \
+ "
+
+ADDTL_LINTERS := gofmt,goimports,unparam
+
+.PHONY: lint
+lint: $(BUILD_DIRS)
+ @echo "running linter"
+ @docker run \
+ -i \
+ --rm \
+ -u $$(id -u):$$(id -g) \
+ -v $$(pwd):/src \
+ -w /src \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \
+ -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \
+ -v $$(pwd)/.go/cache:/.cache \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ --env GO111MODULE=on \
+ --env GOFLAGS="-mod=vendor" \
+ $(BUILD_IMAGE) \
+ golangci-lint run --enable $(ADDTL_LINTERS) --timeout=10m --skip-files="generated.*\.go$\" --skip-dirs-use-default --skip-dirs=client,vendor
+
+$(BUILD_DIRS):
+ @mkdir -p $@
+
+.PHONY: dev
+dev: gen fmt
+
+.PHONY: verify
+verify: verify-modules
+
+.PHONY: verify-modules
+verify-modules: gen fmt
+ GO111MODULE=on go mod tidy
+ GO111MODULE=on go mod vendor
+ @if !(git diff --exit-code HEAD); then \
+ echo "go module files are out of date"; exit 1; \
+ fi
+
+.PHONY: verify-gen
+verify-gen: gen fmt
+ @if !(git diff --exit-code HEAD); then \
+ echo "generated files are out of date, run make gen"; exit 1; \
+ fi
+
+.PHONY: add-license
+add-license:
+ @echo "Adding license header"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ ltag -t "./hack/license" --excludes "vendor contrib libbuild" -v
+
+.PHONY: check-license
+check-license:
+ @echo "Checking files for license header"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(BUILD_IMAGE) \
+ ltag -t "./hack/license" --excludes "vendor contrib libbuild" --check -v
+
+.PHONY: ci
+ci: verify check-license lint build unit-tests #cover
+
+.PHONY: clean
+clean:
+ rm -rf .go bin
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2223a59
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# Virtual Secrets installer
+
+Virtual Secrets Helm charts
diff --git a/apis/doc.go b/apis/doc.go
new file mode 100644
index 0000000..0bc5151
--- /dev/null
+++ b/apis/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
diff --git a/apis/installer/fuzzer/fuzzer.go b/apis/installer/fuzzer/fuzzer.go
new file mode 100644
index 0000000..f4ab34b
--- /dev/null
+++ b/apis/installer/fuzzer/fuzzer.go
@@ -0,0 +1,33 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzzer
+
+import (
+ "go.virtual-secrets.dev/installer/apis/installer/v1alpha1"
+
+ fuzz "github.com/google/gofuzz"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// Funcs returns the fuzzer functions for this api group.
+var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
+ return []interface{}{
+ func(s *v1alpha1.VirtualSecrets, c fuzz.Continue) {
+ c.FuzzNoCustom(s) // fuzz self without calling this function again
+ },
+ }
+}
diff --git a/apis/installer/install/install.go b/apis/installer/install/install.go
new file mode 100644
index 0000000..60ca560
--- /dev/null
+++ b/apis/installer/install/install.go
@@ -0,0 +1,30 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package install
+
+import (
+ "go.virtual-secrets.dev/installer/apis/installer/v1alpha1"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+ utilruntime.Must(v1alpha1.AddToScheme(scheme))
+ utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
+}
diff --git a/apis/installer/install/roundtrip_test.go b/apis/installer/install/roundtrip_test.go
new file mode 100644
index 0000000..9071963
--- /dev/null
+++ b/apis/installer/install/roundtrip_test.go
@@ -0,0 +1,27 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package install
+
+import (
+ "testing"
+
+ "k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
+)
+
+func TestRoundTripTypes(t *testing.T) {
+ roundtrip.RoundTripTestForAPIGroup(t, Install, nil)
+}
diff --git a/apis/installer/register.go b/apis/installer/register.go
new file mode 100644
index 0000000..22163c2
--- /dev/null
+++ b/apis/installer/register.go
@@ -0,0 +1,20 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+// GroupName is the group name used in this package
+const GroupName = "installer.virtual-secrets.dev"
diff --git a/apis/installer/v1alpha1/doc.go b/apis/installer/v1alpha1/doc.go
new file mode 100644
index 0000000..8325ba2
--- /dev/null
+++ b/apis/installer/v1alpha1/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=go.virtual-secrets.dev/installer/apis/installer
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=installer.virtual-secrets.dev
+package v1alpha1
diff --git a/apis/installer/v1alpha1/register.go b/apis/installer/v1alpha1/register.go
new file mode 100644
index 0000000..68eef2d
--- /dev/null
+++ b/apis/installer/v1alpha1/register.go
@@ -0,0 +1,66 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "go.virtual-secrets.dev/installer/apis/installer"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var SchemeGroupVersion = schema.GroupVersion{Group: installer.GroupName, Version: "v1alpha1"}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &VirtualSecrets{},
+ &VirtualSecretsList{},
+ )
+
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &metav1.Status{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/apis/installer/v1alpha1/types.go b/apis/installer/v1alpha1/types.go
new file mode 100644
index 0000000..4d2f31f
--- /dev/null
+++ b/apis/installer/v1alpha1/types.go
@@ -0,0 +1,98 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ core "k8s.io/api/core/v1"
+)
+
+type ImageRef struct {
+ Registry string `json:"registry"`
+ Repository string `json:"repository"`
+ Tag string `json:"tag"`
+}
+
+type Container struct {
+ ImageRef `json:",inline"`
+ // Compute Resources required by the container.
+ // +optional
+ Resources core.ResourceRequirements `json:"resources"`
+ // Security options the pod should run with.
+ // +optional
+ SecurityContext *core.SecurityContext `json:"securityContext"`
+}
+
+type ServiceAccountSpec struct {
+ Create bool `json:"create"`
+ //+optional
+ Name *string `json:"name"`
+ //+optional
+ Annotations map[string]string `json:"annotations"`
+}
+
+type EASSpec struct {
+ GroupPriorityMinimum int32 `json:"groupPriorityMinimum"`
+ VersionPriority int32 `json:"versionPriority"`
+ UseKubeapiserverFqdnForAks bool `json:"useKubeapiserverFqdnForAks"`
+ Healthcheck EASHealthcheckSpec `json:"healthcheck"`
+ ServingCerts ServingCerts `json:"servingCerts"`
+}
+
+type EASHealthcheckSpec struct {
+ // +optional
+ Enabled bool `json:"enabled"`
+}
+
+type EASMonitoring struct {
+ Agent MonitoringAgent `json:"agent"`
+ ServiceMonitor *ServiceMonitorLabels `json:"serviceMonitor"`
+}
+
+type WebHookSpec struct {
+ UseKubeapiserverFqdnForAks bool `json:"useKubeapiserverFqdnForAks"`
+ Healthcheck HealthcheckSpec `json:"healthcheck"`
+}
+
+type ServingCerts struct {
+ Generate bool `json:"generate"`
+ // +optional
+ CaCrt string `json:"caCrt"`
+ // +optional
+ ServerCrt string `json:"serverCrt"`
+ // +optional
+ ServerKey string `json:"serverKey"`
+}
+
+type HealthcheckSpec struct {
+ // +optional
+ Enabled bool `json:"enabled"`
+ ProbePort int `json:"probePort"`
+}
+
+// +kubebuilder:validation:Enum=prometheus.io;prometheus.io/operator;prometheus.io/builtin
+type MonitoringAgent string
+
+type Monitoring struct {
+ Agent MonitoringAgent `json:"agent"`
+ BindPort int `json:"bindPort"`
+ ServiceMonitor *ServiceMonitorLabels `json:"serviceMonitor"`
+}
+
+type ServiceMonitorLabels struct {
+ // +optional
+ Labels map[string]string `json:"labels"`
+}
diff --git a/apis/installer/v1alpha1/types_test.go b/apis/installer/v1alpha1/types_test.go
new file mode 100644
index 0000000..e037fd6
--- /dev/null
+++ b/apis/installer/v1alpha1/types_test.go
@@ -0,0 +1,33 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1_test
+
+import (
+ "os"
+ "testing"
+
+ "go.virtual-secrets.dev/installer/apis/installer/v1alpha1"
+
+ schemachecker "kmodules.xyz/schema-checker"
+)
+
+func TestDefaultValues(t *testing.T) {
+ checker := schemachecker.New(os.DirFS("../../.."),
+ schemachecker.TestCase{Obj: v1alpha1.VirtualSecretsSpec{}},
+ )
+ checker.TestAll(t)
+}
diff --git a/apis/installer/v1alpha1/virtual_secrets_types.go b/apis/installer/v1alpha1/virtual_secrets_types.go
new file mode 100644
index 0000000..599f8b2
--- /dev/null
+++ b/apis/installer/v1alpha1/virtual_secrets_types.go
@@ -0,0 +1,90 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ core "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ ResourceKindVirtualSecrets = "VirtualSecrets"
+ ResourceVirtualSecrets = "virtualsecrets"
+ ResourceVirtualSecretss = "virtualsecretss"
+)
+
+// VirtualSecrets defines the schema for the Virtual Secrets Operator installer.
+
+// +genclient
+// +genclient:skipVerbs=updateStatus
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=virtualsecretss,singular=virtualsecrets,categories={kubevault,appscode}
+type VirtualSecrets struct {
+ metav1.TypeMeta `json:",inline,omitempty"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec VirtualSecretsSpec `json:"spec,omitempty"`
+}
+
+// VirtualSecretsSpec is the schema for the Virtual Secrets operator chart values file
+type VirtualSecretsSpec struct {
+ //+optional
+ NameOverride string `json:"nameOverride"`
+ //+optional
+ FullnameOverride string `json:"fullnameOverride"`
+ ReplicaCount int32 `json:"replicaCount"`
+ RegistryFQDN string `json:"registryFQDN"`
+ Image Container `json:"image"`
+ ImagePullPolicy string `json:"imagePullPolicy"`
+ //+optional
+ ImagePullSecrets []string `json:"imagePullSecrets"`
+ //+optional
+ CriticalAddon bool `json:"criticalAddon"`
+ //+optional
+ LogLevel int32 `json:"logLevel"`
+ //+optional
+ Annotations map[string]string `json:"annotations"`
+ //+optional
+ PodAnnotations map[string]string `json:"podAnnotations"`
+ //+optional
+ NodeSelector map[string]string `json:"nodeSelector"`
+ // If specified, the pod's tolerations.
+ // +optional
+ Tolerations []core.Toleration `json:"tolerations"`
+ // If specified, the pod's scheduling constraints
+ // +optional
+ Affinity *core.Affinity `json:"affinity"`
+ // PodSecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ // +optional
+ PodSecurityContext *core.PodSecurityContext `json:"podSecurityContext"`
+ ServiceAccount ServiceAccountSpec `json:"serviceAccount"`
+ Apiserver EASSpec `json:"apiserver"`
+ Monitoring EASMonitoring `json:"monitoring"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VirtualSecretsList is a list of VirtualSecrets objects
+type VirtualSecretsList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // Items is a list of VirtualSecrets CRD objects
+ Items []VirtualSecrets `json:"items,omitempty"`
+}
diff --git a/apis/installer/v1alpha1/zz_generated.deepcopy.go b/apis/installer/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..d1c6ab7
--- /dev/null
+++ b/apis/installer/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,365 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Container) DeepCopyInto(out *Container) {
+ *out = *in
+ out.ImageRef = in.ImageRef
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(v1.SecurityContext)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
+func (in *Container) DeepCopy() *Container {
+ if in == nil {
+ return nil
+ }
+ out := new(Container)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EASHealthcheckSpec) DeepCopyInto(out *EASHealthcheckSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EASHealthcheckSpec.
+func (in *EASHealthcheckSpec) DeepCopy() *EASHealthcheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EASHealthcheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EASMonitoring) DeepCopyInto(out *EASMonitoring) {
+ *out = *in
+ if in.ServiceMonitor != nil {
+ in, out := &in.ServiceMonitor, &out.ServiceMonitor
+ *out = new(ServiceMonitorLabels)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EASMonitoring.
+func (in *EASMonitoring) DeepCopy() *EASMonitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(EASMonitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EASSpec) DeepCopyInto(out *EASSpec) {
+ *out = *in
+ out.Healthcheck = in.Healthcheck
+ out.ServingCerts = in.ServingCerts
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EASSpec.
+func (in *EASSpec) DeepCopy() *EASSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EASSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthcheckSpec) DeepCopyInto(out *HealthcheckSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckSpec.
+func (in *HealthcheckSpec) DeepCopy() *HealthcheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HealthcheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRef) DeepCopyInto(out *ImageRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRef.
+func (in *ImageRef) DeepCopy() *ImageRef {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Monitoring) DeepCopyInto(out *Monitoring) {
+ *out = *in
+ if in.ServiceMonitor != nil {
+ in, out := &in.ServiceMonitor, &out.ServiceMonitor
+ *out = new(ServiceMonitorLabels)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring.
+func (in *Monitoring) DeepCopy() *Monitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(Monitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountSpec) DeepCopyInto(out *ServiceAccountSpec) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSpec.
+func (in *ServiceAccountSpec) DeepCopy() *ServiceAccountSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitorLabels) DeepCopyInto(out *ServiceMonitorLabels) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorLabels.
+func (in *ServiceMonitorLabels) DeepCopy() *ServiceMonitorLabels {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceMonitorLabels)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServingCerts) DeepCopyInto(out *ServingCerts) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingCerts.
+func (in *ServingCerts) DeepCopy() *ServingCerts {
+ if in == nil {
+ return nil
+ }
+ out := new(ServingCerts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualSecrets) DeepCopyInto(out *VirtualSecrets) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualSecrets.
+func (in *VirtualSecrets) DeepCopy() *VirtualSecrets {
+ if in == nil {
+ return nil
+ }
+ out := new(VirtualSecrets)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VirtualSecrets) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualSecretsList) DeepCopyInto(out *VirtualSecretsList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]VirtualSecrets, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualSecretsList.
+func (in *VirtualSecretsList) DeepCopy() *VirtualSecretsList {
+ if in == nil {
+ return nil
+ }
+ out := new(VirtualSecretsList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VirtualSecretsList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualSecretsSpec) DeepCopyInto(out *VirtualSecretsSpec) {
+ *out = *in
+ in.Image.DeepCopyInto(&out.Image)
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.PodAnnotations != nil {
+ in, out := &in.PodAnnotations, &out.PodAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PodSecurityContext != nil {
+ in, out := &in.PodSecurityContext, &out.PodSecurityContext
+ *out = new(v1.PodSecurityContext)
+ (*in).DeepCopyInto(*out)
+ }
+ in.ServiceAccount.DeepCopyInto(&out.ServiceAccount)
+ out.Apiserver = in.Apiserver
+ in.Monitoring.DeepCopyInto(&out.Monitoring)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualSecretsSpec.
+func (in *VirtualSecretsSpec) DeepCopy() *VirtualSecretsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VirtualSecretsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebHookSpec) DeepCopyInto(out *WebHookSpec) {
+ *out = *in
+ out.Healthcheck = in.Healthcheck
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookSpec.
+func (in *WebHookSpec) DeepCopy() *WebHookSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(WebHookSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/catalog/copy-images.sh b/catalog/copy-images.sh
new file mode 100755
index 0000000..7ef5003
--- /dev/null
+++ b/catalog/copy-images.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+if [ -z "${IMAGE_REGISTRY}" ]; then
+ echo "IMAGE_REGISTRY is not set"
+ exit 1
+fi
+
+OS=$(uname -o)
+if [ "${OS}" = "GNU/Linux" ]; then
+ OS=Linux
+fi
+ARCH=$(uname -m)
+if [ "${ARCH}" = "aarch64" ]; then
+ ARCH=arm64
+fi
+curl -sL "https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_${OS}_${ARCH}.tar.gz" >/tmp/go-containerregistry.tar.gz
+tar -zxvf /tmp/go-containerregistry.tar.gz -C /tmp/
+mv /tmp/crane .
+
+CMD="./crane"
+
+$CMD cp --allow-nondistributable-artifacts --insecure ghcr.io/virtual-secrets/virtual-secrets-operator:v0.0.1 $IMAGE_REGISTRY/virtual-secrets/virtual-secrets-operator:v0.0.1
diff --git a/catalog/export-images.sh b/catalog/export-images.sh
new file mode 100755
index 0000000..f7b5e8a
--- /dev/null
+++ b/catalog/export-images.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+mkdir -p images
+
+OS=$(uname -o)
+if [ "${OS}" = "GNU/Linux" ]; then
+ OS=Linux
+fi
+ARCH=$(uname -m)
+if [ "${ARCH}" = "aarch64" ]; then
+ ARCH=arm64
+fi
+curl -sL "https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_${OS}_${ARCH}.tar.gz" >/tmp/go-containerregistry.tar.gz
+tar -zxvf /tmp/go-containerregistry.tar.gz -C /tmp/
+mv /tmp/crane images
+
+CMD="./images/crane"
+
+$CMD pull --allow-nondistributable-artifacts --insecure ghcr.io/virtual-secrets/virtual-secrets-operator:v0.0.1 images/virtual-secrets-virtual-secrets-operator-v0.0.1.tar
+
+tar -czvf images.tar.gz images
diff --git a/catalog/imagelist.yaml b/catalog/imagelist.yaml
new file mode 100644
index 0000000..84b31b3
--- /dev/null
+++ b/catalog/imagelist.yaml
@@ -0,0 +1 @@
+- ghcr.io/virtual-secrets/virtual-secrets-operator:v0.0.1
diff --git a/catalog/import-images.sh b/catalog/import-images.sh
new file mode 100755
index 0000000..5c8f32a
--- /dev/null
+++ b/catalog/import-images.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+if [ -z "${IMAGE_REGISTRY}" ]; then
+ echo "IMAGE_REGISTRY is not set"
+ exit 1
+fi
+
+TARBALL=${1:-}
+tar -zxvf "${TARBALL}"
+
+CMD="./images/crane"
+
+$CMD push --allow-nondistributable-artifacts --insecure images/virtual-secrets-virtual-secrets-operator-v0.0.1.tar $IMAGE_REGISTRY/virtual-secrets/virtual-secrets-operator:v0.0.1
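On the offline side, the archive produced by `export-images.sh` can then be pushed into the local registry; a sketch, again with an illustrative registry hostname:

```bash
export IMAGE_REGISTRY=registry.example.com
./catalog/import-images.sh /tmp/images.tar.gz
```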
diff --git a/catalog/import-into-k3s.sh b/catalog/import-into-k3s.sh
new file mode 100755
index 0000000..cce60e5
--- /dev/null
+++ b/catalog/import-into-k3s.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+if [ -z "${IMAGE_REGISTRY}" ]; then
+ echo "IMAGE_REGISTRY is not set"
+ exit 1
+fi
+
+TARBALL=${1:-}
+tar -zxvf "${TARBALL}"
+
+k3s ctr images import images/virtual-secrets-virtual-secrets-operator-v0.0.1.tar
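For k3s clusters the same archive can be loaded directly into containerd instead of a registry. Note that the script still checks `IMAGE_REGISTRY` even though the k3s import does not use it, so a usage sketch looks like:

```bash
# Run on the k3s node; containerd access typically requires root.
export IMAGE_REGISTRY=registry.example.com
sudo -E ./catalog/import-into-k3s.sh /tmp/images.tar.gz
```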
diff --git a/charts/virtual-secrets/.helmignore b/charts/virtual-secrets/.helmignore
new file mode 100644
index 0000000..e03134c
--- /dev/null
+++ b/charts/virtual-secrets/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.terraform
+*.tfstate*
diff --git a/charts/virtual-secrets/Chart.yaml b/charts/virtual-secrets/Chart.yaml
new file mode 100755
index 0000000..9521dce
--- /dev/null
+++ b/charts/virtual-secrets/Chart.yaml
@@ -0,0 +1,13 @@
+apiVersion: v2
+name: virtual-secrets
+description: A Helm chart for Virtual Secrets by AppsCode
+type: application
+version: v2025.1.1
+appVersion: v0.0.1
+home: https://virtual-secrets.dev
+icon: https://cdn.appscode.com/images/products/kubevault/icons/android-icon-192x192.png
+sources:
+- https://github.com/virtual-secrets
+maintainers:
+- name: appscode
+ email: support@appscode.com
diff --git a/charts/virtual-secrets/README.md b/charts/virtual-secrets/README.md
new file mode 100644
index 0000000..f73b9ce
--- /dev/null
+++ b/charts/virtual-secrets/README.md
@@ -0,0 +1,95 @@
+# Virtual Secrets
+
+[Virtual Secrets by AppsCode](https://github.com/virtual-secrets) - Virtual Secrets by AppsCode
+
+## TL;DR;
+
+```bash
+$ helm repo add appscode https://charts.appscode.com/stable/
+$ helm repo update
+$ helm search repo appscode/virtual-secrets --version=v2025.1.1
+$ helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace --version=v2025.1.1
+```
+
+## Introduction
+
+This chart deploys Virtual Secrets on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.26+
+
+## Installing the Chart
+
+To install/upgrade the chart with the release name `virtual-secrets`:
+
+```bash
+$ helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace --version=v2025.1.1
+```
+
+The command deploys Virtual Secrets on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall the `virtual-secrets` release:
+
+```bash
+$ helm uninstall virtual-secrets -n kubevault
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the `virtual-secrets` chart and their default values.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| nameOverride | Overrides name template | "" |
+| fullnameOverride | Overrides fullname template | "" |
+| replicaCount | Number of operator replicas to create (only 1 is supported) | 1 |
+| registryFQDN | Docker registry fqdn used to pull docker images. Set this to use docker registry hosted at ${registryFQDN}/${registry}/${image} | ghcr.io |
+| image.registry | Docker registry used to pull operator image | virtual-secrets |
+| image.repository | Name of operator container image | virtual-secrets-operator |
+| image.tag | Operator container image tag | "" |
+| image.resources | Compute Resources required by the operator container | {} |
+| image.securityContext | Security options the operator container should run with | {} |
+| imagePullSecrets | Specify an array of imagePullSecrets. Secrets must be manually created in the namespace. Example: `helm template charts/virtual-secrets --set imagePullSecrets[0].name=sec0 --set imagePullSecrets[1].name=sec1` | [] |
+| imagePullPolicy | Container image pull policy | Always |
+| criticalAddon | If true, installs the operator as a critical addon | false |
+| logLevel | Log level for operator | 3 |
+| annotations | Annotations applied to operator deployment | {} |
+| podAnnotations | Annotations passed to operator pod(s) | {} |
+| nodeSelector | Node labels for pod assignment | {} |
+| tolerations | Tolerations for pod assignment | [] |
+| affinity | Affinity rules for pod assignment | {} |
+| podSecurityContext | Security options the operator pod should run with | {"fsGroup":65535} |
+| serviceAccount.create | Specifies whether a service account should be created | true |
+| serviceAccount.annotations | Annotations to add to the service account | {} |
+| serviceAccount.name | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | |
+| apiserver.groupPriorityMinimum | The minimum priority the webhook api group should have. Please see https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64 for more information on proper values of this field. | 10000 |
+| apiserver.versionPriority | The ordering of the webhook api inside of the group. Please see https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70 for more information on proper values of this field. | 15 |
+| apiserver.useKubeapiserverFqdnForAks | If true, uses kube-apiserver FQDN for AKS cluster to workaround https://github.com/Azure/AKS/issues/522 | true |
+| apiserver.healthcheck.enabled | If true, enables the readiness and liveness probes for the operator pod | false |
+| apiserver.servingCerts.generate | If true, generates on install/upgrade the certs that allow the kube-apiserver (and potentially ServiceMonitor) to authenticate operator pods. Otherwise specify certs in `apiserver.servingCerts.{caCrt, serverCrt, serverKey}`. See also: [example terraform](https://github.com/kubeops/installer/blob/master/charts/virtual-secrets/example-terraform.tf) | true |
+| apiserver.servingCerts.caCrt | CA certificate used by the serving certificate of the webhook server | "" |
+| apiserver.servingCerts.serverCrt | Serving certificate used by the webhook server | "" |
+| apiserver.servingCerts.serverKey | Private key for the serving certificate used by the webhook server | "" |
+| monitoring.agent | Name of monitoring agent (one of "prometheus.io", "prometheus.io/operator", "prometheus.io/builtin") | "" |
+| monitoring.serviceMonitor.labels | Specify the labels for ServiceMonitor. Prometheus crd will select ServiceMonitor using these labels. Only usable when monitoring agent is `prometheus.io/operator`. | {} |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade -i`. For example:
+
+```bash
+$ helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace --version=v2025.1.1 --set replicaCount=1
+```
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while
+installing the chart. For example:
+
+```bash
+$ helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace --version=v2025.1.1 --values values.yaml
+```
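For reference, a sketch of what such a `values.yaml` might contain; the keys mirror the configuration table above and the label value is purely illustrative:

```bash
# Write an example values file and install the chart with it.
cat > values.yaml <<'EOF'
replicaCount: 1
logLevel: 5
monitoring:
  agent: prometheus.io/operator
  serviceMonitor:
    labels:
      release: kube-prometheus-stack
EOF
helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace \
  --version=v2025.1.1 --values values.yaml
```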
diff --git a/charts/virtual-secrets/crds/virtual-secrets.dev_secrets.yaml b/charts/virtual-secrets/crds/virtual-secrets.dev_secrets.yaml
new file mode 100644
index 0000000..d501a28
--- /dev/null
+++ b/charts/virtual-secrets/crds/virtual-secrets.dev_secrets.yaml
@@ -0,0 +1,53 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: secrets.virtual-secrets.dev
+spec:
+ group: virtual-secrets.dev
+ names:
+ kind: Secret
+ listKind: SecretList
+ plural: secrets
+ singular: secret
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: Secret holds secret data of a certain type. The total bytes of
+ the values in the Data field must be less than MaxSecretSize bytes.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ data:
+ additionalProperties:
+ format: byte
+ type: string
+ description: Data contains the secret data. Each key must consist of alphanumeric
+ characters, '-', '_' or '.'. The serialized form of the secret data
+ is a base64 encoded string, representing the arbitrary (possibly non-string)
+ data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+ type: object
+ immutable:
+ description: Immutable, if set to true, ensures that data stored in the
+ Secret cannot be updated (only object metadata can be modified). If
+ not set to true, the field can be modified at any time. Defaulted to
+ nil.
+ type: boolean
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ type:
+ description: 'Used to facilitate programmatic handling of secret data.
+ More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types'
+ type: string
+ type: object
+ served: true
+ storage: true
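Given this CRD (group `virtual-secrets.dev`, version `v1alpha1`, kind `Secret`), a minimal example object could look like the sketch below; the name, namespace, and data values are illustrative, and the data values must be base64-encoded as in core `v1` Secrets:

```bash
kubectl apply -f - <<'EOF'
apiVersion: virtual-secrets.dev/v1alpha1
kind: Secret
metadata:
  name: demo-credentials
  namespace: default
data:
  username: YWRtaW4=      # base64 for "admin"
  password: czNjcjN0      # base64 for "s3cr3t"
EOF
```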
diff --git a/charts/virtual-secrets/doc.yaml b/charts/virtual-secrets/doc.yaml
new file mode 100644
index 0000000..dfdb341
--- /dev/null
+++ b/charts/virtual-secrets/doc.yaml
@@ -0,0 +1,18 @@
+project:
+ name: Virtual Secrets by AppsCode
+ shortName: Virtual Secrets
+ url: https://github.com/virtual-secrets
+ description: Virtual Secrets by AppsCode
+ app: Virtual Secrets
+repository:
+ url: https://charts.appscode.com/stable/
+ name: appscode
+chart:
+ name: virtual-secrets
+ values: -- generate from values file --
+ valuesExample: -- generate from values file --
+prerequisites:
+- Kubernetes 1.26+
+release:
+ name: virtual-secrets
+ namespace: kubevault
diff --git a/charts/virtual-secrets/templates/NOTES.txt b/charts/virtual-secrets/templates/NOTES.txt
new file mode 100644
index 0000000..adbf284
--- /dev/null
+++ b/charts/virtual-secrets/templates/NOTES.txt
@@ -0,0 +1,3 @@
+To verify that the Virtual Secrets operator has started, run:
+
+ kubectl get deployment --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "virtual-secrets.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
diff --git a/charts/virtual-secrets/templates/_helpers.tpl b/charts/virtual-secrets/templates/_helpers.tpl
new file mode 100644
index 0000000..f8ebf07
--- /dev/null
+++ b/charts/virtual-secrets/templates/_helpers.tpl
@@ -0,0 +1,100 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "virtual-secrets.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "virtual-secrets.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "virtual-secrets.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "virtual-secrets.labels" -}}
+helm.sh/chart: {{ include "virtual-secrets.chart" . }}
+{{ include "virtual-secrets.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "virtual-secrets.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "virtual-secrets.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "virtual-secrets.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "virtual-secrets.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Returns the docker registry used for the operator image
+*/}}
+{{- define "image.registry" -}}
+{{- list .Values.registryFQDN .Values.image.registry | compact | join "/" }}
+{{- end }}
+
+{{- define "appscode.imagePullSecrets" -}}
+{{- with .Values.imagePullSecrets -}}
+imagePullSecrets:
+{{- toYaml . | nindent 2 }}
+{{- end }}
+{{- end }}
+
+{{/*
+Returns the enabled monitoring agent name
+*/}}
+{{- define "monitoring.agent" -}}
+{{- .Values.monitoring.agent }}
+{{- end }}
+
+{{/*
+Returns whether the ServiceMonitor will be labeled with custom label
+*/}}
+{{- define "monitoring.apply-servicemonitor-label" -}}
+{{- ternary "false" "true" ( empty .Values.monitoring.serviceMonitor.labels ) -}}
+{{- end }}
+
+{{/*
+Returns the ServiceMonitor labels
+*/}}
+{{- define "monitoring.servicemonitor-label" -}}
+{{- range $key, $val := .Values.monitoring.serviceMonitor.labels }}
+{{ $key }}: {{ $val }}
+{{- end }}
+{{- end }}
diff --git a/charts/virtual-secrets/templates/apiregistration.yaml b/charts/virtual-secrets/templates/apiregistration.yaml
new file mode 100644
index 0000000..2dbae41
--- /dev/null
+++ b/charts/virtual-secrets/templates/apiregistration.yaml
@@ -0,0 +1,78 @@
+{{- $caCrt := "" }}
+{{- $serverCrt := "" }}
+{{- $serverKey := "" }}
+{{- if .Values.apiserver.servingCerts.generate }}
+{{- $ca := genCA "ca" 3650 }}
+{{- $cn := include "virtual-secrets.fullname" . -}}
+{{- $altName1 := printf "%s.%s" $cn .Release.Namespace }}
+{{- $altName2 := printf "%s.%s.svc" $cn .Release.Namespace }}
+{{- $server := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }}
+{{- $caCrt = b64enc $ca.Cert }}
+{{- $serverCrt = b64enc $server.Cert }}
+{{- $serverKey = b64enc $server.Key }}
+{{- else }}
+{{- $caCrt = required "Required when apiserver.servingCerts.generate is false" .Values.apiserver.servingCerts.caCrt }}
+{{- $serverCrt = required "Required when apiserver.servingCerts.generate is false" .Values.apiserver.servingCerts.serverCrt }}
+{{- $serverKey = required "Required when apiserver.servingCerts.generate is false" .Values.apiserver.servingCerts.serverKey }}
+{{- end }}
+# register as aggregated apiserver
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ name: v1alpha1.reader.secrets-store.csi.x-k8s.io
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+spec:
+ group: reader.secrets-store.csi.x-k8s.io
+ version: v1alpha1
+ service:
+ namespace: {{ .Release.Namespace }}
+ name: {{ include "virtual-secrets.fullname" . }}
+ caBundle: {{ $caCrt }}
+ groupPriorityMinimum: {{ .Values.apiserver.groupPriorityMinimum }}
+ versionPriority: {{ .Values.apiserver.versionPriority }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}-apiserver-cert
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ $serverCrt }}
+ tls.key: {{ $serverKey }}
+---
+# to read the config for terminating authentication
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}-apiserver-extension-server-authentication-reader
+ namespace: kube-system
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+roleRef:
+ kind: Role
+ apiGroup: rbac.authorization.k8s.io
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: {{ include "virtual-secrets.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+---
+# to delegate authentication and authorization
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}-apiserver-auth-delegator
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+roleRef:
+ kind: ClusterRole
+ apiGroup: rbac.authorization.k8s.io
+ name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+ name: {{ include "virtual-secrets.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
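When `apiserver.servingCerts.generate` is set to false, the template above writes the supplied values verbatim into `caBundle`, `tls.crt`, and `tls.key`, so they must already be base64-encoded PEM. A sketch of such an install (file names are illustrative and assumed to contain base64-encoded certificates):

```bash
helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace \
  --set apiserver.servingCerts.generate=false \
  --set-file apiserver.servingCerts.caCrt=ca.crt.b64 \
  --set-file apiserver.servingCerts.serverCrt=tls.crt.b64 \
  --set-file apiserver.servingCerts.serverKey=tls.key.b64
```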
diff --git a/charts/virtual-secrets/templates/cluster-role-binding.yaml b/charts/virtual-secrets/templates/cluster-role-binding.yaml
new file mode 100644
index 0000000..4f238a8
--- /dev/null
+++ b/charts/virtual-secrets/templates/cluster-role-binding.yaml
@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "virtual-secrets.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "virtual-secrets.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/charts/virtual-secrets/templates/cluster-role.yaml b/charts/virtual-secrets/templates/cluster-role.yaml
new file mode 100644
index 0000000..0502c5d
--- /dev/null
+++ b/charts/virtual-secrets/templates/cluster-role.yaml
@@ -0,0 +1,22 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - reader.secrets-store.csi.x-k8s.io
+ resources:
+ - "*"
+ verbs: ["*"]
+- apiGroups:
+ - secrets-store.csi.x-k8s.io
+ resources:
+ - secretproviderclasses
+ verbs: ["get", "list", "watch"]
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs: ["create"]
diff --git a/charts/virtual-secrets/templates/deployment.yaml b/charts/virtual-secrets/templates/deployment.yaml
new file mode 100644
index 0000000..1b57fb9
--- /dev/null
+++ b/charts/virtual-secrets/templates/deployment.yaml
@@ -0,0 +1,110 @@
+{{- $major := default "0" .Capabilities.KubeVersion.Major | trimSuffix "+" | int64 }}
+{{- $minor := default "0" .Capabilities.KubeVersion.Minor | trimSuffix "+" | int64 }}
+{{- $criticalAddon := and .Values.criticalAddon (or (eq .Release.Namespace "kube-system") (and (ge $major 1) (ge $minor 17))) -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+ {{- with .Values.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "virtual-secrets.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "virtual-secrets.selectorLabels" . | nindent 8 }}
+ annotations:
+ checksum/apiregistration.yaml: {{ include (print $.Template.BasePath "/apiregistration.yaml") . | sha256sum }}
+ {{- if $criticalAddon }}
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ {{- end }}
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- include "appscode.imagePullSecrets" . | nindent 6 }}
+ serviceAccountName: {{ include "virtual-secrets.serviceAccountName" . }}
+ containers:
+ - name: server
+ securityContext:
+ {{- toYaml .Values.image.securityContext | nindent 10 }}
+ image: {{ include "image.registry" . }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ args:
+ - run
+ - --v={{ .Values.logLevel }}
+ - --secure-port=8443
+ - --audit-log-path=-
+ - --tls-cert-file=/var/serving-cert/tls.crt
+ - --tls-private-key-file=/var/serving-cert/tls.key
+ - --use-kubeapiserver-fqdn-for-aks={{ .Values.apiserver.useKubeapiserverFqdnForAks }}
+ ports:
+ - containerPort: 8443
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.apiserver.healthcheck.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8443
+ scheme: HTTPS
+ initialDelaySeconds: 5
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8443
+ scheme: HTTPS
+ initialDelaySeconds: 5
+ {{- end }}
+ resources:
+ {{- toYaml .Values.image.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: /var/serving-cert
+ name: serving-cert
+ - mountPath: /tmp
+ name: tmp-dir
+ volumes:
+ - name: serving-cert
+ secret:
+ defaultMode: 420
+ secretName: {{ include "virtual-secrets.fullname" . }}-apiserver-cert
+ - name: tmp-dir
+ emptyDir: {}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ {{- if or .Values.tolerations $criticalAddon }}
+ tolerations:
+ {{- with .Values.tolerations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if $criticalAddon }}
+ - key: CriticalAddonsOnly
+ operator: Exists
+ {{- end -}}
+ {{- end -}}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if $criticalAddon }}
+ priorityClassName: system-cluster-critical
+ {{- end -}}
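The probes guarded by `apiserver.healthcheck.enabled` are off by default; a sketch of enabling them together with a higher log level for debugging:

```bash
helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace \
  --set apiserver.healthcheck.enabled=true \
  --set logLevel=5
```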
diff --git a/charts/virtual-secrets/templates/service.yaml b/charts/virtual-secrets/templates/service.yaml
new file mode 100644
index 0000000..0f6fb77
--- /dev/null
+++ b/charts/virtual-secrets/templates/service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+{{- if eq "prometheus.io/builtin" ( include "monitoring.agent" . ) }}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/path: "/metrics"
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: "https"
+{{- end }}
+spec:
+ ports:
+ # Port used to expose admission webhook apiserver
+ - name: api
+ port: 443
+ targetPort: 8443
+ selector:
+ {{- include "virtual-secrets.selectorLabels" . | nindent 4 }}
diff --git a/charts/virtual-secrets/templates/serviceaccount.yaml b/charts/virtual-secrets/templates/serviceaccount.yaml
new file mode 100644
index 0000000..b44e0bb
--- /dev/null
+++ b/charts/virtual-secrets/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "virtual-secrets.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "virtual-secrets.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/virtual-secrets/templates/servicemonitor.yaml b/charts/virtual-secrets/templates/servicemonitor.yaml
new file mode 100644
index 0000000..592a8dd
--- /dev/null
+++ b/charts/virtual-secrets/templates/servicemonitor.yaml
@@ -0,0 +1,30 @@
+{{- if eq "prometheus.io/operator" ( include "monitoring.agent" . ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "virtual-secrets.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- if eq "true" ( include "monitoring.apply-servicemonitor-label" . ) }}
+ {{- include "monitoring.servicemonitor-label" . | nindent 4 }}
+ {{- else }}
+ {{- include "virtual-secrets.selectorLabels" . | nindent 4 }}
+ {{- end }}
+spec:
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+ selector:
+ matchLabels:
+ {{- include "virtual-secrets.selectorLabels" . | nindent 6 }}
+ endpoints:
+ - port: api
+ bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+ scheme: https
+ tlsConfig:
+ ca:
+ secret:
+ name: {{ include "virtual-secrets.fullname" . }}-apiserver-cert
+ key: tls.crt
+ serverName: "{{ include "virtual-secrets.fullname" . }}.{{ .Release.Namespace }}.svc"
+{{- end }}
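To have this ServiceMonitor rendered and picked up by a Prometheus operator instance, the agent and selector labels must be set; a sketch where `release: kube-prometheus-stack` is an illustrative label selected by a typical kube-prometheus-stack install:

```bash
helm upgrade -i virtual-secrets appscode/virtual-secrets -n kubevault --create-namespace \
  --set monitoring.agent=prometheus.io/operator \
  --set monitoring.serviceMonitor.labels.release=kube-prometheus-stack
```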
diff --git a/charts/virtual-secrets/templates/user-roles.yaml b/charts/virtual-secrets/templates/user-roles.yaml
new file mode 100644
index 0000000..45f0944
--- /dev/null
+++ b/charts/virtual-secrets/templates/user-roles.yaml
@@ -0,0 +1,17 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kubevault:secrets-store:reader
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ annotations:
+ "helm.sh/hook": post-install,post-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation
+rules:
+- apiGroups:
+ - reader.secrets-store.csi.x-k8s.io
+ resources:
+ - secrets
+ verbs: ["get", "list"]
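Because this ClusterRole aggregates into the built-in admin, edit, and view roles, any subject bound to those roles can read virtual secrets through the aggregated API. A quick check, with an illustrative user name:

```bash
kubectl auth can-i get secrets.reader.secrets-store.csi.x-k8s.io \
  --as=jane -n default
```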
diff --git a/charts/virtual-secrets/values.openapiv3_schema.yaml b/charts/virtual-secrets/values.openapiv3_schema.yaml
new file mode 100644
index 0000000..abbd0e2
--- /dev/null
+++ b/charts/virtual-secrets/values.openapiv3_schema.yaml
@@ -0,0 +1,1464 @@
+properties:
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that
+ satisfy the affinity expressions specified by this field, but it may
+ choose a node that violates one or more of the expressions. The node
+ that is most preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.), compute
+ a sum by iterating through the elements of this field and adding "weight"
+ to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects
+ with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling
+ term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding
+ weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's
+ labels.
+ items:
+ description: A node selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set
+ of values. Valid operators are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator
+ is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. If the operator is Gt or Lt, the
+ values array must have a single element, which will
+ be interpreted as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements by node's
+ fields.
+ items:
+ description: A node selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set
+ of values. Valid operators are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator
+ is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. If the operator is Gt or Lt, the
+ values array must have a single element, which will
+ be interpreted as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are
+ not met at scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are
+ ORed.
+ items:
+ description: A null or empty node selector term matches no objects.
+ The requirements of them are ANDed. The TopologySelectorTerm type
+ implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's
+ labels.
+ items:
+ description: A node selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set
+ of values. Valid operators are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator
+ is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. If the operator is Gt or Lt, the
+ values array must have a single element, which will
+ be interpreted as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements by node's
+ fields.
+ items:
+ description: A node selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set
+ of values. Valid operators are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator
+ is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. If the operator is Gt or Lt, the
+ values array must have a single element, which will
+ be interpreted as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this
+ pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that
+ satisfy the affinity expressions specified by this field, but it may
+ choose a node that violates one or more of the expressions. The node
+ that is most preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.), compute
+ a sum by iterating through the elements of this field and adding "weight"
+ to the sum if the node has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the
+ corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this
+ case pods. If it's null, this PodAffinityTerm matches with
+ no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: MatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are
+ used to lookup values from the incoming pod labels, those
+ key-value labels are merged with `labelSelector` as `key in
+ (value)` to select the group of existing pods which pods will
+ be taken into consideration for the incoming pod's pod (anti)
+ affinity. Keys that don't exist in the incoming pod labels
+ will be ignored. The default value is empty. The same key
+ is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't
+ set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity
+ feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: MismatchLabelKeys is a set of pod label keys to
+ select which pods will be taken into consideration. The keys
+ are used to lookup values from the incoming pod labels, those
+ key-value labels are merged with `labelSelector` as `key notin
+ (value)` to select the group of existing pods which pods will
+ be taken into consideration for the incoming pod's pod (anti)
+ affinity. Keys that don't exist in the incoming pod labels
+ will be ignored. The default value is empty. The same key
+ is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't
+ set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity
+ feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the
+ term applies to. The term is applied to the union of the namespaces
+ selected by this field and the ones listed in the namespaces
+ field. null selector and null or empty namespaces list means
+ "this pod's namespace". An empty selector ({}) matches all
+ namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace
+ names that the term applies to. The term is applied to the
+ union of the namespaces listed in this field and the ones
+ selected by namespaceSelector. null or empty namespaces list
+ and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: This pod should be co-located (affinity) or not
+ co-located (anti-affinity) with the pods matching the labelSelector
+ in the specified namespaces, where co-located is defined as
+ running on a node whose value of the label with key topologyKey
+ matches that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are
+ not met at scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update),
+ the system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to
+ each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be co-located
+ (affinity) or not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the label with key
+ <topologyKey> matches that of any node on which a pod of the set of
+ pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case
+ pods. If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: MatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are used
+ to lookup values from the incoming pod labels, those key-value
+ labels are merged with `labelSelector` as `key in (value)` to
+ select the group of existing pods which pods will be taken into
+ consideration for the incoming pod's pod (anti) affinity. Keys
+ that don't exist in the incoming pod labels will be ignored. The
+ default value is empty. The same key is forbidden to exist in
+ both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot
+ be set when labelSelector isn't set. This is an alpha field and
+ requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: MismatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are used
+ to lookup values from the incoming pod labels, those key-value
+ labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken
+ into consideration for the incoming pod's pod (anti) affinity.
+ Keys that don't exist in the incoming pod labels will be ignored.
+ The default value is empty. The same key is forbidden to exist
+ in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys
+ cannot be set when labelSelector isn't set. This is an alpha field
+ and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term
+ applies to. The term is applied to the union of the namespaces
+ selected by this field and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's
+ namespace". An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names
+ that the term applies to. The term is applied to the union of
+ the namespaces listed in this field and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means
+ "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located
+ (anti-affinity) with the pods matching the labelSelector in the
+ specified namespaces, where co-located is defined as running on
+ a node whose value of the label with key topologyKey matches that
+ of any node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting
+ this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that
+ satisfy the anti-affinity expressions specified by this field, but it
+ may choose a node that violates one or more of the expressions. The
+ node that is most preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding
+ podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the
+ corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this
+ case pods. If it's null, this PodAffinityTerm matches with
+ no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: MatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are
+ used to lookup values from the incoming pod labels, those
+ key-value labels are merged with `labelSelector` as `key in
+ (value)` to select the group of existing pods which pods will
+ be taken into consideration for the incoming pod's pod (anti)
+ affinity. Keys that don't exist in the incoming pod labels
+ will be ignored. The default value is empty. The same key
+ is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't
+ set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity
+ feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: MismatchLabelKeys is a set of pod label keys to
+ select which pods will be taken into consideration. The keys
+ are used to lookup values from the incoming pod labels, those
+ key-value labels are merged with `labelSelector` as `key notin
+ (value)` to select the group of existing pods which pods will
+ be taken into consideration for the incoming pod's pod (anti)
+ affinity. Keys that don't exist in the incoming pod labels
+ will be ignored. The default value is empty. The same key
+ is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't
+ set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity
+ feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the
+ term applies to. The term is applied to the union of the namespaces
+ selected by this field and the ones listed in the namespaces
+ field. null selector and null or empty namespaces list means
+ "this pod's namespace". An empty selector ({}) matches all
+ namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty. This
+ array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace
+ names that the term applies to. The term is applied to the
+ union of the namespaces listed in this field and the ones
+ selected by namespaceSelector. null or empty namespaces list
+ and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: This pod should be co-located (affinity) or not
+ co-located (anti-affinity) with the pods matching the labelSelector
+ in the specified namespaces, where co-located is defined as
+ running on a node whose value of the label with key topologyKey
+ matches that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field
+ are not met at scheduling time, the pod will not be scheduled onto the
+ node. If the anti-affinity requirements specified by this field cease
+ to be met at some point during pod execution (e.g. due to a pod label
+ update), the system may or may not try to eventually evict the pod from
+ its node. When there are multiple elements, the lists of nodes corresponding
+ to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be co-located
+ (affinity) or not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the label with key
+ <topologyKey> matches that of any node on which a pod of the set of
+ pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case
+ pods. If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: MatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are used
+ to lookup values from the incoming pod labels, those key-value
+ labels are merged with `labelSelector` as `key in (value)` to
+ select the group of existing pods which pods will be taken into
+ consideration for the incoming pod's pod (anti) affinity. Keys
+ that don't exist in the incoming pod labels will be ignored. The
+ default value is empty. The same key is forbidden to exist in
+ both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot
+ be set when labelSelector isn't set. This is an alpha field and
+ requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: MismatchLabelKeys is a set of pod label keys to select
+ which pods will be taken into consideration. The keys are used
+ to lookup values from the incoming pod labels, those key-value
+ labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken
+ into consideration for the incoming pod's pod (anti) affinity.
+ Keys that don't exist in the incoming pod labels will be ignored.
+ The default value is empty. The same key is forbidden to exist
+ in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys
+ cannot be set when labelSelector isn't set. This is an alpha field
+ and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term
+ applies to. The term is applied to the union of the namespaces
+ selected by this field and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's
+ namespace". An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: namespaces specifies a static list of namespace names
+ that the term applies to. The term is applied to the union of
+ the namespaces listed in this field and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means
+ "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located
+ (anti-affinity) with the pods matching the labelSelector in the
+ specified namespaces, where co-located is defined as running on
+ a node whose value of the label with key topologyKey matches that
+ of any node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ apiserver:
+ properties:
+ groupPriorityMinimum:
+ format: int32
+ type: integer
+ healthcheck:
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ servingCerts:
+ properties:
+ caCrt:
+ type: string
+ generate:
+ type: boolean
+ serverCrt:
+ type: string
+ serverKey:
+ type: string
+ required:
+ - generate
+ type: object
+ useKubeapiserverFqdnForAks:
+ type: boolean
+ versionPriority:
+ format: int32
+ type: integer
+ required:
+ - groupPriorityMinimum
+ - healthcheck
+ - servingCerts
+ - useKubeapiserverFqdnForAks
+ - versionPriority
+ type: object
+ criticalAddon:
+ type: boolean
+ fullnameOverride:
+ type: string
+ image:
+ properties:
+ registry:
+ type: string
+ repository:
+ type: string
+ resources:
+  description: Compute Resources required by the operator container.
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims,\
+ \ that are used by this container. \n This is an alpha field and requires\
+ \ enabling the DynamicResourceAllocation feature gate. \n This field\
+ \ is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims
+ of the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources
+ allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources
+ required. If Requests is omitted for a container, it defaults to Limits
+ if that is explicitly specified, otherwise to an implementation-defined
+ value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ securityContext:
+  description: Security options the operator container should run with.
+ properties:
+ allowPrivilegeEscalation:
+ description: 'AllowPrivilegeEscalation controls whether a process can
+ gain more privileges than its parent process. This bool directly controls
+ if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation
+ is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.'
+ type: boolean
+ appArmorProfile:
+ description: appArmorProfile is the AppArmor options to use by this container.
+ If set, this profile overrides the pod's appArmorProfile. Note that
+ this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: localhostProfile indicates a profile loaded on the node
+ that should be used. The profile must be preconfigured on the node
+ to work. Must match the loaded name of the profile. Must be set
+ if and only if type is "Localhost".
+ type: string
+ type:
+ description: 'type indicates which kind of AppArmor profile will be
+ applied. Valid options are: Localhost - a profile pre-loaded on
+ the node. RuntimeDefault - the container runtime''s default profile.
+ Unconfined - no AppArmor enforcement.'
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: The capabilities to add/drop when running containers. Defaults
+ to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: Run container in privileged mode. Processes in privileged
+ containers are essentially equivalent to root on the host. Defaults
+ to false. Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults
+ for readonly paths and masked paths. This requires the ProcMountType
+ feature flag to be enabled. Note that this field cannot be set when
+ spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: Whether this container has a read-only root filesystem. Default
+ is false. Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: The GID to run the entrypoint of the container process. Uses
+ runtime default if unset. May also be set in PodSecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence. Note that this field cannot be
+ set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that
+ it does not run as UID 0 (root) and fail to start the container if it
+ does. If unset or false, no such validation will be performed. May also
+ be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container process. Defaults
+ to user specified in image metadata if unspecified. May also be set
+ in PodSecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence. Note that this
+ field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: The SELinux context to be applied to the container. If unspecified,
+ the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext takes
+ precedence. Note that this field cannot be set when spec.os.name is
+ windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: The seccomp options to use by this container. If seccomp
+ options are provided at both the pod & container level, the container
+ options override the pod options. Note that this field cannot be set
+ when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: localhostProfile indicates a profile defined in a file
+ on the node should be used. The profile must be preconfigured on
+ the node to work. Must be a descending path, relative to the kubelet's
+ configured seccomp profile location. Must be set if type is "Localhost".
+ Must NOT be set for any other type.
+ type: string
+ type:
+ description: "type indicates which kind of seccomp profile will be\
+ \ applied. Valid options are: \n Localhost - a profile defined in\
+ \ a file on the node should be used. RuntimeDefault - the container\
+ \ runtime default profile should be used. Unconfined - no profile\
+ \ should be applied."
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence. Note that this field cannot be
+ set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents
+ of the GMSA credential spec named by the GMSACredentialSpecName
+ field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential
+ spec to use.
+ type: string
+ hostProcess:
+ description: HostProcess determines if a container should be run as
+ a 'Host Process' container. All of a Pod's containers must have
+ the same effective HostProcess value (it is not allowed to have
+ a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be
+ set to true.
+ type: boolean
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint of the
+ container process. Defaults to the user specified in image metadata
+ if unspecified. May also be set in PodSecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ tag:
+ type: string
+ required:
+ - registry
+ - repository
+ - tag
+ type: object
+ imagePullPolicy:
+ type: string
+ imagePullSecrets:
+ items:
+ type: string
+ type: array
+ logLevel:
+ format: int32
+ type: integer
+ monitoring:
+ properties:
+ agent:
+ enum:
+ - prometheus.io
+ - prometheus.io/operator
+ - prometheus.io/builtin
+ type: string
+ serviceMonitor:
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ required:
+ - agent
+ - serviceMonitor
+ type: object
+ nameOverride:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ podAnnotations:
+ additionalProperties:
+ type: string
+ type: object
+ podSecurityContext:
+ description: 'PodSecurityContext holds pod-level security attributes and common
+ container settings. Optional: Defaults to empty. See type description for default
+ values of each field.'
+ properties:
+ appArmorProfile:
+ description: appArmorProfile is the AppArmor options to use by the containers
+ in this pod. Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: localhostProfile indicates a profile loaded on the node that
+ should be used. The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile. Must be set if and only if
+ type is "Localhost".
+ type: string
+ type:
+ description: 'type indicates which kind of AppArmor profile will be applied.
+ Valid options are: Localhost - a profile pre-loaded on the node. RuntimeDefault
+ - the container runtime''s default profile. Unconfined - no AppArmor
+ enforcement.'
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: "A special supplemental group that applies to all containers\
+ \ in a pod. Some volume types allow the Kubelet to change the ownership\
+ \ of that volume to be owned by the pod: \n 1. The owning GID will be the\
+ \ FSGroup 2. The setgid bit is set (new files created in the volume will\
+ \ be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n\
+ \ If unset, the Kubelet will not modify the ownership and permissions of\
+ \ any volume. Note that this field cannot be set when spec.os.name is windows."
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: 'fsGroupChangePolicy defines behavior of changing ownership and
+ permission of the volume before being exposed inside Pod. This field will
+ only apply to volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified,
+ "Always" is used. Note that this field cannot be set when spec.os.name is
+ windows.'
+ type: string
+ runAsGroup:
+ description: The GID to run the entrypoint of the container process. Uses
+ runtime default if unset. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container. Note that this field cannot be set
+ when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root user. If
+ true, the Kubelet will validate the image at runtime to ensure that it does
+ not run as UID 0 (root) and fail to start the container if it does. If unset
+ or false, no such validation will be performed. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container process. Defaults
+ to user specified in image metadata if unspecified. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence for that container. Note that this field
+ cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: The SELinux context to be applied to all containers. If unspecified,
+ the container runtime will allocate a random SELinux context for each container. May
+ also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: The seccomp options to use by the containers in this pod. Note
+ that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: localhostProfile indicates a profile defined in a file on
+ the node should be used. The profile must be preconfigured on the node
+ to work. Must be a descending path, relative to the kubelet's configured
+ seccomp profile location. Must be set if type is "Localhost". Must NOT
+ be set for any other type.
+ type: string
+ type:
+ description: "type indicates which kind of seccomp profile will be applied.\
+ \ Valid options are: \n Localhost - a profile defined in a file on the\
+ \ node should be used. RuntimeDefault - the container runtime default\
+ \ profile should be used. Unconfined - no profile should be applied."
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: A list of groups applied to the first process run in each container,
+ in addition to the container's primary GID, the fsGroup (if specified),
+ and group memberships defined in the container image for the uid of the
+ container process. If unspecified, no additional groups are added to any
+ container. Note that group memberships defined in the container image for
+ the uid of the container process are still effective, even if they are not
+ included in this list. Note that this field cannot be set when spec.os.name
+ is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used for the pod. Pods
+ with unsupported sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: The Windows specific settings applied to all containers. If unspecified,
+ the options within a container's SecurityContext will be used. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence. Note that this field cannot be set when spec.os.name is
+ linux.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName
+ field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential
+ spec to use.
+ type: string
+ hostProcess:
+ description: HostProcess determines if a container should be run as a
+ 'Host Process' container. All of a Pod's containers must have the same
+ effective HostProcess value (it is not allowed to have a mix of HostProcess
+ containers and non-HostProcess containers). In addition, if HostProcess
+ is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint of the container
+ process. Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext takes
+ precedence.
+ type: string
+ type: object
+ type: object
+ registryFQDN:
+ type: string
+ replicaCount:
+ format: int32
+ type: integer
+ serviceAccount:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ create:
+ type: boolean
+ name:
+ type: string
+ required:
+ - create
+ type: object
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that
+    matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match
+ all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule
+ and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty
+ means match all taint keys. If the key is empty, operator must be Exists;
+ this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid
+ operators are Exists and Equal. Defaults to Equal. Exists is equivalent
+ to wildcard for value, so that a pod can tolerate all taints of a particular
+ category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration
+ (which must be of effect NoExecute, otherwise this field is ignored) tolerates
+ the taint. By default, it is not set, which means tolerate the taint forever
+ (do not evict). Zero and negative values will be treated as 0 (evict immediately)
+ by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the
+ operator is Exists, the value should be empty, otherwise just a regular
+ string.
+ type: string
+ type: object
+ type: array
+required:
+- apiserver
+- image
+- imagePullPolicy
+- monitoring
+- registryFQDN
+- replicaCount
+- serviceAccount
+type: object
diff --git a/charts/virtual-secrets/values.yaml b/charts/virtual-secrets/values.yaml
new file mode 100644
index 0000000..1d33b74
--- /dev/null
+++ b/charts/virtual-secrets/values.yaml
@@ -0,0 +1,98 @@
+# Default values for virtual-secrets.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# Overrides name template
+nameOverride: ""
+# Overrides fullname template
+fullnameOverride: ""
+# Number of operator replicas to create (only 1 is supported)
+replicaCount: 1
+# Docker registry FQDN used to pull Docker images
+# Set this to use a Docker registry hosted at ${registryFQDN}/${registry}/${image}
+registryFQDN: ghcr.io
+image:
+ # Docker registry used to pull operator image
+ registry: virtual-secrets
+ # Name of operator container image
+ repository: virtual-secrets-operator
+ # Operator container image tag
+ tag: ""
+ # Compute Resources required by the operator container
+ resources: {}
+ # Security options the operator container should run with
+ securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+# Specify an array of imagePullSecrets.
+# Secrets must be manually created in the namespace.
+#
+# Example:
+# helm template charts/virtual-secrets \
+#   --set imagePullSecrets[0]=sec0 \
+#   --set imagePullSecrets[1]=sec1
+imagePullSecrets: []
+# Container image pull policy
+imagePullPolicy: Always
+# If true, installs the virtual-secrets operator as a critical addon
+criticalAddon: false
+# Log level for operator
+logLevel: 3
+# Annotations applied to operator deployment
+annotations: {}
+# Annotations passed to operator pod(s).
+podAnnotations: {}
+# Node labels for pod assignment
+nodeSelector: {}
+# Tolerations for pod assignment
+tolerations: []
+# Affinity rules for pod assignment
+affinity: {}
+# Security options the operator pod should run with.
+podSecurityContext: # +doc-gen:break
+ fsGroup: 65535
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+apiserver:
+ # The minimum priority the webhook api group should have. Please see
+ # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64
+ # for more information on proper values of this field.
+ groupPriorityMinimum: 10000
+ # The ordering of the webhook api inside of the group. Please see
+ # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70
+ # for more information on proper values of this field.
+ versionPriority: 15
+ # If true, uses kube-apiserver FQDN for AKS cluster to workaround https://github.com/Azure/AKS/issues/522 (default true)
+ useKubeapiserverFqdnForAks: true
+ healthcheck:
+ # If true, enables the readiness and liveness probes for the operator pod.
+ enabled: false
+ servingCerts:
+ # If true, generates the certs on install/upgrade that allow the kube-apiserver (and potentially ServiceMonitor)
+ # to authenticate the operator pods. Otherwise specify certs in `apiserver.servingCerts.{caCrt, serverCrt, serverKey}`.
+ # See also: [example terraform](https://github.com/kubeops/installer/blob/master/charts/virtual-secrets/example-terraform.tf)
+ generate: true
+ # CA certificate used by the serving certificate of the webhook server.
+ caCrt: ""
+ # Serving certificate used by the webhook server.
+ serverCrt: ""
+ # Private key for the serving certificate used by the webhook server.
+ serverKey: ""
+monitoring:
+ # Name of monitoring agent (one of "prometheus.io", "prometheus.io/operator", "prometheus.io/builtin")
+ agent: ""
+ serviceMonitor:
+ # Specify the labels for ServiceMonitor.
+ # Prometheus CRD will select ServiceMonitor using these labels.
+ # Only usable when monitoring agent is `prometheus.io/operator`.
+ labels: {}
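+# Illustrative install command (a minimal sketch; the release name, namespace, and
+# label value below are hypothetical, not values shipped with this chart):
+# helm upgrade -i virtual-secrets charts/virtual-secrets -n kubeops --create-namespace \
+#   --set monitoring.agent=prometheus.io/operator \
+#   --set monitoring.serviceMonitor.labels.release=prometheus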
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..cfdd3e4
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,81 @@
+module go.virtual-secrets.dev/installer
+
+go 1.22.1
+
+toolchain go1.23.3
+
+require (
+ github.com/gogo/protobuf v1.3.2
+ github.com/google/gofuzz v1.2.0
+ k8s.io/api v0.30.2
+ k8s.io/apimachinery v0.30.2
+ kmodules.xyz/image-packer v0.0.0-20241031223913-c4d4413efcd1
+ kmodules.xyz/schema-checker v0.4.2
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/docker/cli v27.1.1+incompatible // indirect
+ github.com/docker/distribution v2.8.2+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.7.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/gobeam/stringy v0.0.5 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-containerregistry v0.19.1 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/compress v1.17.2 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/sergi/go-diff v1.2.0 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/vbatts/tar-split v0.11.3 // indirect
+ github.com/yudai/gojsondiff v1.0.0 // indirect
+ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
+ golang.org/x/net v0.29.0 // indirect
+ golang.org/x/oauth2 v0.22.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.25.0 // indirect
+ golang.org/x/term v0.24.0 // indirect
+ golang.org/x/text v0.18.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ gomodules.xyz/go-sh v0.1.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/client-go v0.30.2 // indirect
+ k8s.io/klog/v2 v2.130.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 // indirect
+ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
+ kmodules.xyz/client-go v0.30.31 // indirect
+ kubeops.dev/scanner v0.0.19 // indirect
+ sigs.k8s.io/controller-runtime v0.18.4 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+)
+
+replace (
+ github.com/imdario/mergo => github.com/imdario/mergo v0.3.6
+ sigs.k8s.io/yaml => github.com/kmodules/yaml v1.4.1-0.20231224133800-a4e3f1abb174
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..be0a63d
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,246 @@
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
+github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
+github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobeam/stringy v0.0.5 h1:TvxQGSAqr/qF0SBVxa8Q67WWIo7bCWS0bM101WOd52g=
+github.com/gobeam/stringy v0.0.5/go.mod h1:W3620X9dJHf2FSZF5fRnWekHcHQjwmCz8ZQ2d1qloqE=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
+github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/kmodules/yaml v1.4.1-0.20231224133800-a4e3f1abb174 h1:HpxLHVeA0XsGH4vdKSGQBg2MAtKUS5grY5AIJRe5eZ4=
+github.com/kmodules/yaml v1.4.1-0.20231224133800-a4e3f1abb174/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
+github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
+github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
+github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
+github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
+github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
+github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
+github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/encoding v0.0.8 h1:r2Koq0BJ4HQCCjPTHuti0ItJDXqWJoLRHcm14Ayyp10=
+gomodules.xyz/encoding v0.0.8/go.mod h1:tn9zeeM1vHMxwVIwJQo7gGfJSCOklnU11tZ+3gSbj08=
+gomodules.xyz/go-sh v0.1.0 h1:1BJAuGREh2RhePt7HRrpmjnkbgfpXlCzc42SiyZ5dkc=
+gomodules.xyz/go-sh v0.1.0/go.mod h1:N8IrjNiYppUI/rxENYrWD6FOrSxSyEZnIekPEWM7LP0=
+gomodules.xyz/testing v0.0.4 h1:XGKt4B64mBe7P9kPR0Rz1nCQpWoSpBEFdTGkfU1RLe4=
+gomodules.xyz/testing v0.0.4/go.mod h1:hD6aXtv9eVycPwS01zv+QTl5BrK2DXQgr6bHqnrW+44=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
+k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
+k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
+k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
+k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 h1:T5TEV4a+pEjc+j9Xui3MGGeoDLIN6uzZrx8NYotFMgQ=
+k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+kmodules.xyz/client-go v0.30.31 h1:P+ZslW5QcgMnMoxo1ZMJrNNcwRwA1BiFct1JvcRQEj0=
+kmodules.xyz/client-go v0.30.31/go.mod h1:CAu+JlA8RVGtj6LQHu0Q1w2mnFUajuti49c7T1AvGdM=
+kmodules.xyz/image-packer v0.0.0-20241031223913-c4d4413efcd1 h1:fSBR/KNvqjZEJ+TDLk/1AsGBek975fbpEufVFEDqNp0=
+kmodules.xyz/image-packer v0.0.0-20241031223913-c4d4413efcd1/go.mod h1:BETKW3rlM9I7WeFhZKM81He4Ptpmtvid/Uue+1UyJM8=
+kmodules.xyz/schema-checker v0.4.2 h1:tAbxEtM759GxDM/3sQi/+OKKwUsEF+Ih4KReV3Eb/Xw=
+kmodules.xyz/schema-checker v0.4.2/go.mod h1:N7ETPwfY4LbTHbydC9jjssPmYKKgZ8DC0IuLCU55wwc=
+kubeops.dev/scanner v0.0.19 h1:J8C94k4j3NY3Y8UGHcG4nCZtmpSqPneCmkuvGNUOv4s=
+kubeops.dev/scanner v0.0.19/go.mod h1:FAKPsS+FhrOPwsbwXJOE1vLvIFWBcBcSXpaAztezvu4=
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
+sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
diff --git a/hack/build.sh b/hack/build.sh
new file mode 100755
index 0000000..8b4e22a
--- /dev/null
+++ b/hack/build.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+if [ -z "${OS:-}" ]; then
+ echo "OS must be set"
+ exit 1
+fi
+if [ -z "${ARCH:-}" ]; then
+ echo "ARCH must be set"
+ exit 1
+fi
+if [ -z "${VERSION:-}" ]; then
+ echo "VERSION must be set"
+ exit 1
+fi
+
+export CGO_ENABLED=0
+export GOARCH="${ARCH}"
+export GOOS="${OS}"
+export GO111MODULE=on
+export GOFLAGS="-mod=vendor"
+
+go install \
+ -installsuffix "static" \
+ -ldflags " \
+ -X main.Version=${VERSION} \
+ -X main.VersionStrategy=${version_strategy:-} \
+ -X main.GitTag=${git_tag:-} \
+ -X main.GitBranch=${git_branch:-} \
+ -X main.CommitHash=${commit_hash:-} \
+ -X main.CommitTimestamp=${commit_timestamp:-} \
+ -X main.GoVersion=$(go version | cut -d " " -f 3) \
+ -X main.Compiler=$(go env CC) \
+ -X main.Platform=${OS}/${ARCH} \
+ " \
+ ./...
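The `-X main.*` ldflags above only take effect if the binary's main package declares matching string variables. The repository's actual main package is not part of this diff, so the following is a minimal illustrative sketch of the variable set those flags would populate:

```go
package main

import "fmt"

// Populated at build time by hack/build.sh via
// `go install -ldflags "-X main.Version=... -X main.GitTag=... ..."`.
// Only string variables can be set with -X.
var (
	Version         string
	VersionStrategy string
	GitTag          string
	GitBranch       string
	CommitHash      string
	CommitTimestamp string
	GoVersion       string
	Compiler        string
	Platform        string
)

func main() {
	fmt.Printf("version=%s tag=%s commit=%s platform=%s\n",
		Version, GitTag, CommitHash, Platform)
}
```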
diff --git a/hack/crd-patch.json b/hack/crd-patch.json
new file mode 100644
index 0000000..d7889dc
--- /dev/null
+++ b/hack/crd-patch.json
@@ -0,0 +1,13 @@
+[
+ {
+ "op": "add",
+ "path": "/spec/validation/openAPIV3Schema/properties/metadata/properties",
+ "value": {
+ "name": {
+ "maxLength": 63,
+ "pattern": "^[a-z]([-a-z0-9]*[a-z0-9])?$",
+ "type": "string"
+ }
+ }
+ }
+]
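hack/crd-patch.json is a standard RFC 6902 (JSON Patch) document that adds a length and pattern constraint on `metadata.name` under the CRD's `spec.validation.openAPIV3Schema`. How the patch is applied is not shown in this diff; as a hedged illustration only, the sketch below applies it to a CRD manifest in Go using the third-party github.com/evanphx/json-patch library (an assumed dependency, not one declared here):

```go
package main

import (
	"fmt"
	"os"

	jsonpatch "github.com/evanphx/json-patch"
	"sigs.k8s.io/yaml"
)

func main() {
	// Read a CRD manifest and the patch from hack/crd-patch.json.
	crdYAML, err := os.ReadFile("crd.yaml")
	if err != nil {
		panic(err)
	}
	patchJSON, err := os.ReadFile("hack/crd-patch.json")
	if err != nil {
		panic(err)
	}

	// JSON Patch operates on JSON documents, so convert the YAML manifest first.
	crdJSON, err := yaml.YAMLToJSON(crdYAML)
	if err != nil {
		panic(err)
	}

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}
	patched, err := patch.Apply(crdJSON)
	if err != nil {
		panic(err)
	}

	out, err := yaml.JSONToYAML(patched)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```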
diff --git a/hack/e2e.sh b/hack/e2e.sh
new file mode 100755
index 0000000..03c234e
--- /dev/null
+++ b/hack/e2e.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+export CGO_ENABLED=0
+export GO111MODULE=on
+export GOFLAGS="-mod=vendor"
+
+GINKGO_ARGS=${GINKGO_ARGS:-}
+TEST_ARGS=${TEST_ARGS:-}
+DOCKER_REGISTRY=${DOCKER_REGISTRY:-}
+
+echo "Running e2e tests:"
+cmd="ginkgo -r --v --progress --trace ${GINKGO_ARGS} test -- --docker-registry=${DOCKER_REGISTRY} ${TEST_ARGS}"
+echo "$cmd"
+$cmd
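hack/e2e.sh forwards everything after `--` (the `--docker-registry=...` flag plus `TEST_ARGS`) to the compiled test binary, so the e2e suite must register those flags itself. The suite is not part of this diff; the sketch below is a hypothetical illustration of how a test package could pick up the flag:

```go
package e2e_test

import (
	"flag"
	"os"
	"testing"
)

// dockerRegistry receives the value hack/e2e.sh passes to the test binary as
// `--docker-registry=<registry>` after the `--` separator.
var dockerRegistry = flag.String("docker-registry", "", "registry to pull test images from")

func TestMain(m *testing.M) {
	flag.Parse()
	_ = *dockerRegistry // e.g. used to build image references before running the suite
	os.Exit(m.Run())
}
```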
diff --git a/hack/fmt.sh b/hack/fmt.sh
new file mode 100755
index 0000000..5da91c0
--- /dev/null
+++ b/hack/fmt.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+export CGO_ENABLED=0
+export GO111MODULE=on
+export GOFLAGS="-mod=vendor"
+
+TARGETS="$@"
+
+if [ -n "$TARGETS" ]; then
+ echo "Running reimport.py"
+ cmd="reimport3.py ${REPO_PKG} ${TARGETS}"
+ $cmd
+ echo
+
+ echo "Running goimports:"
+ cmd="goimports -w ${TARGETS}"
+ echo "$cmd"
+ $cmd
+ echo
+
+ echo "Running gofmt:"
+ cmd="gofmt -s -w ${TARGETS}"
+ echo "$cmd"
+ $cmd
+ echo
+fi
+
+echo "Running shfmt:"
+cmd="find . -path ./vendor -prune -o -name '*.sh' -exec shfmt -l -w -ci -i 4 {} \;"
+echo "$cmd"
+eval "$cmd" # xref: https://stackoverflow.com/a/5615748/244009
+echo
diff --git a/hack/import_hacks.go b/hack/import_hacks.go
new file mode 100644
index 0000000..c32839f
--- /dev/null
+++ b/hack/import_hacks.go
@@ -0,0 +1,26 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hack
+
+import (
+ _ "github.com/gogo/protobuf/gogoproto"
+ _ "k8s.io/apimachinery/pkg/api/resource"
+ _ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ _ "k8s.io/apimachinery/pkg/runtime"
+ _ "k8s.io/apimachinery/pkg/runtime/schema"
+ _ "k8s.io/apimachinery/pkg/util/intstr"
+)
diff --git a/hack/kubernetes/kind.yaml b/hack/kubernetes/kind.yaml
new file mode 100644
index 0000000..1bfcdd5
--- /dev/null
+++ b/hack/kubernetes/kind.yaml
@@ -0,0 +1,15 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+ - role: control-plane
+ kubeadmConfigPatches:
+ - |
+ apiVersion: kubeadm.k8s.io/v1beta2
+ kind: ClusterConfiguration
+ metadata:
+ name: config
+ apiServer:
+ extraArgs:
+ enable-admission-plugins: "NodeRestriction,OwnerReferencesPermissionEnforcement"
+ - role: worker
+ - role: worker
diff --git a/hack/license/bash.txt b/hack/license/bash.txt
new file mode 100644
index 0000000..4328d2a
--- /dev/null
+++ b/hack/license/bash.txt
@@ -0,0 +1,13 @@
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hack/license/dockerfile.txt b/hack/license/dockerfile.txt
new file mode 100644
index 0000000..47377ce
--- /dev/null
+++ b/hack/license/dockerfile.txt
@@ -0,0 +1,14 @@
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/hack/license/go.txt b/hack/license/go.txt
new file mode 100644
index 0000000..82f480a
--- /dev/null
+++ b/hack/license/go.txt
@@ -0,0 +1,16 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
diff --git a/hack/license/makefile.txt b/hack/license/makefile.txt
new file mode 100644
index 0000000..47377ce
--- /dev/null
+++ b/hack/license/makefile.txt
@@ -0,0 +1,14 @@
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/hack/scripts/ct.sh b/hack/scripts/ct.sh
new file mode 100755
index 0000000..6f23117
--- /dev/null
+++ b/hack/scripts/ct.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+for dir in charts/*/; do
+ dir=${dir%*/}
+ dir=${dir##*/}
+ num_files=$(find charts/${dir}/templates -type f | wc -l)
+ echo $dir
+ if [ $num_files -le 1 ] ||
+ [[ "$dir" = "kubevault" ]] ||
+ [[ "$dir" = "secrets-store-reader" ]] ||
+ [[ "$dir" =~ "-crds" ]]; then
+ make ct CT_COMMAND=lint TEST_CHARTS=charts/$dir
+ else
+ ns=app-$(date +%s | head -c 6)
+ kubectl create ns $ns
+ kubectl label ns $ns pod-security.kubernetes.io/enforce=restricted
+ make ct TEST_CHARTS=charts/$dir KUBE_NAMESPACE=$ns
+ kubectl delete ns $ns || true
+ fi
+done
diff --git a/hack/scripts/import-crds.sh b/hack/scripts/import-crds.sh
new file mode 100755
index 0000000..92337bd
--- /dev/null
+++ b/hack/scripts/import-crds.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+crd_dir=${1:-}
+
+api_repo_url=https://github.com/virtual-secrets/operator.git
+api_repo_tag=${VIRTUAL_SECRETS_OPERATOR_TAG:-master}
+
+if [ "$#" -ne 1 ]; then
+ if [ "${api_repo_tag}" == "master" ]; then
+ echo "Error: missing path_to_input_crds_directory"
+ echo "Usage: import-crds.sh "
+ exit 1
+ fi
+
+ tmp_dir=$(mktemp -d -t api-XXXXXXXXXX)
+ # always cleanup temp dir
+ # ref: https://opensource.com/article/20/6/bash-trap
+ trap \
+ "{ rm -rf "${tmp_dir}"; }" \
+ SIGINT SIGTERM ERR EXIT
+
+ mkdir -p ${tmp_dir}
+ pushd $tmp_dir
+ git clone $api_repo_url
+ repo_dir=$(ls -b1)
+ cd $repo_dir
+ git checkout $api_repo_tag
+ popd
+ crd_dir=${tmp_dir}/${repo_dir}/crds
+fi
+
+crd-importer \
+ --input=${crd_dir} \
+ --out=./charts/virtual-secrets/crds
diff --git a/hack/scripts/open-pr.sh b/hack/scripts/open-pr.sh
new file mode 100755
index 0000000..679da98
--- /dev/null
+++ b/hack/scripts/open-pr.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+SCRIPT_ROOT=$(realpath $(dirname "${BASH_SOURCE[0]}")/../..)
+SCRIPT_NAME=$(basename "${BASH_SOURCE[0]}")
+pushd $SCRIPT_ROOT
+
+# http://redsymbol.net/articles/bash-exit-traps/
+function cleanup() {
+ popd
+}
+trap cleanup EXIT
+
+git add --all
+if git diff -s --exit-code HEAD; then
+ echo "CRDs are already up-to-date!"
+ exit 0
+fi
+
+pr_branch=${GITHUB_REPOSITORY}@${GITHUB_SHA:0:8}
+git checkout -b $pr_branch
+git commit -a -s -m "Update crds for $pr_branch"
+git push -u origin HEAD
+hub pull-request \
+ --labels automerge \
+ --message "Update crds for $pr_branch" \
+ --message "$(git show -s --format=%b)"
diff --git a/hack/scripts/update-catalog.sh b/hack/scripts/update-catalog.sh
new file mode 100755
index 0000000..e5cca1d
--- /dev/null
+++ b/hack/scripts/update-catalog.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+image-packer list --root-dir=charts --output-dir=catalog
+
+image-packer generate-scripts --insecure --allow-nondistributable-artifacts \
+ --output-dir=catalog \
+ --src=catalog/imagelist.yaml
+
+make add-license fmt
diff --git a/hack/scripts/update-chart-dependencies.sh b/hack/scripts/update-chart-dependencies.sh
new file mode 100755
index 0000000..5531722
--- /dev/null
+++ b/hack/scripts/update-chart-dependencies.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+helm dependency update charts/virtual-secrets
diff --git a/hack/scripts/update-release-tracker.sh b/hack/scripts/update-release-tracker.sh
new file mode 100755
index 0000000..181aebf
--- /dev/null
+++ b/hack/scripts/update-release-tracker.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+# ref: https://gist.github.com/joshisa/297b0bc1ec0dcdda0d1625029711fa24
+parse_url() {
+ proto="$(echo $1 | grep :// | sed -e's,^\(.*://\).*,\1,g')"
+ # remove the protocol
+ url="$(echo ${1/$proto/})"
+
+ IFS='/' # / is set as delimiter
+ read -ra PARTS <<<"$url" # str is read into an array as tokens separated by IFS
+ if [ ${PARTS[0]} != 'github.com' ] || [ ${#PARTS[@]} -ne 5 ]; then
+ echo "failed to parse relase-tracker: $url"
+ exit 1
+ fi
+ export RELEASE_TRACKER_OWNER=${PARTS[1]}
+ export RELEASE_TRACKER_REPO=${PARTS[2]}
+ export RELEASE_TRACKER_PR=${PARTS[4]}
+}
+
+RELEASE_TRACKER=${RELEASE_TRACKER:-}
+GITHUB_BASE_REF=${GITHUB_BASE_REF:-}
+
+while IFS=$': \r\t' read -r -u9 marker v; do
+ case $marker in
+ Release-tracker)
+ export RELEASE_TRACKER=$(echo $v | tr -d '\r\t')
+ ;;
+ Release)
+ export RELEASE=$(echo $v | tr -d '\r\t')
+ ;;
+ esac
+done 9< <(git show -s --format=%b)
+
+[ ! -z "$RELEASE_TRACKER" ] || {
+ echo "Release-tracker url not found."
+ exit 0
+}
+
+[ ! -z "$GITHUB_BASE_REF" ] || {
+ echo "GitHub base ref not found."
+ exit 0
+}
+
+parse_url $RELEASE_TRACKER
+api_url="repos/${RELEASE_TRACKER_OWNER}/${RELEASE_TRACKER_REPO}/issues/${RELEASE_TRACKER_PR}/comments"
+
+case $GITHUB_BASE_REF in
+ master)
+ msg="/ready-to-tag github.com/${GITHUB_REPOSITORY} ${GITHUB_SHA}"
+ ;;
+ *)
+ msg="/cherry-picked github.com/${GITHUB_REPOSITORY} ${GITHUB_BASE_REF} ${GITHUB_SHA}"
+ ;;
+esac
+
+hub api "$api_url" -f body="$msg"
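The parse_url helper above expects a release-tracker URL of the form https://github.com/OWNER/REPO/pull/NUMBER: it strips the protocol, splits on "/", and requires exactly five parts starting with github.com. For reference, the same validation expressed as a standalone Go sketch (not code from this repo; the sample URL is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// parseReleaseTracker mirrors parse_url in hack/scripts/update-release-tracker.sh:
// strip the protocol, split on "/", and expect github.com/<owner>/<repo>/pull/<number>.
func parseReleaseTracker(raw string) (owner, repo, pr string, err error) {
	url := raw
	if i := strings.Index(url, "://"); i >= 0 {
		url = url[i+len("://"):]
	}
	parts := strings.Split(url, "/")
	if len(parts) != 5 || parts[0] != "github.com" {
		return "", "", "", fmt.Errorf("failed to parse release-tracker: %s", url)
	}
	return parts[1], parts[2], parts[4], nil
}

func main() {
	owner, repo, pr, err := parseReleaseTracker("https://github.com/example-org/example-repo/pull/42")
	if err != nil {
		panic(err)
	}
	fmt.Println(owner, repo, pr) // example-org example-repo 42
}
```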
diff --git a/hack/test.sh b/hack/test.sh
new file mode 100755
index 0000000..b407425
--- /dev/null
+++ b/hack/test.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Copyright AppsCode Inc. and Contributors
+#
+# Licensed under the AppsCode Community License 1.0.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eou pipefail
+
+export CGO_ENABLED=1
+export GO111MODULE=on
+export GOFLAGS="-mod=vendor"
+
+TARGETS=$(for d in "$@"; do echo ./$d/...; done)
+
+echo "Running tests:"
+go test -race -installsuffix "static" ${TARGETS}
+echo
diff --git a/lintconf.yaml b/lintconf.yaml
new file mode 100644
index 0000000..860d7cb
--- /dev/null
+++ b/lintconf.yaml
@@ -0,0 +1,43 @@
+# adapted from: https://github.com/helm/chart-testing/raw/master/etc/lintconf.yaml
+---
+rules:
+ braces:
+ min-spaces-inside: 0
+ max-spaces-inside: 0
+ min-spaces-inside-empty: -1
+ max-spaces-inside-empty: -1
+ brackets:
+ min-spaces-inside: 0
+ max-spaces-inside: 0
+ min-spaces-inside-empty: -1
+ max-spaces-inside-empty: -1
+ colons:
+ max-spaces-before: 0
+ max-spaces-after: 1
+ commas:
+ max-spaces-before: 0
+ min-spaces-after: 1
+ max-spaces-after: 1
+ comments:
+ require-starting-space: true
+ min-spaces-from-content: 1
+ document-end: disable
+ document-start: disable # No --- to start a file
+ empty-lines:
+ max: 2
+ max-start: 0
+ max-end: 0
+ hyphens:
+ max-spaces-after: 1
+ indentation:
+ spaces: consistent
+    indent-sequences: whatever # accept sequences indented either with or without extra indentation
+ check-multi-line-strings: false
+ key-duplicates: enable
+ line-length: disable # Lines can be any length
+ new-line-at-end-of-file: enable
+ new-lines:
+ type: unix
+ trailing-spaces: enable
+ truthy:
+ level: warning
diff --git a/tests/check-charts_test.go b/tests/check-charts_test.go
new file mode 100644
index 0000000..e29cc9b
--- /dev/null
+++ b/tests/check-charts_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the AppsCode Community License 1.0.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://github.com/appscode/licenses/raw/1.0.0/AppsCode-Community-1.0.0.md
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "kmodules.xyz/image-packer/pkg/lib"
+ "sigs.k8s.io/yaml"
+)
+
+func Test_checkImages(t *testing.T) {
+ if err := checkImages(); err != nil {
+ t.Errorf("checkImages() error = %v", err)
+ }
+}
+
+// checkImages verifies that every image listed in catalog/imagelist.yaml
+// resolves to a digest in its registry, and reports any that are missing.
+func checkImages() error {
+ dir, err := rootDir()
+ if err != nil {
+ return err
+ }
+
+ images, err := ListImages([]string{
+ filepath.Join(dir, "catalog", "imagelist.yaml"),
+ })
+ if err != nil {
+ return err
+ }
+
+ var missing []string
+ for _, img := range images {
+ _, found, err := lib.ImageDigest(img)
+ if err != nil || !found {
+ missing = append(missing, img)
+ continue
+ }
+ fmt.Println("✔ " + img)
+ }
+
+ if len(missing) > 0 {
+ fmt.Println("----------------------------------------")
+ fmt.Println("Missing Images:")
+ fmt.Println(strings.Join(missing, "\n"))
+ return fmt.Errorf("missing %d images", len(missing))
+ }
+
+ return nil
+}
+
+// ListImages reads the given YAML files, each holding a list of image
+// references, and returns their de-duplicated, sorted union.
+func ListImages(files []string) ([]string, error) {
+ imgs := sets.New[string]()
+ for _, filename := range files {
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var images []string
+ err = yaml.Unmarshal(data, &images)
+ if err != nil {
+ return nil, err
+ }
+ imgs.Insert(images...)
+ }
+ return sets.List(imgs), nil
+}
+
+// rootDir returns the repository root, derived from this source file's location.
+func rootDir() (string, error) {
+ _, file, _, ok := runtime.Caller(1)
+ if !ok {
+ return "", errors.New("failed to locate root dir")
+ }
+
+ return filepath.Clean(filepath.Join(filepath.Dir(file), "..")), nil
+}
diff --git a/vendor/github.com/codegangsta/inject/.gitignore b/vendor/github.com/codegangsta/inject/.gitignore
new file mode 100644
index 0000000..df3df8a
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/.gitignore
@@ -0,0 +1,2 @@
+inject
+inject.test
diff --git a/vendor/github.com/codegangsta/inject/LICENSE b/vendor/github.com/codegangsta/inject/LICENSE
new file mode 100644
index 0000000..eb68a0e
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Jeremy Saenz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/codegangsta/inject/README.md b/vendor/github.com/codegangsta/inject/README.md
new file mode 100644
index 0000000..679abe0
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/README.md
@@ -0,0 +1,92 @@
+# inject
+--
+ import "github.com/codegangsta/inject"
+
+Package inject provides utilities for mapping and injecting dependencies in
+various ways.
+
+Language Translations:
+* [简体中文](translations/README_zh_cn.md)
+
+## Usage
+
+#### func InterfaceOf
+
+```go
+func InterfaceOf(value interface{}) reflect.Type
+```
+InterfaceOf dereferences a pointer to an Interface type. It panics if value is
+not a pointer to an interface.
+
+#### type Applicator
+
+```go
+type Applicator interface {
+ // Maps dependencies in the Type map to each field in the struct
+ // that is tagged with 'inject'. Returns an error if the injection
+ // fails.
+ Apply(interface{}) error
+}
+```
+
+Applicator represents an interface for mapping dependencies to a struct.
+
+#### type Injector
+
+```go
+type Injector interface {
+ Applicator
+ Invoker
+ TypeMapper
+ // SetParent sets the parent of the injector. If the injector cannot find a
+ // dependency in its Type map it will check its parent before returning an
+ // error.
+ SetParent(Injector)
+}
+```
+
+Injector represents an interface for mapping and injecting dependencies into
+structs and function arguments.
+
+#### func New
+
+```go
+func New() Injector
+```
+New returns a new Injector.
+
+#### type Invoker
+
+```go
+type Invoker interface {
+ // Invoke attempts to call the interface{} provided as a function,
+ // providing dependencies for function arguments based on Type. Returns
+ // a slice of reflect.Value representing the returned values of the function.
+ // Returns an error if the injection fails.
+ Invoke(interface{}) ([]reflect.Value, error)
+}
+```
+
+Invoker represents an interface for calling functions via reflection.
+
+#### type TypeMapper
+
+```go
+type TypeMapper interface {
+ // Maps the interface{} value based on its immediate type from reflect.TypeOf.
+ Map(interface{}) TypeMapper
+ // Maps the interface{} value based on the pointer of an Interface provided.
+ // This is really only useful for mapping a value as an interface, as interfaces
+ // cannot at this time be referenced directly without a pointer.
+ MapTo(interface{}, interface{}) TypeMapper
+ // Provides a possibility to directly insert a mapping based on type and value.
+ // This makes it possible to directly map type arguments not possible to instantiate
+ // with reflect like unidirectional channels.
+ Set(reflect.Type, reflect.Value) TypeMapper
+ // Returns the Value that is mapped to the current type. Returns a zeroed Value if
+ // the Type has not been mapped.
+ Get(reflect.Type) reflect.Value
+}
+```
+
+TypeMapper represents an interface for mapping interface{} values based on type.
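The README documents the Injector API but ships no end-to-end example. Below is a small usage sketch based only on the interfaces described above; the Greeter type is hypothetical and not part of the vendored package:

```go
package main

import (
	"fmt"

	"github.com/codegangsta/inject"
)

// Greeter is an example interface used to demonstrate MapTo.
type Greeter interface {
	Greet() string
}

type english struct{}

func (english) Greet() string { return "hello" }

func main() {
	inj := inject.New()

	// Map a concrete value by its own type (string).
	inj.Map("world")

	// Map a value as an interface; the pointer-to-interface trick is needed
	// because interfaces cannot be referenced directly.
	inj.MapTo(english{}, (*Greeter)(nil))

	// Invoke resolves each function argument from the type map.
	if _, err := inj.Invoke(func(g Greeter, name string) {
		fmt.Println(g.Greet(), name) // hello world
	}); err != nil {
		panic(err)
	}
}
```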
diff --git a/vendor/github.com/codegangsta/inject/inject.go b/vendor/github.com/codegangsta/inject/inject.go
new file mode 100644
index 0000000..3ff713c
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/inject.go
@@ -0,0 +1,187 @@
+// Package inject provides utilities for mapping and injecting dependencies in various ways.
+package inject
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Injector represents an interface for mapping and injecting dependencies into structs
+// and function arguments.
+type Injector interface {
+ Applicator
+ Invoker
+ TypeMapper
+ // SetParent sets the parent of the injector. If the injector cannot find a
+ // dependency in its Type map it will check its parent before returning an
+ // error.
+ SetParent(Injector)
+}
+
+// Applicator represents an interface for mapping dependencies to a struct.
+type Applicator interface {
+ // Maps dependencies in the Type map to each field in the struct
+ // that is tagged with 'inject'. Returns an error if the injection
+ // fails.
+ Apply(interface{}) error
+}
+
+// Invoker represents an interface for calling functions via reflection.
+type Invoker interface {
+ // Invoke attempts to call the interface{} provided as a function,
+ // providing dependencies for function arguments based on Type. Returns
+ // a slice of reflect.Value representing the returned values of the function.
+ // Returns an error if the injection fails.
+ Invoke(interface{}) ([]reflect.Value, error)
+}
+
+// TypeMapper represents an interface for mapping interface{} values based on type.
+type TypeMapper interface {
+ // Maps the interface{} value based on its immediate type from reflect.TypeOf.
+ Map(interface{}) TypeMapper
+ // Maps the interface{} value based on the pointer of an Interface provided.
+ // This is really only useful for mapping a value as an interface, as interfaces
+ // cannot at this time be referenced directly without a pointer.
+ MapTo(interface{}, interface{}) TypeMapper
+ // Provides a possibility to directly insert a mapping based on type and value.
+ // This makes it possible to directly map type arguments not possible to instantiate
+ // with reflect like unidirectional channels.
+ Set(reflect.Type, reflect.Value) TypeMapper
+ // Returns the Value that is mapped to the current type. Returns a zeroed Value if
+ // the Type has not been mapped.
+ Get(reflect.Type) reflect.Value
+}
+
+type injector struct {
+ values map[reflect.Type]reflect.Value
+ parent Injector
+}
+
+// InterfaceOf dereferences a pointer to an Interface type.
+// It panics if value is not a pointer to an interface.
+func InterfaceOf(value interface{}) reflect.Type {
+ t := reflect.TypeOf(value)
+
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Interface {
+ panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
+ }
+
+ return t
+}
+
+// New returns a new Injector.
+func New() Injector {
+ return &injector{
+ values: make(map[reflect.Type]reflect.Value),
+ }
+}
+
+// Invoke attempts to call the interface{} provided as a function,
+// providing dependencies for function arguments based on Type.
+// Returns a slice of reflect.Value representing the returned values of the function.
+// Returns an error if the injection fails.
+// It panics if f is not a function
+func (inj *injector) Invoke(f interface{}) ([]reflect.Value, error) {
+ t := reflect.TypeOf(f)
+
+ var in = make([]reflect.Value, t.NumIn()) //Panic if t is not kind of Func
+ for i := 0; i < t.NumIn(); i++ {
+ argType := t.In(i)
+ val := inj.Get(argType)
+ if !val.IsValid() {
+ return nil, fmt.Errorf("Value not found for type %v", argType)
+ }
+
+ in[i] = val
+ }
+
+ return reflect.ValueOf(f).Call(in), nil
+}
+
+// Maps dependencies in the Type map to each field in the struct
+// that is tagged with 'inject'.
+// Returns an error if the injection fails.
+func (inj *injector) Apply(val interface{}) error {
+ v := reflect.ValueOf(val)
+
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ if v.Kind() != reflect.Struct {
+ return nil // Should not panic here ?
+ }
+
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ structField := t.Field(i)
+ if f.CanSet() && (structField.Tag == "inject" || structField.Tag.Get("inject") != "") {
+ ft := f.Type()
+ v := inj.Get(ft)
+ if !v.IsValid() {
+ return fmt.Errorf("Value not found for type %v", ft)
+ }
+
+ f.Set(v)
+ }
+
+ }
+
+ return nil
+}
+
+// Maps the concrete value of val to its dynamic type using reflect.TypeOf,
+// It returns the TypeMapper registered in.
+func (i *injector) Map(val interface{}) TypeMapper {
+ i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
+ return i
+}
+
+func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
+ i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
+ return i
+}
+
+// Maps the given reflect.Type to the given reflect.Value and returns
+// the TypeMapper the mapping has been registered in.
+func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
+ i.values[typ] = val
+ return i
+}
+
+func (i *injector) Get(t reflect.Type) reflect.Value {
+ val := i.values[t]
+
+ if val.IsValid() {
+ return val
+ }
+
+ // no concrete types found, try to find implementors
+ // if t is an interface
+ if t.Kind() == reflect.Interface {
+ for k, v := range i.values {
+ if k.Implements(t) {
+ val = v
+ break
+ }
+ }
+ }
+
+ // Still no type found, try to look it up on the parent
+ if !val.IsValid() && i.parent != nil {
+ val = i.parent.Get(t)
+ }
+
+ return val
+
+}
+
+func (i *injector) SetParent(parent Injector) {
+ i.parent = parent
+}
diff --git a/vendor/github.com/codegangsta/inject/update_readme.sh b/vendor/github.com/codegangsta/inject/update_readme.sh
new file mode 100644
index 0000000..497f9a5
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/update_readme.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+go get github.com/robertkrimen/godocdown/godocdown
+godocdown > README.md
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 0000000..b071cea
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,690 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+)
+
+type options struct {
+ chunkSize int
+ compressionLevel int
+ prioritizedFiles []string
+ missedPrioritizedFiles *[]string
+ compression Compression
+ ctx context.Context
+ minChunkSize int
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+ return func(o *options) error {
+ o.chunkSize = chunkSize
+ return nil
+ }
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+ return func(o *options) error {
+ o.compressionLevel = level
+ return nil
+ }
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/"
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of prioritized files specified by WithPrioritizedFiles option aren't found
+// in the input tar. Instead, this records all missed file names to the passed
+// slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+ return func(o *options) error {
+ o.compression = compression
+ return nil
+ }
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data
+// that must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files
+// and it hopefully leads to a smaller result blob.
+// NOTE: This adds a TOC property that old reader doesn't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+ return func(o *options) error {
+ o.minChunkSize = minChunkSize
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If some prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several
+// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+ var opts options
+ opts.compressionLevel = gzip.BestCompression // BestCompression by default
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+ if opts.compression == nil {
+ opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+ }
+ layerFiles := newTempFiles()
+ ctx := opts.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-done:
+ // nop
+ case <-ctx.Done():
+ layerFiles.CleanupAll()
+ }
+ }()
+ defer func() {
+ if rErr != nil {
+ if err := layerFiles.CleanupAll(); err != nil {
+ rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+ }
+ }
+ if cErr := ctx.Err(); cErr != nil {
+ rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+ }
+ }()
+ tarBlob, err := decompressBlob(tarBlob, layerFiles)
+ if err != nil {
+ return nil, err
+ }
+ entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+ if err != nil {
+ return nil, err
+ }
+ var tarParts [][]*entry
+ if opts.minChunkSize > 0 {
+ // Each entry needs to know the size of the current gzip stream so they
+ // cannot be processed in parallel.
+ tarParts = [][]*entry{entries}
+ } else {
+ tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+ }
+ writers := make([]*Writer, len(tarParts))
+ payloads := make([]*os.File, len(tarParts))
+ var mu sync.Mutex
+ var eg errgroup.Group
+ for i, parts := range tarParts {
+ i, parts := i, parts
+ // builds verifiable stargz sub-blobs
+ eg.Go(func() error {
+ esgzFile, err := layerFiles.TempFile("", "esgzdata")
+ if err != nil {
+ return err
+ }
+ sw := NewWriterWithCompressor(esgzFile, opts.compression)
+ sw.ChunkSize = opts.chunkSize
+ sw.MinChunkSize = opts.minChunkSize
+ if sw.needsOpenGzEntries == nil {
+ sw.needsOpenGzEntries = make(map[string]struct{})
+ }
+ for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+ sw.needsOpenGzEntries[f] = struct{}{}
+ }
+ if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+ return err
+ }
+ mu.Lock()
+ writers[i] = sw
+ payloads[i] = esgzFile
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ rErr = err
+ return nil, err
+ }
+ tocAndFooter, tocDgst, err := closeWithCombine(writers...)
+ if err != nil {
+ rErr = err
+ return nil, err
+ }
+ var rs []io.Reader
+ for _, p := range payloads {
+ fs, err := fileSectionReader(p)
+ if err != nil {
+ return nil, err
+ }
+ rs = append(rs, fs)
+ }
+ diffID := digest.Canonical.Digester()
+ pr, pw := io.Pipe()
+ go func() {
+ r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer r.Close()
+ if _, err := io.Copy(diffID.Hash(), r); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return &Blob{
+ ReadCloser: readCloser{
+ Reader: pr,
+ closeFunc: layerFiles.CleanupAll,
+ },
+ tocDigest: tocDgst,
+ diffID: diffID,
+ }, nil
+}
+
+// closeWithCombine takes unclosed Writers and close them. This also returns the
+// toc that combined all Writers into.
+// Writers doesn't write TOC and footer to the underlying writers so they can be
+// combined into a single eStargz and tocAndFooter returned by this function can
+// be appended at the tail of that combined blob.
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(JTOC)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+ buf := new(bytes.Buffer)
+ tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, tocDigest, nil
+}
+
+// divideEntries divides passed entries to the parts at least the number specified by the
+// argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some of prioritized files are specified, the list starts from these
+// files with keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to sort: %w", err)
+ }
+
+ // Sort the tar file respecting to the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+	// Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ defer tw.Close()
+ for _, entry := range entries {
+ if err := tw.WriteHeader(entry.header); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ return
+ }
+ if _, err := io.Copy(tw, entry.payload); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ return
+ }
+ }
+ pw.Close()
+ }()
+ return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+ tf := &tarFile{}
+ pw, err := newCountReadSeeker(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to make position watcher: %w", err)
+ }
+ tr := tar.NewReader(pw)
+
+ // Walk through all nodes.
+ for {
+ // Fetch and parse next header.
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return nil, fmt.Errorf("failed to parse tar file, %w", err)
+ }
+ }
+ switch cleanEntryName(h.Name) {
+ case PrefetchLandmark, NoPrefetchLandmark:
+ // Ignore existing landmark
+ continue
+ }
+
+ // Add entry. If it already exists, replace it.
+ if _, ok := tf.get(h.Name); ok {
+ tf.remove(h.Name)
+ }
+ tf.add(&entry{
+ header: h,
+ payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+ })
+ }
+
+ return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+ name = cleanEntryName(name)
+ if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok {
+ // entry of the root directory exists. we should move it as well.
+ // this case will occur if tar entries are prefixed with "./", "/", etc.
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+ }
+
+ _, okIn := in.get(name)
+ _, okOut := out.get(name)
+ if !okIn && !okOut {
+ return fmt.Errorf("file: %q: %w", name, errNotFound)
+ }
+
+ parent, _ := path.Split(strings.TrimSuffix(name, "/"))
+ if err := moveRec(parent, in, out); err != nil {
+ return err
+ }
+ if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
+ if err := moveRec(e.header.Linkname, in, out); err != nil {
+ return err
+ }
+ }
+ if e, ok := in.get(name); ok {
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+}
+
+type entry struct {
+ header *tar.Header
+ payload io.ReadSeeker
+}
+
+type tarFile struct {
+ index map[string]*entry
+ stream []*entry
+}
+
+func (f *tarFile) add(e *entry) {
+ if f.index == nil {
+ f.index = make(map[string]*entry)
+ }
+ f.index[cleanEntryName(e.header.Name)] = e
+ f.stream = append(f.stream, e)
+}
+
+func (f *tarFile) remove(name string) {
+ name = cleanEntryName(name)
+ if f.index != nil {
+ delete(f.index, name)
+ }
+ var filtered []*entry
+ for _, e := range f.stream {
+ if cleanEntryName(e.header.Name) == name {
+ continue
+ }
+ filtered = append(filtered, e)
+ }
+ f.stream = filtered
+}
+
+func (f *tarFile) get(name string) (e *entry, ok bool) {
+ if f.index == nil {
+ return nil, false
+ }
+ e, ok = f.index[cleanEntryName(name)]
+ return
+}
+
+func (f *tarFile) dump() []*entry {
+ return f.stream
+}
+
+type readCloser struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (rc readCloser) Close() error {
+ return rc.closeFunc()
+}
+
+func fileSectionReader(file *os.File) (*io.SectionReader, error) {
+ info, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(file, 0, info.Size()), nil
+}
+
+func newTempFiles() *tempFiles {
+ return &tempFiles{}
+}
+
+type tempFiles struct {
+ files []*os.File
+ filesMu sync.Mutex
+ cleanupOnce sync.Once
+}
+
+func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
+ f, err := os.CreateTemp(dir, pattern)
+ if err != nil {
+ return nil, err
+ }
+ tf.filesMu.Lock()
+ tf.files = append(tf.files, f)
+ tf.filesMu.Unlock()
+ return f, nil
+}
+
+func (tf *tempFiles) CleanupAll() (err error) {
+ tf.cleanupOnce.Do(func() {
+ err = tf.cleanupAll()
+ })
+ return
+}
+
+func (tf *tempFiles) cleanupAll() error {
+ tf.filesMu.Lock()
+ defer tf.filesMu.Unlock()
+ var allErr []error
+ for _, f := range tf.files {
+ if err := f.Close(); err != nil {
+ allErr = append(allErr, err)
+ }
+ if err := os.Remove(f.Name()); err != nil {
+ allErr = append(allErr, err)
+ }
+ }
+ tf.files = nil
+ return errorutil.Aggregate(allErr)
+}
+
+func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
+ pos := int64(0)
+ return &countReadSeeker{r: r, cPos: &pos}, nil
+}
+
+type countReadSeeker struct {
+ r io.ReaderAt
+ cPos *int64
+
+ mu sync.Mutex
+}
+
+func (cr *countReadSeeker) Read(p []byte) (int, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ n, err := cr.r.ReadAt(p, *cr.cPos)
+ if err == nil {
+ *cr.cPos += int64(n)
+ }
+ return n, err
+}
+
+func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ switch whence {
+ default:
+ return 0, fmt.Errorf("Unknown whence: %v", whence)
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += *cr.cPos
+ case io.SeekEnd:
+ return 0, fmt.Errorf("Unsupported whence: %v", whence)
+ }
+
+ if offset < 0 {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ *cr.cPos = offset
+ return offset, nil
+}
+
+func (cr *countReadSeeker) currentPos() int64 {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ return *cr.cPos
+}
+
+func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+ if org.Size() < 4 {
+ return org, nil
+ }
+ src := make([]byte, 4)
+ if _, err := org.Read(src); err != nil && err != io.EOF {
+ return nil, err
+ }
+ var dR io.Reader
+ if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
+ // gzip
+ dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dgR.Close()
+ dR = io.Reader(dgR)
+ } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
+ // zstd
+ dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dzR.Close()
+ dR = io.Reader(dzR)
+ } else {
+ // uncompressed
+ return io.NewSectionReader(org, 0, org.Size()), nil
+ }
+ b, err := tmp.TempFile("", "uncompresseddata")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(b, dR); err != nil {
+ return nil, err
+ }
+ return fileSectionReader(b)
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
new file mode 100644
index 0000000..6de78b0
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errorutil
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Aggregate combines a list of errors into a single new error.
+func Aggregate(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ points := make([]string, len(errs)+1)
+ points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+ for i, err := range errs {
+ points[i+1] = fmt.Sprintf("* %s", err)
+ }
+ return errors.New(strings.Join(points, "\n\t"))
+ }
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 0000000..f4d5546
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1223 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+ sr *io.SectionReader
+ toc *JTOC
+ tocDigest digest.Digest
+
+ // m stores all non-chunk entries, keyed by name.
+ m map[string]*TOCEntry
+
+ // chunks stores all TOCEntry values for regular files that
+ // are split up. For a file with a single chunk, it's only
+ // stored in m.
+ chunks map[string][]*TOCEntry
+
+ decompressor Decompressor
+}
+
+type openOpts struct {
+ tocOffset int64
+ decompressors []Decompressor
+ telemetry *Telemetry
+}
+
+// OpenOption is an option used during opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+ return func(o *openOpts) error {
+ o.tocOffset = tocOffset
+ return nil
+ }
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+ return func(o *openOpts) error {
+ o.decompressors = decompressors
+ return nil
+ }
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+ return func(o *openOpts) error {
+ o.telemetry = telemetry
+ return nil
+ }
+}
+
+// MeasureLatencyHook is a func which takes the start time and records the elapsed time since then
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you can record
+// the latency metrics of the respective steps of the estargz open operation. Pass it to Open via the WithTelemetry option.
+type Telemetry struct {
+ GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+ GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+ DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
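+
+// A hook can be as simple as the following sketch (illustrative only, not part
+// of the upstream code; the logger choice is an assumption):
+//
+//	hook := func(start time.Time) { log.Printf("footer: %v", time.Since(start)) }
+//	tm := &Telemetry{GetFooterLatency: hook}
+//	// pass tm to Open via WithTelemetry(tm)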
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as a path relative to the root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+ var opts openOpts
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+ decompressors := append(gzipCompressors, opts.decompressors...)
+
+ // Determine the size to fetch. Try to fetch as many bytes as possible.
+ fetchSize := maxFooterSize(sr.Size(), decompressors...)
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+ if maybeTocOffset > sr.Size() {
+ return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+ }
+ fetchSize = sr.Size() - maybeTocOffset
+ }
+
+ start := time.Now() // before getting layer footer
+ footer := make([]byte, fetchSize)
+ if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+ return nil, fmt.Errorf("error reading footer: %v", err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+ opts.telemetry.GetFooterLatency(start)
+ }
+
+ var allErr []error
+ var found bool
+ var r *Reader
+ for _, d := range decompressors {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ maybeTocBytes := footer[:fOffset]
+ _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+ if err != nil {
+ allErr = append(allErr, err)
+ continue
+ }
+ if tocOffset >= 0 && tocSize <= 0 {
+ tocSize = sr.Size() - tocOffset - fSize
+ }
+ if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
+ maybeTocBytes = maybeTocBytes[:tocSize]
+ }
+ r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+ if err == nil {
+ found = true
+ break
+ }
+ allErr = append(allErr, err)
+ }
+ if !found {
+ return nil, errorutil.Aggregate(allErr)
+ }
+ if err := r.initFields(); err != nil {
+ return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+ }
+ return r, nil
+}
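+
+// A typical call looks like the following sketch (illustrative only, not part
+// of the upstream code; blob and blobSize are assumed to be provided by the
+// caller, with blob implementing io.ReaderAt):
+//
+//	sr := io.NewSectionReader(blob, 0, blobSize)
+//	r, err := Open(sr) // gzip-based decompressors are tried by default
+//	if err != nil {
+//		// handle error
+//	}
+//	ent, ok := r.Lookup("etc/passwd") // entry names are relative to the root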
+
+// OpenFooter extracts and parses the footer from the given blob.
+// It only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+ if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+ return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+ }
+ var footer [FooterSize]byte
+ if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+ return 0, 0, fmt.Errorf("error reading footer: %v", err)
+ }
+ var allErr []error
+ for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ _, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+ if err == nil {
+ return tocOffset, fSize, err
+ }
+ allErr = append(allErr, err)
+ }
+ return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// It populates the Reader's unexported fields and fills in TOCEntry
+// fields that were implicit in the JSON.
+func (r *Reader) initFields() error {
+ r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+ r.chunks = make(map[string][]*TOCEntry)
+ var lastPath string
+ uname := map[int]string{}
+ gname := map[int]string{}
+ var lastRegEnt *TOCEntry
+ var chunkTopIndex int
+ for i, ent := range r.toc.Entries {
+ ent.Name = cleanEntryName(ent.Name)
+ switch ent.Type {
+ case "reg", "chunk":
+ if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+ chunkTopIndex = i
+ }
+ ent.chunkTopIndex = chunkTopIndex
+ }
+ if ent.Type == "reg" {
+ lastRegEnt = ent
+ }
+ if ent.Type == "chunk" {
+ ent.Name = lastPath
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ if ent.ChunkSize == 0 && lastRegEnt != nil {
+ ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+ }
+ } else {
+ lastPath = ent.Name
+
+ if ent.Uname != "" {
+ uname[ent.UID] = ent.Uname
+ } else {
+ ent.Uname = uname[ent.UID]
+ }
+ if ent.Gname != "" {
+ gname[ent.GID] = ent.Gname
+ } else {
+ ent.Gname = gname[ent.GID]
+ }
+
+ ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+ if ent.Type == "dir" {
+ ent.NumLink++ // Parent dir links to this directory
+ }
+ r.m[ent.Name] = ent
+ }
+ if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+ r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ }
+ if ent.ChunkSize == 0 && ent.Size != 0 {
+ ent.ChunkSize = ent.Size
+ }
+ }
+
+ // Populate children, add implicit directories:
+ for _, ent := range r.toc.Entries {
+ if ent.Type == "chunk" {
+ continue
+ }
+ // add "foo/":
+ // add "foo" child to "" (creating "" if necessary)
+ //
+ // add "foo/bar/":
+ // add "bar" child to "foo" (creating "foo" if necessary)
+ //
+ // add "foo/bar.txt":
+ // add "bar.txt" child to "foo" (creating "foo" if necessary)
+ //
+ // add "a/b/c/d/e/f.txt":
+ // create "a/b/c/d/e" node
+ // add "f.txt" child to "e"
+
+ name := ent.Name
+ pdirName := parentDir(name)
+ if name == pdirName {
+ // This entry and its parent are the same.
+ // Ignore this to avoid an infinite loop of references.
+ // This can occur when the tar contains the root
+ // directory itself (e.g. "./", "/").
+ continue
+ }
+ pdir := r.getOrCreateDir(pdirName)
+ ent.NumLink++ // at least one name(ent.Name) references this entry.
+ if ent.Type == "hardlink" {
+ org, err := r.getSource(ent)
+ if err != nil {
+ return err
+ }
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 && e.InnerOffset == 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself (.) and the parent link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digest and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+ // Verify the digest of TOC JSON
+ if r.tocDigest != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+ }
+ return r.Verifiers()
+}
+
+// Verifiers returns a TOCEntryVerifier for this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC itself.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+ chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+ regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
+ var chunkDigestMapIncomplete bool
+ var regDigestMapIncomplete bool
+ var containsChunk bool
+ for _, e := range r.toc.Entries {
+ if e.Type != "reg" && e.Type != "chunk" {
+ continue
+ }
+
+ // offset must be unique in stargz blob
+ _, dOK := chunkDigestMap[e.Offset]
+ _, rOK := regDigestMap[e.Offset]
+ if dOK || rOK {
+ return nil, fmt.Errorf("offset %d found twice", e.Offset)
+ }
+
+ if e.Type == "reg" {
+ if e.Size == 0 {
+ continue // ignores empty file
+ }
+
+ // record the digest of regular file payload
+ if e.Digest != "" {
+ d, err := digest.Parse(e.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+ }
+ regDigestMap[e.Offset] = d
+ } else {
+ regDigestMapIncomplete = true
+ }
+ } else {
+ containsChunk = true // this layer contains "chunk" entries.
+ }
+
+ // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ if e.ChunkDigest != "" {
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+ }
+ chunkDigestMap[e.Offset] = d
+ } else {
+ chunkDigestMapIncomplete = true
+ }
+ }
+
+ if chunkDigestMapIncomplete {
+ // Even though some chunk digests are missing, if this layer doesn't contain
+ // "chunk" entries and the digests of all "reg" files are recorded, we can use them instead.
+ if !containsChunk && !regDigestMapIncomplete {
+ return &verifier{digestMap: regDigestMap}, nil
+ }
+ return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+ }
+
+ return &verifier{digestMap: chunkDigestMap}, nil
+}
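+
+// A typical verification flow looks like the following sketch (illustrative
+// only, not part of the upstream code; tocDigest is assumed to come from a
+// trusted source such as an image manifest annotation):
+//
+//	v, err := r.VerifyTOC(tocDigest)
+//	if err != nil {
+//		// TOC JSON doesn't match the trusted digest
+//	}
+//	ce, _ := r.ChunkEntryForOffset("usr/bin/app", 0)
+//	dv, _ := v.Verifier(ce)
+//	// write the decompressed chunk bytes into dv, then check dv.Verified()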
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+ digestMap map[int64]digest.Digest
+ digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+ v.digestMapMu.Lock()
+ defer v.digestMapMu.Unlock()
+ d, ok := v.digestMap[ce.Offset]
+ if !ok {
+ return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+ ce.Offset, ce.ChunkSize)
+ }
+ return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one relative to the root.
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
+ name = cleanEntryName(name)
+ e, ok = r.Lookup(name)
+ if !ok || !e.isDataType() {
+ return nil, false
+ }
+ ents := r.chunks[name]
+ if len(ents) < 2 {
+ if offset >= e.ChunkSize {
+ return nil, false
+ }
+ return e, true
+ }
+ i := sort.Search(len(ents), func(i int) bool {
+ e := ents[i]
+ return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
+ })
+ if i == len(ents) {
+ return nil, false
+ }
+ return ents[i], true
+}
+
+// Lookup returns the Table of Contents entry for the given path.
+//
+// To get the root directory, use the empty string.
+// Path must be an absolute path or one relative to the root.
+func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
+ path = cleanEntryName(path)
+ if r == nil {
+ return
+ }
+ e, ok = r.m[path]
+ if ok && e.Type == "hardlink" {
+ var err error
+ e, err = r.getSource(e)
+ if err != nil {
+ return nil, false
+ }
+ }
+ return
+}
+
+// OpenFile returns the reader of the specified file payload.
+//
+// Name must be an absolute path or one relative to the root.
+func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
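+
+// Reading a file payload through the returned SectionReader can look like this
+// (illustrative sketch, not part of the upstream code):
+//
+//	fr, err := r.OpenFile("etc/hostname")
+//	if err != nil {
+//		// handle error
+//	}
+//	buf := make([]byte, fr.Size())
+//	_, err = fr.ReadAt(buf, 0)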
+
+func (r *Reader) newFileReader(name string) (*fileReader, error) {
+ name = cleanEntryName(name)
+ ent, ok := r.Lookup(name)
+ if !ok {
+ // TODO: come up with some error plan. This is lazy:
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: os.ErrNotExist,
+ }
+ }
+ if ent.Type != "reg" {
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: errors.New("not a regular file"),
+ }
+ }
+ return &fileReader{
+ r: r,
+ size: ent.Size,
+ ents: r.getChunks(ent),
+ }, nil
+}
+
+func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ fr.preRead = preRead
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
+ if ents, ok := r.chunks[ent.Name]; ok {
+ return ents
+ }
+ return []*TOCEntry{ent}
+}
+
+type fileReader struct {
+ r *Reader
+ size int64
+ ents []*TOCEntry // 1 or more reg/chunk entries
+ preRead func(*TOCEntry, io.Reader) error
+}
+
+func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
+ if off >= fr.size {
+ return 0, io.EOF
+ }
+ if off < 0 {
+ return 0, errors.New("invalid offset")
+ }
+ var i int
+ if len(fr.ents) > 1 {
+ i = sort.Search(len(fr.ents), func(i int) bool {
+ return fr.ents[i].ChunkOffset >= off
+ })
+ if i == len(fr.ents) {
+ i = len(fr.ents) - 1
+ }
+ }
+ ent := fr.ents[i]
+ if ent.ChunkOffset > off {
+ if i == 0 {
+ return 0, errors.New("internal error; first chunk offset is non-zero")
+ }
+ ent = fr.ents[i-1]
+ }
+
+ // If ent is a chunk of a large file, adjust the ReadAt
+ // offset by the chunk's offset.
+ off -= ent.ChunkOffset
+
+ finalEnt := fr.ents[len(fr.ents)-1]
+ compressedOff := ent.Offset
+ // compressedBytesRemain is the number of compressed bytes in this
+ // file remaining, over 1+ chunks.
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff
+
+ sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain)
+
+ const maxRead = 2 << 20
+ var bufSize = maxRead
+ if compressedBytesRemain < maxRead {
+ bufSize = int(compressedBytesRemain)
+ }
+
+ br := bufio.NewReaderSize(sr, bufSize)
+ if _, err := br.Peek(bufSize); err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
+ }
+
+ dr, err := fr.r.decompressor.Reader(br)
+ if err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
+ }
+ defer dr.Close()
+
+ if fr.preRead == nil {
+ if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
+ return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
+ }
+ return io.ReadFull(dr, p)
+ }
+
+ var retN int
+ var retErr error
+ var found bool
+ var nr int64
+ for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
+ if !e.isDataType() {
+ continue
+ }
+ if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
+ break
+ }
+ if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+ return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+ }
+ nr = e.InnerOffset
+ if e == ent {
+ found = true
+ if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+ return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+ }
+ retN, retErr = io.ReadFull(dr, p)
+ nr += off + int64(retN)
+ continue
+ }
+ cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+ if err := fr.preRead(e, cr); err != nil {
+ return 0, fmt.Errorf("failed to pre read: %w", err)
+ }
+ nr += cr.n
+ }
+ if !found {
+ return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
+ }
+ return retN, retErr
+}
+
+// A Writer writes stargz files.
+//
+// Use NewWriter to create a new Writer.
+type Writer struct {
+ bw *bufio.Writer
+ cw *countWriter
+ toc *JTOC
+ diffHash hash.Hash // SHA-256 of uncompressed tar
+
+ closed bool
+ gz io.WriteCloser
+ lastUsername map[int]string
+ lastGroupname map[int]string
+ compressor Compressor
+
+ uncompressedCounter *countWriteFlusher
+
+ // ChunkSize optionally controls the maximum number of bytes
+ // of data of a regular file that can be written in one gzip
+ // stream before a new gzip stream is started.
+ // Zero means to use a default, currently 4 MiB.
+ ChunkSize int
+
+ // MinChunkSize optionally controls the minimum number of bytes
+ // of data that must be written in one gzip stream before a new gzip
+ // stream is started.
+ // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+ MinChunkSize int
+
+ needsOpenGzEntries map[string]struct{}
+}
+
+// currentCompressionWriter writes to the current w.gz field, which can
+// change throughout writing a tar entry.
+//
+// Additionally, it updates w's SHA-256 of the uncompressed bytes
+// of the tar file.
+type currentCompressionWriter struct{ w *Writer }
+
+func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
+ ccw.w.diffHash.Write(p)
+ if ccw.w.gz == nil {
+ if err := ccw.w.condOpenGz(); err != nil {
+ return 0, err
+ }
+ }
+ return ccw.w.gz.Write(p)
+}
+
+func (w *Writer) chunkSize() int {
+ if w.ChunkSize <= 0 {
+ return 4 << 20
+ }
+ return w.ChunkSize
+}
+
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+ footerSize := c.FooterSize()
+ if sr.Size() < footerSize {
+ return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+ }
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ return nil, err
+ }
+ blobPayloadSize, _, _, err := c.ParseFooter(footer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse footer: %w", err)
+ }
+ if blobPayloadSize < 0 {
+ blobPayloadSize = sr.Size()
+ }
+ return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
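+
+// For example (illustrative sketch, not part of the upstream code), the plain
+// tar stream of a gzip-based eStargz blob can be walked as follows:
+//
+//	rc, err := Unpack(sr, new(GzipDecompressor))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rc.Close()
+//	tr := tar.NewReader(rc)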
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterLevel(w, gzip.BestCompression)
+}
+
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
+// The compression level is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+ return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+ bw := bufio.NewWriter(w)
+ cw := &countWriter{w: bw}
+ return &Writer{
+ bw: bw,
+ cw: cw,
+ toc: &JTOC{Version: 1},
+ diffHash: sha256.New(),
+ compressor: c,
+ uncompressedCounter: &countWriteFlusher{},
+ }
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+ if w.closed {
+ return "", nil
+ }
+ defer func() { w.closed = true }()
+
+ if err := w.closeGz(); err != nil {
+ return "", err
+ }
+
+ // Write the TOC index and footer.
+ tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+ if err != nil {
+ return "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return "", err
+ }
+
+ return tocDigest, nil
+}
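+
+// An end-to-end write flow can look like the following sketch (illustrative
+// only, not part of the upstream code; "in" is an assumed io.Reader carrying a
+// tar or tar.gz stream):
+//
+//	var buf bytes.Buffer
+//	w := NewWriter(&buf)
+//	w.ChunkSize = 4 << 20 // optional; zero means the default
+//	if err := w.AppendTar(in); err != nil {
+//		// handle error
+//	}
+//	tocDigest, err := w.Close() // digest of the TOC JSON
+//	diffID := w.DiffID()        // sha256 of the uncompressed tar; valid after Close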
+
+func (w *Writer) closeGz() error {
+ if w.closed {
+ return errors.New("write on closed Writer")
+ }
+ if w.gz != nil {
+ if err := w.gz.Close(); err != nil {
+ return err
+ }
+ w.gz = nil
+ }
+ return nil
+}
+
+func (w *Writer) flushGz() error {
+ if w.closed {
+ return errors.New("flush on closed Writer")
+ }
+ if w.gz != nil {
+ if f, ok := w.gz.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ }
+ return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+ if name == "" {
+ return ""
+ }
+ if *mp == nil {
+ *mp = make(map[int]string)
+ }
+ if (*mp)[id] == name {
+ return ""
+ }
+ (*mp)[id] = name
+ return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+ if w.gz == nil {
+ w.gz, err = w.compressor.Writer(w.cw)
+ if w.gz != nil {
+ w.gz = w.uncompressedCounter.register(w.gz)
+ }
+ }
+ return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+ return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot replace the existing TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+ return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+ var src io.Reader
+ br := bufio.NewReader(r)
+ if isGzip(br) {
+ zr, _ := gzip.NewReader(br)
+ src = zr
+ } else {
+ src = io.Reader(br)
+ }
+ dst := currentCompressionWriter{w}
+ var tw *tar.Writer
+ if !lossless {
+ tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+ }
+ tr := tar.NewReader(src)
+ if lossless {
+ tr.RawAccounting = true
+ }
+ prevOffset := w.cw.n
+ var prevOffsetUncompressed int64
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ if lossless {
+ if remain := tr.RawBytes(); len(remain) > 0 {
+ // Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+ if _, err := dst.Write(remain); err != nil {
+ return err
+ }
+ }
+ }
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+ }
+ if cleanEntryName(h.Name) == TOCTarName {
+ // It is possible for a layer to be "stargzified" twice during the
+ // distribution lifecycle. So we reserve "TOCTarName" here to avoid
+ // duplicated entries in the resulting layer.
+ if lossless {
+ // We cannot handle this in lossless way.
+ return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+ }
+ continue
+ }
+
+ xattrs := make(map[string][]byte)
+ const xattrPAXRecordsPrefix = "SCHILY.xattr."
+ if h.PAXRecords != nil {
+ for k, v := range h.PAXRecords {
+ if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
+ xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
+ }
+ }
+ }
+ ent := &TOCEntry{
+ Name: h.Name,
+ Mode: h.Mode,
+ UID: h.Uid,
+ GID: h.Gid,
+ Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
+ Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
+ ModTime3339: formatModtime(h.ModTime),
+ Xattrs: xattrs,
+ }
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+ if tw != nil {
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ } else {
+ if _, err := dst.Write(tr.RawBytes()); err != nil {
+ return err
+ }
+ }
+ switch h.Typeflag {
+ case tar.TypeLink:
+ ent.Type = "hardlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeSymlink:
+ ent.Type = "symlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeDir:
+ ent.Type = "dir"
+ case tar.TypeReg:
+ ent.Type = "reg"
+ ent.Size = h.Size
+ case tar.TypeChar:
+ ent.Type = "char"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeBlock:
+ ent.Type = "block"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeFifo:
+ ent.Type = "fifo"
+ default:
+ return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
+ }
+
+ // We need to keep a reference to the TOC entry for regular files, so that we
+ // can fill the digest later.
+ var regFileEntry *TOCEntry
+ var payloadDigest digest.Digester
+ if h.Typeflag == tar.TypeReg {
+ regFileEntry = ent
+ payloadDigest = digest.Canonical.Digester()
+ }
+
+ if h.Typeflag == tar.TypeReg && ent.Size > 0 {
+ var written int64
+ totalSize := ent.Size // save it before we destroy ent
+ tee := io.TeeReader(tr, payloadDigest.Hash())
+ for written < totalSize {
+ chunkSize := int64(w.chunkSize())
+ remain := totalSize - written
+ if remain < chunkSize {
+ chunkSize = remain
+ } else {
+ ent.ChunkSize = chunkSize
+ }
+
+ // We flush the underlying compression writer here to correctly calculate "w.cw.n".
+ if err := w.flushGz(); err != nil {
+ return err
+ }
+ if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+ if err := w.closeGz(); err != nil {
+ return err
+ }
+ ent.Offset = w.cw.n
+ prevOffset = ent.Offset
+ prevOffsetUncompressed = w.uncompressedCounter.n
+ } else {
+ ent.Offset = prevOffset
+ ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+ }
+
+ ent.ChunkOffset = written
+ chunkDigest := digest.Canonical.Digester()
+
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+
+ teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+ var out io.Writer
+ if tw != nil {
+ out = tw
+ } else {
+ out = dst
+ }
+ if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+ return fmt.Errorf("error copying %q: %v", h.Name, err)
+ }
+ ent.ChunkDigest = chunkDigest.Digest().String()
+ w.toc.Entries = append(w.toc.Entries, ent)
+ written += chunkSize
+ ent = &TOCEntry{
+ Name: h.Name,
+ Type: "chunk",
+ }
+ }
+ } else {
+ w.toc.Entries = append(w.toc.Entries, ent)
+ }
+ if payloadDigest != nil {
+ regFileEntry.Digest = payloadDigest.Digest().String()
+ }
+ if tw != nil {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ }
+ }
+ remainDest := io.Discard
+ if lossless {
+ remainDest = dst // Preserve the remaining bytes in lossless mode
+ }
+ _, err := io.Copy(remainDest, src)
+ return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+ if ent.Type != "reg" {
+ return false
+ }
+ if w.needsOpenGzEntries == nil {
+ return false
+ }
+ _, ok := w.needsOpenGzEntries[ent.Name]
+ return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+ return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+ for _, d := range decompressors {
+ if s := d.FooterSize(); res < s && s <= blobSize {
+ res = s
+ }
+ }
+ return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+ if tocOff < 0 {
+ // This means that the TOC isn't contained in the blob.
+ // We pass a nil reader to ParseTOC and expect that ParseTOC acquires the TOC from
+ // an external location.
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(nil)
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ if len(tocBytes) > 0 {
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err == nil {
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ }
+
+ start := time.Now()
+ tocBytes = make([]byte, tocSize)
+ if _, err := sr.ReadAt(tocBytes, tocOff); err != nil {
+ return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ start = time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+}
+
+func formatModtime(t time.Time) string {
+ if t.IsZero() || t.Unix() == 0 {
+ return ""
+ }
+ return t.UTC().Round(time.Second).Format(time.RFC3339)
+}
+
+func cleanEntryName(name string) string {
+ // Use path.Clean to consistently deal with path separators across platforms.
+ return strings.TrimPrefix(path.Clean("/"+name), "/")
+}
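+
+// For example (illustrative, not part of the upstream code), "./foo/bar",
+// "/foo/bar" and "foo//bar" all normalize to "foo/bar", while "./" and "/"
+// normalize to "" (the root).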
+
+// countWriter counts how many bytes have been written to its wrapped
+// io.Writer.
+type countWriter struct {
+ w io.Writer
+ n int64
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.w.Write(p)
+ cw.n += int64(n)
+ return
+}
+
+type countWriteFlusher struct {
+ io.WriteCloser
+ n int64
+}
+
+func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
+ wc.WriteCloser = w
+ return wc
+}
+
+func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
+ n, err = wc.WriteCloser.Write(p)
+ wc.n += int64(n)
+ return
+}
+
+func (wc *countWriteFlusher) Flush() error {
+ if f, ok := wc.WriteCloser.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ return nil
+}
+
+func (wc *countWriteFlusher) Close() error {
+ err := wc.WriteCloser.Close()
+ wc.WriteCloser = nil
+ return err
+}
+
+// isGzip reports whether br is positioned right before an upcoming gzip stream.
+// It does not consume any bytes from br.
+func isGzip(br *bufio.Reader) bool {
+ const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ )
+ peek, _ := br.Peek(3)
+ return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
+}
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
+
+type countReader struct {
+ r io.Reader
+ n int64
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ cr.n += int64(n)
+ return
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
new file mode 100644
index 0000000..f24afe3
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -0,0 +1,237 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type gzipCompression struct {
+ *GzipCompressor
+ *GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+ return &gzipCompression{
+ &GzipCompressor{level},
+ &GzipDecompressor{},
+ }
+}
+
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
+ compressionLevel int
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+ gw := io.Writer(gz)
+ if diffHash != nil {
+ gw = io.MultiWriter(gz, diffHash)
+ }
+ tw := tar.NewWriter(gw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51-byte footer.
+func gzipFooterBytes(tocOff int64) []byte {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+ // Extra header indicating the offset of the TOC JSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
+ gz.Close()
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes()
+}
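+
+// The resulting footer is an empty gzip stream whose Extra field carries the
+// subfield ID "SG", a little-endian subfield length of 22, a 16-digit
+// hexadecimal TOC offset and the magic string "STARGZ" (descriptive note added
+// for documentation; see gzipFooterBytes above).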
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != legacyFooterSize {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) != 16+len("STARGZ") {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ }
+ if string(extra[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ }
+ tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+ return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != TOCTarName {
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ }
+ return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 0000000..0ca6fd7
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+ Compression
+ TestStreams(t *testing.T, b []byte, streams []int64)
+ DiffIDOf(*testing.T, []byte) string
+ String() string
+}
+
+// CompressionTestSuite tests that this pkg, using the given controllers, can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+ t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+ t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
+
+type TestingControllerFactory func() TestingController
+
+const (
+ uncompressedType int = iota
+ gzipType
+ zstdType
+)
+
+var srcCompressions = []int{
+ uncompressedType,
+ gzipType,
+ zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize []int
+ in []tarEntry
+ }{
+ {
+ name: "regfiles and directories",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "test1"),
+ dir("foo2/"),
+ file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+ ),
+ },
+ {
+ name: "empty files",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "tttttt"),
+ file("foo_empty", ""),
+ file("foo2", "tttttt"),
+ file("foo_empty2", ""),
+ file("foo3", "tttttt"),
+ file("foo_empty3", ""),
+ file("foo4", "tttttt"),
+ file("foo_empty4", ""),
+ file("foo5", "tttttt"),
+ file("foo_empty5", ""),
+ file("foo6", "tttttt"),
+ ),
+ },
+ {
+ name: "various files",
+ chunkSize: 4,
+ minChunkSize: []int{0, 64000},
+ in: tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo1.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ {
+ name: "no contents",
+ chunkSize: 4,
+ in: tarOf(
+ file("baz.txt", ""),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ }
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+ tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+ // Test divideEntries()
+ entries, err := sortEntries(tarBlob, nil, nil) // identical order
+ if err != nil {
+ t.Fatalf("failed to parse tar: %v", err)
+ }
+ var merged []*entry
+ for _, part := range divideEntries(entries, 4) {
+ merged = append(merged, part...)
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
+ }
+
+ // Prepare sample data
+ cl1 := newCL()
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl1)
+ sw.MinChunkSize = minChunkSize
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl1),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
+
+ // Prepare testing data
+ var opts []Option
+ if minChunkSize > 0 {
+ opts = append(opts, WithMinChunkSize(minChunkSize))
+ }
+ cl2 := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl2),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
+
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl2.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ // Compare as stargz
+ if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz hasn't same json")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't same as the original")
+ return
+ }
+
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz isn't same tar.gz")
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aGz, err := cla.Reader(bytes.NewReader(a))
+ if err != nil {
+ t.Fatalf("failed to read A")
+ }
+ defer aGz.Close()
+ bGz, err := clb.Reader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("failed to read B")
+ }
+ defer bGz.Close()
+
+ // Same as tar's Next() method but ignores landmarks and TOCJSON file
+ next := func(r *tar.Reader) (h *tar.Header, err error) {
+ for {
+ if h, err = r.Next(); err != nil {
+ return
+ }
+ if h.Name != PrefetchLandmark &&
+ h.Name != NoPrefetchLandmark &&
+ h.Name != TOCTarName {
+ return
+ }
+ }
+ }
+
+ aTar := tar.NewReader(aGz)
+ bTar := tar.NewReader(bGz)
+ for {
+ // Fetch and parse next header.
+ aH, aErr := next(aTar)
+ bH, bErr := next(bTar)
+ if aErr != nil || bErr != nil {
+ if aErr == io.EOF && bErr == io.EOF {
+ break
+ }
+ t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+ }
+ if !reflect.DeepEqual(aH, bH) {
+ t.Logf("different header (A = %v; B = %v)", aH, bH)
+ return false
+
+ }
+ aFile, err := io.ReadAll(aTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of A")
+ }
+ bFile, err := io.ReadAll(bTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of B")
+ }
+ if !bytes.Equal(aFile, bFile) {
+ t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b))
+ return false
+ }
+ }
+
+ return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+ if err != nil {
+ t.Fatalf("failed to parse A: %v", err)
+ }
+ bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+ if err != nil {
+ t.Fatalf("failed to parse B: %v", err)
+ }
+ t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+ t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+ return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+ aroot, ok := a.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of A")
+ }
+ broot, ok := b.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of B")
+ }
+ aEntry := stargzEntry{aroot, a}
+ bEntry := stargzEntry{broot, b}
+ return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+ buf := new(bytes.Buffer)
+ var w io.WriteCloser
+ var err error
+ if srcCompression == gzipType {
+ w = gzip.NewWriter(buf)
+ } else if srcCompression == zstdType {
+ w, err = zstd.NewWriter(buf)
+ if err != nil {
+ t.Fatalf("failed to init zstd writer: %v", err)
+ }
+ } else {
+ return src
+ }
+ src.Seek(0, io.SeekStart)
+ if _, err := io.Copy(w, src); err != nil {
+ t.Fatalf("failed to compress source")
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("failed to finalize compress source")
+ }
+ data := buf.Bytes()
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+ e *TOCEntry
+ r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+ ae, ar := a.e, a.r
+ be, br := b.e, b.r
+ t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+ if !equalEntry(ae, be) {
+ t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+ return false
+ }
+ if ae.Type == "dir" {
+ t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+ allChildrenName(ae), allChildrenName(be))
+ iscontain := true
+ ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+ // Walk through all files on this stargz file.
+
+ if aChild.Name == PrefetchLandmark ||
+ aChild.Name == NoPrefetchLandmark {
+ return true // Ignore landmarks
+ }
+
+ // Ignore a TOCEntry of "./" (formated as "" by stargz lib) on root directory
+ // because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+ // Here, we selectively compare the fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset >= 0) == (b.Offset >= 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+ // chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ // ChunkSize int64 `json:"chunkSize,omitempty"`
+ // children map[string]*TOCEntry
+ a.Digest == b.Digest
+}
+
+func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+ ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
+ if !ok {
+ return nil, 0, false
+ }
+ data := make([]byte, ce.ChunkSize)
+ t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset())
+ n, err := r.ReadAt(data, ce.ChunkOffset)
+ if err != nil {
+ t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v",
+ e.e.Name, ce.ChunkOffset, ce.ChunkSize, err)
+ }
+ if int64(n) != ce.ChunkSize {
+ t.Fatalf("unexpected copied data size %d; want %d",
+ n, ce.ChunkSize)
+ }
+ return data[:n], offset + ce.ChunkSize, true
+}
+
+func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+ jtocData, err := json.Marshal(*tocJSON)
+ if err != nil {
+ t.Fatalf("failed to marshal TOC JSON: %v", err)
+ }
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil {
+ t.Fatalf("failed to read toc json blob: %v", err)
+ }
+ return buf.String()
+}
+
+const chunkSize = 3
+
+// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
+type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+
+// testDigestAndVerify runs specified checks against sample stargz blobs.
+func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+ checks []check
+ minChunkSize []int
+ }{
+ {
+ name: "no-regfile",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ dir("test/"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ dir("test2/"), // modified
+ ), allowedPrefix[0])),
+ },
+ },
+ {
+ name: "small-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "bbb", dgstMap),
+ )
+ },
+ minChunkSize: []int{0, 64000},
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", ""),
+ file("foo.txt", "M"), // modified
+ dir("test/"),
+ file("test/bar.txt", "bbb"),
+ ), allowedPrefix[0])),
+ // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO
+ checkVerifyBrokenContentFail("foo.txt"),
+ },
+ },
+ {
+ name: "big-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
+ file("foo.txt", "a"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ {
+ name: "with-non-regfiles",
+ minChunkSize: []int{0, 64000},
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ regDigest(t, "bar/foo2.txt", "b", dgstMap),
+ regDigest(t, "foo3.txt", "c", dgstMap),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ dir("test2/"),
+ link("test2/bazlink", "foo.txt"), // modified
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ cl := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+ // NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ digestMapTOC, err := listDigests(io.NewSectionReader(
+ bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ controller,
+ )
+ if err != nil {
+ t.Fatalf("failed to list digest: %v", err)
+ }
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ // Check the digest of TOC JSON based on the actual contents
+ // It's sure that TOC JSON exists in this archive because
+ // Open succeeded.
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest())
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar, during the whole conversion steps.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that verification
+// succeeds for every chunk.
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+ t.Errorf("Invalid contents in converted stargz %q (should be succeeded)",
+ h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ funcs := map[string]rewriteFunc{
+ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ cl := newController()
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+ t.Errorf("failed to get data of a chunk of %q(offset=%q)",
+ filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
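+// chunkID builds the key used by the digest maps in these tests. As an
+// illustration, chunkID("./foo.txt", 0, 3) would yield "foo.txt-0-3"
+// (cleanEntryName normalizes the path, stripping the leading "./").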
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignores empty file
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
+ return nil, 0, fmt.Errorf("error reading footer: %w", err)
+ }
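+ // footer is exactly fSize bytes long, so this slice normally covers the
+ // whole buffer; positive() only guards against a negative start index.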
+ _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
+ }
+
+ // Decode the TOC JSON
+ var tocReader io.Reader
+ if tocOffset >= 0 {
+ tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ }
+ decodedJTOC, _, err = controller.ParseTOC(tocReader)
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
+ }
+ return decodedJTOC, tocOffset, nil
+}
+
+func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+ const content = "Some contents"
+ invalidUtf8 := "\xff\xfe\xfd"
+
+ xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
+ sampleOwner := owner{uid: 50, gid: 100}
+
+ data64KB := randomContents(64000)
+
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tarEntry
+ want []stargzCheck
+ wantNumGz int // expected number of streams
+
+ wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+ wantFailOnLossLess bool
+ wantTOCVersion int // default = 1
+ }{
+ {
+ name: "empty",
+ in: tarOf(),
+ wantNumGz: 2, // (empty tar) + TOC + footer
+ want: checks(
+ numTOCEntries(0),
+ ),
+ },
+ {
+ name: "1dir_1empty_file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", ""),
+ ),
+ wantNumGz: 3, // dir, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", 0),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileDigest("foo/bar.txt", digestFor("")),
+ ),
+ },
+ {
+ name: "1dir_1file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", content, xAttrFile),
+ ),
+ wantNumGz: 4, // foo dir, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ hasFileDigest("foo/bar.txt", digestFor(content)),
+ hasFileContentsRange("foo/bar.txt", 0, content),
+ hasFileContentsRange("foo/bar.txt", 1, content[1:]),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileXattrs("foo/bar.txt", "foo", "bar"),
+ hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8),
+ ),
+ },
+ {
+ name: "2meta_2file",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ },
+ {
+ name: "3dir",
+ in: tarOf(
+ dir("bar/"),
+ dir("foo/"),
+ dir("foo/bar/"),
+ ),
+ wantNumGz: 3, // 3 dirs, TOC, footer
+ want: checks(
+ hasDirLinkCount("bar/", 2),
+ hasDirLinkCount("foo/", 3),
+ hasDirLinkCount("foo/bar/", 2),
+ ),
+ },
+ {
+ name: "symlink",
+ in: tarOf(
+ dir("foo/"),
+ symlink("foo/bar", "../../x"),
+ ),
+ wantNumGz: 3, // metas + TOC + footer
+ want: checks(
+ numTOCEntries(2),
+ hasSymlink("foo/bar", "../../x"),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar"),
+ ),
+ },
+ {
+ name: "chunked_file",
+ chunkSize: 4,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
+ ),
+ wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
+ want: checks(
+ numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
+ hasDir("foo/"),
+ hasFileLen("foo/big.txt", len("This is such a big file")),
+ hasFileDigest("foo/big.txt", digestFor("This is such a big file")),
+ hasFileContentsRange("foo/big.txt", 0, "This is such a big file"),
+ hasFileContentsRange("foo/big.txt", 1, "his is such a big file"),
+ hasFileContentsRange("foo/big.txt", 2, "is is such a big file"),
+ hasFileContentsRange("foo/big.txt", 3, "s is such a big file"),
+ hasFileContentsRange("foo/big.txt", 4, " is such a big file"),
+ hasFileContentsRange("foo/big.txt", 5, "is such a big file"),
+ hasFileContentsRange("foo/big.txt", 6, "s such a big file"),
+ hasFileContentsRange("foo/big.txt", 7, " such a big file"),
+ hasFileContentsRange("foo/big.txt", 8, "such a big file"),
+ hasFileContentsRange("foo/big.txt", 9, "uch a big file"),
+ hasFileContentsRange("foo/big.txt", 10, "ch a big file"),
+ hasFileContentsRange("foo/big.txt", 11, "h a big file"),
+ hasFileContentsRange("foo/big.txt", 12, " a big file"),
+ hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""),
+ hasChunkEntries("foo/big.txt", 6),
+ ),
+ },
+ {
+ name: "recursive",
+ in: tarOf(
+ dir("/", sampleOwner),
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // dirs, bar.txt alone, TOC, footer
+ want: checks(
+ maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt"
+ ),
+ },
+ {
+ name: "block_char_fifo",
+ in: tarOf(
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "b",
+ Typeflag: tar.TypeBlock,
+ Devmajor: 123,
+ Devminor: 456,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "c",
+ Typeflag: tar.TypeChar,
+ Devmajor: 111,
+ Devminor: 222,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "f",
+ Typeflag: tar.TypeFifo,
+ Format: format,
+ })
+ }),
+ ),
+ wantNumGz: 3,
+ want: checks(
+ lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}),
+ lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}),
+ lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}),
+ ),
+ },
+ {
+ name: "modes",
+ in: tarOf(
+ dir("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ file("foo1/bar1", content, 0700|os.ModeSetuid),
+ file("foo1/bar2", content, 0755|os.ModeSetgid),
+ dir("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ file("foo2/bar3", content, 0755|os.ModeSticky),
+ dir("foo3/", 0755|os.ModeDir),
+ file("foo3/bar4", content, os.FileMode(0700)),
+ file("foo3/bar5", content, os.FileMode(0755)),
+ ),
+ wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer
+ want: checks(
+ hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ hasMode("foo1/bar1", 0700|os.ModeSetuid),
+ hasMode("foo1/bar2", 0755|os.ModeSetgid),
+ hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ hasMode("foo2/bar3", 0755|os.ModeSticky),
+ hasMode("foo3/", 0755|os.ModeDir),
+ hasMode("foo3/bar4", os.FileMode(0700)),
+ hasMode("foo3/bar5", os.FileMode(0755)),
+ ),
+ },
+ {
+ name: "lossy",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+ ),
+ wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ {
+ name: "hardlink should be replaced to the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ file("foo22", "ccc"),
+ dir("bar/"),
+ file("bar/bar.txt", "aaa"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo22", len("ccc")),
+ hasFileLen("bar/bar.txt", len("aaa")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo22", digestFor("ccc")),
+ hasFileDigest("bar/bar.txt", digestFor("aaa")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo22", 0, "ccc"),
+ hasFileContentsRange("foo22", 1, "cc"),
+ hasFileContentsRange("foo22", 2, "c"),
+ hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+ hasFileContentsRange("bar/bar.txt", 1, "aa"),
+ hasFileContentsRange("bar/bar.txt", 2, "a"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ dir("bar/"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+ hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ }
+
+ for _, tt := range tests {
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ cl1 := newCL()
+ w := NewWriterWithCompressor(&stargzBuf, cl1)
+ w.ChunkSize = tt.chunkSize
+ w.MinChunkSize = tt.minChunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("Append wanted to fail on lossless")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+ // Check that the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl1.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+ r, err := Open(
+ sr,
+ WithDecompressors(cl1),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ wantTOCVersion := 1
+ if tt.wantTOCVersion > 0 {
+ wantTOCVersion = tt.wantTOCVersion
+ }
+ if r.toc.Version != wantTOCVersion {
+ t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+ }
+
+ footerSize := cl1.FooterSize()
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ t.Errorf("failed to read footer: %v", err)
+ }
+ _, tocOffset, _, err := cl1.ParseFooter(footer)
+ if err != nil {
+ t.Errorf("failed to parse footer: %v", err)
+ }
+ if err := checkCalled(tocOffset >= 0); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
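+ // Count compression streams from the TOC: a new stream starts at every
+ // entry whose offset advances past the previous one, and the TOC and the
+ // footer are counted as two additional streams (this is what wantNumGz
+ // describes).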
+ streamOffsets := []int64{0}
+ prevOffset := int64(-1)
+ streams := 0
+ for _, e := range r.toc.Entries {
+ if e.Offset > prevOffset {
+ streamOffsets = append(streamOffsets, e.Offset)
+ prevOffset = e.Offset
+ streams++
+ }
+ }
+ streams++ // TOC
+ if tocOffset >= 0 {
+ // toc is in the blob
+ streamOffsets = append(streamOffsets, tocOffset)
+ }
+ streams++ // footer
+ streamOffsets = append(streamOffsets, footerOffset)
+ if streams != wantNumGz {
+ t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
+ }
+
+ t.Logf("testing streams: %+v", streamOffsets)
+ cl1.TestStreams(t, b, streamOffsets)
+
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+type chunkInfo struct {
+ name string
+ data string
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func(needsGetTOC bool) error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ }
+ if needsGetTOC {
+ if !getTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+ }
+ }
+ if !deserializeTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
+ }
+ return errorutil.Aggregate(allErr)
+ }
+}
+
+func digestFor(content string) string {
+ sum := sha256.Sum256([]byte(content))
+ return fmt.Sprintf("sha256:%x", sum)
+}
+
+type numTOCEntries int
+
+func (n numTOCEntries) check(t *testing.T, r *Reader) {
+ if r.toc == nil {
+ t.Fatal("nil TOC")
+ }
+ if got, want := len(r.toc.Entries), int(n); got != want {
+ t.Errorf("got %d TOC entries; want %d", got, want)
+ }
+ t.Logf("got TOC entries:")
+ for i, ent := range r.toc.Entries {
+ entj, _ := json.Marshal(ent)
+ t.Logf(" [%d]: %s\n", i, entj)
+ }
+ if t.Failed() {
+ t.FailNow()
+ }
+}
+
+func checks(s ...stargzCheck) []stargzCheck { return s }
+
+type stargzCheck interface {
+ check(t *testing.T, r *Reader)
+}
+
+type stargzCheckFn func(*testing.T, *Reader)
+
+func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+
+func maxDepth(max int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup("")
+ if !ok {
+ t.Fatal("root directory not found")
+ }
+ d, err := getMaxDepth(t, e, 0, 10*max)
+ if err != nil {
+ t.Errorf("failed to get max depth (wanted %d): %v", max, err)
+ return
+ }
+ if d != max {
+ t.Errorf("invalid depth %d; want %d", d, max)
+ return
+ }
+ })
+}
+
+func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+ if current > limit {
+ return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
+ current, limit)
+ }
+ max = current
+ e.ForeachChild(func(baseName string, ent *TOCEntry) bool {
+ t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name)
+ d, err := getMaxDepth(t, ent, current+1, limit)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if d > max {
+ max = d
+ }
+ return true
+ })
+ return
+}
+
+func hasFileLen(file string, wantLen int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ } else if ent.Size != int64(wantLen) {
+ t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen)
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileXattrs(file, name, value string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ }
+ if ent.Xattrs == nil {
+ t.Errorf("file %q has no xattrs", file)
+ return
+ }
+ valueFound, found := ent.Xattrs[name]
+ if !found {
+ t.Errorf("file %q has no xattr %q", file, name)
+ return
+ }
+ if string(valueFound) != value {
+ t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value)
+ }
+
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileDigest(file string, digest string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for file %q", file)
+ }
+ if ent.Digest != digest {
+ t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest)
+ }
+ })
+}
+
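+// hasFileContentsWithPreRead checks that reading "file" through
+// OpenFileWithPreReader also pre-reads the extra entries packed into the same
+// stream and that their contents match the expected chunkInfo data.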
+func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ extraMap := make(map[string]chunkInfo)
+ for _, e := range extra {
+ extraMap[e.name] = e
+ }
+ var extraNames []string
+ for n := range extraMap {
+ extraNames = append(extraNames, n)
+ }
+ f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
+ t.Logf("On %q: got preread of %q", file, e.Name)
+ ex, ok := extraMap[e.Name]
+ if !ok {
+ t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
+ }
+ got, err := io.ReadAll(cr)
+ if err != nil {
+ t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
+ }
+ if ex.data != string(got) {
+ t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
+ }
+ delete(extraMap, e.Name)
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ if len(extraMap) != 0 {
+ var exNames []string
+ for _, ex := range extraMap {
+ exNames = append(exNames, ex.name)
+ }
+ t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
+ }
+ })
+}
+
+func hasFileContentsRange(file string, offset int, want string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ f, err := r.OpenFile(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ })
+}
+
+func hasChunkEntries(file string, wantChunks int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("no file for %q", file)
+ }
+ if ent.Type != "reg" {
+ t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type)
+ }
+ chunks := r.getChunks(ent)
+ if len(chunks) != wantChunks {
+ t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks)
+ return
+ }
+ f := chunks[0]
+
+ var gotChunks []*TOCEntry
+ var last *TOCEntry
+ for off := int64(0); off < f.Size; off++ {
+ e, ok := r.ChunkEntryForOffset(file, off)
+ if !ok {
+ t.Errorf("no ChunkEntryForOffset at %d", off)
+ return
+ }
+ if last != e {
+ gotChunks = append(gotChunks, e)
+ last = e
+ }
+ }
+ if !reflect.DeepEqual(chunks, gotChunks) {
+ t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks)
+ }
+
+ // And verify the NextOffset
+ for i := 0; i < len(gotChunks)-1; i++ {
+ ci := gotChunks[i]
+ cnext := gotChunks[i+1]
+ if ci.NextOffset() != cnext.Offset {
+ t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset)
+ }
+ }
+ })
+}
+
+func entryHasChildren(dir string, want ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ want := append([]string(nil), want...)
+ var got []string
+ ent, ok := r.Lookup(dir)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for dir node %q", dir)
+ }
+ for baseName := range ent.children {
+ got = append(got, baseName)
+ }
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("children of %q = %q; want %q", dir, got, want)
+ }
+ })
+}
+
+func hasDir(file string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasDirLinkCount(file string, count int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ return
+ }
+ if ent.NumLink != count {
+ t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasMode(file string, mode os.FileMode) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Stat().Mode() != mode {
+ t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode)
+ return
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasSymlink(file, target string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "symlink" {
+ t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type)
+ } else if ent.LinkName != target {
+ t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target)
+ }
+ return
+ }
+ }
+ t.Errorf("symlink %q not found", file)
+ })
+}
+
+func lookupMatch(name string, want *TOCEntry) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup(name)
+ if !ok {
+ t.Fatalf("failed to Lookup entry %q", name)
+ }
+ if !reflect.DeepEqual(e, want) {
+ t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want)
+ }
+
+ })
+}
+
+func hasEntryOwner(entry string, owner owner) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
+ if !ok {
+ t.Errorf("entry %q not found", entry)
+ return
+ }
+ if ent.UID != owner.uid || ent.GID != owner.gid {
+ t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+ return
+ }
+ })
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
+func viewContent(c []byte) string {
+ if len(c) < 100 {
+ return string(c)
+ }
+ return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+ appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+ return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+ format := tar.FormatUnknown
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case tar.Format:
+ format = v
+ default:
+ panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+ }
+ }
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, ent := range ents {
+ if err := ent.appendTar(tw, prefix, format); err != nil {
+ t.Fatalf("building input tar: %v", err)
+ }
+ }
+ if err := tw.Close(); err != nil {
+ t.Errorf("closing write of input tar: %v", err)
+ }
+ data := append(buf.Bytes(), make([]byte, 100)...) // append 100 zero bytes at the tail to check that lossless mode preserves trailing padding
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var o owner
+ mode := os.FileMode(0755)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if !strings.HasSuffix(name, "/") {
+ panic(fmt.Sprintf("missing trailing slash in dir %q ", name))
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeDir,
+ Name: prefix + name,
+ Mode: tm,
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ })
+ })
+}
+
+// xAttr is a set of extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories created with the file and dir functions.
+type owner struct {
+ uid int
+ gid int
+}
+
+func file(name, contents string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var xattrs xAttr
+ var o owner
+ mode := os.FileMode(0644)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case xAttr:
+ xattrs = v
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if strings.HasSuffix(name, "/") {
+ return fmt.Errorf("bogus trailing slash in file %q", name)
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ if len(xattrs) > 0 {
+ format = tar.FormatPAX // only PAX supports xattrs
+ }
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Mode: tm,
+ Xattrs: xattrs,
+ Size: int64(len(contents)),
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ _, err = io.WriteString(tw, contents)
+ return err
+ })
+}
+
+func symlink(name, target string) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeSymlink,
+ Name: prefix + name,
+ Linkname: target,
+ Mode: 0644,
+ Format: format,
+ })
+ })
+}
+
+func link(name string, linkname string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeLink,
+ Name: prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func chardev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeChar,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func blockdev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeBlock,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+func fifo(name string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeFifo,
+ Name: prefix + name,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func prefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func noPrefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+ if digestMap == nil {
+ t.Fatalf("digest map mustn't be nil")
+ }
+ content := []byte(contentStr)
+
+ var n int64
+ for n < int64(len(content)) {
+ size := int64(chunkSize)
+ remain := int64(len(content)) - n
+ if remain < size {
+ size = remain
+ }
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil {
+ t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)",
+ string(content[n:n+size]), name, n, size)
+ }
+ digestMap[chunkID(name, n, size)] = dgstr.Digest()
+ n += size
+ }
+
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Size: int64(len(content)),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+func randomContents(n int) string {
+ b := make([]rune, n)
+ for i := range b {
+ b[i] = runes[rand.Intn(len(runes))]
+ }
+ return string(b)
+}
+
+func fileModeToTarMode(mode os.FileMode) (int64, error) {
+ h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
+ if err != nil {
+ return 0, err
+ }
+ return h.Mode, nil
+}
+
+// fileInfoOnlyMode is an os.FileInfo implementation that populates only the file mode.
+type fileInfoOnlyMode os.FileMode
+
+func (f fileInfoOnlyMode) Name() string { return "" }
+func (f fileInfoOnlyMode) Size() int64 { return 0 }
+func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
+func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
+func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
+func (f fileInfoOnlyMode) Sys() interface{} { return nil }
+
+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+ if len(streams) == 0 {
+ return // nop
+ }
+
+ wants := map[int64]struct{}{}
+ for _, s := range streams {
+ wants[s] = struct{}{}
+ }
+
+ len0 := len(b)
+ br := bytes.NewReader(b)
+ zr := new(gzip.Reader)
+ t.Logf("got gzip streams:")
+ numStreams := 0
+ for {
+ zoff := len0 - br.Len()
+ if err := zr.Reset(br); err != nil {
+ if err == io.EOF {
+ return
+ }
+ t.Fatalf("countStreams(gzip), Reset: %v", err)
+ }
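+ // Multistream(false) makes the gzip reader stop at the end of the current
+ // member, so each Reset/Copy pair consumes exactly one stream and zoff is
+ // the byte offset of that stream within b.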
+ zr.Multistream(false)
+ n, err := io.Copy(io.Discard, zr)
+ if err != nil {
+ t.Fatalf("countStreams(gzip), Copy: %v", err)
+ }
+ var extra string
+ if len(zr.Header.Extra) > 0 {
+ extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+ }
+ t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+ delete(wants, int64(zoff))
+ numStreams++
+ }
+}
+
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+ h := sha256.New()
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("diffIDOf(gzip): %v", err)
+ }
+ defer zr.Close()
+ if _, err := io.Copy(h, zr); err != nil {
+ t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+ }
+ return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
new file mode 100644
index 0000000..57e0aa6
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -0,0 +1,342 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // TOCTarName is the name of the JSON file in the tar archive in the
+ // table of contents gzip stream.
+ TOCTarName = "stargz.index.json"
+
+ // FooterSize is the number of bytes in the footer
+ //
+ // The footer is an empty gzip stream with no compression and an Extra
+ // header of the form "%016xSTARGZ", where the 64 bit hex-encoded
+ // number is the offset to the gzip stream of JSON TOC.
+ //
+ // 51 comes from:
+ //
+ // 10 bytes gzip header
+ // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ // 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ // 5 bytes flate header
+ // 8 bytes gzip footer
+ // (End of the eStargz blob)
+ //
+ // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
+ FooterSize = 51
+
+ // legacyFooterSize is the number of bytes in the legacy stargz footer.
+ //
+ // 47 comes from:
+ //
+ // 10 byte gzip header +
+ // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+ // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+ // 5 byte flate header
+ // 8 byte gzip footer (two little endian uint32s: digest, size)
+ legacyFooterSize = 47
+
+ // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+ // digest of the TOC JSON.
+ // This annotation is valid only when it is specified in `.[]layers.annotations`
+ // of an image manifest.
+ TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+ // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+ // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+ // to the runtime, but the current OCI image format doesn't ship this information by default, so we
+ // store it in this special annotation.
+ StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+ // PrefetchLandmark is a file entry which indicates the end position of
+ // prefetch in the stargz file.
+ PrefetchLandmark = ".prefetch.landmark"
+
+ // NoPrefetchLandmark is a file entry which indicates that no prefetch should
+ // occur in the stargz file.
+ NoPrefetchLandmark = ".no.prefetch.landmark"
+
+ landmarkContents = 0xf
+)
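
A minimal sketch of decoding the footer layout documented above (illustrative, not part of this package; it assumes the standard bytes, compress/gzip, fmt, and strconv imports): the footer is an empty gzip stream whose Extra field carries the "SG" subfield with a "%016xSTARGZ" payload.

func tocOffsetFromFooter(footer []byte) (int64, error) {
	zr, err := gzip.NewReader(bytes.NewReader(footer))
	if err != nil {
		return 0, err
	}
	defer zr.Close()
	extra := zr.Header.Extra
	// 4-byte subfield header (SI1='S', SI2='G', 2-byte little-endian LEN)
	// followed by the 22-byte "%016xSTARGZ" payload.
	if len(extra) != 26 || extra[0] != 'S' || extra[1] != 'G' {
		return 0, fmt.Errorf("unexpected gzip extra field: %q", extra)
	}
	payload := extra[4:]
	if string(payload[16:]) != "STARGZ" {
		return 0, fmt.Errorf("missing STARGZ magic")
	}
	return strconv.ParseInt(string(payload[:16]), 16, 64)
}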
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+ Version int `json:"version"`
+ Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+ // Name is the tar entry's name. It is the complete path
+ // stored in the tar file, not just the base name.
+ Name string `json:"name"`
+
+ // Type is one of "dir", "reg", "symlink", "hardlink", "char",
+ // "block", "fifo", or "chunk".
+ // The "chunk" type is used for regular file data chunks past the first
+ // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+ // ChunkOffset, and ChunkSize populated.
+ Type string `json:"type"`
+
+ // Size, for regular files, is the logical size of the file.
+ Size int64 `json:"size,omitempty"`
+
+ // ModTime3339 is the modification time of the tar entry. Empty
+ // means zero or unknown. Otherwise it's in UTC RFC3339
+ // format. Use the ModTime method to access the time.Time value.
+ ModTime3339 string `json:"modtime,omitempty"`
+ modTime time.Time
+
+ // LinkName, for symlinks and hardlinks, is the link target.
+ LinkName string `json:"linkName,omitempty"`
+
+ // Mode is the permission and mode bits.
+ Mode int64 `json:"mode,omitempty"`
+
+ // UID is the user ID of the owner.
+ UID int `json:"uid,omitempty"`
+
+ // GID is the group ID of the owner.
+ GID int `json:"gid,omitempty"`
+
+ // Uname is the username of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same UID.
+ Uname string `json:"userName,omitempty"`
+
+ // Gname is the group name of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same GID.
+ Gname string `json:"groupName,omitempty"`
+
+ // Offset, for regular files, provides the offset in the
+ // stargz file to the file's data bytes. See ChunkOffset and
+ // ChunkSize.
+ Offset int64 `json:"offset,omitempty"`
+
+ // InnerOffset is an optional field that indicates the uncompressed offset
+ // of this "reg" or "chunk" payload within the stream that starts at Offset.
+ // This field makes it possible to put multiple "reg" or "chunk" payloads
+ // in one chunk, sharing the same Offset but with different InnerOffsets.
+ InnerOffset int64 `json:"innerOffset,omitempty"`
+
+ nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+ // DevMajor is the major device number for "char" and "block" types.
+ DevMajor int `json:"devMajor,omitempty"`
+
+ // DevMinor is the minor device number for "char" and "block" types.
+ DevMinor int `json:"devMinor,omitempty"`
+
+ // NumLink is the number of entry names pointing to this entry.
+ // Zero means one name references this entry.
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`
+
+ // Xattrs are the extended attribute for the entry.
+ Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+ // Digest stores the OCI checksum for regular files payload.
+ // It has the form "sha256:abcdef01234....".
+ Digest string `json:"digest,omitempty"`
+
+ // ChunkOffset is non-zero if this is a chunk of a large,
+ // regular file. If so, the Offset is where the gzip header of
+ // ChunkSize bytes at ChunkOffset in Name begins.
+ //
+ // In serialized form, a "chunkSize" JSON field of zero means
+ // that the chunk goes to the end of the file. After reading
+ // from the stargz TOC, though, the ChunkSize is initialized
+ // to a non-zero value when Type is either "reg" or
+ // "chunk".
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+
+ // ChunkDigest stores an OCI digest of the chunk. This must be formed
+ // as "sha256:0123abcd...".
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+
+ children map[string]*TOCEntry
+
+ // chunkTopIndex is index of the entry where Offset starts in the blob.
+ chunkTopIndex int
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+ if e.children == nil {
+ e.children = make(map[string]*TOCEntry)
+ }
+ if child.Type == "dir" {
+ e.NumLink++ // Entry ".." in the subdirectory links to this directory
+ }
+ e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+ for name, ent := range e.children {
+ if !f(name, ent) {
+ return
+ }
+ }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+ child, ok = e.children[baseName]
+ return
+}
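
As an illustrative sketch (not part of the vendored file), ForeachChild and LookupChild support simple traversal of the directory tree encoded in the TOC:

// childNames lists the immediate children of a directory entry; illustrative only.
func childNames(dir *TOCEntry) []string {
	var names []string
	dir.ForeachChild(func(baseName string, _ *TOCEntry) bool {
		names = append(names, baseName)
		return true // keep iterating
	})
	return names
}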
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64 { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+ // TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
+ m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+ (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+ switch fi.e.Type {
+ case "dir":
+ m |= os.ModeDir
+ case "symlink":
+ m |= os.ModeSymlink
+ case "char":
+ m |= os.ModeDevice | os.ModeCharDevice
+ case "block":
+ m |= os.ModeDevice
+ case "fifo":
+ m |= os.ModeNamedPipe
+ }
+ return m
+}
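
For illustration (not part of the vendored file), the Stat/Mode mapping above lets callers use ordinary os.FileMode checks on TOC entries:

// isSymlink is illustrative only: "symlink" entries surface os.ModeSymlink.
func isSymlink(e *TOCEntry) bool {
	return e.Stat().Mode()&os.ModeSymlink != 0
}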
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+ // Verifier provides a content verifier that can be used for verifying the
+ // contents of the specified TOCEntry.
+ Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+ Compressor
+ Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+ // Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+ // Every time a chunk is written, the WriteCloser is closed and Writer is
+ // called again for writing the next chunk.
+ //
+ // The returned writer should implement "Flush() error" function that flushes
+ // any pending compressed data to the underlying writer.
+ Writer(w io.Writer) (WriteFlushCloser, error)
+
+ // WriteTOCAndFooter is called to write the JTOC to the passed Writer.
+ // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+ // WriteTOCAndFooter can optionally write anything that affects the DiffID calculation
+ // (e.g. an uncompressed TOC JSON).
+ //
+ // This function returns tocDgst, the digest of the TOC that will be used
+ // to verify this blob when it is parsed.
+ WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns a ReadCloser to be used for decompressing the file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+ // ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+ // the top of the blob until the TOC JSON).
+ //
+ // If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader
+ // to ParseTOC, which is then expected to acquire the TOC from an external location and return it.
+ //
+ // tocSize is optional. If tocSize <= 0, it defaults to the size of the range from tocOffset until
+ // the beginning of the footer (blob size - tocOffset - FooterSize).
+ // If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+ // ParseTOC parses the TOC from the passed reader. The reader provides the partial contents
+ // of the underlying blob for the range specified by the ParseFooter method.
+ //
+ // This function returns tocDgst, the digest of the TOC that will be used
+ // to verify this blob. It must match the value returned from
+ // Compressor.WriteTOCAndFooter that was used when creating this blob.
+ //
+ // If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in
+ // the blob. A nil reader is then passed to ParseTOC, which is expected to acquire the TOC from
+ // an external location and return it.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
+
+type WriteFlushCloser interface {
+ io.WriteCloser
+ Flush() error
+}
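
As a hedged sketch (not part of this file, and assuming the standard compress/gzip import), *gzip.Writer already provides Write, Flush() error, and Close(), so a gzip-based Compressor can return it directly as the per-chunk WriteFlushCloser:

func gzipChunkWriter(w io.Writer) (WriteFlushCloser, error) {
	// Illustrative only; WriteTOCAndFooter would still need its own implementation.
	return gzip.NewWriter(w), nil
}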
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
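
For illustration (not part of the vendored file), this is the kind of type the deferred catchPanic guards against: its String method panics, but the dump renders "(PANIC=boom)" instead of crashing.

// panicStringer is illustrative only.
type panicStringer struct{}

func (panicStringer) String() string { panic("boom") }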
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or the same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
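
A usage sketch (illustrative, assuming the package is imported as spew): a dedicated ConfigState with sorted keys and pointer addresses disabled gives deterministic output that diffs cleanly in tests.

func dumpForTest(v interface{}) string {
	cfg := spew.ConfigState{
		Indent:                  "  ",
		SortKeys:                true,
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	return cfg.Sdump(v)
}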
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
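As a quick illustration of how Dump, Fdump, and Sdump relate (all three funnel through fdump and differ only in their destination), here is a hedged usage sketch. The server type, its fields, and the chosen Indent value are assumptions made up for this example, not part of the vendored code.

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// server is a hypothetical type used only to have something to dump.
type server struct {
	Name  string
	Ports []int
}

func main() {
	s := server{Name: "api", Ports: []int{80, 443}}

	// Adjust the shared package-level config before dumping; Indent is one
	// of the ConfigState options referenced in the Dump documentation.
	spew.Config.Indent = "  "

	spew.Dump(s)             // writes to os.Stdout
	spew.Fdump(os.Stderr, s) // writes to any io.Writer
	out := spew.Sdump(s)     // returns the same output as a string
	_ = out
}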
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
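The following hypothetical sketch (not part of the vendored file) shows NewFormatter plugged into the standard fmt functions with the four supported verb combinations described above; the point type is invented for illustration.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// point is a hypothetical type used to demonstrate the formatter.
type point struct {
	X, Y int
}

func main() {
	p := &point{X: 1, Y: 2}

	// The formatter only customizes the %v family; other verbs are passed
	// through to the standard fmt package unchanged.
	fmt.Printf("%v\n", spew.NewFormatter(p))   // most compact form
	fmt.Printf("%+v\n", spew.NewFormatter(p))  // adds pointer addresses
	fmt.Printf("%#v\n", spew.NewFormatter(p))  // adds types
	fmt.Printf("%#+v\n", spew.NewFormatter(p)) // adds types and addresses
}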
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
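For completeness, a brief hypothetical sketch of the convenience wrappers defined in this file; the user type and format strings are assumptions, and each call is shorthand for the equivalent fmt call with every argument wrapped by NewFormatter via convertArgs.

package main

import "github.com/davecgh/go-spew/spew"

// user is a hypothetical type used only to exercise the wrappers.
type user struct {
	ID   int
	Tags []string
}

func main() {
	u := user{ID: 7, Tags: []string{"admin"}}

	// Equivalent to fmt.Printf(format, spew.NewFormatter(u)).
	spew.Printf("user: %+v\n", u)

	// Equivalent to fmt.Sprintf(format, spew.NewFormatter(u)).
	msg := spew.Sprintf("user: %#v", u)
	_ = msg
}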
diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS
new file mode 100644
index 0000000..ad1abd4
--- /dev/null
+++ b/vendor/github.com/docker/cli/AUTHORS
@@ -0,0 +1,910 @@
+# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See scripts/docs/generate-authors.sh to make modifications.
+
+A. Lester Buck III
+Aanand Prasad
+Aaron L. Xu
+Aaron Lehmann
+Aaron.L.Xu
+Abdur Rehman
+Abhinandan Prativadi
+Abin Shahab
+Abreto FU
+Ace Tang
+Addam Hardy
+Adolfo Ochagavía
+Adrian Plata
+Adrien Duermael
+Adrien Folie
+Adyanth Hosavalike
+Ahmet Alp Balkan
+Aidan Feldman
+Aidan Hobson Sayers
+AJ Bowen
+Akhil Mohan
+Akihiro Suda
+Akim Demaille
+Alan Thompson
+Alano Terblanche
+Albert Callarisa
+Alberto Roura
+Albin Kerouanton
+Aleksa Sarai
+Aleksander Piotrowski
+Alessandro Boch
+Alex Couture-Beil
+Alex Mavrogiannis
+Alex Mayer
+Alexander Boyd
+Alexander Chneerov
+Alexander Larsson
+Alexander Morozov
+Alexander Ryabov
+Alexandre González
+Alexey Igrychev
+Alexis Couvreur
+Alfred Landrum
+Ali Rostami
+Alicia Lauerman
+Allen Sun
+Alvin Deng
+Amen Belayneh
+Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
+Amir Goldstein
+Amit Krishnan
+Amit Shukla
+Amy Lindburg
+Anca Iordache
+Anda Xu
+Andrea Luzzardi
+Andreas Köhler
+Andres G. Aragoneses
+Andres Leon Rangel
+Andrew France
+Andrew Hsu
+Andrew Macpherson
+Andrew McDonnell
+Andrew Po
+Andrew-Zipperer
+Andrey Petrov
+Andrii Berehuliak
+André Martins
+Andy Goldstein
+Andy Rothfusz
+Anil Madhavapeddy
+Ankush Agarwal
+Anne Henmi
+Anton Polonskiy
+Antonio Murdaca
+Antonis Kalipetis
+Anusha Ragunathan
+Ao Li
+Arash Deshmeh
+Arko Dasgupta
+Arnaud Porterie
+Arnaud Rebillout
+Arthur Peka
+Ashly Mathew
+Ashwini Oruganti
+Aslam Ahemad
+Azat Khuyiyakhmetov
+Bardia Keyoumarsi
+Barnaby Gray
+Bastiaan Bakker
+BastianHofmann
+Ben Bodenmiller
+Ben Bonnefoy
+Ben Creasy
+Ben Firshman
+Benjamin Boudreau
+Benjamin Böhmke
+Benjamin Nater
+Benoit Sigoure
+Bhumika Bayani
+Bill Wang
+Bin Liu
+Bingshen Wang
+Bishal Das
+Bjorn Neergaard
+Boaz Shuster
+Boban Acimovic
+Bogdan Anton
+Boris Pruessmann
+Brad Baker
+Bradley Cicenas
+Brandon Mitchell
+Brandon Philips
+Brent Salisbury
+Bret Fisher
+Brian (bex) Exelbierd
+Brian Goff
+Brian Tracy
+Brian Wieder
+Bruno Sousa
+Bryan Bess
+Bryan Boreham
+Bryan Murphy
+bryfry
+Calvin Liu
+Cameron Spear
+Cao Weiwei
+Carlo Mion
+Carlos Alexandro Becker
+Carlos de Paula
+Casey Korver
+Ce Gao
+Cedric Davies
+Cezar Sa Espinola
+Chad Faragher
+Chao Wang
+Charles Chan
+Charles Law
+Charles Smith
+Charlie Drage
+Charlotte Mach
+ChaYoung You
+Chee Hau Lim
+Chen Chuanliang
+Chen Hanxiao
+Chen Mingjie
+Chen Qiu
+Chris Chinchilla
+Chris Couzens
+Chris Gavin
+Chris Gibson
+Chris McKinnel
+Chris Snow
+Chris Vermilion
+Chris Weyl
+Christian Persson
+Christian Stefanescu
+Christophe Robin
+Christophe Vidal
+Christopher Biscardi
+Christopher Crone
+Christopher Jones
+Christopher Petito <47751006+krissetto@users.noreply.github.com>
+Christopher Petito
+Christopher Svensson
+Christy Norman
+Chun Chen
+Clinton Kitson
+Coenraad Loubser
+Colin Hebert
+Collin Guarino
+Colm Hally
+Comical Derskeal <27731088+derskeal@users.noreply.github.com>
+Conner Crosby
+Corey Farrell
+Corey Quon
+Cory Bennet
+Cory Snider
+Craig Osterhout
+Craig Wilhite
+Cristian Staretu
+Daehyeok Mun
+Dafydd Crosby
+Daisuke Ito
+dalanlan
+Damien Nadé
+Dan Cotora
+Danial Gharib
+Daniel Artine
+Daniel Cassidy
+Daniel Dao
+Daniel Farrell
+Daniel Gasienica
+Daniel Goosen
+Daniel Helfand
+Daniel Hiltgen
+Daniel J Walsh
+Daniel Nephin
+Daniel Norberg
+Daniel Watkins
+Daniel Zhang
+Daniil Nikolenko
+Danny Berger
+Darren Shepherd
+Darren Stahl
+Dattatraya Kumbhar
+Dave Goodchild
+Dave Henderson
+Dave Tucker
+David Alvarez
+David Beitey
+David Calavera
+David Cramer
+David Dooling
+David Gageot
+David Karlsson
+David le Blanc
+David Lechner
+David Scott
+David Sheets
+David Williamson
+David Xia
+David Young
+Deng Guangxing
+Denis Defreyne
+Denis Gladkikh
+Denis Ollier
+Dennis Docter
+dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+Derek McGowan
+Des Preston
+Deshi Xiao
+Dharmit Shah
+Dhawal Yogesh Bhanushali