diff --git a/go.mod b/go.mod index 81c3f4c4e..c2dc64192 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace go.etcd.io/etcd/client/v3 v3.5.16 golang.org/x/crypto v0.27.0 golang.org/x/term v0.24.0 @@ -36,6 +36,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.16.1 k8c.io/machine-controller v1.60.0 + k8c.io/reconciler v0.5.0 k8s.io/api v0.31.1 k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 @@ -99,7 +100,7 @@ require ( github.com/google/btree v1.0.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.0 // indirect diff --git a/go.sum b/go.sum index 35417bc3b..7ae9c8e07 100644 --- a/go.sum +++ b/go.sum @@ -194,8 +194,8 @@ github.com/google/go-github/v65 v65.0.0/go.mod h1:DvrqWo5hvsdhJvHd4WyVF9ttANN3Bn github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea h1:VcIYpAGBae3Z6BVncE0OnTE/ZjlDXqtYhOZky88neLM= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof 
v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -376,8 +376,9 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= @@ -565,6 +566,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8c.io/machine-controller v1.60.0 h1:0ShjXyAnv0hpo59UsV9VFjEfgyG/2XrljBaEUV6JzwM= k8c.io/machine-controller v1.60.0/go.mod h1:j9SHRLpzFj5wOMlhdPJL+ub08P8rvVvQOFtg7JaLYb4= +k8c.io/reconciler v0.5.0 h1:BHpelg1UfI/7oBFctqOq8sX6qzflXpl3SlvHe7e8wak= +k8c.io/reconciler v0.5.0/go.mod h1:pT1+SVcVXJQeBJhpJBXQ5XW64QnKKeYTnVlQf0dGE0k= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= k8s.io/apiextensions-apiserver 
v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= diff --git a/pkg/clientutil/service.go b/pkg/clientutil/service.go new file mode 100644 index 000000000..c3ff5c1d6 --- /dev/null +++ b/pkg/clientutil/service.go @@ -0,0 +1,75 @@ +/* +Copyright 2025 The KubeOne Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientutil + +import ( + "context" + "time" + + "github.com/sirupsen/logrus" + + "k8c.io/kubeone/pkg/fail" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func CleanupLBs(ctx context.Context, logger logrus.FieldLogger, c client.Client) error { + serviceList := &corev1.ServiceList{} + if err := c.List(ctx, serviceList); err != nil { + return fail.KubeClient(err, "listing services") + } + + for _, service := range serviceList.Items { + // This service is already in deletion, nothing further needs to happen. 
+ if service.DeletionTimestamp != nil { + continue + } + logger.Infof("Cleaning up LoadBalancer Services...") + // Only LoadBalancer services incur charges on cloud providers + if service.Spec.Type == corev1.ServiceTypeLoadBalancer { + logger.Debugf("Deleting LoadBalancer Service \"%s/%s\"", service.Namespace, service.Name) + if err := DeleteIfExists(ctx, c, &service); err != nil { + return err + } + } + } + + return nil +} + +func WaitCleanupLbs(ctx context.Context, logger logrus.FieldLogger, c client.Client) error { + logger.Infoln("Waiting for all LoadBalancer Services to get deleted...") + + return wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) { + serviceList := &corev1.ServiceList{} + if err := c.List(ctx, serviceList); err != nil { + logger.Errorf("failed to list services, error: %v", err.Error()) + + return false, nil + } + for _, service := range serviceList.Items { + // Only LoadBalancer services incur charges on cloud providers + if service.Spec.Type == corev1.ServiceTypeLoadBalancer { + return false, nil + } + } + + return true, nil + }) +} diff --git a/pkg/clientutil/volumes.go b/pkg/clientutil/volumes.go new file mode 100644 index 000000000..0506cd1fc --- /dev/null +++ b/pkg/clientutil/volumes.go @@ -0,0 +1,167 @@ +/* +Copyright 2025 The KubeOne Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientutil + +import ( + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + + "k8c.io/kubeone/pkg/fail" + "k8c.io/reconciler/pkg/reconciling" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + annotationKeyDescription = "description" + + // AnnDynamicallyProvisioned is added to a PV that is dynamically provisioned by kubernetes + // Because the annotation is defined only at k8s.io/kubernetes, copying the content instead of vendoring + // https://github.com/kubernetes/kubernetes/blob/v1.21.0/pkg/controller/volume/persistentvolume/util/util.go#L65 + AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" +) + +var VolumeResources = []string{"persistentvolumes", "persistentvolumeclaims"} + +func CleanupUnretainedVolumes(ctx context.Context, logger logrus.FieldLogger, c client.Client) error { + // We disable the PV & PVC creation so nothing creates new PV's while we delete them + logger.Infoln("Creating ValidatingWebhookConfiguration to disable future PV & PVC creation...") + if err := disablePVCreation(ctx, c); err != nil { + return fail.KubeClient(err, "failed to disable future PV & PVC creation.") + } + + pvcList, pvList, err := getDynamicallyProvisionedUnretainedPvs(ctx, c) + if err != nil { + return err + } + + // Do not attempt to delete any pods when there are no PVs and PVCs + if (pvcList != nil && pvList != nil) && len(pvcList.Items) == 0 && len(pvList.Items) == 0 { + return nil + } + + // Delete all Pods that use PVs. 
We must keep the remaining pods, otherwise + // we end up in a deadlock when CSI is used + if err := cleanupPVCUsingPods(ctx, c); err != nil { + return fail.KubeClient(err, "failed to clean up PV using pod from user cluster.") + } + + // Delete PVC's + logger.Infoln("Deleting persistent volume claims...") + for _, pvc := range pvcList.Items { + if pvc.DeletionTimestamp == nil { + identifier := fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name) + logger.Infoln("Deleting PVC...", identifier) + + if err := DeleteIfExists(ctx, c, &pvc); err != nil { + return fail.KubeClient(err, "failed to delete PVC from user cluster.") + } + } + } + + return nil +} + +func disablePVCreation(ctx context.Context, c client.Client) error { + // Prevent re-creation of PVs and PVCs by using an intentionally defunct admissionWebhook + creatorGetters := []reconciling.NamedValidatingWebhookConfigurationReconcilerFactory{ + creationPreventingWebhook("", VolumeResources), + } + if err := reconciling.ReconcileValidatingWebhookConfigurations(ctx, creatorGetters, "", c); err != nil { + return fail.KubeClient(err, "failed to create ValidatingWebhookConfiguration to prevent creation of PVs/PVCs.") + } + + return nil +} + +func cleanupPVCUsingPods(ctx context.Context, c client.Client) error { + podList := &corev1.PodList{} + if err := c.List(ctx, podList); err != nil { + return fail.KubeClient(err, "failed to list Pods from user cluster.") + } + + var pvUsingPods []*corev1.Pod + for idx := range podList.Items { + pod := &podList.Items[idx] + if podUsesPV(pod) { + pvUsingPods = append(pvUsingPods, pod) + } + } + + for _, pod := range pvUsingPods { + if pod.DeletionTimestamp == nil { + if err := DeleteIfExists(ctx, c, pod); err != nil { + return fail.KubeClient(err, "failed to delete Pod.") + } + } + } + + return nil +} + +func podUsesPV(p *corev1.Pod) bool { + for _, volume := range p.Spec.Volumes { + if volume.VolumeSource.PersistentVolumeClaim != nil { + return true + } + } + + return false +} + +func 
getDynamicallyProvisionedUnretainedPvs(ctx context.Context, c client.Client) (*corev1.PersistentVolumeClaimList, *corev1.PersistentVolumeList, error) { + pvcList := &corev1.PersistentVolumeClaimList{} + if err := c.List(ctx, pvcList); err != nil { + return nil, nil, fail.KubeClient(err, "failed to list PVCs from user cluster.") + } + allPVList := &corev1.PersistentVolumeList{} + if err := c.List(ctx, allPVList); err != nil { + return nil, nil, fail.KubeClient(err, "failed to list PVs from user cluster.") + } + pvList := &corev1.PersistentVolumeList{} + for _, pv := range allPVList.Items { + // Check only dynamically provisioned PVs with delete reclaim policy to verify provisioner has done the cleanup + // this filters out everything else because we leave those be + if pv.Annotations[AnnDynamicallyProvisioned] != "" && pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete { + pvList.Items = append(pvList.Items, pv) + } + } + + return pvcList, pvList, nil +} + +func WaitCleanUpVolumes(ctx context.Context, logger logrus.FieldLogger, c client.Client) error { + logger.Infoln("Waiting for all dynamically provisioned and unretained volumes to get deleted...") + + return wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) { + pvcList, pvList, err := getDynamicallyProvisionedUnretainedPvs(ctx, c) + if err != nil { + return false, nil + } + + if (pvcList != nil && pvList != nil) && len(pvcList.Items) == 0 && len(pvList.Items) == 0 { + return true, nil + } + + return false, nil + }) +} diff --git a/pkg/clientutil/webhook.go b/pkg/clientutil/webhook.go new file mode 100644 index 000000000..04ebd85a6 --- /dev/null +++ b/pkg/clientutil/webhook.go @@ -0,0 +1,88 @@ +/* +Copyright 2025 The KubeOne Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientutil + +import ( + "context" + "strings" + + "k8c.io/kubeone/pkg/fail" + "k8c.io/reconciler/pkg/reconciling" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// creationPreventingWebhook returns a ValidatingWebhookConfiguration that is intentionally defunct +// and will prevent all creation requests from succeeding. +func creationPreventingWebhook(apiGroup string, resources []string) reconciling.NamedValidatingWebhookConfigurationReconcilerFactory { + failurePolicy := admissionregistrationv1.Fail + sideEffects := admissionregistrationv1.SideEffectClassNone + + return func() (string, reconciling.ValidatingWebhookConfigurationReconciler) { + return "kubernetes-cluster-cleanup-" + strings.Join(resources, "-"), + func(vwc *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { + if vwc.Annotations == nil { + vwc.Annotations = map[string]string{} + } + vwc.Annotations[annotationKeyDescription] = "This webhook configuration exists to prevent creation of any new stateful resources in a cluster that is currently being terminated" + + // This only gets set when the APIServer supports it, so carry it over + var scope *admissionregistrationv1.ScopeType + if len(vwc.Webhooks) != 1 { + vwc.Webhooks = []admissionregistrationv1.ValidatingWebhook{{}} + } else if len(vwc.Webhooks[0].Rules) > 0 { + scope = vwc.Webhooks[0].Rules[0].Scope + } + // Must be a domain with at least 
three segments separated by dots + vwc.Webhooks[0].Name = "kubernetes.cluster.cleanup" + vwc.Webhooks[0].ClientConfig = admissionregistrationv1.WebhookClientConfig{ + URL: ptr.To("https://127.0.0.1:1"), + } + vwc.Webhooks[0].Rules = []admissionregistrationv1.RuleWithOperations{ + { + Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create}, + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{apiGroup}, + APIVersions: []string{"*"}, + Resources: resources, + Scope: scope, + }, + }, + } + vwc.Webhooks[0].FailurePolicy = &failurePolicy + vwc.Webhooks[0].SideEffects = &sideEffects + vwc.Webhooks[0].AdmissionReviewVersions = []string{"v1"} + + return vwc, nil + } + } +} + +func DeletePreventingWebhook(ctx context.Context, c client.Client, resourceName string) error { + vwc := admissionregistrationv1.ValidatingWebhookConfiguration{} + if err := c.Get(ctx, types.NamespacedName{Name: resourceName}, &vwc); err != nil { + return fail.KubeClient(err, "failed to get ValidatingWebhookConfiguration") + } + if err := DeleteIfExists(ctx, c, &vwc); err != nil { + return err + } + + return nil +} diff --git a/pkg/cmd/reset.go b/pkg/cmd/reset.go index 794083613..51f70668f 100644 --- a/pkg/cmd/reset.go +++ b/pkg/cmd/reset.go @@ -31,9 +31,11 @@ import ( type resetOpts struct { globalOptions - AutoApprove bool `longflag:"auto-approve" shortflag:"y"` - DestroyWorkers bool `longflag:"destroy-workers"` - RemoveBinaries bool `longflag:"remove-binaries"` + AutoApprove bool `longflag:"auto-approve" shortflag:"y"` + DestroyWorkers bool `longflag:"destroy-workers"` + RemoveVolumes bool `longflag:"remove-volumes"` + RemoveLBServices bool `longflag:"remove-lb-services"` + RemoveBinaries bool `longflag:"remove-binaries"` } func (opts *resetOpts) BuildState() (*state.State, error) { @@ -43,6 +45,8 @@ func (opts *resetOpts) BuildState() (*state.State, error) { } s.DestroyWorkers = opts.DestroyWorkers + s.RemoveVolumes = opts.RemoveVolumes + s.RemoveLBServices = 
opts.RemoveLBServices
 	s.RemoveBinaries = opts.RemoveBinaries
 
 	return s, nil
@@ -93,6 +97,18 @@ func resetCmd(rootFlags *pflag.FlagSet) *cobra.Command {
 		false,
 		"remove kubernetes binaries after resetting the cluster")
 
+	cmd.Flags().BoolVar(
+		&opts.RemoveVolumes,
+		longFlagName(opts, "RemoveVolumes"),
+		true,
+		"remove all dynamically provisioned and unretained volumes before resetting the cluster")
+
+	cmd.Flags().BoolVar(
+		&opts.RemoveLBServices,
+		longFlagName(opts, "RemoveLBServices"),
+		true,
+		"remove all load balancer services before resetting the cluster")
+
 	return cmd
 }
 
@@ -103,27 +119,40 @@ func runReset(opts *resetOpts) error {
 		return err
 	}
 
-	if opts.DestroyWorkers {
+	if opts.DestroyWorkers || opts.RemoveVolumes || opts.RemoveLBServices {
 		if cErr := kubeconfig.BuildKubernetesClientset(s); cErr != nil {
 			s.Logger.Errorln("Failed to build the Kubernetes clientset.")
-			s.Logger.Warnln("Unable to list and delete machine-controller managed nodes.")
-			s.Logger.Warnln("You can skip this phase by using '--destroy-workers=false' flag.")
-			s.Logger.Warnln("If there are worker nodes in the cluster, you might have to delete them manually.")
+			if opts.RemoveLBServices {
+				s.Logger.Warnln("Unable to list and delete load balancers in the cluster.")
+				s.Logger.Warnln("You can skip this phase by using '--remove-lb-services=false'.")
+				s.Logger.Warnln("If there are load balancers in the cluster, you might have to delete them manually.")
+			}
+			if opts.RemoveVolumes {
+				s.Logger.Warnln("Unable to list and delete dynamically provisioned and unretained volumes in the cluster.")
+				s.Logger.Warnln("You can skip this phase by using '--remove-volumes=false'.")
+				s.Logger.Warnln("If there are unretained volumes in the cluster, you might have to delete them manually.")
+			}
+			if opts.DestroyWorkers {
+				s.Logger.Warnln("Unable to list and delete machine-controller managed nodes.")
+				s.Logger.Warnln("You can skip this phase by using '--destroy-workers=false' flag.")
+				
s.Logger.Warnln("If there are worker nodes in the cluster, you might have to delete them manually.") + } return cErr } } - s.Logger.Warnln("This command will PERMANENTLY destroy the Kubernetes cluster running on the following nodes:") - - for _, node := range s.Cluster.ControlPlane.Hosts { - fmt.Printf("\t- reset control plane node %q (%s)\n", node.Hostname, node.PrivateAddress) + if opts.RemoveLBServices { + s.Logger.Warnln("remove-lb-services command will PERMANENTLY delete the load balancers from the cluster.") } - for _, node := range s.Cluster.StaticWorkers.Hosts { - fmt.Printf("\t- reset static worker nodes %q (%s)\n", node.Hostname, node.PrivateAddress) + + if opts.RemoveVolumes { + s.Logger.Warnln("remove-volumes command will PERMANENTLY delete the unretained volumes from the cluster.") } if opts.DestroyWorkers { + s.Logger.Warnln("destroy-workers command will PERMANENTLY destroy the Kubernetes cluster running on the following nodes:") + // Gather information about machine-controller managed nodes machines := clusterv1alpha1.MachineList{} if err = s.DynamicClient.List(s.Context, &machines); err != nil { @@ -142,6 +171,13 @@ func runReset(opts *resetOpts) error { s.Logger.Warnln("If there are worker nodes in the cluster, you might have to delete them manually.") } + for _, node := range s.Cluster.ControlPlane.Hosts { + fmt.Printf("\t- reset control plane node %q (%s)\n", node.Hostname, node.PrivateAddress) + } + for _, node := range s.Cluster.StaticWorkers.Hosts { + fmt.Printf("\t- reset static worker nodes %q (%s)\n", node.Hostname, node.PrivateAddress) + } + fmt.Printf("\nAfter the command is complete, there's NO way to recover the cluster or its data!\n") confirm, err := confirmCommand(opts.AutoApprove) diff --git a/pkg/state/context.go b/pkg/state/context.go index b92e2de92..f85290ec9 100644 --- a/pkg/state/context.go +++ b/pkg/state/context.go @@ -107,6 +107,8 @@ type State struct { Verbose bool BackupFile string DestroyWorkers bool + RemoveVolumes bool 
+	RemoveLBServices bool
 	RemoveBinaries   bool
 	ForceUpgrade     bool
 	ForceInstall     bool
diff --git a/pkg/tasks/reset.go b/pkg/tasks/reset.go
index f696f2f9f..a83c4c631 100644
--- a/pkg/tasks/reset.go
+++ b/pkg/tasks/reset.go
@@ -17,6 +17,8 @@ limitations under the License.
 package tasks
 
 import (
+	"strings"
+
 	kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
 	"k8c.io/kubeone/pkg/clientutil"
 	"k8c.io/kubeone/pkg/executor"
@@ -29,6 +31,59 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 )
 
+// RemoveLBServices deletes all load balancer services from the cluster.
+func RemoveLBServices(s *state.State) error {
+	if !s.RemoveLBServices {
+		return nil
+	}
+	var lastErr error
+	s.Logger.Infoln("Deleting LoadBalancer Services...")
+	lastErr = clientutil.CleanupLBs(s.Context, s.Logger, s.DynamicClient)
+	if lastErr != nil {
+		s.Logger.Warn("Unable to delete services of type load balancer.")
+
+		return lastErr
+	}
+
+	lastErr = clientutil.WaitCleanupLbs(s.Context, s.Logger, s.DynamicClient)
+	if lastErr != nil {
+		s.Logger.Warn("Failed waiting for all load balancer services to be deleted.")
+
+		return lastErr
+	}
+
+	return lastErr
+}
+
+// RemoveVolumes deletes all dynamically provisioned and unretained volumes from the cluster.
+func RemoveVolumes(s *state.State) error {
+	if !s.RemoveVolumes {
+		return nil
+	}
+	var lastErr error
+	s.Logger.Infoln("Deleting dynamically provisioned and unretained volumes...")
+	lastErr = clientutil.CleanupUnretainedVolumes(s.Context, s.Logger, s.DynamicClient)
+	if lastErr != nil {
+		s.Logger.Warn("Unable to delete volumes.")
+		s.Logger.Infoln("Deleting ValidatingWebhookConfiguration to enable future PV & PVC creation...")
+		if err := clientutil.DeletePreventingWebhook(s.Context, s.DynamicClient,
+			"kubernetes-cluster-cleanup-"+strings.Join(clientutil.VolumeResources, "-")); err != nil {
+			s.Logger.Warn("Unable to delete ValidatingWebhookConfiguration.")
+		}
+
+		return lastErr
+	}
+
+	lastErr = clientutil.WaitCleanUpVolumes(s.Context, s.Logger, s.DynamicClient)
+	if lastErr != nil {
+		s.Logger.Warn("Failed waiting for all dynamically provisioned and unretained volumes to be deleted.")
+
+		return lastErr
+	}
+
+	return lastErr
+}
+
 func destroyWorkers(s *state.State) error {
 	if !s.DestroyWorkers {
 		return nil
diff --git a/pkg/tasks/tasks.go b/pkg/tasks/tasks.go
index d383de98a..45b5561d7 100644
--- a/pkg/tasks/tasks.go
+++ b/pkg/tasks/tasks.go
@@ -391,6 +391,19 @@ func WithUpgrade(t Tasks, followers ...kubeoneapi.HostConfig) Tasks {
 
 func WithReset(t Tasks) Tasks {
 	return t.append(Tasks{
+		{
+			Fn:        RemoveLBServices,
+			Operation: "remove load balancer services",
+			Predicate: func(s *state.State) bool {
+				return s.RemoveLBServices
+			}},
+		{
+			Fn:        RemoveVolumes,
+			Operation: "remove dynamically provisioned and unretained volumes",
+			Predicate: func(s *state.State) bool {
+				return s.RemoveVolumes
+			},
+		},
 		{Fn: destroyWorkers, Operation: "destroying workers"},
 		{Fn: resetAllNodes, Operation: "resetting all nodes"},
 		{Fn: removeBinariesAllNodes, Operation: "removing kubernetes binaries from nodes"},