From 7b588e537b407f0db3e44adfc569fd82237424fb Mon Sep 17 00:00:00 2001 From: drivebyer Date: Mon, 25 Dec 2023 15:17:38 +0800 Subject: [PATCH 01/12] copy e2e test from setup Signed-off-by: drivebyer --- .../nodeport/redis-cluster/chainsaw-test.yaml | 96 +++++++++ .../nodeport/redis-cluster/cluster.yaml | 47 ++++ .../nodeport/redis-cluster/ready-cluster.yaml | 7 + .../nodeport/redis-cluster/ready-pvc.yaml | 181 ++++++++++++++++ .../nodeport/redis-cluster/ready-sts.yaml | 25 +++ .../nodeport/redis-cluster/ready-svc.yaml | 201 ++++++++++++++++++ 6 files changed, 557 insertions(+) create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-cluster.yaml create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-pvc.yaml create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-sts.yaml create mode 100644 tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml new file mode 100644 index 000000000..9639fac86 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml @@ -0,0 +1,96 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json + +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: redis-cluster-setup +spec: + steps: + - try: + - apply: + file: cluster.yaml + - assert: + file: ready-cluster.yaml + - assert: + file: ready-sts.yaml + - assert: + file: ready-svc.yaml + - assert: + file: ready-pvc.yaml + + - name: Sleep for five minutes + try: + - sleep: + duration: 5m + + - name: Ping Cluster + try: + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 ping + check: + ($stdout=='PONG'): true + + - name: Try saving a key With Password + try: + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 set foo-0 bar-0 + check: + ($stdout=='OK'): true + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader 
redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 set foo-1 bar-1 + check: + ($stdout=='OK'): true + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 set foo-2 bar-2 + check: + ($stdout=='OK'): true + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 set foo-3 bar-3 + check: + ($stdout=='OK'): true + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 set foo-4 bar-4 + check: + ($stdout=='OK'): true + - script: + timeout: 30s + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 set foo-5 bar-5 + check: + ($stdout=='OK'): true \ No newline at end of file diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml new file mode 100644 index 000000000..ecf147104 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml @@ -0,0 +1,47 @@ +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisCluster +metadata: + name: redis-cluster-v1beta2 +spec: + clusterSize: 3 + clusterVersion: v7 + persistenceEnabled: true + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:latest + imagePullPolicy: Always + resources: + requests: + cpu: 101m + memory: 128Mi + limits: + cpu: 101m + memory: 128Mi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.44.0 + imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + # storageClassName: standard + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + nodeConfVolume: true + nodeConfVolumeClaimTemplate: + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-cluster.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-cluster.yaml new file mode 100644 index 000000000..49e754e81 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisCluster +metadata: + name: redis-cluster-v1beta2 +status: + readyFollowerReplicas: 3 + readyLeaderReplicas: 3 diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-pvc.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-pvc.yaml new file mode 100644 index 000000000..a89bfa22a --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-pvc.yaml @@ -0,0 +1,181 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-0 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-1 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: 
+ storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-follower-0 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-follower-1 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-follower-2 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-leader-redis-cluster-v1beta2-leader-0 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-leader-redis-cluster-v1beta2-leader-1 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-leader-redis-cluster-v1beta2-leader-2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-follower-redis-cluster-v1beta2-follower-0 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-follower-redis-cluster-v1beta2-follower-1 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redis-cluster-v1beta2-follower-redis-cluster-v1beta2-follower-2 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-sts.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-sts.yaml new file mode 100644 index 000000000..1053eb784 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-sts.yaml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis-cluster-v1beta2-leader + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + replicas: 3 + readyReplicas: 3 + +--- + 
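+# (chainsaw evaluates assert files such as this one as partial matches:
+# only the fields spelled out here — the labels plus status.replicas and
+# status.readyReplicas — are compared against the live StatefulSets, and
+# every field not listed is ignored.)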
+apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis-cluster-v1beta2-follower + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + replicas: 3 + readyReplicas: 3 diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml new file mode 100644 index 000000000..e8af234a6 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml @@ -0,0 +1,201 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + name: redis-cluster-v1beta2-leader-additional + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + name: redis-cluster-v1beta2-leader + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-exporter + port: 9121 + protocol: TCP + targetPort: 9121 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + name: redis-cluster-v1beta2-leader-headless + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + clusterIP: None + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + name: redis-cluster-v1beta2-follower + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-exporter + port: 9121 + protocol: TCP + targetPort: 9121 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + 
prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + name: redis-cluster-v1beta2-follower-additional + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + redis.opstreelabs.in: "true" + redis.opstreelabs.instance: redis-cluster-v1beta2 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + name: redis-cluster-v1beta2-follower-headless + ownerReferences: + - apiVersion: redis.redis.opstreelabs.in/v1beta2 + controller: true + kind: RedisCluster + name: redis-cluster-v1beta2 +spec: + clusterIP: None + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + type: ClusterIP +status: + loadBalancer: {} \ No newline at end of file From 0bf7a0c75f4fe8851671a425a1fb27c309ddfa59 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 11:07:19 +0800 Subject: [PATCH 02/12] support nodeport cluster Signed-off-by: drivebyer --- controllers/rediscluster_controller.go | 17 ++-- k8sutils/redis-cluster.go | 91 ++++++++++++++++++- k8sutils/services.go | 11 ++- .../nodeport/redis-cluster/chainsaw-test.yaml | 38 +++++++- .../nodeport/redis-cluster/cluster.yaml | 2 + 5 files changed, 143 insertions(+), 16 deletions(-) diff --git a/controllers/rediscluster_controller.go b/controllers/rediscluster_controller.go index 0a501170d..d49230b51 100644 --- a/controllers/rediscluster_controller.go +++ b/controllers/rediscluster_controller.go @@ -116,16 +116,16 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request } } - err = k8sutils.CreateRedisLeader(instance) - if err != nil { - return ctrl.Result{}, err - } if leaderReplicas != 0 { err = k8sutils.CreateRedisLeaderService(instance) if err != nil { return ctrl.Result{}, err } } + err = k8sutils.CreateRedisLeader(instance) + if err != nil { + return ctrl.Result{}, err + } err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "leader", instance.Spec.RedisLeader.PodDisruptionBudget) if err != nil { @@ -149,11 +149,6 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, err } } - - err = k8sutils.CreateRedisFollower(instance) - if err != nil { - return ctrl.Result{}, err - } // if we have followers create their service. 
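	// Note the reordering in this hunk (and in the leader hunk above):
	// Services are now reconciled before their StatefulSets. In NodePort
	// mode, generateRedisClusterContainerParams resolves each pod's
	// per-pod Service to derive the cluster-announce ports, so those
	// Services must already exist when the StatefulSet pods are created.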
if followerReplicas != 0 { err = k8sutils.CreateRedisFollowerService(instance) @@ -161,6 +156,10 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, err } } + err = k8sutils.CreateRedisFollower(instance) + if err != nil { + return ctrl.Result{}, err + } err = k8sutils.ReconcileRedisPodDisruptionBudget(instance, "follower", instance.Spec.RedisFollower.PodDisruptionBudget) if err != nil { return ctrl.Result{}, err diff --git a/k8sutils/redis-cluster.go b/k8sutils/redis-cluster.go index 0962d90ef..9373e290f 100644 --- a/k8sutils/redis-cluster.go +++ b/k8sutils/redis-cluster.go @@ -1,6 +1,11 @@ package k8sutils import ( + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/intstr" + commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api" redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2" "github.com/OT-CONTAINER-KIT/redis-operator/pkg/util" @@ -94,7 +99,7 @@ func generateRedisClusterInitContainerParams(cr *redisv1beta2.RedisCluster) init } // generateRedisClusterContainerParams generates Redis container information -func generateRedisClusterContainerParams(cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe) containerParameters { +func generateRedisClusterContainerParams(cr *redisv1beta2.RedisCluster, securityContext *corev1.SecurityContext, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe, role string) containerParameters { trueProperty := true falseProperty := false containerProp := containerParameters{ @@ -108,6 +113,50 @@ func generateRedisClusterContainerParams(cr *redisv1beta2.RedisCluster, security if cr.Spec.EnvVars != nil { containerProp.EnvVars = cr.Spec.EnvVars } + if cr.Spec.KubernetesConfig.Service != nil && cr.Spec.KubernetesConfig.Service.ServiceType == "NodePort" { + envVars := util.Coalesce(containerProp.EnvVars, &[]corev1.EnvVar{}) + *envVars = append(*envVars, corev1.EnvVar{ + Name: "NODEPORT", + Value: "true", + }) + *envVars = append(*envVars, corev1.EnvVar{ + Name: "HOST_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.hostIP", + }, + }, + }) + + type ports struct { + announcePort int + announceBusPort int + } + nps := map[string]ports{} // pod name to ports + replicas := cr.Spec.GetReplicaCounts(role) + for i := 0; i < int(replicas); i++ { + svc, err := getService(cr.Namespace, cr.ObjectMeta.Name+"-"+role+"-"+strconv.Itoa(i)) + if err != nil { + log.Error(err, "Cannot get service for Redis", "Setup.Type", role) + } else { + nps[svc.Name] = ports{ + announcePort: int(svc.Spec.Ports[0].NodePort), + announceBusPort: int(svc.Spec.Ports[1].NodePort), + } + } + } + for name, np := range nps { + *envVars = append(*envVars, corev1.EnvVar{ + Name: "announce_port_" + strings.ReplaceAll(name, "-", "_"), + Value: strconv.Itoa(np.announcePort), + }) + *envVars = append(*envVars, corev1.EnvVar{ + Name: "announce_bus_port_" + strings.ReplaceAll(name, "-", "_"), + Value: strconv.Itoa(np.announceBusPort), + }) + } + containerProp.EnvVars = envVars + } if cr.Spec.Storage != nil { containerProp.AdditionalVolume = cr.Spec.Storage.VolumeMount.Volume containerProp.AdditionalMountPath = cr.Spec.Storage.VolumeMount.MountPath @@ -223,7 +272,7 @@ func (service RedisClusterSTS) CreateRedisClusterSetup(cr *redisv1beta2.RedisClu generateRedisClusterParams(cr, service.getReplicaCount(cr), service.ExternalConfig, service), redisClusterAsOwner(cr), 
generateRedisClusterInitContainerParams(cr), - generateRedisClusterContainerParams(cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe), + generateRedisClusterContainerParams(cr, service.SecurityContext, service.ReadinessProbe, service.LivenessProbe, service.RedisStateFulType), cr.Spec.Sidecars, ) if err != nil { @@ -268,6 +317,15 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re additionalServiceType := "ClusterIP" if cr.Spec.KubernetesConfig.Service != nil { additionalServiceType = cr.Spec.KubernetesConfig.Service.ServiceType + if additionalServiceType == "NodePort" { + // If NodePort is enabled, we need to create a service for every redis pod. + // Then use --cluster-announce-ip --cluster-announce-port --cluster-announce-bus-port to make cluster. + err = service.createOrUpdateClusterNodePortService(cr) + if err != nil { + logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + return err + } + } } err = CreateOrUpdateService(cr.Namespace, additionalObjectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, additionalServiceType, *cr.Spec.Port) if err != nil { @@ -276,3 +334,32 @@ func (service RedisClusterService) CreateRedisClusterService(cr *redisv1beta2.Re } return nil } + +func (service RedisClusterService) createOrUpdateClusterNodePortService(cr *redisv1beta2.RedisCluster) error { + replicas := cr.Spec.GetReplicaCounts(service.RedisServiceRole) + + for i := 0; i < int(replicas); i++ { + serviceName := cr.ObjectMeta.Name + "-" + service.RedisServiceRole + "-" + strconv.Itoa(i) + logger := serviceLogger(cr.Namespace, serviceName) + labels := getRedisLabels(cr.ObjectMeta.Name+"-"+service.RedisServiceRole, cluster, service.RedisServiceRole, map[string]string{ + "statefulset.kubernetes.io/pod-name": serviceName, + }) + annotations := generateServiceAnots(cr.ObjectMeta, nil, disableMetrics) + objectMetaInfo := generateObjectMetaInformation(serviceName, cr.Namespace, labels, annotations) + busPort := corev1.ServicePort{ + Name: "redis-bus", + Port: int32(*cr.Spec.Port + 10000), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(*cr.Spec.Port + 10000), + }, + } + err := CreateOrUpdateService(cr.Namespace, objectMetaInfo, redisClusterAsOwner(cr), disableMetrics, false, "NodePort", *cr.Spec.Port, busPort) + if err != nil { + logger.Error(err, "Cannot create nodeport service for Redis", "Setup.Type", service.RedisServiceRole) + return err + } + } + return nil +} diff --git a/k8sutils/services.go b/k8sutils/services.go index 30a50027b..593b538b1 100644 --- a/k8sutils/services.go +++ b/k8sutils/services.go @@ -30,7 +30,7 @@ var disableMetrics exporterPortProvider = func() (int, bool) { } // generateServiceDef generates service definition for Redis -func generateServiceDef(serviceMeta metav1.ObjectMeta, epp exporterPortProvider, ownerDef metav1.OwnerReference, headless bool, serviceType string, port int) *corev1.Service { +func generateServiceDef(serviceMeta metav1.ObjectMeta, epp exporterPortProvider, ownerDef metav1.OwnerReference, headless bool, serviceType string, port int, extra ...corev1.ServicePort) *corev1.Service { var PortName string if serviceMeta.Labels["role"] == "sentinel" { PortName = "sentinel-client" @@ -61,6 +61,11 @@ func generateServiceDef(serviceMeta metav1.ObjectMeta, epp exporterPortProvider, redisExporterService := enableMetricsPort(exporterPort) service.Spec.Ports = append(service.Spec.Ports, 
*redisExporterService) } + if len(extra) > 0 { + service.Spec.Ports = append(service.Spec.Ports, extra...) + service.Spec.PublishNotReadyAddresses = true + } + AddOwnerRefToObject(service, ownerDef) return service } @@ -150,9 +155,9 @@ func serviceLogger(namespace string, name string) logr.Logger { } // CreateOrUpdateService method will create or update Redis service -func CreateOrUpdateService(namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int) error { +func CreateOrUpdateService(namespace string, serviceMeta metav1.ObjectMeta, ownerDef metav1.OwnerReference, epp exporterPortProvider, headless bool, serviceType string, port int, extra ...corev1.ServicePort) error { logger := serviceLogger(namespace, serviceMeta.Name) - serviceDef := generateServiceDef(serviceMeta, epp, ownerDef, headless, serviceType, port) + serviceDef := generateServiceDef(serviceMeta, epp, ownerDef, headless, serviceType, port, extra...) storedService, err := getService(namespace, serviceMeta.Name) if err != nil { if errors.IsNotFound(err) { diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml index 9639fac86..12cf780b6 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml @@ -3,10 +3,11 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: - name: redis-cluster-setup + name: redis-cluster-nodeport spec: steps: - - try: + - name: Setup redis cluster use nodeport + try: - apply: file: cluster.yaml - assert: @@ -56,6 +57,39 @@ spec: check: ($stdout=='PONG'): true + - name: Check Cluster + try: + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check $(hostname -i):6379 + check: + ($stdout=='PONG'): true + - name: Try saving a key With Password try: - script: diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml index ecf147104..bbc01b530 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml @@ -10,6 +10,8 @@ spec: runAsUser: 1000 
fsGroup: 1000 kubernetesConfig: + service: + serviceType: NodePort image: quay.io/opstree/redis:latest imagePullPolicy: Always resources: From 61273c232fa02649dc1a5d75de59a3e12c5d1acf Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 14:05:07 +0800 Subject: [PATCH 03/12] setup test check cluster Signed-off-by: drivebyer --- .../setup/redis-cluster/chainsaw-test.yaml | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml index 9639fac86..6b0157772 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml @@ -56,6 +56,40 @@ spec: check: ($stdout=='PONG'): true + - name: Check Cluster + try: + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered.' + check: + ($stdout=='All 16384 slots covered.'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + check: + ($stdout=='All 16384 slots covered.'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + check: + ($stdout=='All 16384 slots covered.'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + check: + ($stdout=='All 16384 slots covered.'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + check: + ($stdout=='All 16384 slots covered.'): true + - script: + content: | + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + check: + ($stdout=='All 16384 slots covered.'): true + + - name: Try saving a key With Password try: - script: From 5a4ef490ac6ef2d6bd09560053330b2ef9fc5cba Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 14:15:28 +0800 Subject: [PATCH 04/12] fix test Signed-off-by: drivebyer --- k8sutils/redis-cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/k8sutils/redis-cluster_test.go b/k8sutils/redis-cluster_test.go index 2e1d1ddb3..e2692951d 100644 --- a/k8sutils/redis-cluster_test.go +++ b/k8sutils/redis-cluster_test.go @@ -427,10 +427,10 @@ func Test_generateRedisClusterContainerParams(t *testing.T) { t.Fatalf("Failed to unmarshal file %s: %v", path, err) } - actualLeaderContainer := generateRedisClusterContainerParams(input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe) + actualLeaderContainer := generateRedisClusterContainerParams(input, input.Spec.RedisLeader.SecurityContext, input.Spec.RedisLeader.ReadinessProbe, input.Spec.RedisLeader.LivenessProbe, "leader") 
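	// The new role argument ("leader"/"follower") tells the generator which
	// replica set's per-pod Services to enumerate when NodePort is enabled.
	// For a pod named redis-cluster-v1beta2-leader-0 whose Service was
	// allocated NodePorts 31000 and 31100 (illustrative values), it emits
	// announce_port_redis_cluster_v1beta2_leader_0=31000 and
	// announce_bus_port_redis_cluster_v1beta2_leader_0=31100, alongside
	// NODEPORT=true and HOST_IP taken from status.hostIP.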
assert.EqualValues(t, expectedLeaderContainer, actualLeaderContainer, "Expected %+v, got %+v", expectedLeaderContainer, actualLeaderContainer) - actualFollowerContainer := generateRedisClusterContainerParams(input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe) + actualFollowerContainer := generateRedisClusterContainerParams(input, input.Spec.RedisFollower.SecurityContext, input.Spec.RedisFollower.ReadinessProbe, input.Spec.RedisFollower.LivenessProbe, "follower") assert.EqualValues(t, expectedFollowerContainer, actualFollowerContainer, "Expected %+v, got %+v", expectedFollowerContainer, actualFollowerContainer) } From cd14d52b18a8b34e8caa336b5ee09902f3d633c7 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 14:26:10 +0800 Subject: [PATCH 05/12] fix ip Signed-off-by: drivebyer --- .../v1beta2/setup/redis-cluster/chainsaw-test.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml index 6b0157772..5fde1c776 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml @@ -60,32 +60,32 @@ spec: try: - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered.' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered.' check: ($stdout=='All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: ($stdout=='All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: ($stdout=='All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: ($stdout=='All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check 
127.0.0.1:6379 | grep 'All 16384 slots covered' check: ($stdout=='All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check $(hostname -i):6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: ($stdout=='All 16384 slots covered.'): true From 49126d255a7ab4e133acc7455fc051f7453cfb94 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 14:39:35 +0800 Subject: [PATCH 06/12] fix check string Signed-off-by: drivebyer --- .../v1beta2/setup/redis-cluster/chainsaw-test.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml index 5fde1c776..f9f8b5130 100644 --- a/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml @@ -62,32 +62,32 @@ spec: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered.' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='All 16384 slots covered.'): true + ($stdout=='[OK] All 16384 slots covered.'): true - name: Try saving a key With Password From 1d7f4bbba7558e388a953f36da827a85ccea5115 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 14:54:01 +0800 Subject: [PATCH 07/12] enable nodeport e2e test Signed-off-by: drivebyer --- .github/workflows/e2e-chainsaw.yml | 1 + .../nodeport/redis-cluster/chainsaw-test.yaml | 24 +++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/e2e-chainsaw.yml 
b/.github/workflows/e2e-chainsaw.yml index 0bf22ebe2..595ceddb0 100644 --- a/.github/workflows/e2e-chainsaw.yml +++ b/.github/workflows/e2e-chainsaw.yml @@ -20,6 +20,7 @@ jobs: - ./tests/e2e-chainsaw/v1beta2/hostnetwork/ - ./tests/e2e-chainsaw/v1beta2/password/ - ./tests/e2e-chainsaw/v1beta2/ha-setup/ + - ./tests/e2e-chainsaw/v1beta2/nodeport/ steps: - name: Checkout code diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml index 12cf780b6..99d714121 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml @@ -61,34 +61,34 @@ spec: try: - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered.' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check $(hostname -i):6379 + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' check: - ($stdout=='PONG'): true + ($stdout=='[OK] All 16384 slots covered.'): true - name: Try saving a key With Password try: From 
7c3c486889ea07c0b4071b755465ea643411d389 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 15:24:30 +0800 Subject: [PATCH 08/12] test: add nodeport service check Signed-off-by: drivebyer --- k8sutils/services.go | 1 - .../nodeport/redis-cluster/ready-svc.yaml | 174 +++++++++++++++++- 2 files changed, 171 insertions(+), 4 deletions(-) diff --git a/k8sutils/services.go b/k8sutils/services.go index 593b538b1..c00e2c0a5 100644 --- a/k8sutils/services.go +++ b/k8sutils/services.go @@ -63,7 +63,6 @@ func generateServiceDef(serviceMeta metav1.ObjectMeta, epp exporterPortProvider, } if len(extra) > 0 { service.Spec.Ports = append(service.Spec.Ports, extra...) - service.Spec.PublishNotReadyAddresses = true } AddOwnerRefToObject(service, ownerDef) diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml index e8af234a6..c56a06bd7 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/ready-svc.yaml @@ -26,7 +26,7 @@ spec: app: redis-cluster-v1beta2-leader redis_setup_type: cluster role: leader - type: ClusterIP + type: NodePort status: loadBalancer: {} --- @@ -163,7 +163,7 @@ spec: app: redis-cluster-v1beta2-follower redis_setup_type: cluster role: follower - type: ClusterIP + type: NodePort status: loadBalancer: {} --- @@ -198,4 +198,172 @@ spec: role: follower type: ClusterIP status: - loadBalancer: {} \ No newline at end of file + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-0 + name: redis-cluster-v1beta2-leader-0 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-0 + type: NodePort +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-1 + name: redis-cluster-v1beta2-leader-1 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-1 + type: NodePort +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-2 + name: redis-cluster-v1beta2-leader-2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-leader-2 + type: NodePort +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: 
redis-cluster-v1beta2-follower-0 + name: redis-cluster-v1beta2-follower-0 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-follower-0 + type: NodePort +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-follower-1 + name: redis-cluster-v1beta2-follower-1 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-follower-1 + type: NodePort +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-follower-2 + name: redis-cluster-v1beta2-follower-2 +spec: + ports: + - name: redis-client + port: 6379 + protocol: TCP + targetPort: 6379 + - name: redis-bus + port: 16379 + protocol: TCP + targetPort: 16379 + selector: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower + statefulset.kubernetes.io/pod-name: redis-cluster-v1beta2-follower-2 + type: NodePort +status: + loadBalancer: {} From efd9ce6d1b12cbcd42d997217ff158c07d6222e0 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 15:41:58 +0800 Subject: [PATCH 09/12] test: test with redis 6 Signed-off-by: drivebyer --- .../e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml index bbc01b530..2f5490bf6 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml @@ -4,7 +4,7 @@ metadata: name: redis-cluster-v1beta2 spec: clusterSize: 3 - clusterVersion: v7 + clusterVersion: v6 persistenceEnabled: true podSecurityContext: runAsUser: 1000 @@ -12,7 +12,7 @@ spec: kubernetesConfig: service: serviceType: NodePort - image: quay.io/opstree/redis:latest + image: ghcr.io/ksmartdata/ot-redis:vv6.2.5-101 imagePullPolicy: Always resources: requests: From d8de27dc744a034be3bcafb9017ce0fb082daa9a Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 19:03:49 +0800 Subject: [PATCH 10/12] test: change version && add REDIS_MAJOR_VERSION env Signed-off-by: drivebyer --- k8sutils/redis-cluster.go | 1 + k8sutils/statefulset.go | 14 ++++++++++++-- k8sutils/statefulset_test.go | 6 +++++- .../v1beta2/nodeport/redis-cluster/cluster.yaml | 2 +- 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/k8sutils/redis-cluster.go b/k8sutils/redis-cluster.go index 9373e290f..92bd9f64c 100644 --- a/k8sutils/redis-cluster.go +++ b/k8sutils/redis-cluster.go @@ -36,6 +36,7 @@ func generateRedisClusterParams(cr *redisv1beta2.RedisCluster, replicas int32, e res := statefulSetParameters{ Replicas: &replicas, ClusterMode: true, + ClusterVersion: cr.Spec.ClusterVersion, NodeConfVolume: 
cr.Spec.Storage.NodeConfVolume, NodeSelector: params.NodeSelector, PodSecurityContext: cr.Spec.PodSecurityContext, diff --git a/k8sutils/statefulset.go b/k8sutils/statefulset.go index 750498464..5b216cca2 100644 --- a/k8sutils/statefulset.go +++ b/k8sutils/statefulset.go @@ -32,6 +32,7 @@ const ( type statefulSetParameters struct { Replicas *int32 ClusterMode bool + ClusterVersion *string NodeConfVolume bool NodeSelector map[string]string PodSecurityContext *corev1.PodSecurityContext @@ -240,6 +241,7 @@ func generateStatefulSetsDef(stsMeta metav1.ObjectMeta, params statefulSetParame params.NodeConfVolume, params.EnableMetrics, params.ExternalConfig, + params.ClusterVersion, containerParams.AdditionalMountPath, sidecars, ), @@ -344,7 +346,7 @@ func createPVCTemplate(volumeName string, stsMeta metav1.ObjectMeta, storageSpec } // generateContainerDef generates container definition for Redis -func generateContainerDef(name string, containerParams containerParameters, clusterMode, nodeConfVolume, enableMetrics bool, externalConfig *string, mountpath []corev1.VolumeMount, sidecars []redisv1beta2.Sidecar) []corev1.Container { +func generateContainerDef(name string, containerParams containerParameters, clusterMode, nodeConfVolume, enableMetrics bool, externalConfig, clusterVersion *string, mountpath []corev1.VolumeMount, sidecars []redisv1beta2.Sidecar) []corev1.Container { containerDefinition := []corev1.Container{ { Name: name, @@ -361,6 +363,7 @@ func generateContainerDef(name string, containerParams containerParameters, clus containerParams.ACLConfig, containerParams.EnvVars, containerParams.Port, + clusterVersion, ), ReadinessProbe: getProbeInfo(containerParams.ReadinessProbe), LivenessProbe: getProbeInfo(containerParams.LivenessProbe), @@ -588,12 +591,19 @@ func getProbeInfo(probe *commonapi.Probe) *corev1.Probe { // getEnvironmentVariables returns all the required Environment Variables func getEnvironmentVariables(role string, enabledPassword *bool, secretName *string, secretKey *string, persistenceEnabled *bool, tlsConfig *redisv1beta2.TLSConfig, - aclConfig *redisv1beta2.ACLConfig, envVar *[]corev1.EnvVar, port *int) []corev1.EnvVar { + aclConfig *redisv1beta2.ACLConfig, envVar *[]corev1.EnvVar, port *int, clusterVersion *string) []corev1.EnvVar { envVars := []corev1.EnvVar{ {Name: "SERVER_MODE", Value: role}, {Name: "SETUP_MODE", Value: role}, } + if clusterVersion != nil { + envVars = append(envVars, corev1.EnvVar{ + Name: "REDIS_MAJOR_VERSION", + Value: *clusterVersion, + }) + } + var redisHost string if role == "sentinel" { redisHost = "redis://localhost:" + strconv.Itoa(sentinelPort) diff --git a/k8sutils/statefulset_test.go b/k8sutils/statefulset_test.go index 925c3f1a3..879c3242e 100644 --- a/k8sutils/statefulset_test.go +++ b/k8sutils/statefulset_test.go @@ -230,6 +230,7 @@ func TestGetEnvironmentVariables(t *testing.T) { aclConfig *redisv1beta2.ACLConfig envVar *[]corev1.EnvVar port *int + clusterVersion *string expectedEnvironment []corev1.EnvVar }{ { @@ -257,6 +258,7 @@ func TestGetEnvironmentVariables(t *testing.T) { envVar: &[]corev1.EnvVar{ {Name: "TEST_ENV", Value: "test-value"}, }, + clusterVersion: pointer.String("v6"), expectedEnvironment: []corev1.EnvVar{ {Name: "ACL_MODE", Value: "true"}, {Name: "PERSISTENCE_ENABLED", Value: "true"}, @@ -276,6 +278,7 @@ func TestGetEnvironmentVariables(t *testing.T) { {Name: "SERVER_MODE", Value: "sentinel"}, {Name: "SETUP_MODE", Value: "sentinel"}, {Name: "TEST_ENV", Value: "test-value"}, + {Name: "REDIS_MAJOR_VERSION", Value: "v6"}, 
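+		// (assert.ElementsMatch below compares ignoring order, so it does
+		// not matter that getEnvironmentVariables appends
+		// REDIS_MAJOR_VERSION right after SETUP_MODE while it sits last
+		// in this expected list.)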
}, }, { @@ -289,6 +292,7 @@ func TestGetEnvironmentVariables(t *testing.T) { aclConfig: nil, envVar: nil, port: nil, + clusterVersion: nil, expectedEnvironment: []corev1.EnvVar{ {Name: "REDIS_ADDR", Value: "redis://localhost:6379"}, {Name: "SERVER_MODE", Value: "redis"}, @@ -363,7 +367,7 @@ func TestGetEnvironmentVariables(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actualEnvironment := getEnvironmentVariables(tt.role, tt.enabledPassword, tt.secretName, - tt.secretKey, tt.persistenceEnabled, tt.tlsConfig, tt.aclConfig, tt.envVar, tt.port) + tt.secretKey, tt.persistenceEnabled, tt.tlsConfig, tt.aclConfig, tt.envVar, tt.port, tt.clusterVersion) assert.ElementsMatch(t, tt.expectedEnvironment, actualEnvironment) }) diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml index 2f5490bf6..805d02a3c 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/cluster.yaml @@ -12,7 +12,7 @@ spec: kubernetesConfig: service: serviceType: NodePort - image: ghcr.io/ksmartdata/ot-redis:vv6.2.5-101 + image: quay.io/opstree/redis:v6.2.14 imagePullPolicy: Always resources: requests: From d2346a38c044af79ddd145e0680ee1ab6823490f Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 19:06:09 +0800 Subject: [PATCH 11/12] test: fix check syntax Signed-off-by: drivebyer --- .../nodeport/redis-cluster/chainsaw-test.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml index 99d714121..2ed8bbb82 100644 --- a/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml +++ b/tests/e2e-chainsaw/v1beta2/nodeport/redis-cluster/chainsaw-test.yaml @@ -61,34 +61,34 @@ spec: try: - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered.' 
+ kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - script: content: | - kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered' + kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli --cluster check 127.0.0.1:6379 check: - ($stdout=='[OK] All 16384 slots covered.'): true + (contains($stdout, '[OK] All 16384 slots covered.')): true - name: Try saving a key With Password try: From b0001cb07b173f2d1af2cca0153e3baf59b96f67 Mon Sep 17 00:00:00 2001 From: drivebyer Date: Wed, 27 Dec 2023 20:24:25 +0800 Subject: [PATCH 12/12] test: fix env Signed-off-by: drivebyer --- .../v1beta2/hostnetwork/redis-cluster/ready-pod.yaml | 12 ++++++++++++ .../v1beta2/hostnetwork/redis-cluster/ready-pod.yaml | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/tests/e2e-chainsaw/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml b/tests/e2e-chainsaw/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml index e789ed06f..b26382e6d 100644 --- a/tests/e2e-chainsaw/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml +++ b/tests/e2e-chainsaw/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml @@ -13,6 +13,8 @@ spec: value: "true" - name: REDIS_ADDR value: 
"redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -39,6 +41,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -65,6 +69,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -91,6 +97,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -117,6 +125,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -143,6 +153,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE diff --git a/tests/e2e/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml b/tests/e2e/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml index e789ed06f..b26382e6d 100644 --- a/tests/e2e/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml +++ b/tests/e2e/v1beta2/hostnetwork/redis-cluster/ready-pod.yaml @@ -13,6 +13,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -39,6 +41,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -65,6 +69,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -91,6 +97,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -117,6 +125,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE @@ -143,6 +153,8 @@ spec: value: "true" - name: REDIS_ADDR value: "redis://localhost:6379" + - name: REDIS_MAJOR_VERSION + value: "v6" - name: REDIS_PORT value: "6380" - name: SERVER_MODE