Skip to content

Commit

Permalink
add service account and adopt helper changes
Browse files Browse the repository at this point in the history
  • Loading branch information
rmfitzpatrick committed Oct 31, 2023
1 parent 54ce561 commit 5890581
Show file tree
Hide file tree
Showing 6 changed files with 212 additions and 275 deletions.
81 changes: 25 additions & 56 deletions tests/general/discoverymode/k8s_observer_discovery_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ import (
"time"

"github.com/stretchr/testify/require"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand All @@ -49,33 +48,29 @@ func TestK8sObserver(t *testing.T) {
cluster.Create()
cluster.LoadLocalCollectorImageIfNecessary()

namespace, serviceAccount := createNamespaceAndServiceAccount(cluster)

configMap, configMapManifest := configToConfigMapManifest(t, "k8s-otlp-exporter-no-internal-prometheus.yaml", namespace)
sout, serr, err := cluster.Apply(configMapManifest)
tc.Logger.Debug("applying ConfigMap", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)

redisName, redisUID := createRedis(cluster, "target.redis", namespace, serviceAccount)
namespace := manifests.Namespace{Name: "test-namespace"}
serviceAccount := manifests.ServiceAccount{Name: "some.serviceaccount", Namespace: "test-namespace"}
configMap := manifests.ConfigMap{
Name: "collector.config", Namespace: namespace.Name,
Data: configMapData(t, "k8s-otlp-exporter-no-internal-prometheus.yaml"),
}
sout, serr, err := cluster.Apply(manifests.RenderAll(t, namespace, serviceAccount, configMap))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

crManifest, crbManifest := clusterRoleAndBindingManifests(t, namespace, serviceAccount)
sout, serr, err = cluster.Apply(crManifest)
tc.Logger.Debug("applying ClusterRole", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
redisName, redisUID := createRedis(cluster, "target.redis", namespace.Name, serviceAccount.Name)

sout, serr, err = cluster.Apply(crbManifest)
tc.Logger.Debug("applying ClusterRoleBinding", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
clusterRole, clusterRoleBinding := clusterRoleAndBinding(namespace.Name, serviceAccount.Name)
sout, serr, err = cluster.Apply(manifests.RenderAll(t, clusterRole, clusterRoleBinding))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

daemonSet, dsManifest := daemonSetManifest(cluster, namespace, serviceAccount, configMap)
sout, serr, err = cluster.Apply(dsManifest)
tc.Logger.Debug("applying DaemonSet", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
ds := daemonSet(cluster, namespace.Name, serviceAccount.Name, configMap.Name)
sout, serr, err = cluster.Apply(ds.Render(t))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
rPod, err := cluster.Clientset.CoreV1().Pods(namespace).Get(ctx, redisName, metav1.GetOptions{})
rPod, err := cluster.Clientset.CoreV1().Pods(namespace.Name).Get(ctx, redisName, metav1.GetOptions{})
require.NoError(t, err)
tc.Logger.Debug(fmt.Sprintf("redis is: %s\n", rPod.Status.Phase))
return rPod.Status.Phase == corev1.PodRunning
Expand All @@ -85,8 +80,8 @@ func TestK8sObserver(t *testing.T) {
require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", daemonSet),
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", ds.Name),
})
require.NoError(t, err)
if len(dsPods.Items) > 0 {
Expand All @@ -104,7 +99,7 @@ func TestK8sObserver(t *testing.T) {
require.NoError(t, tc.OTLPReceiverSink.AssertAllMetricsReceived(t, *expectedMetrics, 30*time.Second))

stdout, stderr, err := cluster.Kubectl(
"exec", "-n", namespace, collectorPodName, "--", "bash", "-c",
"exec", "-n", namespace.Name, collectorPodName, "--", "bash", "-c",
`SPLUNK_DEBUG_CONFIG_SERVER=false \
SPLUNK_DISCOVERY_EXTENSIONS_host_observer_ENABLED=false \
SPLUNK_DISCOVERY_EXTENSIONS_docker_observer_ENABLED=false \
Expand Down Expand Up @@ -198,28 +193,7 @@ func createRedis(cluster *kubeutils.KindCluster, name, namespace, serviceAccount
return redis.Name, string(redis.UID)
}

func createNamespaceAndServiceAccount(cluster *kubeutils.KindCluster) (string, string) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ns, err := cluster.Clientset.CoreV1().Namespaces().Create(
ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}},
metav1.CreateOptions{},
)
require.NoError(cluster.Testcase, err)
namespace := ns.Name

ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
serviceAccount, err := cluster.Clientset.CoreV1().ServiceAccounts(namespace).Create(
ctx, &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: "some.serviceaccount"},
},
metav1.CreateOptions{})
require.NoError(cluster.Testcase, err)
return namespace, serviceAccount.Name
}

func configToConfigMapManifest(t testing.TB, cfg, namespace string) (name, manifest string) {
func configMapData(t testing.TB, cfg string) string {
config, err := os.ReadFile(filepath.Join(".", "testdata", cfg))
require.NoError(t, err)
configStore := map[string]any{"config": string(config)}
Expand All @@ -232,15 +206,10 @@ func configToConfigMapManifest(t testing.TB, cfg, namespace string) (name, manif

configYaml, err := yaml.Marshal(configStore)
require.NoError(t, err)

cm := manifests.ConfigMap{
Name: "collector.config", Namespace: namespace,
Data: string(configYaml),
}
return cm.Name, cm.Render(t)
return string(configYaml)
}

func clusterRoleAndBindingManifests(t testing.TB, namespace, serviceAccount string) (string, string) {
func clusterRoleAndBinding(namespace, serviceAccount string) (manifests.ClusterRole, manifests.ClusterRoleBinding) {
cr := manifests.ClusterRole{
Name: "cluster-role",
Namespace: namespace,
Expand Down Expand Up @@ -314,10 +283,10 @@ func clusterRoleAndBindingManifests(t testing.TB, namespace, serviceAccount stri
ServiceAccountName: serviceAccount,
}

return cr.Render(t), crb.Render(t)
return cr, crb
}

func daemonSetManifest(cluster *kubeutils.KindCluster, namespace, serviceAccount, configMap string) (name, manifest string) {
func daemonSet(cluster *kubeutils.KindCluster, namespace, serviceAccount, configMap string) manifests.DaemonSet {
splat := strings.Split(cluster.Testcase.OTLPEndpoint, ":")
port := splat[len(splat)-1]
var hostFromContainer string
Expand Down Expand Up @@ -398,5 +367,5 @@ func daemonSetManifest(cluster *kubeutils.KindCluster, namespace, serviceAccount
},
},
}
return ds.Name, ds.Render(cluster.Testcase)
return ds
}
114 changes: 39 additions & 75 deletions tests/receivers/discovery/k8s_observer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ import (
"time"

"github.com/stretchr/testify/require"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand All @@ -49,11 +48,19 @@ func TestDiscoveryReceiverWithK8sObserverProvidesEndpointLogs(t *testing.T) {
cluster.Create()
cluster.LoadLocalCollectorImageIfNecessary()

namespace, serviceAccount := createNamespaceAndServiceAccount(cluster)
namespace := manifests.Namespace{Name: "test-namespace"}
serviceAccount := manifests.ServiceAccount{Name: "some.serviceacount", Namespace: namespace.Name}
configMap := manifests.ConfigMap{
Name: "collector.config", Namespace: namespace.Name,
Data: configMapData(t, "k8s_observer_endpoints_config.yaml"),
}

sout, serr, err := cluster.Apply(manifests.RenderAll(t, namespace, serviceAccount, configMap))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
_, err := cluster.Clientset.CoreV1().Nodes().Create(ctx, &corev1.Node{
_, err = cluster.Clientset.CoreV1().Nodes().Create(ctx, &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"some.annotation": "annotation.value",
Expand All @@ -70,32 +77,19 @@ func TestDiscoveryReceiverWithK8sObserverProvidesEndpointLogs(t *testing.T) {
}, metav1.CreateOptions{})
require.NoError(t, err)

_ = createRedis(cluster, "some.pod", namespace, serviceAccount)
_ = createRedis(cluster, "some.pod", namespace.Name, serviceAccount.Name)

configMap, configMapManifest := configToConfigMapManifest(t, "k8s_observer_endpoints_config.yaml", namespace)
sout, serr, err := cluster.Apply(configMapManifest)
tc.Logger.Debug("applying ConfigMap", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
clusterRole, clusterRoleBinding := clusterRoleAndBinding(namespace.Name, serviceAccount.Name)
ds := daemonSet(cluster, namespace.Name, serviceAccount.Name, configMap.Name)

crManifest, crbManifest := clusterRoleAndBindingManifests(t, namespace, serviceAccount)
sout, serr, err = cluster.Apply(crManifest)
tc.Logger.Debug("applying ClusterRole", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)

sout, serr, err = cluster.Apply(crbManifest)
tc.Logger.Debug("applying ClusterRoleBinding", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)

daemonSet, dsManifest := daemonSetManifest(cluster, namespace, serviceAccount, configMap)
sout, serr, err = cluster.Apply(dsManifest)
tc.Logger.Debug("applying DaemonSet", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
sout, serr, err = cluster.Apply(manifests.RenderAll(t, clusterRole, clusterRoleBinding, ds))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", daemonSet),
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", ds.Name),
})
require.NoError(t, err)
if len(dsPods.Items) > 0 {
Expand Down Expand Up @@ -123,33 +117,28 @@ func TestDiscoveryReceiverWithK8sObserverAndSmartAgentRedisReceiverProvideStatus
cluster.Create()
cluster.LoadLocalCollectorImageIfNecessary()

namespace, serviceAccount := createNamespaceAndServiceAccount(cluster)
namespace := manifests.Namespace{Name: "test-namespace"}
serviceAccount := manifests.ServiceAccount{Name: "some.serviceacount", Namespace: namespace.Name}
configMap := manifests.ConfigMap{
Name: "collector.config", Namespace: namespace.Name,
Data: configMapData(t, "k8s_observer_smart_agent_redis_config.yaml"),
}

configMap, configMapManifest := configToConfigMapManifest(t, "k8s_observer_smart_agent_redis_config.yaml", namespace)
sout, serr, err := cluster.Apply(configMapManifest)
tc.Logger.Debug("applying ConfigMap", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
sout, serr, err := cluster.Apply(manifests.RenderAll(t, namespace, serviceAccount, configMap))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

redis := createRedis(cluster, "target.redis", namespace, serviceAccount)
redis := createRedis(cluster, "target.redis", namespace.Name, serviceAccount.Name)

crManifest, crbManifest := clusterRoleAndBindingManifests(t, namespace, serviceAccount)
sout, serr, err = cluster.Apply(crManifest)
tc.Logger.Debug("applying ClusterRole", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
clusterRole, clusterRoleBinding := clusterRoleAndBinding(namespace.Name, serviceAccount.Name)
ds := daemonSet(cluster, namespace.Name, serviceAccount.Name, configMap.Name)

sout, serr, err = cluster.Apply(crbManifest)
tc.Logger.Debug("applying ClusterRoleBinding", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)

daemonSet, dsManifest := daemonSetManifest(cluster, namespace, serviceAccount, configMap)
sout, serr, err = cluster.Apply(dsManifest)
tc.Logger.Debug("applying DaemonSet", zap.String("stdout", sout.String()), zap.String("stderr", serr.String()))
require.NoError(t, err)
sout, serr, err = cluster.Apply(manifests.RenderAll(t, clusterRole, clusterRoleBinding, ds))
require.NoError(t, err, "stdout: %s, stderr: %s", sout, serr)

require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
rPod, err := cluster.Clientset.CoreV1().Pods(namespace).Get(ctx, redis, metav1.GetOptions{})
rPod, err := cluster.Clientset.CoreV1().Pods(namespace.Name).Get(ctx, redis, metav1.GetOptions{})
require.NoError(t, err)
tc.Logger.Debug(fmt.Sprintf("redis is: %s\n", rPod.Status.Phase))
return rPod.Status.Phase == corev1.PodRunning
Expand All @@ -158,8 +147,8 @@ func TestDiscoveryReceiverWithK8sObserverAndSmartAgentRedisReceiverProvideStatus
require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", daemonSet),
dsPods, err := cluster.Clientset.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("name = %s", ds.Name),
})
require.NoError(t, err)
if len(dsPods.Items) > 0 {
Expand Down Expand Up @@ -208,40 +197,15 @@ func createRedis(cluster *kubeutils.KindCluster, name, namespace, serviceAccount
return redis.Name
}

func createNamespaceAndServiceAccount(cluster *kubeutils.KindCluster) (string, string) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ns, err := cluster.Clientset.CoreV1().Namespaces().Create(
ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}},
metav1.CreateOptions{},
)
require.NoError(cluster.Testcase, err)
namespace := ns.Name

ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
serviceAccount, err := cluster.Clientset.CoreV1().ServiceAccounts(namespace).Create(
ctx, &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: "some.serviceaccount"},
},
metav1.CreateOptions{})
require.NoError(cluster.Testcase, err)
return namespace, serviceAccount.Name
}

func configToConfigMapManifest(t testing.TB, configPath, namespace string) (name, manifest string) {
func configMapData(t testing.TB, configPath string) string {
config, err := os.ReadFile(filepath.Join(".", "testdata", configPath))
configStore := map[string]any{"config": string(config)}
configYaml, err := yaml.Marshal(configStore)
require.NoError(t, err)
cm := manifests.ConfigMap{
Name: "collector.config", Namespace: namespace,
Data: string(configYaml),
}
return cm.Name, cm.Render(t)
return string(configYaml)
}

func clusterRoleAndBindingManifests(t testing.TB, namespace, serviceAccount string) (string, string) {
func clusterRoleAndBinding(namespace, serviceAccount string) (manifests.ClusterRole, manifests.ClusterRoleBinding) {
cr := manifests.ClusterRole{
Name: "cluster-role",
Namespace: namespace,
Expand Down Expand Up @@ -314,10 +278,10 @@ func clusterRoleAndBindingManifests(t testing.TB, namespace, serviceAccount stri
ServiceAccountName: serviceAccount,
}

return cr.Render(t), crb.Render(t)
return cr, crb
}

func daemonSetManifest(cluster *kubeutils.KindCluster, namespace, serviceAccount, configMap string) (name, manifest string) {
func daemonSet(cluster *kubeutils.KindCluster, namespace, serviceAccount, configMap string) manifests.DaemonSet {
splat := strings.Split(cluster.Testcase.OTLPEndpoint, ":")
port := splat[len(splat)-1]
var hostFromContainer string
Expand Down Expand Up @@ -368,5 +332,5 @@ func daemonSetManifest(cluster *kubeutils.KindCluster, namespace, serviceAccount
},
},
}
return ds.Name, ds.Render(cluster.Testcase)
return ds
}
Loading

0 comments on commit 5890581

Please sign in to comment.