diff --git a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java index 203d10d1d..723f5f04c 100644 --- a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java +++ b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java @@ -76,39 +76,39 @@ public static class Args { private boolean help; @Parameter(required = true, - names = { "--snapshot-name" }, + names = { "--snapshot-name", "--snapshotName" }, description = "The name of the snapshot to migrate") public String snapshotName; @Parameter(required = false, - names = { "--snapshot-local-dir" }, + names = { "--snapshot-local-dir", "--snapshotLocalDir" }, description = ("The absolute path to the directory on local disk where the snapshot exists. " + - "Use this parameter if have a copy of the snapshot disk. Mutually exclusive with " + + "Use this parameter if there is a reachable copy of the snapshot on disk. Mutually exclusive with " + "--s3-local-dir, --s3-repo-uri, and --s3-region.")) public String snapshotLocalDir = null; @Parameter(required = false, - names = { "--s3-local-dir" }, + names = { "--s3-local-dir", "--s3LocalDir" }, description = ("The absolute path to the directory on local disk to download S3 files to. " + "If you supply this, you must also supply --s3-repo-uri and --s3-region. " + "Mutually exclusive with --snapshot-local-dir.")) public String s3LocalDir = null; @Parameter(required = false, - names = {"--s3-repo-uri" }, + names = {"--s3-repo-uri", "--s3RepoUri" }, description = ("The S3 URI of the snapshot repo, like: s3://my-bucket/dir1/dir2. " + "If you supply this, you must also supply --s3-local-dir and --s3-region. " + "Mutually exclusive with --snapshot-local-dir.")) public String s3RepoUri = null; @Parameter(required = false, - names = { "--s3-region" }, + names = { "--s3-region", "--s3Region" }, description = ("The AWS Region the S3 bucket is in, like: us-east-2. If you supply this, you must" + " also supply --s3-local-dir and --s3-repo-uri. Mutually exclusive with --snapshot-local-dir.")) public String s3Region = null; @Parameter(required = true, - names = { "--lucene-dir" }, + names = { "--lucene-dir", "--luceneDir" }, description = "The absolute path to the directory where we'll put the Lucene docs") public String luceneDir; @@ -116,20 +116,20 @@ public static class Args { public ConnectionContext.TargetArgs targetArgs = new ConnectionContext.TargetArgs(); @Parameter(required = false, - names = { "--index-allowlist" }, + names = { "--index-allowlist", "--indexAllowlist" }, description = ("Optional. List of index names to migrate (e.g. 'logs_2024_01, logs_2024_02'). " + "Default: all non-system indices (e.g. those not starting with '.')")) public List indexAllowlist = List.of(); @Parameter(required = false, - names = { "--max-shard-size-bytes" }, + names = { "--max-shard-size-bytes", "--maxShardSizeBytes" }, description = ("Optional. The maximum shard size, in bytes, to allow when " + "performing the document migration. " + "Useful for preventing disk overflow. Default: 80 * 1024 * 1024 * 1024 (80 GB)")) public long maxShardSizeBytes = 80 * 1024 * 1024 * 1024L; @Parameter(required = false, - names = { "--initial-lease-duration" }, + names = { "--initial-lease-duration", "--initialLeaseDuration" }, converter = DurationConverter.class, description = "Optional. 
The time that the first attempt to migrate a shard's documents should take. " + "If a process takes longer than this the process will terminate, allowing another process to " + @@ -137,32 +137,32 @@ public static class Args { public Duration initialLeaseDuration = Duration.ofMinutes(10); @Parameter(required = false, - names = { "--otel-collector-endpoint" }, + names = { "--otel-collector-endpoint", "--otelCollectorEndpoint" }, arity = 1, description = "Endpoint (host:port) for the OpenTelemetry Collector to which metrics logs should be" + "forwarded. If no value is provided, metrics will not be forwarded.") String otelCollectorEndpoint; @Parameter(required = false, - names = "--documents-per-bulk-request", + names = {"--documents-per-bulk-request", "--documentsPerBulkRequest"}, description = "Optional. The number of documents to be included within each bulk request sent. " + "Default no max (controlled by documents size)") int numDocsPerBulkRequest = Integer.MAX_VALUE; @Parameter(required = false, - names = "--documents-size-per-bulk-request", + names = { "--documents-size-per-bulk-request", "--documentsSizePerBulkRequest" }, description = "Optional. The maximum aggregate document size to be used in bulk requests in bytes. " + "Note does not apply to single document requests. Default 10 MiB") long numBytesPerBulkRequest = 10 * 1024L * 1024L; @Parameter(required = false, - names = "--max-connections", + names = {"--max-connections", "--maxConnections" }, description = "Optional. The maximum number of connections to simultaneously " + "used to communicate to the target, default 10") int maxConnections = 10; @Parameter(required = true, - names = { "--source-version" }, + names = { "--source-version", "--sourceVersion" }, converter = VersionConverter.class, description = ("Version of the source cluster.")) public Version sourceVersion = Version.fromString("ES 7.10"); @@ -239,7 +239,8 @@ public static void validateArgs(Args args) { public static void main(String[] args) throws Exception { // TODO: Add back arg printing after not consuming plaintext password MIGRATIONS-1915 var workerId = ProcessHelpers.getNodeInstanceName(); - log.info("Starting RfsMigrateDocuments with workerId =" + workerId); + System.err.println("Starting program with: " + String.join(" ", args)); + log.info("Starting RfsMigrateDocuments with workerId=" + workerId); Args arguments = new Args(); JCommander jCommander = JCommander.newBuilder().addObject(arguments).build(); diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/ConnectionContext.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/ConnectionContext.java index 59bc0a805..523e0d3c7 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/ConnectionContext.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/ConnectionContext.java @@ -103,28 +103,41 @@ default ConnectionContext toConnectionContext() { @Getter public static class TargetArgs implements IParams { - @Parameter(names = { - "--target-host" }, description = "The target host and port (e.g. http://localhost:9200)", required = true) + @Parameter( + names = {"--target-host", "--targetHost" }, + description = "The target host and port (e.g. http://localhost:9200)", + required = true) public String host; - @Parameter(names = { - "--target-username" }, description = "Optional. 
The target username; if not provided, will assume no auth on target", required = false) + @Parameter( + names = {"--target-username", "--targetUsername" }, + description = "Optional. The target username; if not provided, will assume no auth on target", + required = false) public String username = null; - @Parameter(names = { - "--target-password" }, description = "Optional. The target password; if not provided, will assume no auth on target", required = false) + @Parameter( + names = {"--target-password", "--targetPassword" }, + description = "Optional. The target password; if not provided, will assume no auth on target", + required = false) public String password = null; - @Parameter(names = { - "--target-aws-region" }, description = "Optional. The target aws region. Required only if sigv4 auth is used", required = false) + @Parameter( + names = {"--target-aws-region", "--targetAwsRegion" }, + description = "Optional. The target aws region. Required only if sigv4 auth is used", + required = false) public String awsRegion = null; - @Parameter(names = { - "--target-aws-service-signing-name" }, description = "Optional. The target aws service signing name, e.g 'es' for Amazon OpenSearch Service and 'aoss' for Amazon OpenSearch Serverless. Required if sigv4 auth is used.", required = false) + @Parameter( + names = {"--target-aws-service-signing-name", "--targetAwsServiceSigningName" }, + description = "Optional. The target aws service signing name, e.g 'es' for " + + "Amazon OpenSearch Service and 'aoss' for Amazon OpenSearch Serverless. " + + "Required if sigv4 auth is used.", + required = false) public String awsServiceSigningName = null; - @Parameter(names = { - "--target-insecure" }, description = "Allow untrusted SSL certificates for target", required = false) + @Parameter( + names = { "--target-insecure", "--targetInsecure" }, + description = "Allow untrusted SSL certificates for target", required = false) public boolean insecure = false; @ParametersDelegate @@ -139,35 +152,50 @@ public boolean isCompressionEnabled() { // Flags that require more testing and validation before recommendations are made @Getter public static class TargetAdvancedArgs { - @Parameter(names = { - "--target-compression" }, description = "**Advanced**. Allow request compression to target", required = false) + @Parameter(names = {"--target-compression", "--targetCompression" }, + description = "**Advanced**. Allow request compression to target", + required = false) public boolean compressionEnabled = false; } @Getter public static class SourceArgs implements IParams { - @Parameter(names = { - "--source-host" }, description = "The source host and port (e.g. http://localhost:9200)", required = false) + @Parameter( + names = {"--source-host", "--sourceHost" }, + description = "The source host and port (e.g. 
http://localhost:9200)", + required = false) public String host = null; - @Parameter(names = { - "--source-username" }, description = "The source username; if not provided, will assume no auth on source", required = false) + @Parameter( + names = {"--source-username", "--sourceUsername" }, + description = "The source username; if not provided, will assume no auth on source", + required = false) public String username = null; - @Parameter(names = { - "--source-password" }, description = "The source password; if not provided, will assume no auth on source", required = false) + @Parameter( + names = {"--source-password", "--sourcePassword" }, + description = "The source password; if not provided, will assume no auth on source", + required = false) public String password = null; - @Parameter(names = { - "--source-aws-region" }, description = "Optional. The source aws region, e.g. 'us-east-1'. Required if sigv4 auth is used", required = false) + @Parameter( + names = {"--source-aws-region", "--sourceAwsRegion" }, + description = "Optional. The source aws region, e.g. 'us-east-1'. Required if sigv4 auth is used", + required = false) public String awsRegion = null; - @Parameter(names = { - "--source-aws-service-signing-name" }, description = "Optional. The source aws service signing name, e.g 'es' for Amazon OpenSearch Service and 'aoss' for Amazon OpenSearch Serverless. Required if sigv4 auth is used.", required = false) + @Parameter( + names = {"--source-aws-service-signing-name", "--sourceAwsServiceSigningName" }, + description = "Optional. The source aws service signing name, e.g 'es' for " + + "Amazon OpenSearch Service and 'aoss' for Amazon OpenSearch Serverless. " + + "Required if sigv4 auth is used.", + required = false) public String awsServiceSigningName = null; - @Parameter(names = { - "--source-insecure" }, description = "Allow untrusted SSL certificates for source", required = false) + @Parameter( + names = {"--source-insecure", "--sourceInsecure" }, + description = "Allow untrusted SSL certificates for source", + required = false) public boolean insecure = false; public boolean isCompressionEnabled() { diff --git a/TrafficCapture/dockerSolution/build.gradle b/TrafficCapture/dockerSolution/build.gradle index c8d9c095a..4c708413b 100644 --- a/TrafficCapture/dockerSolution/build.gradle +++ b/TrafficCapture/dockerSolution/build.gradle @@ -18,6 +18,7 @@ dependencies { def dockerFilesForExternalServices = [ "elasticsearch_searchguard": "elasticsearchWithSearchGuard", "capture_proxy_base": "captureProxyBase", + "k8s_config_map_util_scripts": "k8sConfigMapUtilScripts", "elasticsearch_client_test_console": "elasticsearchTestConsole", "migration_console": "migrationConsole", "otel_collector": "otelCollector", @@ -125,6 +126,7 @@ dockerCompose { } task buildDockerImages { + dependsOn buildDockerImage_k8s_config_map_util_scripts dependsOn buildDockerImage_elasticsearch_searchguard dependsOn buildDockerImage_migration_console dependsOn buildDockerImage_otel_collector diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Dockerfile b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Dockerfile new file mode 100644 index 000000000..104b7c92b --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Dockerfile @@ -0,0 +1,39 @@ +FROM amazonlinux:2023 + +ENV PIP_ROOT_USER_ACTION ignore +ENV LANG C.UTF-8 + +RUN dnf install -y \ + jq \ + less \ + python3.11 \ + python3.11-devel \ + python3.11-pip \ + python3.11-wheel \ + 
tar \ + unzip \ + vim \ + wget \ + && \ + dnf clean all && \ + rm -rf /var/cache/dnf + +# Define the virtual environment path to use for all pipenv runs +ENV WORKON_HOME=/ +ENV PIPENV_CUSTOM_VENV_NAME=.venv +ENV PIPENV_DEFAULT_PYTHON_VERSION=3.11 +ENV PIPENV_MAX_DEPTH=1 + +RUN python3.11 -m pip install pipenv +WORKDIR / +RUN python3.11 -m venv .venv + +WORKDIR /root +COPY Pipfile . +COPY Pipfile.lock . +RUN pipenv install --deploy + +COPY configmap2yaml/* /root/ +RUN chmod ug+x /root/*.py + +ENTRYPOINT ["tail", "-f", "/dev/null"] diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile new file mode 100644 index 000000000..f7fc5e3a8 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile @@ -0,0 +1,14 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +kubernetes = ">=30.1.0" +pyyaml = ">=6.0.2" +Jinja2 = ">=3.1.4" + +[dev-packages] + +[requires] +python_version = "3.11" diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile.lock b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile.lock new file mode 100644 index 000000000..a9d1630da --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/Pipfile.lock @@ -0,0 +1,396 @@ +{ + "_meta": { + "hash": { + "sha256": "1a8ef45dec4b21bca87e48a7c03f16d6b7c79bf8955bb0e2812d4af2dc5a8f10" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.11" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "cachetools": { + "hashes": [ + "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", + "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a" + ], + "markers": "python_version >= '3.7'", + "version": "==5.5.0" + }, + "certifi": { + "hashes": [ + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.8.30" + }, + "charset-normalizer": { + "hashes": [ + "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621", + "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", + "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", + "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", + "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", + "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", + "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", + "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", + "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", + "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", + "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", + "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", + "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab", + "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", + "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", + "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", + 
"sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", + "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", + "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62", + "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", + "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", + "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", + "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", + "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", + "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455", + "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858", + "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", + "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", + "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", + "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", + "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", + "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea", + "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", + "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", + "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", + "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", + "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", + "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", + "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", + "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee", + "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", + "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", + "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51", + "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", + "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8", + "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", + "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613", + "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", + "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", + "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", + "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", + "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", + "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", + "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", + "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", + "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", + "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417", + "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", + "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", + "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", + "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", + "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", + 
"sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149", + "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41", + "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574", + "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", + "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f", + "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", + "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654", + "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", + "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19", + "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", + "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578", + "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", + "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", + "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51", + "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", + "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", + "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", + "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", + "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade", + "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", + "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", + "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6", + "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", + "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", + "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6", + "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2", + "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12", + "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf", + "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", + "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7", + "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", + "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", + "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", + "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", + "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", + "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4", + "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", + "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", + "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", + "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748", + "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", + "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", + "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.4.0" + }, + "durationpy": { + "hashes": [ + "sha256:e65359a7af5cedad07fb77a2dd3f390f8eb0b74cb845589fa6c057086834dd38", + 
"sha256:fd3feb0a69a0057d582ef643c355c40d2fa1c942191f914d12203b1a01ac722a" + ], + "version": "==0.9" + }, + "google-auth": { + "hashes": [ + "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f", + "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a" + ], + "markers": "python_version >= '3.7'", + "version": "==2.35.0" + }, + "idna": { + "hashes": [ + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" + ], + "markers": "python_version >= '3.6'", + "version": "==3.10" + }, + "jinja2": { + "hashes": [ + "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", + "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.4" + }, + "kubernetes": { + "hashes": [ + "sha256:28945de906c8c259c1ebe62703b56a03b714049372196f854105afe4e6d014c0", + "sha256:bf141e2d380c8520eada8b351f4e319ffee9636328c137aa432bc486ca1200e1" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==31.0.0" + }, + "markupsafe": { + "hashes": [ + "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", + "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", + "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", + "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", + "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", + "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", + "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", + "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", + "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", + "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", + "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", + "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", + "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", + "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", + "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", + "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", + "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", + "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", + "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", + "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", + "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", + "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", + "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", + "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", + "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", + "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", + "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", + "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", + "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", + "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", + 
"sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", + "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", + "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", + "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", + "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", + "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", + "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", + "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", + "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", + "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", + "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", + "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", + "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", + "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", + "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", + "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", + "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", + "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", + "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", + "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", + "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", + "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", + "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", + "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", + "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", + "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", + "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", + "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", + "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", + "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", + "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50" + ], + "markers": "python_version >= '3.9'", + "version": "==3.0.2" + }, + "oauthlib": { + "hashes": [ + "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", + "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918" + ], + "markers": "python_version >= '3.6'", + "version": "==3.2.2" + }, + "pyasn1": { + "hashes": [ + "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", + "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" + ], + "markers": "python_version >= '3.8'", + "version": "==0.6.1" + }, + "pyasn1-modules": { + "hashes": [ + "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", + "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c" + ], + "markers": "python_version >= '3.8'", + "version": "==0.4.1" + }, + "python-dateutil": { + "hashes": [ + "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", + "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==2.9.0.post0" + }, + "pyyaml": { 
+ "hashes": [ + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + 
"sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", + "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==6.0.2" + }, + "requests": { + "hashes": [ + "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" + ], + "markers": "python_version >= '3.8'", + "version": "==2.32.3" + }, + "requests-oauthlib": { + "hashes": [ + "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", + "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9" + ], + "markers": "python_version >= '3.4'", + "version": "==2.0.0" + }, + "rsa": { + "hashes": [ + "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", + "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21" + ], + "markers": "python_version >= '3.6' and python_version < '4'", + "version": "==4.9" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==1.16.0" + }, + "urllib3": { + "hashes": [ + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.3" + }, + "websocket-client": { + "hashes": [ + "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", + "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da" + ], + "markers": "python_version >= '3.8'", + "version": "==1.8.0" + } + }, + "develop": {} +} diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/config_watcher.py b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/config_watcher.py new file mode 100644 index 000000000..90902ffa0 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/config_watcher.py @@ -0,0 +1,150 @@ +import argparse +from format_services_yaml import YAMLTemplateConverter +from io import StringIO +from kubernetes import client, config, watch +import logging +import os +import signal +import tempfile +from typing import Dict, Any +import sys +import yaml + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class ConfigMapWatcher: + def __init__(self, label_selector: str, namespace: str, output_file: str): + self.label_selector = label_selector + self.namespace = namespace + self.output_file = output_file + self.current_data: Dict[str, Any] = {} + self.formatter = YAMLTemplateConverter() + + # Validate output file path + output_dir = os.path.dirname(output_file) + if not os.path.exists(output_dir): + raise ValueError(f"Output directory does not exist: 
{output_dir}") + if not os.access(output_dir, os.W_OK): + raise ValueError(f"Output directory is not writable: {output_dir}") + + try: + config.load_incluster_config() + except config.ConfigException: + logger.warning("Unable to load in-cluster config, falling back to local kubeconfig") + config.load_kube_config() + + self.v1 = client.CoreV1Api() + + def update_yaml_file(self) -> None: + """Update the output YAML file with new ConfigMap data""" + try: + # Create a temporary file in the same directory as the target file + output_dir = os.path.dirname(self.output_file) + with tempfile.NamedTemporaryFile(mode='w', dir=output_dir, delete=False) as temp_file: + YAMLTemplateConverter().convert(StringIO(yaml.safe_dump(self.current_data)), temp_file) + temp_file.flush() + os.fsync(temp_file.fileno()) # Ensure all data is written to disk + + # Atomic rename + os.rename(temp_file.name, self.output_file) + logger.info(f"Updated {self.output_file} with new configuration") + except Exception as e: + logger.error(f"Error updating YAML file: {e}") + # Clean up temporary file if it exists + if 'temp_file' in locals(): + try: + os.unlink(temp_file.name) + except OSError: + pass + raise + + def watch_configmaps(self) -> None: + """Watch ConfigMaps for changes and write the contents at startup and upon an configMap changes""" + w = watch.Watch() + + # First, get existing ConfigMaps + logger.info(f"Loading existing ConfigMaps for {self.namespace} and {self.label_selector}") + existing_configmaps = self.v1.list_namespaced_config_map( + namespace=self.namespace, + label_selector=self.label_selector + ) + logger.info(f"Got configmaps: {existing_configmaps}") + for configmap in existing_configmaps.items: + logger.info(f"configmap={configmap}") + self.current_data[configmap.metadata.name] = configmap.data if configmap.data else {} + + self.update_yaml_file() + + # Then watch for changes + try: + for event in w.stream( + self.v1.list_namespaced_config_map, + namespace=self.namespace, + label_selector=self.label_selector + ): + configmap = event['object'] + event_type = event['type'] + + if event_type in ['ADDED', 'MODIFIED']: + self.current_data[configmap.metadata.name] = configmap.data if configmap.data else {} + elif event_type == 'DELETED': + name = configmap.metadata.name + if name in self.current_data: + logger.info(f"Removing ConfigMap: {name}") + del self.current_data[name] + + self.update_yaml_file() + + except Exception as e: + logger.error(f"Error watching ConfigMaps: {e}") + raise + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Watch Kubernetes ConfigMaps and update a YAML file' + ) + parser.add_argument( + '--outfile', + required=True, + help='Path to output YAML file (required)' + ) + parser.add_argument( + '--label-selector', + default=os.getenv('LABEL_SELECTOR', ''), + help='Label selector for ConfigMaps' + ) + parser.add_argument( + '--namespace', + default=os.getenv('NAMESPACE', 'default'), + help='Kubernetes namespace (default: default)' + ) + return parser.parse_args() + + +def sigterm_handler(signum, frame): + # Clean exit without traceback + sys.exit(0) + + +if __name__ == "__main__": + args = parse_args() + + # Register the signal handler + try: + signal.signal(signal.SIGTERM, sigterm_handler) + watcher = ConfigMapWatcher( + label_selector=args.label_selector, + namespace=args.namespace, + output_file=args.outfile + ) + watcher.watch_configmaps() + except KeyboardInterrupt: + # Handle Ctrl+C cleanly too + sys.exit(0) + except Exception as e: + logger.error(f"Fatal 
error: {e}") + sys.exit(1) diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/extract_yaml_path.py b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/extract_yaml_path.py new file mode 100644 index 000000000..79c627a7a --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/extract_yaml_path.py @@ -0,0 +1,27 @@ +#!/usr/bin/python3 +import yaml +import sys + + +# Parse YAML and extract value using dot notation +def yaml_extract(yaml_str, path): + data = yaml.safe_load(yaml_str) + keys = path.split('.') + result = data + for key in keys: + result = result[key] + return result + + +if __name__ == "__main__": + # Parse args like yq does: script.py '.foo.bar' file.yaml + path = sys.argv[1] + if len(sys.argv) > 2: + # Read from file + with open(sys.argv[2]) as f: + yaml_str = f.read() + else: + # Read from stdin + yaml_str = sys.stdin.read() + + print(yaml_extract(yaml_str, path.lstrip('.'))) diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/format_services_yaml.py b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/format_services_yaml.py new file mode 100644 index 000000000..534ee59cd --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/format_services_yaml.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +import sys +import os +import yaml +from jinja2 import Environment, FileSystemLoader + + +def to_yaml_filter(value): + """Custom filter to convert value to YAML format.""" + if value is None: + return '' + return yaml.dump(value, default_flow_style=False).rstrip() + + +def pop_value(dictionary, key, default=None): + """Remove and return a value from a nested dictionary using dot notation.""" + keys = key.split('.') + current = dictionary + + # Navigate to the parent of the target key + for k in keys[:-1]: + if k not in current: + return default + current = current[k] + + # Pop the final key + return current.pop(keys[-1], default) + + +class YAMLTemplateConverter: + def __init__(self, template_dir='.', template_file='migration_services.yaml.j2'): + """ + Initialize the converter with template directory and file. + + Args: + template_dir (str): Directory containing the template files + template_file (str): Name of the template file + """ + self.template_dir = template_dir + self.template_file = template_file + + def convert(self, inStream, outStream): + # Read YAML from stdin + values = yaml.safe_load(inStream) + + # Setup Jinja2 environment + env = Environment(loader=FileSystemLoader(self.template_dir)) + env.filters['to_yaml'] = to_yaml_filter + env.filters['pop_value'] = pop_value + + template = env.get_template(self.template_file) + outStream.write(template.render(values=values)) + + +def main(): + template_path = sys.argv[1] if len(sys.argv) > 1 else 'migration_services.yaml.j2' + template_dir = os.path.dirname(template_path) or '.' 
+ template_file = os.path.basename(template_path) + + try: + YAMLTemplateConverter(template_dir, template_file).convert(sys.stdin, sys.stdout) + except yaml.YAMLError as e: + print(f"Error parsing YAML input: {e}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/migration_services.yaml.j2 b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/migration_services.yaml.j2 new file mode 100644 index 000000000..2eaceda04 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/migration_services.yaml.j2 @@ -0,0 +1,43 @@ +{%- if values.source_cluster is defined -%} +source_cluster: + {{ values | pop_value("source_cluster") | to_yaml | indent(2) }} +{%- endif %} + +{%- if values.target_clusters is defined %} +target_clusters: +{%- for cluster_name in (values | pop_value("target_clusters")) %} + {%- if values[cluster_name] is defined %} + {{ cluster_name }}: + {{ values | pop_value(cluster_name) | to_yaml | indent(4) }} + {%- endif %} +{%- endfor %} +{%- endif %} + +{%- if values.metricsSource is defined %} +metrics_source: + {{ values | pop_value("metricsSource") | to_yaml | indent(2) }} +{%- endif %} + +{%- if values.snapshot is defined %} +snapshot: + {{ values | pop_value("snapshot") | to_yaml | indent(2) }} +{%- endif %} + +{%- if values.otelEndpoint is defined %} +otel_endpoint: {{ values | pop_value("otelEndpoint") }} +{%- endif %} + +metadata_migration: + from_snapshot: {{ values.metadata_migration_from_snapshot | default(none) }} + min_replicas: {{ values | pop_value("minReplicasForMigratedIndices") | default(0) }} + +{%- if values.kafkaBroker is defined %} +kafka: + {{ values | pop_value("kafkaBroker") | to_yaml | indent(2) }} +{%- endif %} + + +{%- if values %} +extraValues: + {{ values | to_yaml | indent(2) }} +{%- endif %} diff --git a/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/print_env_vars_as_exports.py b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/print_env_vars_as_exports.py new file mode 100644 index 000000000..cad400dc6 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/k8sConfigMapUtilScripts/configmap2yaml/print_env_vars_as_exports.py @@ -0,0 +1,15 @@ +#!/usr/bin/python3 +import os +import shlex + + +# Export environment variables to sourceable shell script +def export_env(): + for key, value in sorted(os.environ.items()): + print(f"export {key}={shlex.quote(value)}") + + +# Main logic to handle both cases +if __name__ == "__main__": + # No args - export env vars + export_env() diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java index e50824839..141dd5d6f 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java @@ -105,19 +105,19 @@ public static class Parameters { @Parameter( required = false, - names = {REMOVE_AUTH_HEADER_VALUE_ARG }, + names = {REMOVE_AUTH_HEADER_VALUE_ARG, "--removeAuthHeader" }, arity = 0, description = "Remove the authorization header if present and do not replace it 
with anything. " + "(cannot be used with other auth arguments)") boolean removeAuthHeader; @Parameter( required = false, - names = { AUTH_HEADER_VALUE_ARG }, + names = { AUTH_HEADER_VALUE_ARG, "--authHeaderValue" }, arity = 1, description = "Static value to use for the \"authorization\" header of each request " + "(cannot be used with other auth arguments)") String authHeaderValue; @Parameter( - required = false, names = { - AWS_AUTH_HEADER_USER_AND_SECRET_ARG }, + required = false, + names = { AWS_AUTH_HEADER_USER_AND_SECRET_ARG, "--authHeaderUserAndSecret" }, splitter = NoSplitter.class, arity = 2, description = " pair to specify " @@ -128,7 +128,7 @@ public static class Parameters { List awsAuthHeaderUserAndSecret; @Parameter( required = false, - names = { SIGV_4_AUTH_HEADER_SERVICE_REGION_ARG }, + names = { SIGV_4_AUTH_HEADER_SERVICE_REGION_ARG, "--sigv4AuthHeaderServiceRegion" }, arity = 1, description = "Use AWS SigV4 to sign each request with the specified service name and region. " + "(e.g. es,us-east-1) " @@ -144,7 +144,7 @@ public static class Parameters { @Parameter( required = false, - names = "--user-agent", + names = { "--user-agent", "--userAgent" }, arity = 1, description = "For HTTP requests to the target cluster, append this string (after \"; \") to" + "the existing user-agent field or if the field wasn't present, simply use this value") @@ -158,33 +158,33 @@ public static class Parameters { String inputFilename; @Parameter( required = false, - names = {"-t", PACKET_TIMEOUT_SECONDS_PARAMETER_NAME }, + names = {"-t", PACKET_TIMEOUT_SECONDS_PARAMETER_NAME, "--packetTimeoutSeconds" }, arity = 1, description = "assume that connections were terminated after this many " + "seconds of inactivity observed in the captured stream") int observedPacketConnectionTimeout = 70; @Parameter( required = false, - names = { "--speedup-factor" }, + names = { "--speedup-factor", "--speedupFactor" }, arity = 1, description = "Accelerate the replayed communications by this factor. 
" + "This means that between each interaction will be replayed at this rate faster " + "than the original observations, provided that the replayer and target are able to keep up.") double speedupFactor = 1.0; @Parameter( required = false, - names = { LOOKAHEAD_TIME_WINDOW_PARAMETER_NAME }, + names = { LOOKAHEAD_TIME_WINDOW_PARAMETER_NAME, "--lookaheadTimeWindow" }, arity = 1, description = "Number of seconds of data that will be buffered.") int lookaheadTimeSeconds = 300; @Parameter( required = false, - names = { "--max-concurrent-requests" }, + names = { "--max-concurrent-requests", "--maxConcurrentRequests" }, arity = 1, description = "Maximum number of requests at a time that can be outstanding") int maxConcurrentRequests = 1024; @Parameter( required = false, - names = { "--num-client-threads" }, + names = { "--num-client-threads", "--numClientThreads" }, arity = 1, description = "Number of threads to use to send requests from.") int numClientThreads = 0; @@ -192,46 +192,46 @@ public static class Parameters { // https://github.com/opensearch-project/opensearch-java/blob/main/java-client/src/main/java/org/opensearch/client/transport/httpclient5/ApacheHttpClient5TransportBuilder.java#L49-L54 @Parameter( required = false, - names = { "--target-response-timeout" }, + names = { "--target-response-timeout", "--targetResponseTimeout" }, arity = 1, description = "Seconds to wait before timing out a replayed request to the target.") int targetServerResponseTimeoutSeconds = 30; @Parameter( required = false, - names = { "--kafka-traffic-brokers" }, + names = { "--kafka-traffic-brokers", "--kafkaTrafficBrokers" }, arity = 1, description = "Comma-separated list of host and port pairs that are the addresses of the Kafka brokers " + "to bootstrap with i.e. 'kafka-1:9092,kafka-2:9092'") String kafkaTrafficBrokers; @Parameter( required = false, - names = { "--kafka-traffic-topic" }, + names = { "--kafka-traffic-topic", "--kafkaTrafficTopic" }, arity = 1, description = "Topic name used to pull messages from Kafka") String kafkaTrafficTopic; @Parameter( required = false, - names = { "--kafka-traffic-group-id" }, + names = { "--kafka-traffic-group-id", "--kafkaTrafficGroupId" }, arity = 1, description = "Consumer group id that is used when pulling messages from Kafka") String kafkaTrafficGroupId; @Parameter( required = false, - names = { "--kafka-traffic-enable-msk-auth" }, + names = { "--kafka-traffic-enable-msk-auth", "--kafkaTrafficEnabledMskAuth" }, arity = 0, description = "Enables SASL properties required for connecting to MSK with IAM auth") boolean kafkaTrafficEnableMSKAuth; @Parameter( required = false, - names = { "--kafka-traffic-property-file" }, + names = { "--kafka-traffic-property-file", "--kafkaTrafficPropertyFile" }, arity = 1, description = "File path for Kafka properties file to use for additional or overriden Kafka properties") String kafkaTrafficPropertyFile; @Parameter( required = false, - names = { "--otelCollectorEndpoint" }, + names = { "--otelCollectorEndpoint", "--otel-collector-endpoint" }, arity = 1, description = "Endpoint (host:port) for the OpenTelemetry Collector to which metrics logs should be" + "forwarded. 
If no value is provided, metrics will not be forwarded.") @@ -242,13 +242,15 @@ public static class Parameters { public static class RequestTransformationParams implements TransformerParams { @Override public String getTransformerConfigParameterArgPrefix() { - return REQUEST_TRANSFORMER_ARG_PREFIX; + return REQUEST_SNAKE_TRANSFORMER_ARG_PREFIX; } - private final static String REQUEST_TRANSFORMER_ARG_PREFIX = ""; + private final static String REQUEST_SNAKE_TRANSFORMER_ARG_PREFIX = ""; + private final static String REQUEST_CAMEL_TRANSFORMER_ARG_PREFIX = ""; @Parameter( required = false, - names = "--" + REQUEST_TRANSFORMER_ARG_PREFIX + "transformer-config-encoded", + names = { "--" + REQUEST_SNAKE_TRANSFORMER_ARG_PREFIX + "transformer-config-encoded", + "--" + REQUEST_CAMEL_TRANSFORMER_ARG_PREFIX + "transformerConfigEncoded" }, arity = 1, description = "Configuration of message transformers. The same contents as --transformer-config but " + "Base64 encoded so that the configuration is easier to pass as a command line parameter.") @@ -256,7 +258,8 @@ public String getTransformerConfigParameterArgPrefix() { @Parameter( required = false, - names = "--" + REQUEST_TRANSFORMER_ARG_PREFIX + "transformer-config", + names = {"--" + REQUEST_SNAKE_TRANSFORMER_ARG_PREFIX + "transformer-config", + "--" + REQUEST_CAMEL_TRANSFORMER_ARG_PREFIX + "transformerConfig",}, arity = 1, description = "Configuration of message transformers. Either as a string that identifies the " + "transformer that should be run (with default settings) or as json to specify options " @@ -267,7 +270,8 @@ public String getTransformerConfigParameterArgPrefix() { @Parameter( required = false, - names = "--" + REQUEST_TRANSFORMER_ARG_PREFIX + "transformer-config-file", + names = {"--" + REQUEST_SNAKE_TRANSFORMER_ARG_PREFIX + "transformer-config-file", + "--" + REQUEST_CAMEL_TRANSFORMER_ARG_PREFIX + "transformerConfigFile"}, arity = 1, description = "Path to the JSON configuration file of message transformers.") private String transformerConfigFile; @@ -276,13 +280,15 @@ public String getTransformerConfigParameterArgPrefix() { @Getter public static class TupleTransformationParams implements TransformerParams { public String getTransformerConfigParameterArgPrefix() { - return TUPLE_TRANSFORMER_CONFIG_PARAMETER_ARG_PREFIX; + return TUPLE_TRANSFORMER_CONFIG_SNAKE_PARAMETER_ARG_PREFIX; } - final static String TUPLE_TRANSFORMER_CONFIG_PARAMETER_ARG_PREFIX = "tuple-"; + final static String TUPLE_TRANSFORMER_CONFIG_SNAKE_PARAMETER_ARG_PREFIX = "tuple-"; + final static String TUPLE_TRANSFORMER_CONFIG_CAMEL_PARAMETER_ARG_PREFIX = "tuple"; @Parameter( required = false, - names = "--" + TUPLE_TRANSFORMER_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config-base64", + names = { "--" + TUPLE_TRANSFORMER_CONFIG_SNAKE_PARAMETER_ARG_PREFIX + "transformer-config-base64", + "--" + TUPLE_TRANSFORMER_CONFIG_CAMEL_PARAMETER_ARG_PREFIX + "TransformerConfigBase64" }, arity = 1, description = "Configuration of tuple transformers. 
The same contents as --tuple-transformer-config but " + "Base64 encoded so that the configuration is easier to pass as a command line parameter.") @@ -290,7 +296,8 @@ public String getTransformerConfigParameterArgPrefix() { @Parameter( required = false, - names = "--" + TUPLE_TRANSFORMER_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config", + names = { "--" + TUPLE_TRANSFORMER_CONFIG_SNAKE_PARAMETER_ARG_PREFIX + "transformer-config", + "--" + TUPLE_TRANSFORMER_CONFIG_CAMEL_PARAMETER_ARG_PREFIX + "TransformerConfig" }, arity = 1, description = "Configuration of tuple transformers. Either as a string that identifies the " + "transformer that should be run (with default settings) or as json to specify options " @@ -301,7 +308,8 @@ public String getTransformerConfigParameterArgPrefix() { @Parameter( required = false, - names = "--" + TUPLE_TRANSFORMER_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config-file", + names = { "--" + TUPLE_TRANSFORMER_CONFIG_SNAKE_PARAMETER_ARG_PREFIX + "transformer-config-file", + "--" + TUPLE_TRANSFORMER_CONFIG_CAMEL_PARAMETER_ARG_PREFIX + "TransformerConfigFile" } , arity = 1, description = "Path to the JSON configuration file of tuple transformers.") private String transformerConfigFile; diff --git a/deployment/k8s/.gitignore b/deployment/k8s/.gitignore new file mode 100644 index 000000000..97ef61ca7 --- /dev/null +++ b/deployment/k8s/.gitignore @@ -0,0 +1 @@ +charts/**/charts/* \ No newline at end of file diff --git a/deployment/k8s/README.md b/deployment/k8s/README.md new file mode 100644 index 000000000..210e1876c --- /dev/null +++ b/deployment/k8s/README.md @@ -0,0 +1,130 @@ +# Kubernetes Deployment + +## Prerequisites + +#### Install kubectl +Follow instructions [here](https://kubernetes.io/docs/tasks/tools/) to install the Kubernetes command-line tool. This will be the go-to tool for interacting with the Kubernetes cluster + +#### Install helm +Follow instructions [here](https://helm.sh/docs/intro/install/) to install helm. helm will be used for deploying to the Kubernetes cluster + +#### Install docker +Follow instructions [here](https://docs.docker.com/engine/install/) to set up Docker. Docker will be used to build Docker images as well as run a local Kubernetes cluster. Later versions are recommended. + + +## Local Kubernetes Cluster +Creating a local Kubernetes cluster is useful for testing and developing a given deployment. There are a few different tools for running a Kubernetes cluster locally. This documentation focuses on using [Minikube](https://github.com/kubernetes/minikube) to run the local Kubernetes cluster. + +### Install Minikube +Follow instructions [here](https://minikube.sigs.k8s.io/docs/start/?arch=%2Fmacos%2Fx86-64%2Fstable%2Fbinary+download) to install Minikube + +### Loading Docker images into Minikube +Since Minikube uses a different Docker registry than the normal host machine, the Docker images shown will differ from that on the host machine. The script `buildDockerImagesMini.sh` in this directory will configure the environment to use the Minikube Docker registry and build the Docker images into Minikube + +Show Docker images available to Minikube +```shell +minikube image ls +``` +Build Docker images into Minikube +```shell +./buildDockerImagesMini.sh +``` + +### Start/Pause/Delete +A convenience script `minikubeLocal.sh` is located in this directory which wraps the Minikube commands to start/pause/delete Minikube. 
+
+
+## Deploying
+
+### Migration Assistant environment
+Guide for deploying a complete Migration Assistant environment helm chart, with the ability to enable/disable different Migration services and clusters as needed.
+
+The full environment helm chart consists of:
+* Source cluster
+* Target cluster
+* Migration services
+
+**Note**: For first-time deployments, and for deployments after changes have been made to a dependent helm package (such as the `migration-console` chart), the following command is needed to update dependent charts:
+```shell
+helm dependency update migration-assistant
+```
+
+The full environment helm chart can be deployed with the following helm command:
+```shell
+helm install ma migration-assistant
+```
+
+### Specific services
+Guide for deploying an individual Migration service helm chart.
+
+A particular service can be deployed with a command similar to the one below:
+```shell
+helm install migration-console services/migration-console
+```
+
+## Uninstalling
+To show all helm deployments:
+```shell
+helm list
+```
+
+To uninstall a particular helm deployment:
+```shell
+helm uninstall <deployment_name>
+```
+
+### AWS Initial Setup
+#### Setting up EBS driver to dynamically provision PVs
+```shell
+# To check if any IAM OIDC provider is configured:
+aws iam list-open-id-connect-providers
+# If none exist, create one:
+eksctl utils associate-iam-oidc-provider --cluster <cluster-name> --approve
+# Create an IAM role for the service account in order to use the EBS CSI driver in EKS.
+# This currently creates a CFN stack and may
+eksctl create iamserviceaccount \
+  --name ebs-csi-controller-sa \
+  --namespace kube-system \
+  --cluster <cluster-name> \
+  --role-name AmazonEKS_EBS_CSI_DriverRole \
+  --role-only \
+  --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
+  --approve
+# Install the add-on into the EKS cluster using the created IAM role for the service account
+eksctl create addon --cluster <cluster-name> --name aws-ebs-csi-driver --version latest --service-account-role-arn <role-arn> --force
+# Create a StorageClass to dynamically provision persistent volumes (PV)
+kubectl apply -f aws/storage-class-ebs.yml
+```
+#### Setting up EFS driver to dynamically provision PVs
+```shell
+export cluster_name=<cluster-name>
+export role_name=AmazonEKS_EFS_CSI_DriverRole
+eksctl create iamserviceaccount \
+  --name efs-csi-controller-sa \
+  --namespace kube-system \
+  --cluster $cluster_name \
+  --role-name $role_name \
+  --role-only \
+  --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEFSCSIDriverPolicy \
+  --approve
+TRUST_POLICY=$(aws iam get-role --role-name $role_name --query 'Role.AssumeRolePolicyDocument' | \
+  sed -e 's/efs-csi-controller-sa/efs-csi-*/' -e 's/StringEquals/StringLike/')
+aws iam update-assume-role-policy --role-name $role_name --policy-document "$TRUST_POLICY"
+eksctl create addon --cluster $cluster_name --name aws-efs-csi-driver --version latest --service-account-role-arn <role-arn> --force
+kubectl apply -f aws/storage-class-efs.yml
+```
+
+Create an ECR repository to store images:
+```shell
+./buildDockerImagesMini.sh --create-ecr
+```
+
+Build images and push them to ECR:
+```shell
+./buildDockerImagesMini.sh --sync-ecr
+```
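+
+After running the setup above, a quick sanity check can confirm that the CSI drivers, storage classes, and ECR repository are in place. The names below assume the defaults used in this directory (`ebs-sc`, `efs-sc`, and the `migrations-local-repo` repository created by the build script):
+```shell
+# Both storage classes should be listed
+kubectl get storageclass ebs-sc efs-sc
+# The EBS/EFS CSI controller pods should be Running
+kubectl get pods -n kube-system | grep -E 'ebs-csi|efs-csi'
+# Images pushed by --sync-ecr should appear in the repository
+aws ecr describe-images --repository-name migrations-local-repo
+```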
diff --git a/deployment/k8s/aws/README.md b/deployment/k8s/aws/README.md
new file mode 100644
index 000000000..9c8f99cf9
--- /dev/null
+++ b/deployment/k8s/aws/README.md
@@ -0,0 +1,3 @@
+The contents of this directory include resources that are not yet wired into any of our deployments.
+They're still in development. I'm including them here for reference so that we can easily move them into
+place in the near future.
diff --git a/deployment/k8s/aws/ack-resource-setup/.helmignore b/deployment/k8s/aws/ack-resource-setup/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/deployment/k8s/aws/ack-resource-setup/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deployment/k8s/aws/ack-resource-setup/Chart.yaml b/deployment/k8s/aws/ack-resource-setup/Chart.yaml
new file mode 100644
index 000000000..0a8045d46
--- /dev/null
+++ b/deployment/k8s/aws/ack-resource-setup/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v2
+name: ack-resource-setup
+description: A Helm chart for deploying required AWS resources for running the Migration Assistant
+version: 0.1.0
+appVersion: "3.5.0"
+dependencies:
+  - name: strimzi-kafka-operator
+    version: 0.43.0
+    repository: https://strimzi.io/charts/
diff --git a/deployment/k8s/aws/ack-resource-setup/templates/kafka-single-node.yaml b/deployment/k8s/aws/ack-resource-setup/templates/kafka-single-node.yaml
new file mode 100644
index 000000000..018ba3090
--- /dev/null
+++ b/deployment/k8s/aws/ack-resource-setup/templates/kafka-single-node.yaml
@@ -0,0 +1,51 @@
+#https://github.com/strimzi/strimzi-kafka-operator/blob/release-0.43.x/examples/kafka/kraft/kafka-single-node.yaml
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaNodePool
+metadata:
+  name: dual-role
+  labels:
+    strimzi.io/cluster: kafka-cluster
+spec:
+  replicas: 1
+  roles:
+    - controller
+    - broker
+  storage:
+    type: jbod
+    volumes:
+      - id: 0
+        type: persistent-claim
+        size: 10Gi
+        deleteClaim: true
+        kraftMetadata: shared
+---
+
+apiVersion: kafka.strimzi.io/v1beta2
+kind: Kafka
+metadata:
+  name: kafka-cluster
+  annotations:
+    strimzi.io/node-pools: enabled
+    strimzi.io/kraft: enabled
+spec:
+  kafka:
+    version: 3.8.0
+    metadataVersion: 3.8-IV0
+    listeners:
+      - name: plain
+        port: 9092
+        type: internal
+        tls: false
+      - name: tls
+        port: 9093
+        type: internal
+        tls: true
+    config:
+      offsets.topic.replication.factor: 1
+      transaction.state.log.replication.factor: 1
+      transaction.state.log.min.isr: 1
+      default.replication.factor: 1
+      min.insync.replicas: 1
+  entityOperator:
+    topicOperator: {}
+    userOperator: {}
diff --git a/deployment/k8s/aws/ack-resource-setup/values.yaml b/deployment/k8s/aws/ack-resource-setup/values.yaml
new file mode 100644
index 000000000..01cce5cc2
--- /dev/null
+++ b/deployment/k8s/aws/ack-resource-setup/values.yaml
@@ -0,0 +1,38 @@
+# Configuration for Strimzi Kafka Operator
+#strimzi-kafka-operator:
+#  You can add operator-specific configurations here if needed
+#  For example, enabling metrics, RBAC settings, etc.
+ +## Configuration for the Kafka Cluster +#kafka: +# name: kafka-cluster +# version: "3.6.0" # Kafka version supporting KRaft +# replicas: 1 +# config: +# processRoles: "broker,controller" +# nodeId: 1 +# controllerQuorumVoters: "1@kafka-cluster-0.kafka-cluster-bootstrap:9093" +# interBrokerProtocolVersion: "3.5" +# logMessageFormatVersion: "3.5" +# autoCreateTopicsEnable: "true" +# storage: +# type: "ephemeral" # Use "persistent-claim" for persistent storage +# size: "20Gi" +# class: "managed-nfs-storage" # Set to empty string "" if not using a specific storage class +# deleteClaim: false +# +## Configuration for the Controller +#controller: +# replicas: 1 +# resources: +# limits: +# memory: "2Gi" +# cpu: "1000m" +# requests: +# memory: "1Gi" +# cpu: "500m" +# storage: +# type: "ephemeral" # Use "persistent-claim" for persistent storage +# size: "10Gi" +# class: "managed-nfs-storage" # Set to empty string "" if not using a specific storage class +# deleteClaim: false diff --git a/deployment/k8s/aws/storage-class-ebs.yml b/deployment/k8s/aws/storage-class-ebs.yml new file mode 100644 index 000000000..091496601 --- /dev/null +++ b/deployment/k8s/aws/storage-class-ebs.yml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: ebs-sc +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + encrypted: "true" diff --git a/deployment/k8s/aws/storage-class-efs.yml b/deployment/k8s/aws/storage-class-efs.yml new file mode 100644 index 000000000..e6ebc0392 --- /dev/null +++ b/deployment/k8s/aws/storage-class-efs.yml @@ -0,0 +1,16 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: efs-sc +provisioner: efs.csi.aws.com +parameters: + provisioningMode: efs-ap + fileSystemId: "fs-0bc6e04752a510618" + directoryPerms: "700" + #gidRangeStart: "1000" + #gidRangeEnd: "2000" + #basePath: "/dynamic_provisioning" +#mountOptions: +# - tls +#reclaimPolicy: Retain +volumeBindingMode: Immediate diff --git a/deployment/k8s/aws/values/migration-assistant-aws.yml b/deployment/k8s/aws/values/migration-assistant-aws.yml new file mode 100644 index 000000000..d58dc0bda --- /dev/null +++ b/deployment/k8s/aws/values/migration-assistant-aws.yml @@ -0,0 +1,161 @@ +# Define all enabled/disabled migration services +migration-console-enabled: true +capture-proxy-enabled: true +replayer-enabled: false +kafka-op-enabled: true +reindex-from-snapshot-enabled: false +elasticsearch-searchguard-enabled: true +opensearch-helm-enabled: true + +# Define specific dependency configuration +opensearch-helm: + fullnameOverride: "opensearch" + image: + tag: "2.17.0" + replicas: 1 + singleNode: true + config: + discovery.type: single-node + extraEnvs: + - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD + value: myStrongPassword123! 
+ service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + persistence: + enabled: true + storageClass: "ebs-sc" + size: "30Gi" + +kafka-op: + clusterName: kafka-cluster + kafkaVersion: 3.8.0 + kafkaMetadataVersion: 3.8-IV0 + nodePool: + replicas: 1 + volume: + size: 10Gi + class: "ebs-sc" + deleteClaim: true + +elasticsearch: + replicaCount: 1 + image: + repository: 977363099154.dkr.ecr.us-east-1.amazonaws.com/migrations-local-repo + pullPolicy: IfNotPresent + tag: "elasticsearch_searchguard-latest" + service: + type: ClusterIP + port: 19200 # Port exposed to external + targetPort: 9200 # Container port + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: path.repo + value: "/storage/snapshot" + volumes: + - name: snapshot-vol + persistentVolumeClaim: + claimName: snapshot-vol-pvc + volumeMounts: + - name: snapshot-vol + mountPath: /storage + snapshot-vol: + volume: + volumeName: snapshot-vol + mountPath: /storage # Path inside the container where the volume should be mounted + # Persistent volume claim settings + pvcClaimName: snapshot-vol-pvc + storageClass: "efs-sc" + accessMode: ReadWriteOnce + size: 5Gi + +migration-console: + replicaCount: 1 + image: + repository: 977363099154.dkr.ecr.us-east-1.amazonaws.com/migrations-local-repo + pullPolicy: IfNotPresent + tag: "migration_console-latest" + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: SHARED_LOGS_DIR_PATH + value: /shared-logs-output/traffic-replayer-default + - name: STAGE + configMapKeyRef: + name: test-env + key: STAGE + volumes: + - name: services-yaml + configMap: + name: services-yaml-config-map + - name: shared-logs + persistentVolumeClaim: + claimName: shared-logs-pvc + - name: snapshot-vol + persistentVolumeClaim: + claimName: snapshot-vol-pvc + volumeMounts: + - name: services-yaml + mountPath: /etc/migration-config + - name: shared-logs + mountPath: /shared-logs-output + - name: snapshot-vol + mountPath: /storage + snapshot-vol: + volume: + volumeName: snapshot-vol + mountPath: /storage # Path inside the container where the volume should be mounted + # Persistent volume claim settings + pvcClaimName: snapshot-vol-pvc + storageClass: "efs-sc" + accessMode: ReadWriteOnce + size: 5Gi + shared-logs-vol: + volume: + volumeName: shared-logs + mountPath: /shared-logs-output # Path inside the container where the volume should be mounted + # Persistent volume claim settings + pvcClaimName: shared-logs-pvc + storageClass: "efs-sc" + accessMode: ReadWriteOnce + size: 10Gi + +capture-proxy: + replicaCount: 1 + image: + repository: 977363099154.dkr.ecr.us-east-1.amazonaws.com/migrations-local-repo + pullPolicy: IfNotPresent + tag: "capture_proxy-latest" + service: + type: ClusterIP + port: 9201 # Port exposed to external + targetPort: 9200 # Container port + container: + command: > + /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.CaptureProxy + --kafkaConnection kafka-cluster-kafka-bootstrap:9092 + --destinationUri https://elasticsearch:19200 + --insecureDestination + --listenPort 9200 + --sslConfigFile /usr/share/elasticsearch/config/proxy_tls.yml + #--otelCollectorEndpoint http://localhost:4317 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi diff --git a/deployment/k8s/buildDockerImagesMini.sh 
b/deployment/k8s/buildDockerImagesMini.sh
new file mode 100755
index 000000000..0ba7c4895
--- /dev/null
+++ b/deployment/k8s/buildDockerImagesMini.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+# Name of the ECR repository used by both --create-ecr and --sync-ecr
+REPO_NAME="migrations-local-repo"
+
+sync_ecr_repo() {
+  ACCOUNT_ID=$(aws sts get-caller-identity --query "Account" --output text 2>/dev/null)
+  if [[ -z "$ACCOUNT_ID" ]]; then
+    echo "Error: Unable to retrieve AWS Account ID. Check your AWS CLI configuration."
+    exit 1
+  fi
+
+  # Retrieve the AWS Region from environment variable or AWS CLI config
+  REGION=${AWS_REGION:-$(aws configure get region)}
+  if [[ -z "$REGION" ]]; then
+    echo "Error: Unable to determine AWS region. Set the AWS_REGION environment variable or configure it in the AWS CLI."
+    exit 1
+  fi
+
+  echo "Using account: $ACCOUNT_ID and region: $REGION"
+  ECR_URI="$ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com"
+  ECR_REPO_URI="$ECR_URI/$REPO_NAME"
+
+  # Authenticate Docker with ECR
+  aws ecr get-login-password --region "$REGION" | docker login --username AWS --password-stdin "$ECR_URI"
+
+  # Get migration images tagged as latest
+  local_images=$(docker images --format "{{.Repository}}:{{.Tag}}" | grep "^migrations.*:latest$")
+
+  echo "Found the following images to export to ECR: $local_images"
+
+  # Iterate over each image
+  for image in $local_images; do
+    # Tag the image for ECR
+    image_name=$(echo "$image" | cut -d'/' -f2 | cut -d':' -f1)
+    tag=$(echo "$image" | cut -d':' -f2)
+    ecr_image="$ECR_REPO_URI:$image_name-$tag"
+    echo "Tagging $image as $ecr_image"
+    docker tag "$image" "$ecr_image"
+
+    # Push the image to ECR
+    echo "Pushing $ecr_image to ECR..."
+    if docker push "$ecr_image"; then
+      echo "Successfully pushed $ecr_image"
+    else
+      echo "Failed to push $ecr_image"
+    fi
+  done
+}
+
+# Optional helper function to create an ECR repo
+create_ecr_repo() {
+  stack_name="$REPO_NAME-stack"
+  aws cloudformation create-stack --stack-name "$stack_name" --template-body "{
+    \"Resources\": {
+      \"ECRRepository\": {
+        \"Type\": \"AWS::ECR::Repository\",
+        \"Properties\": {
+          \"RepositoryName\": \"$REPO_NAME\"
+        }
+      }
+    },
+    \"Outputs\": {
+      \"RepositoryArn\": {
+        \"Description\": \"The ARN of the ECR repository\",
+        \"Value\": { \"Fn::GetAtt\": [\"ECRRepository\", \"Arn\"] }
+      }
+    }
+  }"
+
+  # Wait for the stack creation to complete
+  aws cloudformation wait stack-create-complete --stack-name "$stack_name"
+
+  # Retrieve and print the output
+  aws cloudformation describe-stacks --stack-name "$stack_name" --query "Stacks[0].Outputs[?OutputKey=='RepositoryArn'].OutputValue" --output text
+}
+
+# Function to display usage
+usage() {
+  echo "Usage: $0 [--sync-ecr] [--create-ecr] [--skip-build]"
+  exit 1
+}
+
+SYNC_ECR=false
+SKIP_BUILD=false
+## Parse command-line arguments
+while [[ "$#" -gt 0 ]]; do
+  case "$1" in
+    --create-ecr)
+      create_ecr_repo
+      exit 0
+      ;;
+    --sync-ecr)
+      SYNC_ECR=true
+      shift 1
+      ;;
+    --skip-build)
+      SKIP_BUILD=true
+      shift 1
+      ;;
+    *)
+      echo "Invalid option: $1"
+      usage
+      ;;
+  esac
+done
+
+# Allow executing this script from any dir
+script_abs_path=$(readlink -f "$0")
+script_dir_abs_path=$(dirname "$script_abs_path")
+cd "$script_dir_abs_path" || exit
+
+cd ../..
|| exit + +eval $(minikube docker-env) + +if [ "$SKIP_BUILD" = "false" ]; then + ./gradlew :buildDockerImages -x test +fi + +if [ "$SYNC_ECR" = "true" ]; then + sync_ecr_repo +fi diff --git a/deployment/k8s/charts/.gitignore b/deployment/k8s/charts/.gitignore new file mode 100644 index 000000000..ae0eb259a --- /dev/null +++ b/deployment/k8s/charts/.gitignore @@ -0,0 +1,2 @@ +*/charts/**/*tgz +*/Chart.lock \ No newline at end of file diff --git a/deployment/k8s/charts/aggregates/migrationAssistant/Chart.yaml b/deployment/k8s/charts/aggregates/migrationAssistant/Chart.yaml new file mode 100644 index 000000000..f85c74cb4 --- /dev/null +++ b/deployment/k8s/charts/aggregates/migrationAssistant/Chart.yaml @@ -0,0 +1,49 @@ +apiVersion: v2 +name: migration-assistant +version: 0.1.0 +type: application +appVersion: "1.16.0" +dependencies: + - name: migration-console + version: "0.1.0" + repository: "file://../../components/migrationConsole" + + - name: bulk-document-loader + condition: conditionalPackageInstalls.bulkLoader + version: "0.1.0" + repository: "file://../../components/bulkLoad" + + - name: capture-proxy + condition: conditionalPackageInstalls.proxy + version: "0.1.0" + repository: "file://../../components/captureProxy" + - name: kafka-cluster + alias: captured-traffic-kafka-cluster + condition: conditionalPackageInstalls.kafka + version: "0.1.0" + repository: "file://../../sharedResources/baseKafkaCluster" + - name: replayer + condition: conditionalPackageInstalls.replayer + version: "0.1.0" + repository: "file://../../components/replayer" + + - name: shared-configs + version: "0.1.0" + repository: "file://../../sharedResources/sharedConfigs" + +# - name: otel-collector +# version: 1.2.2 +# repository: + + - name: grafana + condition: conditionalPackageInstalls.grafana + version: "8.5.0" + repository: "https://grafana.github.io/helm-charts" + - name: prometheus + condition: conditionalPackageInstalls.prometheus + version: "25.27.0" + repository: "https://prometheus-community.github.io/helm-charts" + - name: jaeger + condition: conditionalPackageInstalls.jaeger + version: "3.2.0" + repository: "https://jaegertracing.github.io/helm-charts" diff --git a/deployment/k8s/charts/aggregates/migrationAssistant/templates/grafanaDashboard.yaml b/deployment/k8s/charts/aggregates/migrationAssistant/templates/grafanaDashboard.yaml new file mode 100644 index 000000000..16e4db9c5 --- /dev/null +++ b/deployment/k8s/charts/aggregates/migrationAssistant/templates/grafanaDashboard.yaml @@ -0,0 +1,138 @@ +{{ if .Values.conditionalPackageInstalls.grafana }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-metrics-dashboard + labels: + grafana_dashboard: "1" +data: + prometheus-metrics-dashboard.json: | + { + "id": null, + "title": "Prometheus Metrics Dashboard", + "tags": [], + "timezone": "browser", + "schemaVersion": 30, + "version": 1, + "refresh": "5s", + "panels": [ + { + "type": "graph", + "title": "CPU Usage", + "datasource": "Prometheus", + "targets": [ + { + "expr": "sum(rate(node_cpu_seconds_total{mode!=\"idle\"}[5m])) by (instance)", + "legendFormat": "thing", + "refId": "A" + } + ], + "xaxis": { + "mode": "time" + }, + "yaxes": [ + { + "format": "percent", + "label": "CPU Usage" + } + ], + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + } + }, + { + "type": "graph", + "title": "Memory Usage", + "datasource": "Prometheus", + "targets": [ + { + "expr": "node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes", + "legendFormat": "Memory Usage", + "refId": "A" + } + ], + 
"xaxis": { + "mode": "time" + }, + "yaxes": [ + { + "format": "percent", + "label": "Memory Usage" + } + ], + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + } + }, + { + "type": "graph", + "title": "Disk I/O", + "datasource": "Prometheus", + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[5m])", + "legendFormat": "thing - thing2", + "refId": "A" + } + ], + "xaxis": { + "mode": "time" + }, + "yaxes": [ + { + "format": "s", + "label": "I/O Time" + } + ], + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + } + }, + { + "type": "singlestat", + "title": "Total Requests", + "datasource": "Prometheus", + "targets": [ + { + "expr": "sum(rate(http_requests_total[5m]))", + "refId": "A" + } + ], + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 16 + }, + "format": "none" + }, + { + "type": "singlestat", + "title": "Up Time", + "datasource": "Prometheus", + "targets": [ + { + "expr": "time() - node_boot_time_seconds", + "refId": "A" + } + ], + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 16 + }, + "format": "s" + } + ] + } + {{ end }} \ No newline at end of file diff --git a/deployment/k8s/charts/aggregates/migrationAssistant/values.yaml b/deployment/k8s/charts/aggregates/migrationAssistant/values.yaml new file mode 100644 index 000000000..c771c47d7 --- /dev/null +++ b/deployment/k8s/charts/aggregates/migrationAssistant/values.yaml @@ -0,0 +1,103 @@ +conditionalPackageInstalls: + bulkLoader: true + kafka: true + replayer: true + jaeger: false + prometheus: false + proxy: true + grafana: false + + +capture-proxy: + enabled: true + parameters: + destinationUri: + configMapName: "source-cluster-endpoint" + yamlPath: "endpoint" + insecureDestination: + configMapName: "source-cluster-endpoint" + yamlPath: "allow_insecure" + +migration-console: + parameters: + nothingImportant: + value: false + +bulk-document-loader: + parameters: + luceneDir: + value: /tmp + allowRuntimeOverride: true + snapshotName: + value: rfs + allowRuntimeOverride: true + targetHost: + value: https://opensearch-cluster-master.mcc:9200/ + allowRuntimeOverride: true + snapshotLocalDir: + value: /snapshot + allowRuntimeOverride: true + +replayer: + parameters: + targetUri: + value: https://opensearch-cluster-master.mcc:9200/ + allowRuntimeOverride: true + kafkaTrafficBrokers: + value: captured-traffic-kafka-bootstrap.ma.svc:9092 + allowRuntimeOverride: true + + +captured-traffic-kafka-cluster: + environment: test + + clusterName: captured-traffic + + replicas: 1 + storageType: ephemeral + storageSize: 100Gi + storageDeleteClaim: true + dedicatedController: + replicas: 1 + storageSize: 10Gi + +jaeger: + allInOne: + enabled: true + provisionDataStore: + cassandra: false + storage: + type: memory + agent: + enabled: false + collector: + enabled: false + query: + enabled: false + +grafana: + ## Grafana data sources configuration + datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus-server.prometheus.svc.cluster.local:9090 + isDefault: true + editable: true + - name: Jaeger + type: jaeger + access: proxy + url: http://jaeger-query.jaeger.svc.cluster.local:16686 + isDefault: false + editable: true + + ## Set up the sidecar to import data sources (usually enabled by default) + sidecar: + datasources: + enabled: true + dashboards: + enabled: true + label: grafana_dashboard diff --git a/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.lock b/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.lock 
new file mode 100644 index 000000000..9ca89614b --- /dev/null +++ b/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: elasticsearch + repository: https://helm.elastic.co + version: 8.5.1 +- name: capture-proxy + repository: file://../../components/captureProxy + version: 0.1.0 +- name: opensearch + repository: https://opensearch-project.github.io/helm-charts/ + version: 2.23.1 +digest: sha256:9b3812d61b38a98c97adfbd7afe6cc27a73615ca3b3f792c8d64847fc50c5600 +generated: "2024-12-28T10:22:08.454087-05:00" diff --git a/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.yaml b/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.yaml new file mode 100644 index 000000000..f10e4036b --- /dev/null +++ b/deployment/k8s/charts/aggregates/mockCustomerClusters/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: mock-customer-clusters +version: 0.1.0 +dependencies: + - name: elasticsearch + version: "8.5.1" + repository: https://helm.elastic.co + alias: source + - name: capture-proxy + version: "0.1.0" + repository: file://../../components/captureProxy + alias: proxy + - name: opensearch + version: "2.23.1" + repository: "https://opensearch-project.github.io/helm-charts/" + alias: target diff --git a/deployment/k8s/charts/aggregates/mockCustomerClusters/values.yaml b/deployment/k8s/charts/aggregates/mockCustomerClusters/values.yaml new file mode 100644 index 000000000..42d005ec3 --- /dev/null +++ b/deployment/k8s/charts/aggregates/mockCustomerClusters/values.yaml @@ -0,0 +1,30 @@ +proxy: + parameters: + destinationUri: + value: "http://elasticsearch:9200" + +source: + imageTag: "7.10.2" + image: "docker.elastic.co/elasticsearch/elasticsearch-oss" + fullnameOverride: "elasticsearch" + roles: [ master, data, ingest ] + antiAffinity: "soft" + esJavaOpts: "-Xmx128m -Xms128m" + protocol: http + # for simplest dev testing + replicas: 2 # k8s replica count, not ES shard replicas + createCert: false + # esConfig: + # elasticsearch.yml: | + # discovery.type: single-node + + # for load-testing deployments + minimumMasterNodes: 1 + +target: + extraEnvs: + - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD + value: myStrongPassword123! 
+ singleNode: true + persistence: + enabled: false diff --git a/deployment/k8s/charts/components/bulkLoad/Chart.lock b/deployment/k8s/charts/components/bulkLoad/Chart.lock new file mode 100644 index 000000000..9a7e066d7 --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 +- name: snapshot-volume + repository: file://../../sharedResources/snapshotVolume + version: 0.1.0 +- name: logs-volume + repository: file://../../sharedResources/logsVolume + version: 0.1.0 +digest: sha256:4f480eb9ba10ee779064919444e65e9adcab54916ba47962ca784738be1c803e +generated: "2024-12-28T10:22:19.870333-05:00" diff --git a/deployment/k8s/charts/components/bulkLoad/Chart.yaml b/deployment/k8s/charts/components/bulkLoad/Chart.yaml new file mode 100644 index 000000000..2f155731a --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: bulk-document-loader +version: 0.1.0 +type: application +appVersion: "1.16.0" +dependencies: + - name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 + - name: snapshot-volume + version: "0.1.0" + repository: "file://../../sharedResources/snapshotVolume" + - name: logs-volume + version: "0.1.0" + repository: "file://../../sharedResources/logsVolume" diff --git a/deployment/k8s/charts/components/bulkLoad/templates/commonConfigs.yaml b/deployment/k8s/charts/components/bulkLoad/templates/commonConfigs.yaml new file mode 100644 index 000000000..0ad1da2f2 --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/templates/commonConfigs.yaml @@ -0,0 +1 @@ +{{ include "generic.createCommonConfigs" . }} diff --git a/deployment/k8s/charts/components/bulkLoad/templates/configmaps.yaml b/deployment/k8s/charts/components/bulkLoad/templates/configmaps.yaml new file mode 100644 index 000000000..b848a0add --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/templates/configmaps.yaml @@ -0,0 +1,6 @@ +{{- include "generic.createConfigMaps" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) + "include" .Template.Include + "Template" .Template +) | indent 0 }} \ No newline at end of file diff --git a/deployment/k8s/charts/components/bulkLoad/templates/deployment.yaml b/deployment/k8s/charts/components/bulkLoad/templates/deployment.yaml new file mode 100644 index 000000000..1977f3079 --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/templates/deployment.yaml @@ -0,0 +1,39 @@ +{{ $envMountName := "env-vars" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "generic.fullname" . }} +spec: + replicas: 0 + selector: + matchLabels: + app: {{ include "generic.fullname" . }} + template: + metadata: + annotations: + rollme: {{ randAlphaNum 5 | quote }} + labels: + app: {{ include "generic.fullname" . }} + env: v1 + spec: + initContainers: + {{- include "generic.setupEnvLoadInitContainer" (merge . 
(dict + "MountName" $envMountName + "include" .Template.Include)) | nindent 8 }} + containers: + - name: bulk-load + image: migrations/reindex_from_snapshot:latest + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-c" + - | + echo `cat /shared/vars.sh` + source /shared/vars.sh + exec /rfs-app/runJavaWithClasspath.sh org.opensearch.migrations.RfsMigrateDocuments $ARGS + volumeMounts: + - name: {{ $envMountName }} + mountPath: /shared + volumes: + - name: {{ $envMountName }} + emptyDir: {} diff --git a/deployment/k8s/charts/components/bulkLoad/templates/job.yml b/deployment/k8s/charts/components/bulkLoad/templates/job.yml new file mode 100644 index 000000000..d84dfecc7 --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/templates/job.yml @@ -0,0 +1,20 @@ +{{/* A job may be a better way to model bulk load - but I can't remember the pros/cons */}} + +{{/*apiVersion: batch/v1*/}} +{{/*kind: Job*/}} +{{/*metadata:*/}} +{{/* name: {{ .Chart.Name }}*/}} +{{/* labels:*/}} +{{/* app: {{ .Chart.Name }}*/}} +{{/*spec:*/}} +{{/* parallelism: {{ .Values.parallelCount }}*/}} +{{/* completions: 1000 # Temporary until direction decided here*/}} +{{/* ttlSecondsAfterFinished: 30 # Clean up finished Jobs (either Complete or Failed) automatically after this time*/}} +{{/* template:*/}} +{{/* metadata:*/}} +{{/* labels:*/}} +{{/* app: {{ .Chart.Name }}*/}} +{{/* spec:*/}} +{{/* restartPolicy: Never*/}} +{{/* {{- include "common-lib.containers" . | nindent 6 }}*/}} +{{/* {{- include "common-lib.volumes" . | nindent 6 }}*/}} diff --git a/deployment/k8s/charts/components/bulkLoad/values.yaml b/deployment/k8s/charts/components/bulkLoad/values.yaml new file mode 100644 index 000000000..6c37bd920 --- /dev/null +++ b/deployment/k8s/charts/components/bulkLoad/values.yaml @@ -0,0 +1,6 @@ +useSharedConfigs: false +parameters: + initialLeaseDuration: + value: PT10M + documentsPerBulkRequest: + value: 1000 \ No newline at end of file diff --git a/deployment/k8s/charts/components/captureProxy/Chart.lock b/deployment/k8s/charts/components/captureProxy/Chart.lock new file mode 100644 index 000000000..c67b9629d --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 +digest: sha256:bc6f4a7b13fb0743870f216e559f3cc46bbf7cf573bdddcad9899a736696cd72 +generated: "2024-12-28T10:22:27.16052-05:00" diff --git a/deployment/k8s/charts/components/captureProxy/Chart.yaml b/deployment/k8s/charts/components/captureProxy/Chart.yaml new file mode 100644 index 000000000..77e9a90f7 --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: capture-proxy +version: 0.1.0 +dependencies: + - name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 diff --git a/deployment/k8s/charts/components/captureProxy/templates/commonConfigs.yaml b/deployment/k8s/charts/components/captureProxy/templates/commonConfigs.yaml new file mode 100644 index 000000000..0ad1da2f2 --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/templates/commonConfigs.yaml @@ -0,0 +1 @@ +{{ include "generic.createCommonConfigs" . 
}} diff --git a/deployment/k8s/charts/components/captureProxy/templates/configMaps.yaml b/deployment/k8s/charts/components/captureProxy/templates/configMaps.yaml new file mode 100644 index 000000000..b848a0add --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/templates/configMaps.yaml @@ -0,0 +1,6 @@ +{{- include "generic.createConfigMaps" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) + "include" .Template.Include + "Template" .Template +) | indent 0 }} \ No newline at end of file diff --git a/deployment/k8s/charts/components/captureProxy/templates/deployment.yaml b/deployment/k8s/charts/components/captureProxy/templates/deployment.yaml new file mode 100644 index 000000000..81c6daf8f --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/templates/deployment.yaml @@ -0,0 +1,45 @@ +{{ $mountName := "env-vars" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "generic.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "generic.fullname" . }} + template: + metadata: + annotations: + rollme: {{ randAlphaNum 5 | quote }} + labels: + app: {{ include "generic.fullname" . }} + env: v1 + spec: + initContainers: + {{- include "generic.setupEnvLoadInitContainer" (merge . (dict + "MountName" $mountName + "include" .Template.Include)) | nindent 8 }} + - name: wait-for-kafka + image: bitnami/kubectl:latest # or any image with curl/kubectl + command: [ 'sh', '-c', + 'until kubectl wait --for=condition=Ready kafka/captured-traffic -n {{.Release.Namespace }} --timeout=10s; do echo waiting for kafka cluster is ready; sleep 1; done' ] + containers: + - name: captureproxy + image: migrations/capture_proxy:latest + imagePullPolicy: IfNotPresent + + command: + - "/bin/sh" + - "-c" + - | + source /shared/vars.sh + exec /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.CaptureProxy $ARGS + ports: + - containerPort: 9200 + volumeMounts: + - name: {{ $mountName }} + mountPath: /shared + volumes: + - name: {{ $mountName }} + emptyDir: {} diff --git a/deployment/k8s/charts/components/captureProxy/templates/service.yaml b/deployment/k8s/charts/components/captureProxy/templates/service.yaml new file mode 100644 index 000000000..216ffd61d --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/templates/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "generic.fullname" . }} +spec: + selector: + app: {{ include "generic.fullname" . 
}} + env: v1 + ports: + - protocol: TCP + port: 9200 + targetPort: 9200 + type: ClusterIP diff --git a/deployment/k8s/charts/components/captureProxy/values.yaml b/deployment/k8s/charts/components/captureProxy/values.yaml new file mode 100644 index 000000000..e232b00ae --- /dev/null +++ b/deployment/k8s/charts/components/captureProxy/values.yaml @@ -0,0 +1,23 @@ +nameOverride: "" +fullnameOverride: "" +useSharedConfigs: false +parameters: + destinationUri: + value: "http://sourcecluster.example.com:9200" + allowRuntimeOverride: false + listenPort: + value: 9200 + allowRuntimeOverride: false + noCapture: + present: false + allowRuntimeOverride: true + insecureDestination: + present: true + allowRuntimeOverride: false +# TODO - setup cert management +# sslConfigFile: +# value: "/usr/share/elasticsearch/config/proxy_tls.yml" +# allowRuntimeOverride: false + otelCollectorEndpoint: + value: "http://otel-collector:4317" + allowRuntimeOverride: false diff --git a/deployment/k8s/charts/components/migrationConsole/.helmignore b/deployment/k8s/charts/components/migrationConsole/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/k8s/charts/components/migrationConsole/Chart.yaml b/deployment/k8s/charts/components/migrationConsole/Chart.yaml new file mode 100644 index 000000000..ac9e99106 --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: migration-console +description: A Helm chart to install the Migration Console +type: application +version: 0.1.0 +appVersion: "1.16.0" +dependencies: + - name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 \ No newline at end of file diff --git a/deployment/k8s/charts/components/migrationConsole/templates/commonConfigs.yaml b/deployment/k8s/charts/components/migrationConsole/templates/commonConfigs.yaml new file mode 100644 index 000000000..0ad1da2f2 --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/templates/commonConfigs.yaml @@ -0,0 +1 @@ +{{ include "generic.createCommonConfigs" . }} diff --git a/deployment/k8s/charts/components/migrationConsole/templates/configmaps.yaml b/deployment/k8s/charts/components/migrationConsole/templates/configmaps.yaml new file mode 100644 index 000000000..b848a0add --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/templates/configmaps.yaml @@ -0,0 +1,6 @@ +{{- include "generic.createConfigMaps" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) 
+ "include" .Template.Include + "Template" .Template +) | indent 0 }} \ No newline at end of file diff --git a/deployment/k8s/charts/components/migrationConsole/templates/deployment.yaml b/deployment/k8s/charts/components/migrationConsole/templates/deployment.yaml new file mode 100644 index 000000000..04582a4ca --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/templates/deployment.yaml @@ -0,0 +1,71 @@ +{{ $mountName := "all-configs" }} +{{ $envVarMountName := "env-vars" }} +{{ $sharedLogsVolumeEnabled := false }} +{{ $snapshotVolumeEnabled := false }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "generic.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "generic.fullname" . }} + template: + metadata: + annotations: + rollme: {{ randAlphaNum 5 | quote }} + labels: + app: {{ include "generic.fullname" . }} + env: v1 + spec: + serviceAccountName: configmap-watcher + initContainers: + {{- include "generic.setupEnvLoadInitContainer" (merge (dict + "MountName" $envVarMountName + "include" .Template.Include) .) | nindent 8 }} + {{- include "generic.setupServicesYamlContainer" (merge (dict + "MountName" "merged-config" + "include" .Template.Include) .) | nindent 8 }} + containers: + - name: console + image: migrations/migration_console:latest + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-c" + - | + source /shared/vars.sh + export START_API_COMMAND="pipenv run python /root/console_api/manage.py runserver_plus 0.0.0.0:8000 --cert-file cert.crt" + export WAIT_AND_DO_NOTHING_COMMAND="tail -f /dev/null" + migrationEnabled=$(echo "$@" | grep -o -- "--migrationApiEnabled" | wc -l); if [ "$migrationEnabled" -gt 0 ]; then $START_API_COMMAND; else $WAIT_AND_DO_NOTHING_COMMAND; fi > run.sh + exec migrationEnabled $ARGS + volumeMounts: + {{- if $sharedLogsVolumeEnabled -}} + - name: shared-logs + mountPath: /shared-logs-output + {{- end }} + - name: {{ $envVarMountName }} + mountPath: /shared + - name: merged-config + mountPath: /etc/ + subPath: migration_services.yaml + {{- if $snapshotVolumeEnabled -}} + - name: snapshot-volume + mountPath: /storage + {{- end }} + volumes: + - name: {{ $envVarMountName }} + emptyDir: { } + - name: merged-config + emptyDir: { } + {{- if $sharedLogsVolumeEnabled -}} + - name: shared-logs + persistentVolumeClaim: + claimName: {{ .Values.sharedLogsPvc }} + {{- end -}} + {{- if $snapshotVolumeEnabled -}} + - name: snapshot-volume + persistentVolumeClaim: + claimName: {{ .Values.snapshotVolumePvc -}} + {{- end }} \ No newline at end of file diff --git a/deployment/k8s/charts/components/migrationConsole/templates/rbac.yaml b/deployment/k8s/charts/components/migrationConsole/templates/rbac.yaml new file mode 100644 index 000000000..66ea16fb2 --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/templates/rbac.yaml @@ -0,0 +1,28 @@ +# kubernetes/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: configmap-watcher + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: configmap-watcher +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "watch", "list"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-watcher +subjects: + - kind: ServiceAccount + name: configmap-watcher +roleRef: + kind: Role + name: configmap-watcher + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deployment/k8s/charts/components/migrationConsole/values.yaml 
b/deployment/k8s/charts/components/migrationConsole/values.yaml new file mode 100644 index 000000000..bf7b6c36e --- /dev/null +++ b/deployment/k8s/charts/components/migrationConsole/values.yaml @@ -0,0 +1,9 @@ +sharedLogsVolumeEnabled: false +sharedLogsPvc: "" +snapshotVolumeEnabled: false +snapshotVolumePvc: "" +useSharedConfigs: false +parameters: + migrationApiEnabled: + value: false + allowRuntimeOverride: false \ No newline at end of file diff --git a/deployment/k8s/charts/components/replayer/.helmignore b/deployment/k8s/charts/components/replayer/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/k8s/charts/components/replayer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/k8s/charts/components/replayer/Chart.yaml b/deployment/k8s/charts/components/replayer/Chart.yaml new file mode 100644 index 000000000..3bb6fbb20 --- /dev/null +++ b/deployment/k8s/charts/components/replayer/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: replayer +description: A Helm chart to install the Traffic Replayer +type: application +version: 0.1.0 +appVersion: "1.16.0" +dependencies: + - name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 diff --git a/deployment/k8s/charts/components/replayer/templates/commonConfigs.yaml b/deployment/k8s/charts/components/replayer/templates/commonConfigs.yaml new file mode 100644 index 000000000..0ad1da2f2 --- /dev/null +++ b/deployment/k8s/charts/components/replayer/templates/commonConfigs.yaml @@ -0,0 +1 @@ +{{ include "generic.createCommonConfigs" . }} diff --git a/deployment/k8s/charts/components/replayer/templates/configmaps.yaml b/deployment/k8s/charts/components/replayer/templates/configmaps.yaml new file mode 100644 index 000000000..b848a0add --- /dev/null +++ b/deployment/k8s/charts/components/replayer/templates/configmaps.yaml @@ -0,0 +1,6 @@ +{{- include "generic.createConfigMaps" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) + "include" .Template.Include + "Template" .Template +) | indent 0 }} \ No newline at end of file diff --git a/deployment/k8s/charts/components/replayer/templates/deployment.yaml b/deployment/k8s/charts/components/replayer/templates/deployment.yaml new file mode 100644 index 000000000..32c040d70 --- /dev/null +++ b/deployment/k8s/charts/components/replayer/templates/deployment.yaml @@ -0,0 +1,45 @@ +{{ $envMountName := "env-vars" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "generic.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "generic.fullname" . }} + template: + metadata: + annotations: + rollme: {{ randAlphaNum 5 | quote }} + labels: + app: {{ include "generic.fullname" . }} + env: v1 + spec: + initContainers: + {{- include "generic.setupEnvLoadInitContainer" (merge . 
(dict + "MountName" $envMountName + "PositionalArguments" (list "targetUri") + "include" .Template.Include)) | nindent 8 }} + - name: wait-for-kafka + image: bitnami/kubectl:latest # or any image with curl/kubectl + command: [ 'sh', '-c', + 'until kubectl wait --for=condition=Ready kafka/captured-traffic -n {{.Release.Namespace }} --timeout=10s; do echo waiting for kafka cluster is ready; sleep 1; done' ] + containers: + - name: replayer + image: migrations/traffic_replayer:latest + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-c" + - | + echo cat /shared/vars.sh... + cat /shared/vars.sh + source /shared/vars.sh + exec /runJavaWithClasspath.sh org.opensearch.migrations.replay.TrafficReplayer $ARGS + volumeMounts: + - name: {{ $envMountName }} + mountPath: /shared + volumes: + - name: {{ $envMountName }} + emptyDir: {} diff --git a/deployment/k8s/charts/components/replayer/templates/roles.yaml b/deployment/k8s/charts/components/replayer/templates/roles.yaml new file mode 100644 index 000000000..0a7f3bb4f --- /dev/null +++ b/deployment/k8s/charts/components/replayer/templates/roles.yaml @@ -0,0 +1,23 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: app-permissions +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: ["kafka.strimzi.io"] # Kafka's API group + resources: ["kafkas"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: app-permissions-binding +subjects: + - kind: ServiceAccount + name: default +roleRef: + kind: Role + name: app-permissions + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deployment/k8s/charts/components/replayer/values.yaml b/deployment/k8s/charts/components/replayer/values.yaml new file mode 100644 index 000000000..de78d3f00 --- /dev/null +++ b/deployment/k8s/charts/components/replayer/values.yaml @@ -0,0 +1,26 @@ +useSharedConfigs: false +parameters: +# positionalParameters: +# - https://opensearchtarget:9200 + speedupFactor: + value: 2 + allowRuntimeOverride: true + insecure: + present: true + allowRuntimeOverride: true + kafkaTrafficBrokers: + value: kafka:9092 + allowRuntimeOverride: true + kafkaTrafficTopic: + value: logging-traffic-topic + allowRuntimeOverride: true + kafkaTrafficGroupId: + value: logging-group-default + allowRuntimeOverride: true +# creates a collision +# otelCollectorEndpoint: +# value: http://otel-collector:4317 +# allowRuntimeOverride: true +# transformerConfigBase64: +# value: 
W3sgIkpzb25Kb2x0VHJhbnNmb3JtZXJQcm92aWRlciI6ClsKICB7CiAgICAic2NyaXB0IjogewogICAgICAib3BlcmF0aW9uIjogInNoaWZ0IiwKICAgICAgInNwZWMiOiB7CiAgICAgICAgIm1ldGhvZCI6ICJtZXRob2QiLAogICAgICAgICJVUkkiOiAiVVJJIiwKICAgICAgICAiaGVhZGVycyI6ICJoZWFkZXJzIiwKICAgICAgICAicGF5bG9hZCI6IHsKICAgICAgICAgICJpbmxpbmVkSnNvbkJvZHkiOiB7CiAgICAgICAgICAgICJ0b3AiOiB7CiAgICAgICAgICAgICAgInRhZ1RvRXhjaXNlIjogewogICAgICAgICAgICAgICAgIioiOiAicGF5bG9hZC5pbmxpbmVkSnNvbkJvZHkudG9wLiYiIAogICAgICAgICAgICAgIH0sCiAgICAgICAgICAgICAgIioiOiAicGF5bG9hZC5pbmxpbmVkSnNvbkJvZHkudG9wLiYiCiAgICAgICAgICAgIH0sCiAgICAgICAgICAiKiI6ICJwYXlsb2FkLmlubGluZWRKc29uQm9keS4mIgogICAgICAgICAgfQogICAgICAgIH0KICAgICAgfQogICAgfQogIH0sIAogewogICAic2NyaXB0IjogewogICAgICJvcGVyYXRpb24iOiAibW9kaWZ5LW92ZXJ3cml0ZS1iZXRhIiwKICAgICAic3BlYyI6IHsKICAgICAgICJVUkkiOiAiPXNwbGl0KCcvZXh0cmFUaGluZ1RvUmVtb3ZlJyxAKDEsJikpIgogICAgIH0KICB9CiB9LAogewogICAic2NyaXB0IjogewogICAgICJvcGVyYXRpb24iOiAibW9kaWZ5LW92ZXJ3cml0ZS1iZXRhIiwKICAgICAic3BlYyI6IHsKICAgICAgICJVUkkiOiAiPWpvaW4oJycsQCgxLCYpKSIKICAgICB9CiAgfQogfQpdCn1dCg== +# allowRuntimeOverride: true diff --git a/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.lock b/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.lock new file mode 100644 index 000000000..710f5ee73 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: strimzi-kafka-operator + repository: https://strimzi.io/charts/ + version: 0.43.0 +digest: sha256:611b2dcc843c4a467cc02b65cffc0dff21c0bc60a21ead527ed4205c8efff75c +generated: "2024-12-29T23:19:44.375687-05:00" diff --git a/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.yaml b/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.yaml new file mode 100644 index 000000000..9183c6f02 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: kafka-cluster +description: The Kafka Cluster +type: application +version: 0.1.0 +appVersion: "2.0.0" + +dependencies: + - name: strimzi-kafka-operator + version: 0.43.0 + repository: "https://strimzi.io/charts/" \ No newline at end of file diff --git a/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/combinedPool.yaml b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/combinedPool.yaml new file mode 100644 index 000000000..6b1e9d59b --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/combinedPool.yaml @@ -0,0 +1,51 @@ +{{- if eq .Values.poolType "never" }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: dual-role + labels: + strimzi.io/cluster: {{ .Values.clusterName }} +spec: + replicas: 1 + roles: + - controller + - broker + storage: + type: jbod + volumes: + - id: 0 + type: ephemeral + size: 1Gi + deleteClaim: true + kraftMetadata: shared +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: Kafka +metadata: + name: {{ .Values.clusterName }} + annotations: + strimzi.io/node-pools: enabled + strimzi.io/kraft: enabled +spec: + kafka: + version: 3.8.0 + metadataVersion: 3.8-IV0 + listeners: + - name: plain + port: 9092 + type: internal + tls: false + - name: tls + port: 9093 + type: internal + tls: true + config: + offsets.topic.replication.factor: 1 + transaction.state.log.replication.factor: 1 + transaction.state.log.min.isr: 1 + default.replication.factor: 1 + min.insync.replicas: 1 + entityOperator: + topicOperator: {} + userOperator: {} +{{- end }} diff --git 
a/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/configuration.yaml b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/configuration.yaml new file mode 100644 index 000000000..a0903ea5c --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/configuration.yaml @@ -0,0 +1,79 @@ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: controller + labels: + strimzi.io/cluster: {{ .Values.clusterName }} +spec: + replicas: {{ .Values.replicas }} + roles: + - controller + {{- if not (hasKey .Values "dedicatedController") }} + - broker + {{ end }} + storage: + type: jbod + volumes: + - id: 0 + type: {{ .Values.storageType }} + size: {{ .Values.storageSize }} + kraftMetadata: shared + deleteClaim: {{ .Values.storageDeleteClaim }} +--- +{{- if hasKey .Values "dedicatedController" }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: broker + labels: + strimzi.io/cluster: {{ .Values.clusterName }} +spec: + replicas: {{ .Values.dedicatedController.replicas }} + roles: + - broker + storage: + type: jbod + volumes: + - id: 0 + type: {{ .Values.storageType }} + size: {{ .Values.dedicatedController.storageSize }} + kraftMetadata: shared + deleteClaim: {{ .Values.storageDeleteClaim }} +--- +{{- end }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: Kafka +metadata: + name: {{ .Values.clusterName }} + annotations: + strimzi.io/node-pools: enabled + strimzi.io/kraft: enabled +spec: + kafka: + version: 3.8.0 + metadataVersion: 3.8-IV0 + listeners: + - name: plain + port: 9092 + type: internal + tls: false + - name: tls + port: 9093 + type: internal + tls: true + - name: interbroker + port: 9094 + type: internal + tls: true + config: + offsets.topic.replication.factor: 1 + transaction.state.log.replication.factor: 1 + transaction.state.log.min.isr: 1 + default.replication.factor: 1 + min.insync.replicas: 1 + # Add these KRaft-specific configurations + inter.broker.listener.name: interbroker + listener.security.protocol.map: PLAINTEXT:PLAIN,SSL:SSL,INTERBROKER:SSL + entityOperator: + topicOperator: {} + userOperator: {} \ No newline at end of file diff --git a/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/separatePools.yaml b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/separatePools.yaml new file mode 100644 index 000000000..87c554bcc --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/templates/separatePools.yaml @@ -0,0 +1,74 @@ +{{- if eq .Values.poolType "never" }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: controller + labels: + strimzi.io/cluster: {{ .Values.clusterName }} +spec: + replicas: 3 + roles: + - controller + {{- if not eq .Values.poolType "separatePools" }} + - broker + {{ end }} + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: 100Gi + kraftMetadata: shared + deleteClaim: false +--- +{{- if eq .Values.poolType "separatePools" }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: broker + labels: + strimzi.io/cluster: {{ .Values.clusterName }} +spec: + replicas: 3 + roles: + - broker + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: 100Gi + kraftMetadata: shared + deleteClaim: false +--- +{{- end }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: Kafka +metadata: + name: {{ .Values.clusterName }} + annotations: + strimzi.io/node-pools: enabled + strimzi.io/kraft: enabled +spec: + kafka: + version: 3.8.0 + 
metadataVersion: 3.8-IV0 + listeners: + - name: plain + port: 9092 + type: internal + tls: false + - name: tls + port: 9093 + type: internal + tls: true + config: + offsets.topic.replication.factor: 3 + transaction.state.log.replication.factor: 3 + transaction.state.log.min.isr: 2 + default.replication.factor: 3 + min.insync.replicas: 2 + entityOperator: + topicOperator: {} + userOperator: {} +{{- end }} \ No newline at end of file diff --git a/deployment/k8s/charts/sharedResources/baseKafkaCluster/values.yaml b/deployment/k8s/charts/sharedResources/baseKafkaCluster/values.yaml new file mode 100644 index 000000000..f9b47c0a3 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/baseKafkaCluster/values.yaml @@ -0,0 +1,7 @@ +replicas: 1 +storageType: ephemeral +storageSize: 100Gi +storageDeleteClaim: true +dedicatedController: + replicas: 1 + storageSize: 10Gi diff --git a/deployment/k8s/charts/sharedResources/helmCommon/.helmignore b/deployment/k8s/charts/sharedResources/helmCommon/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/k8s/charts/sharedResources/helmCommon/Chart.yaml b/deployment/k8s/charts/sharedResources/helmCommon/Chart.yaml new file mode 100644 index 000000000..d7f0027c2 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: helm-common +version: 0.1.0 +description: A common library for the Migration Assistant +type: library diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/_createConfigMapsForParameters.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/_createConfigMapsForParameters.tpl new file mode 100644 index 000000000..8940d6411 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/_createConfigMapsForParameters.tpl @@ -0,0 +1,56 @@ +{{- define "generic.createParameterConfigMap" }} +{{ $key := .Key }} +{{ $param := .Param }} +{{ $namePrefix := .Prefix }} +{{ $namespace := .NameSpace }} +{{- $weight := .Weight | default 0 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $namePrefix }}{{ kebabcase $key }}-default + namespace: {{ $namespace }} + labels: + type: default + annotations: + helm.sh/hook-weight: "{{ $weight }}" +data: + {{- if hasKey $param "value" }} + value: "{{ $param.value }}" + {{- else if hasKey $param "list" }} + list: | + {{- range $item := $param.list }} + - "{{ $item }}" + {{- end }} + {{- else if hasKey $param "list" }} + data: | + {{ $param.data | toYaml | indent 4 }} + {{- else }} + present: "true" + {{- end }} + +{{- if hasKey $param "allowRuntimeOverride" | ternary $param.allowRuntimeOverride true }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $namePrefix }}{{ kebabcase $key }} + namespace: {{ $namespace }} + labels: + type: override + annotations: + helm.sh/hook-weight: "{{ $weight }}" +data: {} # Empty configmap for user overrides +{{- end }} +{{- end -}} + +{{- define "generic.createConfigMaps" }} +{{- $outerCtx := . 
-}} +{{- $packageName := .PackageName -}} +{{- range $key, $param := .Parameters }} +{{- include "generic.createParameterConfigMap" (merge (dict + "Key" $key + "Param" $param + "Prefix" (printf "%s-" $packageName)) $outerCtx) }} +{{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/_createGlobalConfigMapsForParameters.tpl.yaml b/deployment/k8s/charts/sharedResources/helmCommon/templates/_createGlobalConfigMapsForParameters.tpl.yaml new file mode 100644 index 000000000..3dd86fcc0 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/_createGlobalConfigMapsForParameters.tpl.yaml @@ -0,0 +1,16 @@ +{{- define "generic.createCommonConfigs" -}} +{{- $fullname := include "generic.fullname" . -}} +{{- if not (lookup "v1" "ConfigMap" .Release.Namespace $fullname) }} +{{- $weight := (dig "weights" "sharedConfig" 0 (.Values | merge (dict))) }} + +{{- range $key, $param := .Values.globalParameters }} +{{ include "generic.createParameterConfigMap" (dict + "Weight" (dig "weights" "sharedConfigMaps" 0 (.Values | merge (dict))) + "Key" $key + "Param" $param + "NameSpace" (dig "namespace" "name" "default" (.Values | merge (dict)) ) +) }} +{{- end }} + +{{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/_helpers.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/_helpers.tpl new file mode 100644 index 000000000..35c4fbfbe --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/_helpers.tpl @@ -0,0 +1,15 @@ +{{/* +Create a default fully qualified app name. +*/}} +{{- define "generic.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupInitContainerToLoadEnvVariablesAndArgs.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupInitContainerToLoadEnvVariablesAndArgs.tpl new file mode 100644 index 000000000..b8c51d938 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupInitContainerToLoadEnvVariablesAndArgs.tpl @@ -0,0 +1,28 @@ +{{- define "generic.setupEnvLoadInitContainer" -}} +{{- $mountName := .MountName -}} +- name: arg-prep + image: migrations/k8s_config_map_util_scripts + imagePullPolicy: IfNotPresent + env: + {{- include "generic.pullEnvVarsFromConfigMaps" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) + "include" .Template.Include + "Template" .Template) | indent 4 }} + + command: + - /bin/sh + - -c + - | + {{- include "generic.buildArgsFromEnvVarParameters" (dict + "Parameters" .Values.parameters + "PackageName" (include "generic.fullname" .) 
+ "PositionalArguments" .PositionalArguments + "include" .Template.Include + "Template" .Template) | nindent 6 }} + /.venv/bin/python print_env_vars_as_exports.py > /shared/vars.sh + + volumeMounts: + - name: {{ $mountName }} + mountPath: /shared +{{- end -}} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupServicesYaml.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupServicesYaml.tpl new file mode 100644 index 000000000..fb1520475 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/_setupServicesYaml.tpl @@ -0,0 +1,16 @@ +{{- define "generic.setupServicesYamlContainer" -}} +{{- $mountName := .MountName -}} +- name: service-yaml-agent + image: migrations/k8s_config_map_util_scripts + imagePullPolicy: IfNotPresent + restartPolicy: Always + volumeMounts: + - name: {{ $mountName }} + mountPath: /config + subPath: migration_services.yaml + command: + - /.venv/bin/python + - config_watcher.py + - "--namespace={{.Release.Namespace}}" + - "--outfile=/config/migration_services.yaml" +{{- end -}} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_buildArgsFromEnvVarParameters.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_buildArgsFromEnvVarParameters.tpl new file mode 100644 index 000000000..979068049 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_buildArgsFromEnvVarParameters.tpl @@ -0,0 +1,65 @@ +{{- define "generic.buildArgsFromEnvVarParameters" -}} + {{- $argsName := .ArgsVarName | default "ARGS" -}} + {{- $lines := list -}} + {{- $lines = append $lines "set -e" -}} + + {{- /* Default environment variables if not set */ -}} + {{- range $key, $param := .Parameters }} + {{- $envVarName := snakecase $key | upper -}} + {{- $lines = append $lines (printf "if [ -z \"$%s\" ]; then" $envVarName) -}} + {{- $lines = append $lines (printf " export %s=\"$%s_DEFAULT\"" $envVarName $envVarName) -}} + {{- $lines = append $lines "fi" -}} + {{- end }} + + {{- /* Construct the command based on parameter types */ -}} + {{- $keyToPositionMap := dict }} + {{- $positionalMap := dict }} + {{- if .PositionalArguments -}} + {{- range $i, $v := .PositionalArguments }} + {{- $_ := set $keyToPositionMap $v $i }} + {{- end }} + {{- end }} + + {{- range $key, $param := .Parameters }} + {{- $envVarName := snakecase $key | upper -}} + {{- $formattedKeyFlagName := "" -}} + {{- if hasKey $keyToPositionMap $key -}} + {{- $positionalMap = merge $positionalMap (dict (get $keyToPositionMap $key) $envVarName) -}} + {{- else -}} + {{- $formattedKeyFlagName = printf " --%s " $key -}} + {{- end -}} + + {{- if hasKey $param "value" -}} + {{- if not (eq "" $formattedKeyFlagName) -}} + {{- $lines = append $lines (printf "if [ -n \"$%s\" ]; then" $envVarName) -}} + {{- $lines = append $lines (printf " export %s=\"$%s %s $%s\"" $argsName $argsName $formattedKeyFlagName $envVarName) -}} + {{- $lines = append $lines (printf "fi") -}} + {{- end -}} + {{- else if hasKey $param "list" -}} + {{- $lines = append $lines (printf "if [ -n \"$%s\" ]; then" $envVarName) -}} + {{- $lines = append $lines (printf " LIST_ITEMS=$(echo \"$%s\" | yq eval '.[ ]' - | xargs -I{} echo -n \"{} \")" $envVarName $envVarName) -}} + {{- if not (eq "" $formattedKeyFlagName) -}} + {{- $lines = append $lines (printf " export %s=\"$%s %s $LIST_ITEMS\"" $argsName $argsName $formattedKeyFlagName) -}} + {{- end -}} + {{- $lines = append 
$lines (printf "fi") -}} + {{- else if hasKey $param "present" -}} + {{- $lines = append $lines (printf "if [ \"$%s\" = \"true\" ] || [ \"$%s\" = \"1\" ]; then" $envVarName $envVarName) -}} + {{- if eq "" $formattedKeyFlagName -}} + {{ fail (printf "Got key %s as a boolean type ('present') and it is also specified as positional" $key) }} + {{- end -}} + {{- $lines = append $lines (printf " export %s=\"$%s %s\"" $argsName $argsName $formattedKeyFlagName) -}} + {{- $lines = append $lines (printf "fi") -}} + {{- else -}} + {{ fail (printf "Key %s did no specify 'value', 'list', or 'present' binding" $key) }} + {{- end -}} + {{- end -}} + + {{- if len $positionalMap -}} + {{- $orderedArgs := "" }} + {{- range $i := until (len $positionalMap) }} + {{- $orderedArgs = printf "%s $%s" $orderedArgs (get $positionalMap (toString $i)) -}} + {{- end -}} + {{- $lines = append $lines (printf "export %s=\"%s $%s\"" $argsName $orderedArgs $argsName) -}} + {{- end }} + {{- join "\n" $lines -}} +{{- end -}} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_pullEnvVariablesFromConfigMap.tpl b/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_pullEnvVariablesFromConfigMap.tpl new file mode 100644 index 000000000..643acd215 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/templates/initContainerToLoadEnvScript/_pullEnvVariablesFromConfigMap.tpl @@ -0,0 +1,20 @@ +{{- define "generic.pullEnvVarsFromConfigMaps" }} +{{- $packageName := .PackageName -}} +{{- range $sourceKey, $param := .Parameters -}} +{{- $configMapKey := printf "%s-%s" $packageName (kebabcase $sourceKey) }} +{{- $envName := ( snakecase $sourceKey | upper) }} +- name: {{ $envName }}_DEFAULT + valueFrom: + configMapKeyRef: + name: {{ $configMapKey }}-default + key: {{ if hasKey $param "value" }}value{{ else if hasKey $param "list" }}list{{ else }}present{{ end }} {{/*TODO be explicit*/}} +{{- if hasKey $param "allowRuntimeOverride" | ternary $param.allowRuntimeOverride true }} +- name: {{ $envName }} + valueFrom: + configMapKeyRef: + name: {{ $configMapKey }} + key: {{ if hasKey $param "value" }}value{{ else if hasKey $param "list" }}list{{ else }}present{{ end }} + optional: true +{{- end }} +{{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/helmCommon/values.yaml b/deployment/k8s/charts/sharedResources/helmCommon/values.yaml new file mode 100644 index 000000000..ecf2fa7b1 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/helmCommon/values.yaml @@ -0,0 +1,7 @@ +weights: + sharedConfigMaps: 0 +globalParameters: + foo: + value: "Foo" + bar: + value: "bar" diff --git a/deployment/k8s/charts/sharedResources/logsVolume/.helmignore b/deployment/k8s/charts/sharedResources/logsVolume/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/logsVolume/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/k8s/charts/sharedResources/logsVolume/Chart.yaml b/deployment/k8s/charts/sharedResources/logsVolume/Chart.yaml new file mode 100644 index 000000000..0a6df6f1c --- /dev/null +++ b/deployment/k8s/charts/sharedResources/logsVolume/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: logs-volume +description: A utility Helm chart to create a shared logs volume +type: application +version: 0.1.0 +appVersion: "1.16.0" diff --git a/deployment/k8s/charts/sharedResources/logsVolume/templates/pvc.yml b/deployment/k8s/charts/sharedResources/logsVolume/templates/pvc.yml new file mode 100644 index 000000000..4cffcffa4 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/logsVolume/templates/pvc.yml @@ -0,0 +1,17 @@ +# Only create PVC, if PVC with same name does not already exist +{{- $pvc := lookup "v1" "PersistentVolumeClaim" .Release.Namespace .Values.volume.pvcClaimName -}} +{{- if not $pvc }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.volume.pvcClaimName }} +spec: + accessModes: + - {{ .Values.volume.accessMode }} + resources: + requests: + storage: {{ .Values.volume.size }} + {{- if .Values.volume.storageClass }} + storageClassName: {{ .Values.volume.storageClass }} + {{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/logsVolume/values.yaml b/deployment/k8s/charts/sharedResources/logsVolume/values.yaml new file mode 100644 index 000000000..9c61a3b44 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/logsVolume/values.yaml @@ -0,0 +1,8 @@ +volume: + volumeName: shared-logs + mountPath: /shared-logs-output # Path inside the container where the volume should be mounted + # Persistent volume claim settings + pvcClaimName: shared-logs-pvc + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi diff --git a/deployment/k8s/charts/sharedResources/sharedConfigs/Chart.yaml b/deployment/k8s/charts/sharedResources/sharedConfigs/Chart.yaml new file mode 100644 index 000000000..a39d1c09d --- /dev/null +++ b/deployment/k8s/charts/sharedResources/sharedConfigs/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: shared-configs +description: A utility Helm chart to hold the configmaps for the common settings between all applications and the migration console. +type: application +version: 0.1.0 +appVersion: "1.16.0" diff --git a/deployment/k8s/charts/sharedResources/sharedConfigs/templates/configMaps.yaml b/deployment/k8s/charts/sharedResources/sharedConfigs/templates/configMaps.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/deployment/k8s/charts/sharedResources/sharedConfigs/values.yaml b/deployment/k8s/charts/sharedResources/sharedConfigs/values.yaml new file mode 100644 index 000000000..fe8a6c243 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/sharedConfigs/values.yaml @@ -0,0 +1,4 @@ +parameters: + sourceCluster: +# value: foo + allowRuntimeOverride: true \ No newline at end of file diff --git a/deployment/k8s/charts/sharedResources/snapshotVolume/.helmignore b/deployment/k8s/charts/sharedResources/snapshotVolume/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/snapshotVolume/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/k8s/charts/sharedResources/snapshotVolume/Chart.yaml b/deployment/k8s/charts/sharedResources/snapshotVolume/Chart.yaml new file mode 100644 index 000000000..2ae651bca --- /dev/null +++ b/deployment/k8s/charts/sharedResources/snapshotVolume/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: snapshot-volume +description: A utility Helm chart to create a storage volume for snapshots +type: application +version: 0.1.0 +appVersion: "1.16.0" diff --git a/deployment/k8s/charts/sharedResources/snapshotVolume/templates/pvc.yml b/deployment/k8s/charts/sharedResources/snapshotVolume/templates/pvc.yml new file mode 100644 index 000000000..4cffcffa4 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/snapshotVolume/templates/pvc.yml @@ -0,0 +1,17 @@ +# Only create PVC, if PVC with same name does not already exist +{{- $pvc := lookup "v1" "PersistentVolumeClaim" .Release.Namespace .Values.volume.pvcClaimName -}} +{{- if not $pvc }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.volume.pvcClaimName }} +spec: + accessModes: + - {{ .Values.volume.accessMode }} + resources: + requests: + storage: {{ .Values.volume.size }} + {{- if .Values.volume.storageClass }} + storageClassName: {{ .Values.volume.storageClass }} + {{- end }} +{{- end }} diff --git a/deployment/k8s/charts/sharedResources/snapshotVolume/values.yaml b/deployment/k8s/charts/sharedResources/snapshotVolume/values.yaml new file mode 100644 index 000000000..3729b0678 --- /dev/null +++ b/deployment/k8s/charts/sharedResources/snapshotVolume/values.yaml @@ -0,0 +1,8 @@ +volume: + volumeName: snapshot-volume + mountPath: /storage # Path inside the container where the volume should be mounted + # Persistent volume claim settings + pvcClaimName: snapshot-volume-pvc + storageClass: "" + accessMode: ReadWriteOnce + size: 5Gi diff --git a/deployment/k8s/charts/tests/testConsole/Chart.yaml b/deployment/k8s/charts/tests/testConsole/Chart.yaml new file mode 100644 index 000000000..275c3555d --- /dev/null +++ b/deployment/k8s/charts/tests/testConsole/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: test-console +description: A Helm chart to install a test pod based upon the Migration Console container +type: application +version: 0.1.0 +appVersion: "1.16.0" +dependencies: + - name: helm-common + repository: file://../../sharedResources/helmCommon + version: 0.1.0 diff --git a/deployment/k8s/charts/tests/testConsole/templates/deployment.yaml b/deployment/k8s/charts/tests/testConsole/templates/deployment.yaml new file mode 100644 index 000000000..86eceda6b --- /dev/null +++ b/deployment/k8s/charts/tests/testConsole/templates/deployment.yaml @@ -0,0 +1,46 @@ +{{ $envVarMountName := "env-vars" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "generic.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "generic.fullname" . }} + template: + metadata: + annotations: + rollme: {{ randAlphaNum 5 | quote }} + labels: + app: {{ include "generic.fullname" . 
}} + env: v1 + spec: + serviceAccountName: configmap-watcher + initContainers: + {{- include "generic.setupEnvLoadInitContainer" (merge (dict + "MountName" $envVarMountName + "include" .Template.Include) .) | nindent 8 }} + {{- include "generic.setupServicesYamlContainer" (merge (dict + "MountName" "merged-config" + "include" .Template.Include) .) | nindent 8 }} + containers: + - name: test-console + image: migrations/migration_console:latest + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-c" + - | + tail -f /dev/null + volumeMounts: + - name: {{ $envVarMountName }} + mountPath: /shared2 + - name: merged-config + mountPath: /etc + subPath: migration_services.yaml + volumes: + - name: {{ $envVarMountName }} + emptyDir: { } + - name: merged-config + emptyDir: { } diff --git a/deployment/k8s/charts/tests/testConsole/templates/rbac.yaml b/deployment/k8s/charts/tests/testConsole/templates/rbac.yaml new file mode 100644 index 000000000..53b82f381 --- /dev/null +++ b/deployment/k8s/charts/tests/testConsole/templates/rbac.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-watcher-test +subjects: + - kind: ServiceAccount + name: configmap-watcher +roleRef: + kind: Role + name: configmap-watcher + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deployment/k8s/charts/valueGenerator/Chart.yaml b/deployment/k8s/charts/valueGenerator/Chart.yaml new file mode 100644 index 000000000..39b95548f --- /dev/null +++ b/deployment/k8s/charts/valueGenerator/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +description: "This chart deploys nothing and is only meant to be used to consume fragments of yaml and root them into an aggregated file" +name: valueGenerator +version: 0.1.0 diff --git a/deployment/k8s/charts/valueGenerator/templates/values.yaml.tpl b/deployment/k8s/charts/valueGenerator/templates/values.yaml.tpl new file mode 100644 index 000000000..ce0d701e1 --- /dev/null +++ b/deployment/k8s/charts/valueGenerator/templates/values.yaml.tpl @@ -0,0 +1,11 @@ +{{- define "values" -}} +{{- range $key, $value := .Values }} +{{ $key }}: +{{- if kindIs "string" $value }} +{{- $value | fromYaml | toYaml | nindent 2 }} +{{- else }} +{{- $value | toYaml | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} +{{ include "values" . }} diff --git a/deployment/k8s/linkSubChartsToDependencies.sh b/deployment/k8s/linkSubChartsToDependencies.sh new file mode 100755 index 000000000..6e641a186 --- /dev/null +++ b/deployment/k8s/linkSubChartsToDependencies.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +# Function to create symlinks for a chart's dependencies +process_chart() { + local chart_dir="$1" + local chart_yaml="$chart_dir/Chart.yaml" + + # Check if Chart.yaml exists + if [[ ! -f "$chart_yaml" ]]; then + return + fi + + echo "Processing $chart_yaml..." 
+ + # Find file:// dependencies using yq + local deps=$(yq e '.dependencies[] | select(.repository | test("^file://")) | [.name, .repository] | @csv' "$chart_yaml") + + # Check if there are any dependencies + if [ -z "$deps" ]; then + echo "No file:// dependencies found in $chart_yaml" + return + fi + + # Create charts directory if it doesn't exist + mkdir -p "$chart_dir/charts" + + + while IFS=, read -r name repo; do + # Clean up the quotes and file:// prefix + name=$(echo "$name" | tr -d '"') + repo=$(echo "$repo" | tr -d '"' | sed 's|file://||') + + # Convert relative path to absolute based on Chart.yaml location + local abs_repo="$(cd "$(dirname "$chart_yaml")/$repo"; pwd)" + + # Get the actual chart name from the target's Chart.yaml + local target_name=$(yq e '.name' "$abs_repo/Chart.yaml") + local link_path="$chart_dir/charts/$target_name" + + echo "Creating symlink for $target_name (from dependency $name): $abs_repo -> $link_path" + + # Remove existing symlink if it exists + [ -L "$link_path" ] && rm "$link_path" + + # Create the symlink with absolute path + ln -s "$abs_repo" "$link_path" + + done <<< "$deps" + + echo "Running helm dependency build for $chart_dir..." + helm dependency build "$chart_dir" + + # Remove any tgz files that were created + rm -f "$chart_dir/charts"/*.tgz +} + +# Find all Chart.yaml files and process them +find . -name Chart.yaml -exec dirname {} \; | while read -r chart_dir; do + process_chart "$chart_dir" +done \ No newline at end of file diff --git a/deployment/k8s/localTesting.sh b/deployment/k8s/localTesting.sh new file mode 100755 index 000000000..9df297808 --- /dev/null +++ b/deployment/k8s/localTesting.sh @@ -0,0 +1,30 @@ +minikube start +eval $(minikube docker-env) + +helm dependency build charts/aggregates/mockCustomerClusters +./linkSubChartsToDependencies.sh charts/aggregates/mockCustomerClusters +helm install mcc -n mcc charts/aggregates/mockCustomerClusters --create-namespace + + +helm dependency build charts/aggregates/migrationAssistant +./linkSubChartsToDependencies.sh charts/aggregates/migrationAssistant +helm install ma -n ma charts/aggregates/migrationAssistant --create-namespace + +# Test with +# kc exec -n ma -it migration-console-7c846764b8-zvf6w -- curl https://opensearch-cluster-master.mcc:9200/ -u admin:myStrongPassword123! 
--insecure + +kubectl port-forward service/capture-proxy 9200:9200 & +kubectl port-forward service/elasticsearch 19200:9200 & +kubectl port-forward service/opensearch 29200:9200 & + +# kubectl get secret observability-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo +kc port-forward service/observability-grafana 3000:80 + +# this hasn't been tested recently and will be folded into packages + +# just the operator, not any clusters +helm install strimzi-cluster-operator --set replicas=1 --version 0.43.0 oci://quay.io/strimzi-helm/strimzi-kafka-operator +helm install capture-traffic-kafka-cluster ./capturedTrafficKafkaCluster --set environment=test +helm install replayer ./replayer + +helm install target opensearch/opensearch --version 2.21.0 --values ChartValues/localtesting/opensearchTarget.yaml diff --git a/deployment/k8s/minikubeLocal.sh b/deployment/k8s/minikubeLocal.sh new file mode 100755 index 000000000..7dd1d2eeb --- /dev/null +++ b/deployment/k8s/minikubeLocal.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +# To have running during local development + +usage() { + echo "Usage: $0 [--start | --pause | --delete]" + exit 1 +} + +kill_minikube_processes() { + mount_process_id=$(pgrep -f "minikube mount") + if [ -n "$mount_process_id" ]; then + kill "$mount_process_id" + fi + tunnel_process_id=$(pgrep -f "minikube tunnel") + if [ -n "$tunnel_process_id" ]; then + kill "$tunnel_process_id" + fi +} + +start() { + helm repo add opensearch-operator https://opensearch-project.github.io/opensearch-k8s-operator/ + helm repo add strimzi https://strimzi.io/charts/ + + minikube start + minikube mount .:/opensearch-migrations > /dev/null 2>&1 & + minikube tunnel > /dev/null 2>&1 & +} + +pause() { + kill_minikube_processes + minikube pause +} + +delete() { + kill_minikube_processes + minikube delete +} + +# Check if the script was called with no arguments +if [ $# -eq 0 ]; then + usage +fi + +# Allow executing this script from any dir +script_abs_path=$(readlink -f "$0") +script_dir_abs_path=$(dirname "$script_abs_path") +cd "$script_dir_abs_path" || exit + +cd ../.. || exit + +# Parse the argument and call the appropriate function +case "$1" in + --start) + start + ;; + --pause) + pause + ;; + --delete) + delete + ;; + *) + echo "Invalid option: $1" + usage + ;; +esac + diff --git a/deployment/k8s/testRunners/configureMinikubeAndRun.sh b/deployment/k8s/testRunners/configureMinikubeAndRun.sh new file mode 100755 index 000000000..b2364f865 --- /dev/null +++ b/deployment/k8s/testRunners/configureMinikubeAndRun.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +if [ -z "$MINIKUBE_ACTIVE_DOCKERD" ]; then + minikube start + eval $(minikube docker-env) + ../../cdk/opensearch-service-migration/buildDockerImages.sh + + kubectl create namespace ma + kubectl create namespace mcc +fi + +"$@" \ No newline at end of file diff --git a/deployment/k8s/testRunners/installHelmChartsAndRun.sh b/deployment/k8s/testRunners/installHelmChartsAndRun.sh new file mode 100755 index 000000000..257bd2e3c --- /dev/null +++ b/deployment/k8s/testRunners/installHelmChartsAndRun.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +ORIGINAL_DIR=$(pwd) +cd "$(dirname "$0")/../" || exit + +helm install -n mcc mcc charts/aggregates/mockCustomerClusters +if [ $? 
-eq 0 ]; then + echo "installed mockCustomerClusters in 'mcc' namespace" +else + echo Rebuilding dependency + helm dependency build charts/aggregates/mockCustomerClusters + helm install -n mcc mcc charts/aggregates/mockCustomerClusters +fi + +helm install -n ma ma charts/aggregates/migrationAssistant +if [ $? -eq 0 ]; then + echo "installed migrationAssistant in 'ma' namespace" +else + echo Rebuilding dependency + helm dependency build charts/aggregates/migrationAssistant + helm install -n ma ma charts/aggregates/migrationAssistant +fi + +helm install -n ma ma charts/tests/testConsole +if [ $? -eq 0 ]; then + echo "installed testConsole in 'ma' namespace" +else + echo Rebuilding dependency + helm dependency build charts/tests/testConsole + helm install -n ma ma charts/tests/testConsole +fi + +cd "$ORIGINAL_DIR" || exit + +migration_pod=$(kubectl get pods -n ma -l app=migration-console --field-selector status.phase=Running -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) +export migration_pod + +"$@" + +# Final cleanup so that future runs have a clean environment +#helm delete -n ma ma +#helm delete -n mcc mcc
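
The baseKafkaCluster chart above only defines the node pools and the Kafka custom resource; it presumes the Strimzi cluster operator and its CRDs are already installed. Below is a minimal sketch, run from deployment/k8s, of installing it with the dedicated controller pool enabled; the release name, namespace, and value overrides are illustrative rather than taken from this patch.

# Assumes the Strimzi operator is running (e.g. via the strimzi-kafka-operator chart).
helm install kafka charts/sharedResources/baseKafkaCluster \
  --namespace ma --create-namespace \
  --set clusterName=migration-kafka \
  --set replicas=3 \
  --set storageType=persistent-claim \
  --set storageSize=100Gi \
  --set storageDeleteClaim=false \
  --set dedicatedController.replicas=3 \
  --set dedicatedController.storageSize=20Gi

# The "controller" and "broker" KafkaNodePools plus the Kafka CR should appear:
kubectl get kafkanodepools.kafka.strimzi.io,kafkas.kafka.strimzi.io -n ma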
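
The generic.createParameterConfigMap and generic.pullEnvVarsFromConfigMaps helpers pair each declared parameter with two ConfigMaps: <fullname>-<kebab-key>-default, rendered from the chart's values, and an empty <fullname>-<kebab-key> reserved for runtime overrides when allowRuntimeOverride is not disabled. A hypothetical walk-through, assuming a chart whose fullname resolves to test-console and a sourceCluster parameter (both names are assumptions for illustration):

# ConfigMaps rendered by the library templates (names derived from the chart
# fullname plus the kebab-cased parameter key):
kubectl get configmap -n ma | grep source-cluster
#   test-console-source-cluster-default   <- value taken from the chart's values
#   test-console-source-cluster           <- empty, reserved for overrides

# Override the parameter without reinstalling; the arg-prep init container
# exports SOURCE_CLUSTER from this map and only falls back to
# SOURCE_CLUSTER_DEFAULT when it is unset.
kubectl patch configmap test-console-source-cluster -n ma \
  --type merge -p '{"data":{"value":"https://source-cluster:9200"}}'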
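
generic.buildArgsFromEnvVarParameters renders an inline shell snippet for the arg-prep init container rather than YAML. For a hypothetical parameters block containing a snapshotName value and an indexAllowlist list, the generated script looks roughly like the sketch below; exact spacing and the trailing print_env_vars_as_exports.py call differ in the rendered container.

set -e
# Fall back to the -default ConfigMap value when no override is present.
if [ -z "$SNAPSHOT_NAME" ]; then
  export SNAPSHOT_NAME="$SNAPSHOT_NAME_DEFAULT"
fi
if [ -z "$INDEX_ALLOWLIST" ]; then
  export INDEX_ALLOWLIST="$INDEX_ALLOWLIST_DEFAULT"
fi
# "value" parameters become a single flag/value pair...
if [ -n "$SNAPSHOT_NAME" ]; then
  export ARGS="$ARGS --snapshotName $SNAPSHOT_NAME"
fi
# ...while "list" parameters are flattened with yq before being appended.
if [ -n "$INDEX_ALLOWLIST" ]; then
  LIST_ITEMS=$(echo "$INDEX_ALLOWLIST" | yq eval '.[]' - | xargs -I{} echo -n "{} ")
  export ARGS="$ARGS --indexAllowlist $LIST_ITEMS"
fi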
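
Both volume charts guard their PVC with a lookup, so an existing claim with the same name is reused rather than replaced; because lookup returns nothing under helm template or --dry-run, the PVC only renders against a live cluster. A quick check, with illustrative release names:

helm install shared-logs charts/sharedResources/logsVolume -n ma --set volume.size=20Gi
helm install snapshot-vol charts/sharedResources/snapshotVolume -n ma
kubectl get pvc shared-logs-pvc snapshot-volume-pvc -n ma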
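
Tying the helper scripts together, one plausible local loop is sketched below. It assumes minikube, helm, kubectl, and yq are installed and is run from deployment/k8s; the final command handed to installHelmChartsAndRun.sh is only an example of something to run once the charts are up.

./minikubeLocal.sh --start          # start minikube, mount the repo, open a tunnel
./linkSubChartsToDependencies.sh    # symlink file:// chart dependencies and run helm dependency build
cd testRunners
# installHelmChartsAndRun.sh exports $migration_pod before running its arguments,
# so the child shell can, for example, inspect the exports written by the
# arg-prep init container into the shared env-vars volume.
./configureMinikubeAndRun.sh ./installHelmChartsAndRun.sh \
  sh -c 'kubectl exec -n ma "$migration_pod" -- cat /shared2/vars.sh'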