From 3561c3518863f01d3b224c0d934c887903fc13f2 Mon Sep 17 00:00:00 2001
From: Vaibhav Mahajan
Date: Thu, 16 Jan 2025 22:26:39 +0530
Subject: [PATCH] [Conf] Add cluster configuration for single site regression
 suites

Cluster conf includes the following Ceph components:
1. Ceph Nodes - 13
2. Client Nodes - 04
3. mon - 05
4. mgr - 02
5. mds - 05
6. nfs - 05
7. rgw - 03
8. osd - 10 (+2 backup)
9. alertmanager, grafana, node-exporter, prometheus - 01

Signed-off-by: Vaibhav Mahajan
---
 ...13node-4client-single-site-regression.yaml | 120 +++++++++++++++
 .../single-site-deploy-and-configure.yaml     | 145 ++++++++++++++++++
 2 files changed, 265 insertions(+)
 create mode 100644 conf/squid/common/13node-4client-single-site-regression.yaml
 create mode 100644 suites/squid/common/regression/single-site-deploy-and-configure.yaml

diff --git a/conf/squid/common/13node-4client-single-site-regression.yaml b/conf/squid/common/13node-4client-single-site-regression.yaml
new file mode 100644
index 000000000..72ab01932
--- /dev/null
+++ b/conf/squid/common/13node-4client-single-site-regression.yaml
@@ -0,0 +1,120 @@
+# Single site cluster with 13 Ceph & 4 Client nodes
+# with daemons - 13 crash, 5 mon, 2 mgr, 3 rgw,
+# 5 mds, 5 nfs, 10 (+2 backup) osds and
+# 1 alertmanager, node-exporter, prometheus, grafana
+globals:
+  - ceph-cluster:
+      name: ceph
+      node1:
+        role:
+          - _admin
+          - installer
+          - crash
+          - alertmanager
+          - mon
+      node2:
+        role:
+          - crash
+          - rgw
+          - nfs
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node3:
+        role:
+          - crash
+          - rgw
+          - nfs
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node4:
+        role:
+          - crash
+          - rgw
+          - nfs
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node5:
+        role:
+          - crash
+          - nfs
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node6:
+        role:
+          - crash
+          - nfs
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node7:
+        role:
+          - crash
+          - node-exporter
+          - mds
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node8:
+        role:
+          - crash
+          - prometheus
+          - mds
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node9:
+        role:
+          - crash
+          - grafana
+          - mds
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node10:
+        role:
+          - crash
+          - mon
+          - mds
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node11:
+        role:
+          - crash
+          - mon
+          - mds
+          - osd
+        no-of-volumes: 4
+        disk-size: 20
+      node12:
+        role:
+          - crash
+          - mon
+          - mgr
+          - osd-bak
+        no-of-volumes: 4
+        disk-size: 20
+      node13:
+        role:
+          - crash
+          - mon
+          - mgr
+          - osd-bak
+        no-of-volumes: 4
+        disk-size: 20
+      node14:
+        role:
+          - client
+      node15:
+        role:
+          - client
+      node16:
+        role:
+          - client
+      node17:
+        role:
+          - client
diff --git a/suites/squid/common/regression/single-site-deploy-and-configure.yaml b/suites/squid/common/regression/single-site-deploy-and-configure.yaml
new file mode 100644
index 000000000..81077d65d
--- /dev/null
+++ b/suites/squid/common/regression/single-site-deploy-and-configure.yaml
@@ -0,0 +1,145 @@
+tests:
+  - test:
+      name: Setup pre-requisites
+      desc: Setup packages and configuration for cluster deployment
+      module: install_prereq.py
+      abort-on-fail: true
+
+  - test:
+      name: Deploy cluster using cephadm
+      desc: Bootstrap and deploy services
+      polarion-id: CEPH-83573713
+      module: test_cephadm.py
+      config:
+        steps:
+          - config:
+              service: cephadm
+              command: bootstrap
+              args:
+                mon-ip: node1
+          - config:
+              service: host
+              command: add_hosts
+              args:
+                attach_ip_address: true
+                labels: apply-all-labels
+          - config:
+              service: osd
+              command: apply
+              args:
+                all-available-devices: true
+          - config:
+              service: rgw
+              command: apply
+              pos_args:
+                - rgw.1
+              args:
+                placement:
+                  label: rgw
+          - config:
+              command: shell
+              args:
+                - "ceph fs volume create cephfs"
+          - config:
+              service: mds
+              command: apply
+              args:
+                placement:
+                  label: mds
+              base_cmd_args:
+                verbose: true
+              pos_args:
+                - cephfs
+          - config:
+              command: shell
+              args:
+                - "ceph osd pool create rbd"
+          - config:
+              command: shell
+              args:
+                - "rbd pool init rbd"
+      abort-on-fail: true
+
+  - test:
+      name: Configure client
+      desc: Configure the RGW & RBD clients
+      module: test_client.py
+      config:
+        command: add
+        id: client.1
+        node: node14
+        install_packages:
+          - ceph-common
+          - rbd-nbd
+          - jq
+          - fio
+        copy_admin_keyring: true
+        caps:
+          mon: "allow *"
+          osd: "allow *"
+          mds: "allow *"
+          mgr: "allow *"
+      abort-on-fail: true
+
+  - test:
+      name: Configure client
+      desc: Configure the RGW & RBD clients
+      module: test_client.py
+      config:
+        command: add
+        id: client.2
+        node: node15
+        install_packages:
+          - ceph-common
+          - rbd-nbd
+          - jq
+          - fio
+        copy_admin_keyring: true
+        caps:
+          mon: "allow *"
+          osd: "allow *"
+          mds: "allow *"
+          mgr: "allow *"
+      abort-on-fail: true
+
+  - test:
+      name: Configure client
+      desc: Configure the RGW & RBD clients
+      module: test_client.py
+      config:
+        command: add
+        id: client.3
+        node: node16
+        install_packages:
+          - ceph-common
+          - rbd-nbd
+          - jq
+          - fio
+        copy_admin_keyring: true
+        caps:
+          mon: "allow *"
+          osd: "allow *"
+          mds: "allow *"
+          mgr: "allow *"
+      abort-on-fail: true
+
+  - test:
+      name: Configure client
+      desc: Configure the RGW & RBD clients
+      module: test_client.py
+      config:
+        command: add
+        id: client.4
+        node: node17
+        install_packages:
+          - ceph-common
+          - rbd-nbd
+          - jq
+          - fio
+        copy_admin_keyring: true
+        caps:
+          mon: "allow *"
+          osd: "allow *"
+          mds: "allow *"
+          mgr: "allow *"
+      abort-on-fail: true
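
For context, the test_cephadm.py steps in the suite above are roughly equivalent to
the manual cephadm / ceph orch commands sketched below. This is an illustration only:
<node1-ip> and <nodeN-hostname> are placeholders, and cephci resolves node1..node17
and their labels from the conf file rather than running these literal commands.

    # Bootstrap the first monitor on node1
    cephadm bootstrap --mon-ip <node1-ip>

    # Add the remaining hosts and label them per their conf roles
    # (the suite does this via the add_hosts step with apply-all-labels)
    ceph orch host add <node2-hostname> <node2-ip>
    ceph orch host label add <node2-hostname> rgw
    # ...repeat for node3..node17

    # Consume every free disk as an OSD
    ceph orch apply osd --all-available-devices

    # Place RGW and MDS daemons by label, matching the suite's placement specs
    ceph orch apply rgw rgw.1 --placement="label:rgw"
    ceph fs volume create cephfs
    ceph orch apply mds cephfs --placement="label:mds"

    # Create and initialise the RBD pool used by the clients
    ceph osd pool create rbd
    rbd pool init rbd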