From 9784f74e0dea0a97b01bf73cef8c795f99112551 Mon Sep 17 00:00:00 2001 From: Mohit Bisht Date: Thu, 16 Jan 2025 07:49:01 -0500 Subject: [PATCH] [SMB][Automation]CLI Command Signed-off-by: Mohit Bisht --- cli/ceph/smb/dump.py | 34 +++++++ cli/ceph/smb/smb.py | 2 + suites/squid/smb/tier-1-smb.yaml | 91 +++++++++++++++++ tests/smb/smb_cli_support.py | 170 +++++++++++++++++++++++++++++++ 4 files changed, 297 insertions(+) create mode 100644 cli/ceph/smb/dump.py create mode 100644 tests/smb/smb_cli_support.py diff --git a/cli/ceph/smb/dump.py b/cli/ceph/smb/dump.py new file mode 100644 index 000000000..2085ce369 --- /dev/null +++ b/cli/ceph/smb/dump.py @@ -0,0 +1,34 @@ +from cli import Cli +from cli.utilities.utils import build_cmd_from_args + + +class Dump(Cli): + """This module provides CLI interface for smb dump related operations""" + + def __init__(self, nodes, base_cmd): + super(Dump, self).__init__(nodes) + self.base_cmd = f"{base_cmd} dump" + + def cluster_config(self, cluster_id, **kw): + """Dump smb cluster config + + Args: + cluster_id (str): A short string uniquely identifying the cluster + """ + cmd = f"{self.base_cmd} cluster-config {cluster_id} {build_cmd_from_args(**kw)}" + out = self.execute(sudo=True, cmd=cmd) + if isinstance(out, tuple): + return out[0].strip() + return out + + def service_spec(self, cluster_id, **kw): + """Dump smb cluster service spec + + Args: + cluster_id (str): A short string uniquely identifying the cluster + """ + cmd = f"{self.base_cmd} service-spec {cluster_id} {build_cmd_from_args(**kw)}" + out = self.execute(sudo=True, cmd=cmd) + if isinstance(out, tuple): + return out[0].strip() + return out diff --git a/cli/ceph/smb/smb.py b/cli/ceph/smb/smb.py index db7d59d46..abb587bfd 100644 --- a/cli/ceph/smb/smb.py +++ b/cli/ceph/smb/smb.py @@ -3,6 +3,7 @@ from .apply import Apply from .cluster import Cluster +from .dump import Dump from .share import Share @@ -15,6 +16,7 @@ def __init__(self, nodes, base_cmd=""): self.cluster = 
Cluster(nodes, self.base_cmd) self.share = Share(nodes, self.base_cmd) self.apply = Apply(nodes, self.base_cmd) + self.dump = Dump(nodes, self.base_cmd) def show(self, resource_names, **kw): """ diff --git a/suites/squid/smb/tier-1-smb.yaml b/suites/squid/smb/tier-1-smb.yaml index 80dd3db94..eb7c74a95 100644 --- a/suites/squid/smb/tier-1-smb.yaml +++ b/suites/squid/smb/tier-1-smb.yaml @@ -425,3 +425,94 @@ tests: smb_shares: [share1, share2] path: "/" earmark: smb + + - test: + name: Verify that the smb cli cluster listing command correctly lists all SMB clusters. + desc: Verify that the smb cli cluster listing command correctly lists all SMB clusters. + module: smb_cli_support.py + polarion-id: CEPH-83605168 + config: + cephfs_volume: cephfs + smb_subvolume_group: smb + smb_subvolumes: [sv1] + smb_subvolume_mode: '0777' + smb_cluster_id: smb1 + auth_mode: user + smb_user_name: user1 + smb_user_password: passwd + smb_shares: [share1] + path: "/" + cli_cmd: "cluster ls" + + - test: + name: Verify that the smb cli share listing command correctly lists all SMB shares. + desc: Verify that the smb cli share listing command correctly lists all SMB shares. + module: smb_cli_support.py + polarion-id: CEPH-83605169 + config: + cephfs_volume: cephfs + smb_subvolume_group: smb + smb_subvolumes: [sv1] + smb_subvolume_mode: '0777' + smb_cluster_id: smb1 + auth_mode: user + smb_user_name: user1 + smb_user_password: passwd + smb_shares: [share1] + path: "/" + cli_cmd: "share ls" + + - test: + name: Verify that the smb cli show command correctly shows SMB resource. + desc: Verify that the smb cli show command correctly shows SMB resource. 
+ module: smb_cli_support.py + polarion-id: CEPH-83605170 + config: + cephfs_volume: cephfs + smb_subvolume_group: smb + smb_subvolumes: [sv1] + smb_subvolume_mode: '0777' + smb_cluster_id: smb1 + auth_mode: user + smb_user_name: user1 + smb_user_password: passwd + smb_shares: [share1] + path: "/" + cli_cmd: "show" + resource_names: "ceph.smb.cluster" + + - test: + name: Verify that the smb cli dump cluster-config command correctly gives cluster-config. + desc: Verify that the smb cli dump cluster-config command correctly gives cluster-config. + module: smb_cli_support.py + polarion-id: CEPH-83605171 + config: + cephfs_volume: cephfs + smb_subvolume_group: smb + smb_subvolumes: [sv1] + smb_subvolume_mode: '0777' + smb_cluster_id: smb1 + auth_mode: user + smb_user_name: user1 + smb_user_password: passwd + smb_shares: [share1] + path: "/" + cli_cmd: "dump cluster-config" + + - test: + name: Verify that the smb cli dump service-spec command correctly gives service-spec details. + desc: Verify that the smb cli dump service-spec command correctly gives service-spec details. 
+ module: smb_cli_support.py + polarion-id: CEPH-83605173 + config: + cephfs_volume: cephfs + smb_subvolume_group: smb + smb_subvolumes: [sv1] + smb_subvolume_mode: '0777' + smb_cluster_id: smb1 + auth_mode: user + smb_user_name: user1 + smb_user_password: passwd + smb_shares: [share1] + path: "/" + cli_cmd: "dump service-spec" diff --git a/tests/smb/smb_cli_support.py b/tests/smb/smb_cli_support.py new file mode 100644 index 000000000..8af386f1d --- /dev/null +++ b/tests/smb/smb_cli_support.py @@ -0,0 +1,170 @@ +import json + +from smb_operations import ( + check_ctdb_health, + check_rados_clustermeta, + deploy_smb_service_imperative, + smb_cleanup, + smbclient_check_shares, +) + +from ceph.ceph_admin import CephAdmin +from cli.cephadm.cephadm import CephAdm +from utility.log import Log + +log = Log(__name__) + + +def run(ceph_cluster, **kw): + """Deploy samba with auth_mode 'user' using imperative style (CLI Commands) + Args: + **kw: Key/value pairs of configuration information to be used in the test + """ + # Get config + config = kw.get("config") + + # Get cephadm obj + cephadm = CephAdmin(cluster=ceph_cluster, **config) + + # Get cephfs volume + cephfs_vol = config.get("cephfs_volume", "cephfs") + + # Get smb subvolume group + smb_subvol_group = config.get("smb_subvolume_group", "smb") + + # Get smb subvolumes + smb_subvols = config.get("smb_subvolumes", ["sv1", "sv2"]) + + # Get smb subvolume mode + smb_subvolume_mode = config.get("smb_subvolume_mode", "0777") + + # Get smb cluster id + smb_cluster_id = config.get("smb_cluster_id", "smb1") + + # Get auth_mode + auth_mode = config.get("auth_mode", "user") + + # Get domain_realm + domain_realm = config.get("domain_realm", None) + + # Get custom_dns + custom_dns = config.get("custom_dns", None) + + # Get smb user name + smb_user_name = config.get("smb_user_name", "user1") + + # Get smb user password + smb_user_password = config.get("smb_user_password", "passwd") + + # Get smb shares + smb_shares = 
config.get("smb_shares", ["share1", "share2"]) + + # Get smb path + path = config.get("path", "/") + + # Get installer node + installer = ceph_cluster.get_nodes(role="installer")[0] + + # Get smb nodes + smb_nodes = ceph_cluster.get_nodes("smb") + + # Get client node + client = ceph_cluster.get_nodes(role="client")[0] + + # Check ctdb clustering + clustering = config.get("clustering", "default") + + # Get cli command + cli_cmd = config.get("cli_cmd", "cluster ls") + + # Get smb resource_names + resource_names = config.get("resource_names", "ceph.smb.cluster") + + try: + # deploy smb services + deploy_smb_service_imperative( + installer, + cephfs_vol, + smb_subvol_group, + smb_subvols, + smb_subvolume_mode, + smb_cluster_id, + auth_mode, + smb_user_name, + smb_user_password, + smb_shares, + path, + domain_realm, + custom_dns, + clustering, + ) + # Verify ctdb clustering + if clustering != "never": + # check samba clustermeta in rados + if not check_rados_clustermeta(cephadm, smb_cluster_id, smb_nodes): + log.error("rados clustermeta for samba not found") + return 1 + # Verify CTDB health + if not check_ctdb_health(smb_nodes, smb_cluster_id): + log.error("ctdb health error") + return 1 + # Check smb share using smbclient + smbclient_check_shares( + smb_nodes, + client, + smb_shares, + smb_user_name, + smb_user_password, + auth_mode, + domain_realm, + ) + # Verify cli command + if cli_cmd == "cluster ls": + out = ( + CephAdm(installer) + .ceph.smb.cluster.ls() + .strip() + .strip("[]") + .replace('"', "") + .split() + ) + if smb_cluster_id not in out: + log.error("smb cli cluster listing command not working as expected") + return 1 + elif cli_cmd == "share ls": + out = ( + CephAdm(installer) + .ceph.smb.share.ls(smb_cluster_id) + .strip() + .strip("[]") + .replace('"', "") + .split() + ) + if smb_shares != out: + log.error("smb cli share listing command not working as expected") + return 1 + elif cli_cmd == "show": + out = 
json.loads(CephAdm(installer).ceph.smb.show(resource_names)) + if smb_cluster_id != out["cluster_id"]: + log.error("smb cli show command not working as expected") + return 1 + elif cli_cmd == "dump cluster-config": + out = json.loads( + CephAdm(installer).ceph.smb.dump.cluster_config(smb_cluster_id) + ) + if smb_shares != out["configs"][smb_cluster_id]["shares"]: + log.error("smb cli dump command not working as expected") + return 1 + elif cli_cmd == "dump service-spec": + out = json.loads( + CephAdm(installer).ceph.smb.dump.service_spec(smb_cluster_id) + ) + if smb_cluster_id != out["service_id"]: + log.error("smb cli dump command not working as expected") + return 1 + except Exception as e: + log.error(f"Failed to deploy samba with auth_mode 'user' : {e}") + return 1 + finally: + smb_cleanup(installer, smb_shares, smb_cluster_id) + return 0