Commit 659032d
Common Test File for all CephFS Volume Ops
Signed-off-by: Hemanth <[email protected]>
hkadam134 committed Jan 20, 2025
1 parent 48ebf07 commit 659032d
Showing 10 changed files with 370 additions and 180 deletions.
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -331,12 +331,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (12 changes: 3 additions & 9 deletions)
@@ -307,12 +307,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -477,8 +471,8 @@ tests:
       name: "nfs_multiple_export_using_single_conf"
       polarion-id: "CEPH-83575082"
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
-      abort-on-fail: true
+      desc: Test for validating all CephFS Volume Operations
+      abort-on-fail: false
suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml (6 changes: 0 additions & 6 deletions)
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (12 changes: 3 additions & 9 deletions)
@@ -307,12 +307,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -477,8 +471,8 @@ tests:
       name: "nfs_multiple_export_using_single_conf"
       polarion-id: "CEPH-83575082"
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
-      abort-on-fail: true
+      desc: Test for validating all CephFS Volume Operations
+      abort-on-fail: false
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (10 changes: 2 additions & 8 deletions)
@@ -458,12 +458,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -495,8 +489,8 @@ tests:
       desc: cephfs subvolume idempoence earmark
       abort-on-fail: false
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
+      desc: Test for validating all CephFS Volume Operations
       abort-on-fail: false
tests/cephfs/cephfs_utilsV1.py (81 changes: 52 additions & 29 deletions)
@@ -1297,58 +1297,81 @@ def remove_nfs_export(
         return cmd_out, cmd_rc

     def create_osd_pool(
         self,
         client,
         pool_name,
-        pg_num=None,
+        pg_num=64,
         pgp_num=None,
         erasure=False,
         validate=True,
         **kwargs,
     ):
         """
         Creates an OSD pool with given arguments.
-        It supports the following optional arguments:
+        Supports optional arguments for customization.
         Args:
-            client:
-            pool_name:
-            pg_num: int
-            pgp_num: int
-            erasure: bool
-            validate: bool
+            client: Ceph client
+            pool_name: Name of the pool to create
+            pg_num: Number of placement groups (default: 64)
+            pgp_num: Number of placement groups for placement (optional)
+            erasure: Whether to create an erasure-coded pool (default: False)
+            validate: Whether to validate the pool creation (default: True)
         **kwargs:
-            erasure_code_profile: str
-            crush_rule_name: str
-            expected_num_objects: int
-            autoscale_mode: str (on, off, warn)
-            check_ec: bool (default: True)
+            erasure_code_profile: Erasure code profile name
+            crush_rule_name: CRUSH rule name
+            expected_num_objects: Expected number of objects
+            autoscale_mode: Autoscale mode (on, off, warn)
+            check_ec: Whether to check the execution code (default: True)
         Returns:
-            Returns the cmd_out and cmd_rc for the create command.
+            Tuple containing the command output and return code.
         """
+        # Additional configuration for erasure-coded pools
         if erasure:
-            pool_cmd = f"ceph osd pool create {pool_name} {pg_num or ''} {pgp_num or ''} erasure"
-            if kwargs.get("erasure_code_profile"):
-                pool_cmd += f" {kwargs.get('erasure_code_profile')}"
-        else:
-            pool_cmd = f"ceph osd pool create {pool_name} {pg_num or ''} {pgp_num or ''} replicated"
+            log.info(f"Setting allow_ec_overwrites to true for erasure-coded pool: {pool_name}")
+            client.exec_command(
+                sudo=True, cmd=f"ceph osd pool set {pool_name} allow_ec_overwrites true"
+            )
+
+        # Determine pool type
+        pool_type = "erasure" if erasure else "replicated"
+        log.info(f"Creating {pool_type} OSD pool: {pool_name}")
+
+        # Build the pool creation command
+        pool_cmd = f"ceph osd pool create {pool_name} {pg_num} {pgp_num or ''} {pool_type}".strip()
+
+        # Append erasure code profile if specified
+        if erasure and kwargs.get("erasure_code_profile"):
+            pool_cmd += f" {kwargs['erasure_code_profile']}"
+
+        # Append CRUSH rule name if specified
         if kwargs.get("crush_rule_name"):
-            pool_cmd += f" {kwargs.get('crush_rule_name')}"
+            pool_cmd += f" {kwargs['crush_rule_name']}"
+
+        # Append expected number of objects if specified
         if kwargs.get("expected_num_objects"):
-            pool_cmd += f" {kwargs.get('expected_num_objects')}"
+            pool_cmd += f" {kwargs['expected_num_objects']}"
+
+        # Set autoscale mode if specified
         if erasure and kwargs.get("autoscale_mode"):
-            pool_cmd += f" --autoscale-mode={kwargs.get('autoscale_mode')}"
+            pool_cmd += f" --autoscale-mode={kwargs['autoscale_mode']}"
+
+        # Execute the pool creation command
         cmd_out, cmd_rc = client.exec_command(
             sudo=True, cmd=pool_cmd, check_ec=kwargs.get("check_ec", True)
         )

+        log.info(f"OSD pool {pool_name} created successfully")
+
+        # Validate pool creation
         if validate:
+            log.info(f"Validating creation of OSD pool: {pool_name}")
             out, rc = client.exec_command(
                 sudo=True, cmd="ceph osd pool ls --format json"
             )
             pool_ls = json.loads(out)
             if pool_name not in pool_ls:
+                log.error(f"Creation of OSD pool {pool_name} failed")
                 raise CommandFailed(f"Creation of OSD pool: {pool_name} failed")

         return cmd_out, cmd_rc
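
For context, a minimal sketch of how the reworked create_osd_pool helper might be called from a cephci test module. The import path, the FsUtils class name, and the ceph_cluster.get_ceph_objects("client") lookup are assumptions based on how other cephfs test modules in this repository are structured, not part of this commit, and the erasure-code profile name is hypothetical.

# Illustrative sketch only (not part of this commit). Assumes the helper is a
# method of FsUtils in tests/cephfs/cephfs_utilsV1.py and that client nodes are
# fetched the same way other cephfs test modules do it.
from tests.cephfs.cephfs_utilsV1 import FsUtils


def run(ceph_cluster, **kw):
    fs_util = FsUtils(ceph_cluster)
    client = ceph_cluster.get_ceph_objects("client")[0]

    # Replicated pool: pg_num now defaults to 64, so only a name is required.
    fs_util.create_osd_pool(client, pool_name="cephfs_data_rep")

    # Erasure-coded pool: profile and autoscale mode are passed via **kwargs;
    # the reworked helper also enables allow_ec_overwrites for EC pools.
    fs_util.create_osd_pool(
        client,
        pool_name="cephfs_data_ec",
        erasure=True,
        erasure_code_profile="default",  # hypothetical profile name
        autoscale_mode="on",
    )
    return 0

With the defaults above, the replicated call builds a command of the form "ceph osd pool create cephfs_data_rep 64 replicated", which reflects the signature change from pg_num=None to pg_num=64 in this commit.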