migration across ceph clusters
Signed-off-by: Sunil Angadi <[email protected]>
Sunil Angadi authored and Sunil Angadi committed Jan 17, 2025
1 parent 23330ce commit b4d5dac
Showing 5 changed files with 581 additions and 5 deletions.
46 changes: 44 additions & 2 deletions ceph/rbd/utils.py
@@ -35,16 +35,19 @@ def value(key, dictionary):
     return str(list(find(key, dictionary))[0])


-def copy_file(file_name, src, dest):
+def copy_file(file_name, src, dest, dest_file_name=None):
     """Copies the given file from src node to dest node
     Args:
         file_name: Full path of the file to be copied
         src: Source CephNode object
         dest: Destination CephNode object
+        dest_file_name: Destination file name (defaults to file_name)
     """
     contents, err = src.exec_command(sudo=True, cmd="cat {}".format(file_name))
-    key_file = dest.remote_file(sudo=True, file_name=file_name, file_mode="w")
+    key_file = dest.remote_file(
+        sudo=True, file_name=dest_file_name or file_name, file_mode="w"
+    )
     key_file.write(contents)
     key_file.flush()
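For context, a minimal usage sketch of the new `dest_file_name` parameter; `client1` and `client2` are assumed CephNode objects from the test framework, not defined in this diff:

```python
from ceph.rbd.utils import copy_file

# Copy cluster2's admin keyring to the common client under a
# cluster-specific name so both clusters' credentials can coexist.
copy_file(
    file_name="/etc/ceph/ceph.client.admin.keyring",
    src=client2,
    dest=client1,
    dest_file_name="/etc/ceph/cluster2.client.admin.keyring",
)
```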

@@ -201,3 +204,42 @@ def convert_size(size_bytes):
     p = math.pow(1024, i)
     s = round(size_bytes / p)
     return "%s%s" % (s, size_name[i])
+
+
+def configure_common_client_node(client1, client2):
+    """
+    Configure the common client node (client1) to access both clusters.
+    Args:
+        client1: Cluster1 client node object
+        client2: Cluster2 client node object
+    """
+    # Ensure /etc/ceph directory exists and is writable on client1
+    client1.exec_command(cmd="sudo mkdir -p /etc/ceph && sudo chmod 777 /etc/ceph")
+
+    # Copy cluster2 configuration and keyring files to client1
+    cluster2_files = [
+        ("/etc/ceph/ceph.conf", "/etc/ceph/cluster2.conf"),
+        (
+            "/etc/ceph/ceph.client.admin.keyring",
+            "/etc/ceph/cluster2.client.admin.keyring",
+        ),
+    ]
+    for file, dest_path in cluster2_files:
+        copy_file(file_name=file, src=client2, dest=client1, dest_file_name=dest_path)
+
+    client1.exec_command(sudo=True, cmd="chmod 644 /etc/ceph/*")
+
+    # Verify cluster accessibility for both clusters
+    for cluster_name in ["ceph", "cluster2"]:
+        out, err = client1.exec_command(
+            cmd=f"ceph -s --cluster {cluster_name}", output=True
+        )
+        log.info(f"Cluster {cluster_name} status: {out}")
+        if err:
+            raise Exception(
+                f"Unable to access cluster {cluster_name} from common client node"
+            )
+    log.info("Common client node configured successfully.")
+    return 0
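A hedged sketch of how this helper might be wired up in a test module; the `ceph_cluster_dict` lookup and `get_nodes` call are assumptions about the surrounding cephci framework, not part of this diff:

```python
from ceph.rbd.utils import configure_common_client_node
from utility.log import Log

log = Log(__name__)

# Assumed framework wiring: fetch one client node per cluster.
client1 = ceph_cluster_dict["ceph-rbd1"].get_nodes(role="client")[0]
client2 = ceph_cluster_dict["ceph-rbd2"].get_nodes(role="client")[0]

# After this call, client1 holds /etc/ceph/cluster2.conf and the matching
# keyring, so `ceph -s --cluster cluster2` works alongside the local cluster.
if configure_common_client_node(client1, client2) == 0:
    log.info("Common client ready for cross-cluster migration tests")
```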
38 changes: 36 additions & 2 deletions ceph/rbd/workflows/migration.py
@@ -1,12 +1,13 @@
 import json
+import tempfile

 from cli.rbd.rbd import Rbd
 from utility.log import Log

 log = Log(__name__)


-def verify_migration_state(action, image_spec, **kw):
+def verify_migration_state(action, image_spec, cluster_name="ceph", **kw):
     """verify the migration status at each action.

     This method will verify the migration state for an image for
@@ -24,7 +25,11 @@ def verify_migration_state(action, image_spec, **kw):
     """
     rbd = Rbd(kw["client"])
     log.info("verifying migration state")
-    status_config = {"image-spec": image_spec, "format": "json"}
+    status_config = {
+        "image-spec": image_spec,
+        "cluster": cluster_name,
+        "format": "json",
+    }
     out, err = rbd.status(**status_config)
     log.info(out)
     status = json.loads(out)
@@ -39,3 +44,32 @@ def verify_migration_state(action, image_spec, **kw):
     except Exception as error:
         log.error(error)
         return 1


+def prepare_migration_source_spec(
+    cluster_name, client, pool_name, image_name, snap_name
+):
+    """
+    Create a native source spec file for migration.
+    Args:
+        cluster_name: Name of the source cluster
+        client: Client node object used to write the spec file
+        pool_name: Name of the source pool
+        image_name: Name of the source image
+        snap_name: Name of the snapshot
+    Returns:
+        Path to the native spec file
+    """
+    native_spec = {
+        "cluster_name": cluster_name,
+        "type": "native",
+        "pool_name": pool_name,
+        "image_name": image_name,
+        "snap_name": snap_name,
+    }
+
+    temp_file = tempfile.NamedTemporaryFile(dir="/tmp", suffix=".json")
+    spec_file = client.remote_file(sudo=True, file_name=temp_file.name, file_mode="w")
+    spec_file.write(json.dumps(native_spec, indent=4))
+    spec_file.flush()
+
+    return temp_file.name
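For illustration, a sketch combining the two new helpers: build a native source spec on the common client, then check migration state on the target cluster. The pool, image, and snapshot names are hypothetical, and the "prepared" state label is an assumption about what `rbd status` reports after prepare:

```python
from ceph.rbd.workflows.migration import (
    prepare_migration_source_spec,
    verify_migration_state,
)

# Write the native spec describing the source image on cluster "ceph";
# client1 is an assumed common client node configured for both clusters.
spec_path = prepare_migration_source_spec(
    cluster_name="ceph",
    client=client1,
    pool_name="source_pool",
    image_name="source_image",
    snap_name="snap1",
)
# The file written on the client contains:
# {
#     "cluster_name": "ceph",
#     "type": "native",
#     "pool_name": "source_pool",
#     "image_name": "source_image",
#     "snap_name": "snap1"
# }

# Later, confirm the image on the target cluster reached the expected state;
# the client node is passed through **kw and used to run `rbd status`.
rc = verify_migration_state(
    action="prepared",
    image_spec="target_pool/target_image",
    cluster_name="cluster2",
    client=client1,
)
```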
24 changes: 23 additions & 1 deletion cli/rbd/migration.py
@@ -44,6 +44,28 @@ def action(self, **kw):
         """
         action = kw.get("action", None)
         dest_spec = kw.get("dest_spec", None)
+        cluster_name = kw.get("cluster_name", "ceph")
         log.info(f"Starting the {action} migration process")
-        cmd = f"{self.base_cmd} {action} {dest_spec}"
+        cmd = f"{self.base_cmd} {action} {dest_spec} --cluster {cluster_name}"
         return self.execute_as_sudo(cmd=cmd, long_running=True)

+    def prepare_import(self, **kw):
+        """
+        Prepare the live migration of an image from one Ceph cluster to another.
+        Args:
+            kw(dict): Key/value pairs that need to be provided to the client node.
+            Example::
+                Supported keys:
+                    source_spec_path: Path to the JSON-formatted native source spec file
+                    dest_spec: Target image spec TARGET_POOL_NAME/TARGET_IMAGE_NAME
+                    cluster_name: Name of the target cluster
+        """
+        log.info("Starting prepare for live migration of image to external ceph cluster")
+        source_spec_path = kw.get("source_spec_path", None)
+        dest_spec = kw.get("dest_spec", None)
+        cluster_name = kw.get("cluster_name", None)
+        cmd = (
+            f"{self.base_cmd} prepare --import-only --source-spec-path {source_spec_path} "
+            f"{dest_spec} --cluster {cluster_name}"
+        )
+        return self.execute_as_sudo(cmd=cmd, long_running=True)
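Putting the CLI pieces together, a hedged end-to-end sketch of the import-only flow; the `Migration(client1)` construction and the execute/commit step names follow common `rbd migration` usage and are assumptions beyond what this diff shows:

```python
from cli.rbd.migration import Migration

migration = Migration(client1)  # assumed constructor taking the client node
spec_path = "/tmp/native_spec.json"  # e.g. returned by prepare_migration_source_spec

# Prepare the import on the target cluster; this builds roughly:
#   rbd migration prepare --import-only \
#       --source-spec-path <spec_path> target_pool/target_image --cluster cluster2
migration.prepare_import(
    source_spec_path=spec_path,
    dest_spec="target_pool/target_image",
    cluster_name="cluster2",
)

# Then run the remaining migration steps against the same cluster.
for step in ("execute", "commit"):
    migration.action(
        action=step,
        dest_spec="target_pool/target_image",
        cluster_name="cluster2",
    )
```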
169 changes: 169 additions & 0 deletions suites/squid/rbd/tier-2_rbd_migration_external_ceph.yaml
@@ -0,0 +1,169 @@
#===============================================================================================
# Tier-level: 2
# Test-Suite: tier-2_rbd_migration_external_ceph.yaml
#
# Cluster Configuration:
# cephci/conf/squid/rbd/5-node-2-clusters.yaml
# No of Clusters: 2
# Each cluster configuration
# 5-Node cluster (RHEL-8.3 and above)
# 3 MONS, 2 MGR, 3 OSD, 1 Client
# Node1 - Mon, Mgr, Installer
# Node2 - client
# Node3 - OSD, MON, MGR
# Node4 - OSD, MON
# Node5 - OSD
#===============================================================================================
tests:
  - test:
      name: setup install pre-requisites
      desc: Setup phase to deploy the required pre-requisites for running the tests.
      module: install_prereq.py
      abort-on-fail: true

  - test:
      abort-on-fail: true
      clusters:
        ceph-rbd1:
          config:
            verify_cluster_health: true
            steps:
              - config:
                  command: bootstrap
                  service: cephadm
                  args:
                    mon-ip: node1
                    orphan-initial-daemons: true
                    skip-monitoring-stack: true
              - config:
                  command: add_hosts
                  service: host
                  args:
                    attach_ip_address: true
                    labels: apply-all-labels
              - config:
                  command: apply
                  service: mgr
                  args:
                    placement:
                      label: mgr
              - config:
                  command: apply
                  service: mon
                  args:
                    placement:
                      label: mon
              - config:
                  command: apply
                  service: osd
                  args:
                    all-available-devices: true
        ceph-rbd2:
          config:
            verify_cluster_health: true
            steps:
              - config:
                  command: bootstrap
                  service: cephadm
                  args:
                    mon-ip: node1
                    orphan-initial-daemons: true
                    skip-monitoring-stack: true
              - config:
                  command: add_hosts
                  service: host
                  args:
                    attach_ip_address: true
                    labels: apply-all-labels
              - config:
                  command: apply
                  service: mgr
                  args:
                    placement:
                      label: mgr
              - config:
                  command: apply
                  service: mon
                  args:
                    placement:
                      label: mon
              - config:
                  command: apply
                  service: osd
                  args:
                    all-available-devices: true
      desc: Two ceph cluster deployment for external ceph migration testing
      destroy-cluster: false
      module: test_cephadm.py
      name: deploy two ceph cluster

  - test:
      abort-on-fail: true
      clusters:
        ceph-rbd1:
          config:
            command: add
            id: client.1
            node: node2
            install_packages:
              - ceph-common
              - fio
            copy_admin_keyring: true
        ceph-rbd2:
          config:
            command: add
            id: client.1
            node: node2
            install_packages:
              - ceph-common
              - fio
            copy_admin_keyring: true
      desc: Configure the client node for both the clusters
      destroy-cluster: false
      module: test_client.py
      name: configure client

  - test:
      desc: Set mon_allow_pool_delete to true to allow pool deletion
      module: exec.py
      name: configure mon_allow_pool_delete to True
      abort-on-fail: true
      config:
        cephadm: true
        commands:
          - "ceph config set mon mon_allow_pool_delete true"

  - test:
      desc: Install rbd-nbd and remove any epel packages
      module: exec.py
      name: Install rbd-nbd
      config:
        sudo: true
        commands:
          - "rm -rf /etc/yum.repos.d/epel*"
          - "dnf install rbd-nbd -y"

  - test:
      name: Test image migration with external ceph cluster
      desc: live migration with external ceph native data format
      module: test_rbd_migration_external_native_image.py
      clusters:
        ceph-rbd1:
          config:
            rep_pool_config:
              num_pools: 1
              num_images: 1
              size: 4G
              create_pool_parallely: true
              create_image_parallely: true
              test_ops_parallely: true
            ec_pool_config:
              num_pools: 1
              num_images: 1
              size: 4G
              create_pool_parallely: true
              create_image_parallely: true
              test_ops_parallely: true
            fio:
              size: 1G
      polarion-id: CEPH-83597689