Skip to content

Commit

Permalink
Apply automation for bucket list causing abnormal client IO
Browse files Browse the repository at this point in the history
Signed-off-by: Vidushi Mishra <[email protected]>
  • Loading branch information
viduship committed Jan 20, 2025
1 parent 710849a commit 6f410d4
Show file tree
Hide file tree
Showing 4 changed files with 121 additions and 0 deletions.
16 changes: 16 additions & 0 deletions rgw/v2/tests/s3_swift/configs/test_bucket_listing_fake_mp.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# CEPH-83605383
# rgw/v2/tests/s3_swift/test_bucket_listing.py
config:
user_count: 1
bucket_count: 1
objects_count: 100
objects_size_range:
min: 10
max: 10
test_ops:
create_bucket: true
test_bucket_list_incomplete_mp: true
radoslist: true
meta_entries: 400000
num_objects: 200000
object_size: 4
78 changes: 78 additions & 0 deletions rgw/v2/tests/s3_swift/reusables/list_fake_mp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import json
import logging
import os
import random
import string
import time
import timeit
from urllib import parse as urlparse

import boto3
import v2.lib.manage_data as manage_data
import v2.utils.utils as utils
from botocore.exceptions import ClientError
from v2.lib.exceptions import RGWBaseException, TestExecError
from v2.lib.resource_op import Config
from v2.lib.rgw_config_opts import ConfigOpts

log = logging.getLogger()



def test_listing_incomplete_multipart(
rgw_client, bucket_name, meta_prefix, num_objects, meta_entries, object_size
):
"""
Perform the following operations:
1. Upload many objects (~10K) to a bucket.
2. Create ~10K incomplete multipart uploads.
3. Perform list_objects_v2 with pagination of 1000 objects.
Parameters:
rgw_client (boto3.client): S3 client instance.
bucket_name (str): Name of the bucket.
meta_prefix (str): Prefix for multipart upload objects.
num_objects (int): Number of objects to upload/create.
object_size (int): Size of each object in bytes.
"""
# Upload objects
log.info(f"Uploading {num_objects} objects of size {object_size} bytes each...")
for i in range(num_objects):
key = f"data/N27/good/193_tasking/2024-10-31/25/compacted-part-f55a5b45-f11f-4dd7-91e0-79658ca61548-0-object-{i}"
content = "".join(random.choices(string.ascii_letters + string.digits, k=object_size))

try:
rgw_client.put_object(Bucket=bucket_name, Key=key, Body=content)
if i % 1000 == 0:
log.info(f"Uploaded {i} objects...")
except ClientError as e:
log.error(f"Error uploading object {key}: {e}")
log.info(f"Uploaded {num_objects} objects successfully.")

# Create fake multipart uploads
log.info(
f"Creating {meta_entries} fake multipart uploads with prefix '{meta_prefix}'..."
)
for i in range(meta_entries):
key = f"{meta_prefix}{i}"
try:
rgw_client.create_multipart_upload(Bucket=bucket_name, Key=key)
if i % 1000 == 0:
log.info(f"Created {i} fake multipart uploads...")
except ClientError as e:
log.error(f"Error creating multipart upload for {key}: {e}")
log.info(f"Created {num_objects} fake multipart uploads successfully.")

# List objects
log.info(f"Listing objects in the bucket '{bucket_name}'...")
paginator = rgw_client.get_paginator("list_objects_v2")
operation_parameters = {"Bucket": bucket_name, "MaxKeys": 1000}
try:
for page in paginator.paginate(**operation_parameters):
if "Contents" in page:
for obj in page["Contents"]:
log.info(f"Key: {obj['Key']} | Size: {obj['Size']} bytes")
else:
log.info("No objects found in the bucket.")
except ClientError as e:
log.error(f"Error listing objects: {e}")
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
from v2.lib.s3.auth import Auth
from v2.lib.s3.write_io_info import BasicIOInfoStructure, BucketIoInfo, IOInfoInitialize
from v2.tests.s3_swift import reusable
from v2.tests.s3_swift.reusables import s3_object_restore as reusables_s3_restore
from v2.tests.s3_swift.reusables.bucket_notification import NotificationService
from v2.utils.log import configure_logging
from v2.utils.test_desc import AddTestInfo
Expand Down
26 changes: 26 additions & 0 deletions rgw/v2/tests/s3_swift/test_bucket_listing.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
test_bucket_listing_psuedo_only_ordered.yaml
test_bucket_listing_pseudo_ordered.yaml
test_bucket_listing_pseudo_ordered_dir_only.yaml
test_bucket_listing_fake_mp.yaml
Operation:
Create user
create objects as per the object structure mentioned in the yaml
Expand Down Expand Up @@ -38,6 +39,7 @@
from v2.lib.s3.auth import Auth
from v2.lib.s3.write_io_info import BasicIOInfoStructure, BucketIoInfo, IOInfoInitialize
from v2.tests.s3_swift import reusable
from v2.tests.s3_swift.reusables import list_fake_mp as bucket_list_incomplete_mp
from v2.utils.log import configure_logging
from v2.utils.test_desc import AddTestInfo
from v2.utils.utils import RGWService
Expand Down Expand Up @@ -97,7 +99,31 @@ def test_exec(config, ssh_con):
bucket = reusable.create_bucket(
bucket_name_to_create, rgw_conn, each_user
)

bucket_created.append(bucket)
if config.test_ops.get("test_bucket_list_incomplete_mp", False):
log.info(
"executing the command radosgw-admin bucket reshard to 341 shards"
)
reshard_bucket = utils.exec_shell_cmd(
f"radosgw-admin bucket reshard --bucket {bucket_name_to_create} --num-shards 341"
)
meta_prefix = "_multipart_data/run_9/main/good/298/2024-09-29/50/.part-9a2bf339-2780-46b0-8258-fcd2bb0a3275-0.2~uhJ-QZ5rU2y3bX6Mc6bZoYDfxSRrzMK/object-name-"
num_objects = config.test_ops.get("num_objects")
meta_entries = config.test_ops.get("meta_entries")
object_size = config.test_ops.get("object_size") * 1024
log.info(
f"the num_objects, meta_entries and object_size is {num_objects}, {meta_entries}, {object_size} respectively"
)
bucket_name = bucket_name_to_create
bucket_list_incomplete_mp.test_listing_incomplete_multipart(
rgw_client,
bucket_name,
meta_prefix,
num_objects,
meta_entries,
object_size,
)
if config.test_ops.get("enable_version", False):
log.info("enable bucket version")
reusable.enable_versioning(
Expand Down

0 comments on commit 6f410d4

Please sign in to comment.