Example #1
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    if config.dbr_scenario == "brownfield":
        user_brownfield = "brownfield_user"
        all_users_info = s3lib.create_users(config.user_count, user_brownfield)
    else:
        all_users_info = s3lib.create_users(config.user_count)

    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=%s"
                % (zone, compression_type)
            )
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (
                        data["placement_pools"][0]["val"]["compression"]
                        == compression_type
                    ):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (
                            data["placement_pools"][0]["val"]["storage_classes"][
                                "STANDARD"
                            ]["compression_type"]
                            == compression_type
                        ):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)
        if config.dynamic_resharding is True:
            if utils.check_dbr_support():
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_max_objs_per_shard,
                    str(config.max_objects_per_shard),
                )
                srv_restarted = rgw_service.restart()

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
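                # override the bucket name for the bucket-sync-crash and brownfield
                # scenarios so later steps operate on a known, fixed bucket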
                if config.bucket_sync_crash is True:
                    is_primary = utils.is_cluster_primary()
                    if is_primary:
                        bucket_name_to_create = "bkt_crash_check"
                if config.dbr_scenario == "brownfield":
                    bucket_name_to_create = "brownfield_bucket"

                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
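                # when dynamic resharding is under test, capture the shard count at
                # bucket creation so it can be compared after objects are uploaded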
                if config.dynamic_resharding is True:
                    reusable.check_sync_status()
                    op = utils.exec_shell_cmd(
                        f"radosgw-admin bucket stats --bucket {bucket.name}"
                    )
                    json_doc = json.loads(op)
                    old_num_shards = json_doc["num_shards"]
                    log.info(f"no_of_shards_created: {old_num_shards}")
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    if utils.check_dbr_support():
                        if bucket_name_to_create == "brownfield_bucket":
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            if bool(json_doc["usage"]):
                                num_object = json_doc["usage"]["rgw.main"][
                                    "num_objects"
                                ]
                                config.objects_count = (
                                    num_object * 2 + config.objects_count
                                )
                                config.mapped_sizes = utils.make_mapped_sizes(config)

                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name
                            )
                            log.info(
                                "s3_object_download_path: %s" % s3_object_download_path
                            )
                            log.info(
                                "downloading to filename: %s" % s3_object_download_name
                            )
                            if (
                                config.test_ops.get("encryption_algorithm", None)
                                is not None
                            ):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s"
                                    % config.test_ops["encryption_algorithm"]
                                )
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey": encryption_key,
                                        "SSECustomerAlgorithm": config.test_ops[
                                            "encryption_algorithm"
                                        ],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op(
                                    {
                                        "obj": bucket,
                                        "resource": "download_file",
                                        "args": [
                                            s3_object_name,
                                            s3_object_download_path,
                                        ],
                                    }
                                )
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path
                            )
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info(
                                "s3_object_downloaded_md5: %s"
                                % s3_object_downloaded_md5
                            )
                            log.info(
                                "s3_object_uploaded_md5: %s" % s3_object_uploaded_md5
                            )
                            if str(s3_object_uploaded_md5) == str(
                                s3_object_downloaded_md5
                            ):
                                log.info("md5 match")
                                utils.exec_shell_cmd(
                                    "rm -rf %s" % s3_object_download_path
                                )
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    if config.bucket_sync_crash is True:
                        is_primary = utils.is_cluster_primary()
                        if is_primary is False:
                            crash_info = reusable.check_for_crash()
                            if crash_info:
                                raise TestExecError("ceph daemon crash found!")
                            realm, source_zone = utils.get_realm_source_zone_info()
                            log.info(f"Realm name: {realm}")
                            log.info(f"Source zone name: {source_zone}")
                            for i in range(600):  # Running sync command for 600 times
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket sync run --bucket bkt_crash_check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}"
                                )
                                crash_info = reusable.check_for_crash()
                                if crash_info:
                                    raise TestExecError("ceph daemon crash found!")
                                time.sleep(1)
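                    # dynamic resharding check: poll bucket stats for up to ~10 minutes and
                    # expect the shard count to exceed the value recorded at bucket creation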
                    if config.dynamic_resharding is True:
                        if utils.check_dbr_support():
                            reusable.check_sync_status()
                            for i in range(10):
                                time.sleep(
                                    60
                                )  # Adding delay for processing reshard list
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket stats --bucket {bucket.name}"
                                )
                                json_doc = json.loads(op)
                                new_num_shards = json_doc["num_shards"]
                                log.info(f"no_of_shards_created: {new_num_shards}")
                                if new_num_shards > old_num_shards:
                                    break
                            else:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
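                    # manual resharding check: queue a reshard to config.shards, process it,
                    # and verify the shard count actually increased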
                    if config.manual_resharding is True:
                        if utils.check_dbr_support():
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            old_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {old_num_shards}")
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}"
                            )
                            op = utils.exec_shell_cmd("radosgw-admin reshard process")
                            time.sleep(60)
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            new_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {new_num_shards}")
                            if new_num_shards <= old_num_shards:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    # run datalog trim on the shard and make sure radosgw-admin does not crash
                    if config.test_datalog_trim_command is True:
                        shard_id, end_marker = reusable.get_datalog_marker()
                        cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            raise TestExecError("Segmentation fault occured")

                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name
                        )
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (
                            out.replace('"', "")
                            .strip()
                            .split(":")[1]
                            .strip()
                            .replace(",", "")
                        )
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharing verification.--------")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        time.sleep(10)
                        reusable.check_sync_status()
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=none" % zone
            )
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #2
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops['sharding']['enable'] is True:
            log.info('enabling sharding on buckets')
            max_shards = config.test_ops['sharding']['max_shards']
            log.info('making changes to ceph.conf')
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_override_bucket_index_max_shards,
                                       str(max_shards))
            log.info('trying to restart services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.test_ops['compression']['enable'] is True:
            compression_type = config.test_ops['compression']['type']
            log.info('enabling compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=%s' % (zone,compression_type)
            out = utils.exec_shell_cmd(cmd)
            try:
                data = json.loads(out)
                if data['placement_pools'][0]['val']['storage_classes']['STANDARD']['compression_type'] == compression_type:
                    log.info('Compression enabled successfully')
                else:
                    raise ValueError('failed to enable compression')
            except ValueError as e:
                exit(str(e))
            log.info('trying to restart rgw services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('upload_type') == 'multipart':
                            log.info('upload type: multipart')
                            reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                                            each_user)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            if config.test_ops.get('encryption_algorithm', None) is not None:
                                log.info('encryption download')
                                log.info('encryption algorithm: %s' % config.test_ops['encryption_algorithm'])
                                object_downloaded_status = bucket.download_file(s3_object_name,
                                                                                s3_object_download_path,
                                                                                ExtraArgs={
                                                                                    'SSECustomerKey': encryption_key,
                                                                                    'SSECustomerAlgorithm':
                                                                                        config.test_ops[
                                                                                            'encryption_algorithm']})
                            else:
                                object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                              'resource': 'download_file',
                                                                              'args': [s3_object_name,
                                                                                       s3_object_download_path],
                                                                              })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                            log.info('s3_object_uploaded_md5: %s' % s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(s3_object_downloaded_md5):
                                log.info('md5 match')
                                utils.exec_shell_cmd('rm -rf %s' % s3_object_download_path)
                            else:
                                raise TestExecError('md5 mismatch')
                        if config.local_file_delete is True:
                            log.info('deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification')
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops['compression']['enable'] is True:
                        cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops['delete_bucket_object'] is True:
                        reusable.delete_objects(bucket)
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops['compression']['enable'] is True:
            log.info('disable compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=none' % zone
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status('test failed')
                sys.exit(1)
Example #3
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=%s" %
                   (zone, compression_type))
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (data["placement_pools"][0]["val"]["compression"] ==
                            compression_type):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (data["placement_pools"][0]["val"]
                            ["storage_classes"]["STANDARD"]["compression_type"]
                                == compression_type):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info("s3_object_download_path: %s" %
                                     s3_object_download_path)
                            log.info("downloading to filename: %s" %
                                     s3_object_download_name)
                            if (config.test_ops.get("encryption_algorithm",
                                                    None) is not None):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s" %
                                    config.test_ops["encryption_algorithm"])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey":
                                        encryption_key,
                                        "SSECustomerAlgorithm":
                                        config.
                                        test_ops["encryption_algorithm"],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    "obj":
                                    bucket,
                                    "resource":
                                    "download_file",
                                    "args": [
                                        s3_object_name,
                                        s3_object_download_path,
                                    ],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info("s3_object_downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("s3_object_uploaded_md5: %s" %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info("md5 match")
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
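                        # raise rgw debug logging and restart RGW so the bucket deletion
                        # that follows is captured with verbose logs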
                        log.info("set debug_rgw to 20 before deleting the bucket")
                        config.debug_rgw = 20
                        ceph_conf.set_to_ceph_conf("global",
                                                   ConfigOpts.debug_rgw,
                                                   str(config.debug_rgw))
                        log.info("trying to restart services")
                        srv_restarted = rgw_service.restart()
                        time.sleep(20)
                        if srv_restarted is False:
                            raise TestExecError("RGW service restart failed")
                        else:
                            log.info("RGW service restarted")
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=none" %
                   zone)
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")