Example 1
def check_for_crash():
    """
    check for crash on cluster
    """
    ceph_version_id, ceph_version_name = utils.get_ceph_version()
    if ceph_version_name == "nautilus":
        log.info("check for any new crashes on the ceph cluster ")
        ceph_crash = utils.exec_shell_cmd("ceph crash ls-new")
        if ceph_crash:
            ceph_crash_all = ceph_crash.split()
            no_of_crashes = len(ceph_crash_all)
            for i in range(3, no_of_crashes):
                if i % 3 == 0:
                    ceph_crash_id, ceph_crash_entity = (
                        ceph_crash_all[i],
                        ceph_crash_all[i + 1],
                    )
                    log.info(f"ceph daemon {ceph_crash_entity} crashed!")
                    crash_info = utils.exec_shell_cmd("ceph crash info %s" %
                                                      ceph_crash_id)
            log.info(
                "archiving the crashes to silence health warnings! to view the crashes use the command: ceph crash ls"
            )
            utils.exec_shell_cmd("ceph crash archive-all")
        else:
            log.info("No ceph daemon crash found")
        return ceph_crash
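
The loop above tokenises the whole "ceph crash ls-new" listing and relies on a three-column layout (ID, ENTITY, NEW): after the three header tokens, every index divisible by 3 holds a crash id and the following token is the crashing daemon. A self-contained sketch of that parsing, using made-up output rather than a real listing:

# Illustration only: the listing below is fabricated, but it has the
# three-column shape the stride-of-three loop in check_for_crash() expects.
sample = (
    "ID ENTITY NEW\n"
    "2023-01-01T00:00:00.000000Z_1111aaaa mgr.host1 *\n"
    "2023-01-02T00:00:00.000000Z_2222bbbb osd.3 *"
)
tokens = sample.split()
for i in range(3, len(tokens)):
    if i % 3 == 0:
        crash_id, crash_entity = tokens[i], tokens[i + 1]
        print(f"{crash_entity} crashed (crash id {crash_id})")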
Example 2
    def set_to_ceph_conf(self, section, option, value=None):
        version_id, version_name = utils.get_ceph_version()
        log.info(f"ceph version id: {version_id}")
        log.info(f"version name: {version_name}")

        if version_name in ["luminous", "nautilus"]:
            log.info("using ceph_conf to config values")
            self.set_to_ceph_conf_file(section, option, value)
        else:
            log.info("using ceph config cli to set the config values")
            log.info(option)
            log.info(value)
            self.set_to_ceph_cli(option, value)
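
For reference, the later examples exercise both branches of this dispatcher; a minimal sketch, assuming ceph_conf is a CephConfOp instance and ConfigOpts is the option enum used throughout these examples (values are illustrative):

# Sketch only: mirrors the calls made in the test_exec examples below.
# Luminous/Nautilus path: the option is written to ceph.conf under [global].
ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval, "30")
# Newer releases: the section argument is ignored and the ceph config CLI is used.
ceph_conf.set_to_ceph_conf(
    section=None, option=ConfigOpts.rgw_lc_debug_interval, value="30"
)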
Example 3
def check_for_crash():
    """
    check for crash on cluster
    """
    ceph_version_id, ceph_version_name = utils.get_ceph_version()
    if ceph_version_name == 'nautilus':
        log.info('check for any new crashes on the ceph cluster ')
        ceph_crash = utils.exec_shell_cmd('ceph crash ls-new')
        if ceph_crash:
            ceph_crash_all = ceph_crash.split()
            no_of_crashes = len(ceph_crash_all)
            for i in range(3, no_of_crashes):
                if i % 3 == 0:
                    ceph_crash_id, ceph_crash_entity = ceph_crash_all[i], ceph_crash_all[i + 1]
                    log.info(f'ceph daemon {ceph_crash_entity} crashed!')
                    crash_info = utils.exec_shell_cmd('ceph crash info %s' % ceph_crash_id)
            log.info('archiving the crashes to silence health warnings! to view the crashes use the command: ceph crash ls')
            utils.exec_shell_cmd('ceph crash archive-all')
        else:
            log.info('No ceph daemon crash found')
        return ceph_crash
Example 4
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    _, version_name = utils.get_ceph_version()
    if "nautilus" in version_name:
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker,
                                   str(config.rgw_lc_max_worker))
    else:
        ceph_conf.set_to_ceph_conf(
            section=None,
            option=ConfigOpts.rgw_lc_max_worker,
            value=str(config.rgw_lc_max_worker),
        )
        ceph_conf.set_to_ceph_conf(section=None,
                                   option=ConfigOpts.rgw_lc_debug_interval,
                                   value="30")
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    obj_list = []
    obj_tag = "suffix1=WMV1"
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    prefix = [
        rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix")
        for rule in config.lifecycle_conf
    ]
    prefix = prefix if prefix else ["dummy1"]
    if config.test_ops["enable_versioning"] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops["version_count"] > 0:
                    for vc in range(config.test_ops["version_count"]):
                        log.info("version count for %s is %s" %
                                 (s3_object_name, str(vc)))
                        log.info("modifying data: %s" % s3_object_name)
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg="hello object for version: %s\n" %
                            str(vc),
                        )
                else:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket,
                                           TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops["delete_marker"] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                   life_cycle_rule_new, config)
    if config.test_ops["enable_versioning"] is False:
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
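
The prefix extraction above only needs each lifecycle rule to expose Filter.Prefix (or Filter.And.Prefix). A hypothetical config.lifecycle_conf entry that satisfies it; the rule contents are illustrative, not taken from the original test YAML:

# Hypothetical lifecycle rule used only to show the shape the prefix
# extraction in test_exec relies on.
lifecycle_conf = [
    {
        "ID": "LC_prefix_rule",
        "Filter": {"Prefix": "key1"},
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    }
]
prefix = [
    rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix")
    for rule in lifecycle_conf
]
print(prefix)  # ['key1']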
Example 5
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()

        # authenticate sns client.
        rgw_sns_conn = auth.do_auth_sns_client()

        # authenticate with s3 client
        rgw_s3_client = auth.do_auth_using_client()

        # get ceph version
        ceph_version_id, ceph_version_name = utils.get_ceph_version()

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)

                # create topic with endpoint
                if config.test_ops["create_topic"] is True:
                    endpoint = config.test_ops.get("endpoint")
                    ack_type = config.test_ops.get("ack_type")
                    topic_id = str(uuid.uuid4().hex[:16])
                    persistent = False
                    topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id
                    log.info(
                        f"creating a topic with {endpoint} endpoint with ack type {ack_type}"
                    )
                    if config.test_ops.get("persistent_flag", False):
                        log.info("topic with peristent flag enabled")
                        persistent = config.test_ops.get("persistent_flag")
                    topic = notification.create_topic(rgw_sns_conn, endpoint,
                                                      ack_type, topic_name,
                                                      persistent)

                # get topic attributes
                if config.test_ops.get("get_topic_info", False):
                    log.info("get topic attributes")
                    get_topic_info = notification.get_topic(
                        rgw_sns_conn, topic, ceph_version_name)

                # put bucket notification with topic configured for event
                if config.test_ops["put_get_bucket_notification"] is True:
                    event = config.test_ops.get("event_type")
                    notification_name = "notification-" + str(event)
                    notification.put_bucket_notification(
                        rgw_s3_client,
                        bucket_name_to_create,
                        notification_name,
                        topic,
                        event,
                    )

                    # get bucket notification
                    log.info(
                        f"get bucket notification for bucket : {bucket_name_to_create}"
                    )
                    notification.get_bucket_notification(
                        rgw_s3_client, bucket_name_to_create)

                # create objects
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                # copy objects
                if config.test_ops.get("copy_object", False):
                    log.info("copy object")
                    status = rgw_s3_client.copy_object(
                        Bucket=bucket_name_to_create,
                        Key="copy_of_object" + s3_object_name,
                        CopySource={
                            "Bucket": bucket_name_to_create,
                            "Key": s3_object_name,
                        },
                    )
                    if status is None:
                        raise TestExecError("copy object failed")

            # delete objects
            if config.test_ops.get("delete_bucket_object", False):
                if config.test_ops.get("enable_version", False):
                    for name, path in objects_created_list:
                        reusable.delete_version_object(bucket, name, path,
                                                       rgw_conn, each_user)
                else:
                    reusable.delete_objects(bucket)

            # start kafka broker and consumer
            event_record_path = "/home/cephuser/event_record"
            start_consumer = notification.start_kafka_broker_consumer(
                topic_name, event_record_path)
            if start_consumer is False:
                raise TestExecError("Kafka consumer not running")

            # verify all the attributes of the event record. if event not received abort testcase
            log.info("verify event record attributes")
            verify = notification.verify_event_record(event,
                                                      bucket_name_to_create,
                                                      event_record_path,
                                                      ceph_version_name)
            if verify is False:
                raise EventRecordDataError(
                    "Event record is empty! notification is not seen")

        # delete topic logs on kafka broker
        notification.del_topic_from_kafka_broker(topic_name)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
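
For reference, a hypothetical config.test_ops mapping covering the keys this notification test reads; the values are only examples, not the original test configuration:

# Hypothetical test_ops for the bucket-notification test above.
test_ops = {
    "create_bucket": True,
    "enable_version": False,
    "create_topic": True,
    "endpoint": "kafka",
    "ack_type": "broker",
    "persistent_flag": False,
    "get_topic_info": True,
    "put_get_bucket_notification": True,
    "event_type": "ObjectCreated",
    "create_object": True,
    "upload_type": "normal",
    "copy_object": False,
    "delete_bucket_object": False,
}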
Example 6
    def read(self):
        """
        This function reads all the configurations parameters
        """
        if self.doc is None:
            raise ConfigError("config file not given")
        self.shards = self.doc["config"].get("shards")
        # todo: better suited to be added under ceph_conf
        self.max_objects_per_shard = self.doc["config"].get(
            "max_objects_per_shard")
        self.max_objects = None
        self.user_count = self.doc["config"].get("user_count")
        self.user_remove = self.doc["config"].get("user_remove", True)
        self.user_type = self.doc["config"].get("user_type")
        self.bucket_count = self.doc["config"].get("bucket_count")
        self.objects_count = self.doc["config"].get("objects_count")
        self.pseudo_dir_count = self.doc["config"].get("pseudo_dir_count")
        self.use_aws4 = self.doc["config"].get("use_aws4", None)
        self.objects_size_range = self.doc["config"].get("objects_size_range")
        self.sharding_type = self.doc["config"].get("sharding_type")
        self.split_size = self.doc["config"].get("split_size", 5)
        self.test_ops = self.doc["config"].get("test_ops", {})
        self.lifecycle_conf = self.doc["config"].get("lifecycle_conf")
        self.delete_marker_ops = self.doc["config"].get("delete_marker_ops")
        self.mapped_sizes = self.doc["config"].get("mapped_sizes")
        self.bucket_policy_op = self.doc["config"].get("bucket_policy_op")
        self.container_count = self.doc["config"].get("container_count")
        self.version_count = self.doc["config"].get("version_count")
        self.version_enable = self.doc["config"].get("version_enable", False)
        self.copy_version_object = self.doc["config"].get(
            "copy_version_object", False)
        self.object_expire = self.doc["config"].get("object_expire", False)
        self.dynamic_resharding = self.doc["config"].get(
            "dynamic_resharding", False)
        self.manual_resharding = self.doc["config"].get(
            "manual_resharding", False)
        self.large_object_upload = self.doc["config"].get(
            "large_object_upload", False)
        self.large_object_download = self.doc["config"].get(
            "large_object_download", False)
        self.local_file_delete = self.doc["config"].get(
            "local_file_delete", False)
        self.sts = self.doc["config"].get("sts")
        self.ceph_conf = self.doc["config"].get("ceph_conf")
        self.gc_verification = self.doc["config"].get("gc_verification", False)
        self.bucket_sync_crash = self.doc["config"].get(
            "bucket_sync_crash", False)
        self.bucket_stats = self.doc["config"].get("bucket_stats", False)
        self.header_size = self.doc["config"].get("header_size", False)
        self.test_datalog_trim_command = self.doc["config"].get(
            "test_datalog_trim_command", False)
        self.rgw_gc_obj_min_wait = self.doc["config"].get(
            "rgw_gc_obj_min_wait", False)
        self.ssl = self.doc["config"].get("ssl")
        self.frontend = self.doc["config"].get("frontend")
        self.io_op_config = self.doc.get("config").get("io_op_config")
        self.radoslist_all = self.test_ops.get("radoslist_all", False)
        self.dbr_scenario = self.doc["config"].get("dbr_scenario", None)
        self.enable_sharding = self.doc["config"].get("enable_sharding", False)
        self.change_datalog_backing = self.test_ops.get(
            "change_datalog_backing", False)
        self.persistent_flag = self.test_ops.get("persistent_flag", False)
        self.copy_object = self.test_ops.get("copy_object", False)
        self.get_topic_info = self.test_ops.get("get_topic_info", False)
        ceph_version_id, ceph_version_name = utils.get_ceph_version()
        # todo: improve Frontend class
        if ceph_version_name in ["luminous", "nautilus"]:
            frontend_config = Frontend()
        else:
            frontend_config = Frontend_CephAdm()

        # if frontend is set in config yaml
        if self.frontend:
            log.info("frontend is set in config.yaml: {}".format(
                self.frontend))
            if self.ssl is None:
                # if ssl is not set in config.yaml
                log.info("ssl is not set in config.yaml")
                self.ssl = frontend_config.curr_ssl
            # configuring frontend
            frontend_config.set_frontend(self.frontend, ssl=self.ssl)

        # if ssl is True or False in config yaml
        # and if frontend is not set in config yaml,
        elif self.ssl is not None and not self.frontend:
            # get the current frontend and add ssl to it.
            log.info("ssl is set in config.yaml")
            log.info("frontend is not set in config.yaml")
            frontend_config.set_frontend(frontend_config.curr_frontend,
                                         ssl=self.ssl)

        elif self.ssl is None:
            # if ssl is not set in config.yaml, check whether ssl is already enabled and configured by default,
            # and set self.ssl = True or False based on the ceph conf
            log.info("ssl is not set in config.yaml")
            self.ssl = frontend_config.curr_ssl
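
read() indexes self.doc["config"] directly but fetches every option with .get(), so a document only needs the top-level "config" mapping and any subset of the options. A minimal hypothetical document:

# Minimal hypothetical document for Config.read(); every key shown is optional.
doc = {
    "config": {
        "user_count": 1,
        "bucket_count": 2,
        "objects_count": 10,
        "objects_size_range": {"min": "5K", "max": "15K"},
        "test_ops": {"create_bucket": True, "create_object": True},
    }
}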
Example 7
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
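
A hypothetical config.test_ops block for the listing test above; the keys are the ones the function reads and the values are illustrative:

# Hypothetical test_ops for the bucket-listing test above.
test_ops = {
    "create_bucket": True,
    "enable_version": False,
    "create_object": True,
    "object_structure": "pseudo",  # "flat", "pseudo" or "pseudo-dir-only"
    "upload_type": "normal",  # or "multipart"
    "radoslist": True,
    "radosgw_listing_ordered": True,
    "delete_bucket_object": False,
}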
Example 8
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    if config.dbr_scenario == "brownfield":
        user_brownfield = "brownfield_user"
        all_users_info = s3lib.create_users(config.user_count, user_brownfield)
    else:
        all_users_info = s3lib.create_users(config.user_count)

    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=%s" %
                   (zone, compression_type))
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (data["placement_pools"][0]["val"]["compression"] ==
                            compression_type):
                        log.info("Compression enabled successfully")
                    else:
                        log.error("Compression is not enabled on cluster")
                        raise TestExecError(
                            "Compression is not enabled on cluster")

                else:
                    if (data["placement_pools"][0]["val"]["storage_classes"]
                        ["STANDARD"]["compression_type"] == compression_type):
                        log.info("Compression enabled successfully")
                    else:
                        log.error("Compression is not enabled on cluster")
                        raise TestExecError(
                            "Compression is not enabled on cluster")

            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)
        if config.dynamic_resharding is True:
            if utils.check_dbr_support():
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_max_objs_per_shard,
                    str(config.max_objects_per_shard),
                )
                srv_restarted = rgw_service.restart()
        if config.bucket_sync_run_with_disable_sync_thread:
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf("global",
                                       ConfigOpts.rgw_run_sync_thread, "false")
            srv_restarted = rgw_service.restart()

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                if config.bucket_sync_crash is True:
                    is_primary = utils.is_cluster_primary()
                    if is_primary:
                        bucket_name_to_create = "bkt-crash-check"
                if config.dbr_scenario == "brownfield":
                    bucket_name_to_create = ("brownfield-dynamic-bkt"
                                             if config.dynamic_resharding else
                                             "brownfield-manual-bkt")

                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.dynamic_resharding is True:
                    reusable.check_sync_status()
                    op = utils.exec_shell_cmd(
                        f"radosgw-admin bucket stats --bucket {bucket.name}")
                    json_doc = json.loads(op)
                    old_num_shards = json_doc["num_shards"]
                    log.info(f"no_of_shards_created: {old_num_shards}")
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    if utils.check_dbr_support():
                        if bucket_name_to_create in [
                                "brownfield-dynamic-bkt",
                                "brownfield-manual-bkt",
                        ]:
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            if bool(json_doc["usage"]):
                                num_object = json_doc["usage"]["rgw.main"][
                                    "num_objects"]
                                config.objects_count = (num_object * 2 +
                                                        config.objects_count)
                                config.mapped_sizes = utils.make_mapped_sizes(
                                    config)

                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info("s3_object_download_path: %s" %
                                     s3_object_download_path)
                            log.info("downloading to filename: %s" %
                                     s3_object_download_name)
                            if (config.test_ops.get("encryption_algorithm",
                                                    None) is not None):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s" %
                                    config.test_ops["encryption_algorithm"])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey":
                                        encryption_key,
                                        "SSECustomerAlgorithm":
                                        config.
                                        test_ops["encryption_algorithm"],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    "obj":
                                    bucket,
                                    "resource":
                                    "download_file",
                                    "args": [
                                        s3_object_name,
                                        s3_object_download_path,
                                    ],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info("s3_object_downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("s3_object_uploaded_md5: %s" %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info("md5 match")
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    if config.reshard_cancel_cmd:
                        op = utils.exec_shell_cmd(
                            f"radosgw-admin reshard add --bucket {bucket.name} --num-shards 29"
                        )
                        op = utils.exec_shell_cmd(
                            f"radosgw-admin reshard list")
                        if bucket.name in op:
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard cancel --bucket {bucket.name}"
                            )
                            cancel_op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard list")
                            if bucket.name in cancel_op:
                                raise TestExecError(
                                    "bucket is still in reshard queue")
                        else:
                            raise TestExecError(
                                "Command failed....Bucket is not added into reshard queue"
                            )
                    if config.bucket_sync_run:
                        out = utils.check_bucket_sync(bucket.name)
                        if out is False:
                            raise TestExecError(
                                "Command is throwing error while running bucket sync run"
                            )

                    if config.bucket_sync_status:
                        out = utils.wait_till_bucket_synced(bucket.name)
                        if not out:
                            log.info(
                                "Bucket sync is not caught up with source.")

                    if config.bucket_sync_crash:
                        is_primary = utils.is_cluster_primary()
                        if is_primary is False:
                            crash_info = reusable.check_for_crash()
                            if crash_info:
                                raise TestExecError("ceph daemon crash found!")
                            realm, source_zone = utils.get_realm_source_zone_info(
                            )
                            log.info(f"Realm name: {realm}")
                            log.info(f"Source zone name: {source_zone}")
                            for i in range(
                                    600):  # Running sync command for 600 times
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket sync run --bucket bkt-crash-check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}"
                                )
                                crash_info = reusable.check_for_crash()
                                if crash_info:
                                    raise TestExecError(
                                        "ceph daemon crash found!")
                                time.sleep(1)
                    if config.dynamic_resharding is True:
                        if utils.check_dbr_support():
                            reusable.check_sync_status()
                            for i in range(10):
                                time.sleep(
                                    60
                                )  # Adding delay for processing reshard list
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket stats --bucket {bucket.name}"
                                )
                                json_doc = json.loads(op)
                                new_num_shards = json_doc["num_shards"]
                                log.info(
                                    f"no_of_shards_created: {new_num_shards}")
                                if new_num_shards > old_num_shards:
                                    break
                            else:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    if config.manual_resharding is True:
                        if utils.check_dbr_support():
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            old_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {old_num_shards}")
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}"
                            )
                            op = utils.exec_shell_cmd(
                                "radosgw-admin reshard process")
                            time.sleep(60)
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            new_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {new_num_shards}")
                            if new_num_shards <= old_num_shards:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    # verification of shards after upload
                    if config.test_datalog_trim_command is True:
                        shard_id, end_marker = reusable.get_datalog_marker()
                        cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            raise TestExecError("Segmentation fault occured")

                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info(
                            "got output from sharing verification.--------")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        if config.bucket_sync_run_with_disable_sync_thread is False:
                            time.sleep(10)
                            reusable.check_sync_status()
                            reusable.delete_bucket(bucket)
                            ceph_version_id, _ = utils.get_ceph_version()
                            cmd = f"radosgw-admin bucket stats --bucket={bucket.name}"
                            ec, _ = sp.getstatusoutput(cmd)
                            log.info(f"Bucket stats for non-existent is {ec}")
                            if (float(ceph_version_id[0]) >= 16
                                    and float(ceph_version_id[1]) >= 2.8):
                                if ec != 2:
                                    raise TestExecError(
                                        "Bucket stats for non-existent bucket should return failure (2) or ENOENT."
                                    )
                    if config.bucket_sync_run_with_disable_sync_thread:
                        out = utils.check_bucket_sync(bucket.name)
                        if out is False:
                            raise TestExecError(
                                "Command is throwing error while running bucket sync run"
                            )
        if config.bucket_sync_run_with_disable_sync_thread:
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf("global",
                                       ConfigOpts.rgw_run_sync_thread, "True")
            srv_restarted = rgw_service.restart()
        if config.modify_user:
            user_id = each_user["user_id"]
            new_display_name = each_user["user_id"] + each_user["user_id"]
            cmd = f"radosgw-admin user modify --uid='{user_id}' --display-name='{new_display_name}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if new_display_name == out["display_name"]:
                log.info("User modified successfully")
            else:
                raise TestExecError("Failed to modify user")
        if config.suspend_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user suspend --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if out["suspended"] == 1:
                log.info("User got suspended")
            else:
                raise TestExecError("Failed to suspend user")
        if config.enable_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user enable --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if out["suspended"] == 0:
                log.info("User enabled successfully")
            else:
                raise TestExecError("Failed to enable user")
        if config.delete_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user rm --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            cmd = f"radosgw-admin user list"
            out = utils.exec_shell_cmd(cmd)
            if user_id not in out:
                log.info("User removed successfully")
            else:
                raise TestExecError("Failed to remove user")
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=none" %
                   zone)
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

    # check for any health errors or large omaps
    out = utils.get_ceph_status()
    if not out:
        raise TestExecError(
            "ceph status is either in HEALTH_ERR or we have large omap objects."
        )
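
Finally, a hypothetical sketch of the nested knobs this last test reads from config.test_ops; the values are illustrative only:

# Hypothetical test_ops for the sharding/compression test above.
test_ops = {
    "create_bucket": True,
    "create_object": True,
    "upload_type": "normal",
    "download_object": False,
    "delete_bucket_object": False,
    "sharding": {"enable": True, "max_shards": 17},
    "compression": {"enable": False, "type": "zlib"},
}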