Example #1
def verify_key_with_version(each_key, bucket):
    log.info('verifying data for key: %s' % os.path.basename(each_key['name']))
    key_from_s3 = bucket.Object(os.path.basename(each_key['name']))
    no_of_versions = len(each_key['versioning_info'])
    log.info('no of versions: %s' % no_of_versions)
    for each_version in each_key['versioning_info']:
        log.info('version_id: %s' % each_version['version_id'])
        key_from_s3_with_version = key_from_s3.get(
            VersionId=each_version['version_id'])
        log.info('verifying size')
        log.info('size from yaml: %s' % each_version['size'])
        log.info('size from s3 %s' % key_from_s3_with_version['ContentLength'])
        if int(each_version['size']) != int(
                key_from_s3_with_version['ContentLength']):
            raise TestExecError('Size not matched')
        log.info('verifying md5')
        log.info('md5_local: %s' % each_version['md5_local'])
        key_from_s3.download_file(
            'download.temp',
            ExtraArgs={'VersionId': each_version['version_id']})
        downloaded_md5 = utils.get_md5('download.temp')
        log.info('md5_from_s3: %s' % downloaded_md5)
        if each_version['md5_local'] != downloaded_md5:
            raise TestExecError("Md5 not matched")
        utils.exec_shell_cmd('sudo rm -rf download.temp')
        log.info('verification complete for the key: %s ---> version_id: %s' %
                 (key_from_s3.key, each_version['version_id']))
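A minimal call-site sketch: the key record layout below is inferred from the fields the function reads, all values are illustrative, and bucket is assumed to be the boto3 Bucket resource returned by the test's auth helper.

each_key = {
    'name': '/tmp/testdata/key1',                  # hypothetical local path of the uploaded key
    'versioning_info': [
        {'version_id': '<version-id-from-s3>',     # illustrative placeholders
         'size': 1024,
         'md5_local': '<md5-of-that-version>'},
    ],
}
verify_key_with_version(each_key, bucket)          # raises TestExecError on any size or md5 mismatch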
Example #2
def time_to_list_via_radosgw(bucket_name, listing):
    """
    Time taken to list via radosgw-admin command.
    :param bucket_name: name of the bucket to list
    :param listing: ordered or unordered listing
    """
    if listing == "ordered":
        log.info(
            "listing via radosgw-admin bucket list --max-entries=.. --bucket <>"
        )
        cmd = "radosgw-admin bucket list --max-entries=100000 --bucket=%s " % (
            bucket_name)
        time_taken = timeit.timeit(lambda: utils.exec_shell_cmd(cmd), number=1)
        return time_taken

    if listing == "unordered":
        log.info(
            "listing via radosgw-admin bucket list --max-entries=.. --bucket <> --allow-unordered"
        )
        cmd = (
            "radosgw-admin bucket list --max-entries=100000 --bucket=%s --allow-unordered"
            % (bucket_name))
        time_taken = timeit.timeit(lambda: utils.exec_shell_cmd(cmd), number=1)
        return time_taken
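A small usage sketch, assuming the bucket already holds enough objects for the timing difference to be meaningful:

ordered_time = time_to_list_via_radosgw(bucket_name, "ordered")
unordered_time = time_to_list_via_radosgw(bucket_name, "unordered")
log.info("ordered listing: %s secs, unordered listing: %s secs" %
         (ordered_time, unordered_time))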
def del_topic_from_kafka_broker(topic_name):
    """
    delete topic from kafka broker
    """
    log.info(f"delete topic {topic_name} from kafka broker")
    cmd = f"rm -rf /tmp/kafka-logs/{topic_name}"
    utils.exec_shell_cmd(cmd)
Example #4
def check_for_crash():
    """
    check for crash on cluster
    """
    ceph_version_id, ceph_version_name = utils.get_ceph_version()
    if ceph_version_name == "nautilus":
        log.info("check for any new crashes on the ceph cluster ")
        ceph_crash = utils.exec_shell_cmd("ceph crash ls-new")
        if ceph_crash:
            ceph_crash_all = ceph_crash.split()
            no_of_crashes = len(ceph_crash_all)
            for i in range(3, no_of_crashes):
                if i % 3 == 0:
                    ceph_crash_id, ceph_crash_entity = (
                        ceph_crash_all[i],
                        ceph_crash_all[i + 1],
                    )
                    log.info(f"ceph daemon {ceph_crash_entity} crashed!")
                    crash_info = utils.exec_shell_cmd("ceph crash info %s" %
                                                      ceph_crash_id)
            log.info(
                "archiving the crashes to silence health warnings! to view the crashes use the command: ceph crash ls"
            )
            utils.exec_shell_cmd("ceph crash archive-all")
        else:
            log.info("No ceph daemon crash found")
        return ceph_crash
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # preparing data
    user_names = ['tuffy', 'scooby', 'max']
    tenant1 = 'tenant'
    cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s' % (
        user_names[0], user_names[0], tenant1)
    out = utils.exec_shell_cmd(cmd)
    if out is False:
        raise TestExecError("RGW User creation error")
    log.info('output :%s' % out)
    v1_as_json = json.loads(out)
    log.info('created user_id: %s' % v1_as_json['user_id'])
    cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full' % (
        tenant1, user_names[0], user_names[0], tenant1)
    out2 = utils.exec_shell_cmd(cmd2)
    if out2 is False:
        raise TestExecError("sub-user creation error")
    v2_as_json = json.loads(out2)
    log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])
    cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret' % (
        user_names[0], user_names[0], tenant1, tenant1)
    out3 = utils.exec_shell_cmd(cmd3)
    if out3 is False:
        raise TestExecError("secret_key gen error")
    v3_as_json = json.loads(out3)
    log.info('created subuser: %s\nsecret_key generated: %s' % (
        v3_as_json['swift_keys'][0]['user'], v3_as_json['swift_keys'][0]['secret_key']))
    user_info = {'user_id': v3_as_json['swift_keys'][0]['user'],
                 'key': v3_as_json['swift_keys'][0]['secret_key']}
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=cc)
        container = swiftlib.resource_op({'obj': rgw,
                                          'resource': 'put_container',
                                          'args': [container_name]})
        if container is False:
            raise TestExecError("Resource execution failed: container creation faield")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name('%s.container.%s' % (user_names[0], cc), oc)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            data_info = manage_data.io_generator(object_path, size)
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name, swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #6
def download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path,
                    config):
    log.info("s3 object name to download: %s" % s3_object_name)
    s3_object_download_name = s3_object_name + "." + "download"
    s3_object_download_path = os.path.join(TEST_DATA_PATH,
                                           s3_object_download_name)
    object_downloaded_status = s3lib.resource_op({
        "obj": bucket,
        "resource": "download_file",
        "args": [s3_object_name, s3_object_download_path],
    })
    if object_downloaded_status is False:
        raise TestExecError(
            "Resource execution failed: object download failed")
    if object_downloaded_status is None:
        log.info("object downloaded")

    s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
    s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
    log.info("s3_object_downloaded_md5: %s" % s3_object_downloaded_md5)
    log.info("s3_object_uploaded_md5: %s" % s3_object_uploaded_md5)
    if str(s3_object_uploaded_md5) == str(s3_object_downloaded_md5):
        log.info("md5 match")
        utils.exec_shell_cmd("rm -rf %s" % s3_object_download_path)
    else:
        raise TestExecError("md5 mismatch")
    if config.local_file_delete is True:
        log.info("deleting local file created after the upload")
        utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
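A hypothetical round-trip check built on this helper, assuming bucket is a boto3 Bucket resource and the object was previously uploaded from s3_object_path:

s3_object_name = utils.gen_s3_object_name(bucket.name, 0)
s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
# upload step omitted; download_object() raises TestExecError on an md5 mismatch
download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)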
def validate_prefix_rule(bucket, config):
    """
    This function is to validate the prefix rule for versioned objects

    Parameters:
        bucket: bucket object whose stats and listing are verified
        config: test configuration (uses objects_count and test_ops['version_count'])
    """
    log.info("verification starts")
    op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
    op2 = utils.exec_shell_cmd("radosgw-admin bucket list --bucket=%s" % bucket.name)
    json_doc = json.loads(op)
    json_doc2 = json.loads(op2)
    objects = json_doc["usage"]["rgw.main"]["num_objects"]
    objs_total = (config.test_ops["version_count"]) * (config.objects_count)
    objs_ncurr = (config.test_ops["version_count"]) * (config.objects_count) - (
        config.objects_count
    )
    objs_diff = objs_total - objs_ncurr
    c1 = 0
    if objects == objs_total:
        for i, entry in enumerate(json_doc2):
            print(entry["tag"])
            if entry["tag"] == "delete-marker":
                c1 = c1 + 1
        if c1 == (config.objects_count):
            log.info(
                "Lifecycle expiration of current object version validated for prefix filter"
            )
    if objects == objs_diff:
        log.info(
            "Lifecycle expiration of non_current object version validated for prefix filter"
        )
Example #8
def set_gc_conf(ceph_conf, conf):
    log.info('making changes to ceph.conf')
    ceph_conf.set_to_ceph_conf(
        'global', ConfigOpts.bluestore_block_size,
        str(conf.get('bluestore_block_size', 1549267441664)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_max_queue_size,
                               str(conf.get('rgw_gc_max_queue_size', 367788)))
    ceph_conf.set_to_ceph_conf(
        'global', ConfigOpts.rgw_gc_processor_max_time,
        str(conf.get('rgw_gc_processor_max_time', 3600)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_max_concurrent_io,
                               str(conf.get('rgw_gc_max_concurrent_io', 10)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_objexp_gc_interval,
                               str(conf.get('rgw_objexp_gc_interval', 10)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_max_trim_chunk,
                               str(conf.get('rgw_gc_max_trim_chunk', 32)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                               str(conf.get('rgw_gc_obj_min_wait', 10)))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_processor_period,
                               str(conf.get('rgw_gc_processor_period', 10)))
    log.info('trying to restart services')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')
    # Delete gc queue
    pool_name = utils.exec_shell_cmd(
        'ceph df |awk \'{ print $1 }\'| grep rgw.log')
    pool_name = pool_name.replace("\n", "")
    for i in range(0, 32):
        utils.exec_shell_cmd('rados rm gc.%d -p %s -N gc' % (i, pool_name))
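A usage sketch: the dict keys below are exactly the ones set_gc_conf() looks up, the override values are illustrative, and ceph_conf is assumed to be the test framework's ceph.conf helper object.

gc_conf = {
    'rgw_gc_obj_min_wait': 5,            # make GC pick up deleted objects sooner
    'rgw_gc_processor_period': 5,
    'rgw_gc_max_concurrent_io': 20,
}
set_gc_conf(ceph_conf, gc_conf)          # keys not supplied fall back to the defaults above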
Example #9
def verify_key(each_key, bucket):
    """
        This function verifies data of each key in the bucket

        Parameters:
            each_key(dict): key record to be verified
            bucket: bucket object

        Returns:

    """
    log.info('verifying data for key: %s' % os.path.basename(each_key['name']))
    key_from_s3 = bucket.Object(os.path.basename(each_key['name']))
    log.info('verifying size')
    log.info('size from yaml: %s' % each_key['size'])
    log.info('size from s3: %s' % key_from_s3.content_length)
    if int(each_key['size']) != int(key_from_s3.content_length):
        raise TestExecError("Size not matched")
    log.info('verifying md5')
    log.info('md5_local: %s' % each_key['md5_local'])
    key_from_s3.download_file('download.temp')
    downloaded_md5 = utils.get_md5('download.temp')
    log.info('md5_from_s3: %s' % downloaded_md5)
    if each_key['md5_local'] != downloaded_md5:
        raise TestExecError("Md5 not matched")
    utils.exec_shell_cmd('sudo rm -rf download.temp')
    log.info('verification complete for the key: %s' % key_from_s3.key)
Example #10
def check_datalog_marker():
    """
    check the datalog marker
    """
    # changing the value of rgw_data_log_num_shards is not supported. Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1938105#c7
    log.info("get the value of rgw_data_log_num_shards")
    cmd = "ceph config get mon.* rgw_data_log_num_shards"
    datalog_num_shards = utils.exec_shell_cmd(cmd)
    log.info(f"datalog_num_shards: {datalog_num_shards}")

    # check for marker in datalog status
    cmd = "radosgw-admin datalog status"
    datalog_status_cmd = utils.exec_shell_cmd(cmd)
    datalog_status = json.loads(datalog_status_cmd)

    # fetch the first occurrence of marker
    get_datalog_marker = ""
    datalog_num_shards = int(datalog_num_shards) - 1
    for i in range(datalog_num_shards):
        if datalog_status[i]["marker"] == "":
            continue
        else:
            get_datalog_marker = datalog_status[i]["marker"]
            break

    if "1_" in get_datalog_marker:
        return "omap"
    if ":" in get_datalog_marker:
        return "fifo"
    if "" in get_datalog_marker:
        raise TestExecError("failed to fetch datalog marker")
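A brief usage sketch; expecting the fifo backend here is only an illustration of how the return value might be consumed:

datalog_backend = check_datalog_marker()
log.info(f"datalog backend: {datalog_backend}")
if datalog_backend != "fifo":
    raise TestExecError("expected fifo datalog backend, got %s" % datalog_backend)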
Example #11
def create_pem():
    """
    creates a pem file.

    Parameters:

    Returns:
    PEM_FILE_PATH :  returns the pem file path
    """
    try:
        log.info("Creating pem file")
        cmd = (
            "openssl req -x509 -newkey rsa:2048 -keyout server.key -out server.csr -days 365 -nodes "
            '-subj "/C=IN/ST=KA/L=BLR/O=Carina Company/OU=Redhat/CN=*.ceph.redhat.com"'
        )
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise Exception("Key file creation error")
        log.info("output :%s" % out)
        cmd2 = "cat server.csr server.key > {pem_file_path}".format(
            pem_file_path=PEM_FILE_PATH
        )
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise Exception("Pem file generation error")
        log.info("output :%s" % out2)
        log.info("pem file created")
        return PEM_FILE_PATH

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        sys.exit(1)
Example #12
    def verify_if_objects_created(self):

        log.info('verification of s3 objects')

        for each_key in self.objects:

            log.info('verifying data for key: %s' %
                     os.path.basename(each_key['name']))

            log.info('bucket: %s' % each_key['bucket'])

            key_from_s3 = self.rgw_conn.Object(
                each_key['bucket'], os.path.basename(each_key['name']))
            log.info('got key name from s3: %s' % key_from_s3.key)

            if each_key['type'] == 'file':

                log.info('verifying md5')

                log.info('md5_local: %s' % each_key['md5'])
                key_from_s3.download_file('download.temp')
                downloaded_md5 = utils.get_md5('download.temp')
                log.info('md5_from_s3: %s' % downloaded_md5)

                if each_key['md5'] != downloaded_md5:
                    raise TestExecError("md5 not matched")

                utils.exec_shell_cmd('sudo rm -rf download.temp')
Example #13
def get_datalog_marker():
    """
    check the datalog marker
    """
    # changing the value of rgw_data_log_num_shards is not supported. Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1938105#c7
    log.info("get the value of rgw_data_log_num_shards")
    cmd = "ceph config get mon.* rgw_data_log_num_shards"
    datalog_num_shards = utils.exec_shell_cmd(cmd)
    log.info(f"datalog_num_shards: {datalog_num_shards}")

    # check for marker in datalog status
    cmd = "radosgw-admin datalog status"
    datalog_status_cmd = utils.exec_shell_cmd(cmd)
    datalog_status = json.loads(datalog_status_cmd)

    # fetch the first occurrence of marker
    get_datalog_marker = ""
    shard_id = -1
    datalog_num_shards = int(datalog_num_shards) - 1
    for i in range(datalog_num_shards):
        if datalog_status[i]["marker"] == "":
            continue
        else:
            get_datalog_marker = datalog_status[i]["marker"]
            shard_id = i
            break

    # return shard_id and datalog_mark, Ref BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1981860
    return shard_id, get_datalog_marker
Example #14
def verify_key(each_key, bucket):
    """
    This function verifies data of each key in the bucket

    Parameters:
        each_key(dict): key record to be verified
        bucket: bucket object
    """
    log.info("verifying data for key: %s" % os.path.basename(each_key["name"]))
    check_object_exists(os.path.basename(each_key["name"]), bucket)
    key_from_s3 = bucket.Object(os.path.basename(each_key["name"]))
    log.info("verifying size")
    log.info("size from yaml: %s" % each_key["size"])
    log.info("size from s3: %s" % key_from_s3.content_length)
    if int(each_key["size"]) != int(key_from_s3.content_length):
        raise TestExecError("Size not matched")
    log.info("verifying md5")
    log.info("md5_local: %s" % each_key["md5_local"])
    key_from_s3.download_file("download.temp")
    downloaded_md5 = utils.get_md5("download.temp")
    log.info("md5_from_s3: %s" % downloaded_md5)
    if each_key["md5_local"] != downloaded_md5:
        raise TestExecError("Md5 not matched")
    utils.exec_shell_cmd("sudo rm -rf download.temp")
    log.info("verification complete for the key: %s" % key_from_s3.key)
Example #15
def create_pem():
    try:
        log.info('Creating pem file')
        cmd = 'openssl req -x509 -newkey rsa:2048 -keyout server.key -out server.csr -days 365 -nodes ' \
              '-subj "/C=IN/ST=KA/L=BLR/O=Carina Company/OU=Redhat/CN=*.ceph.redhat.com"'
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise Exception("Key file creation error")
        log.info('output :%s' % out)
        cmd2 = 'cat server.csr server.key > {pem_file_path}'.format(
            pem_file_path=PEM_FILE_PATH)
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise Exception("Pem file generation error")
        log.info('output :%s' % out2)
        # # copy tmp_pem_file to PEM_FILE_PATH
        # cmd3 = 'cp {tmp_pem_file} {pem_file_path}'.format(tmp_pem_file=PEM_FILE_NAME,
        #                                                   pem_file_path=PEM_FILE_PATH)
        # out3 = utils.exec_shell_cmd(cmd3)
        # if out3 is False:
        #     raise Exception("Linux copy error")
        # log.info('output :%s' % out3)
        log.info('pem file created')
        return PEM_FILE_PATH

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        sys.exit(1)
Example #16
def create_local_file(file_size, file_name):
    """
    Creates a local file with specified size
    Args:
        file_size(int): Size of the file to be created
        file_name(str): Name of the file to be created
    """
    exec_shell_cmd(f"fallocate -l {file_size} {file_name}")
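Usage is a one-liner; the size string is passed straight to fallocate, so its size suffixes apply:

create_local_file("25m", "obj25m")   # e.g. "25m", "1g"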
Example #17
    def ganesha_stop(self):
        """
        This function is to stop the nfs-ganesha service
        """
        log.info('stopping ganesha services via systemctl')
        cmd = 'sudo systemctl stop nfs-ganesha'
        utils.exec_shell_cmd(cmd)
        time.sleep(10)
Example #18
def put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config):
    bucket_life_cycle = s3lib.resource_op({'obj': rgw_conn,
                                           'resource': 'BucketLifecycleConfiguration',
                                           'args': [bucket.name]})
    put_bucket_life_cycle = s3lib.resource_op({"obj": bucket_life_cycle,
                                               "resource": "put",
                                               "kwargs": dict(LifecycleConfiguration=life_cycle_rule)})
    log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle)
    if put_bucket_life_cycle is False:
        raise TestExecError("Resource execution failed: put bucket lifecycle failed")
    if put_bucket_life_cycle is not None:
        response = HttpResponseParser(put_bucket_life_cycle)
        if response.status_code == 200:
            log.info('bucket life cycle added')
        else:
            raise TestExecError("bucket lifecycle addition failed")
    log.info('trying to retrieve bucket lifecycle config')
    get_bucket_life_cycle_config = s3lib.resource_op({"obj": rgw_conn2,
                                                      "resource": 'get_bucket_lifecycle_configuration',
                                                      "kwargs": dict(Bucket=bucket.name)
                                                      })
    if get_bucket_life_cycle_config is False:
        raise TestExecError("bucket lifecycle config retrieval failed")
    if get_bucket_life_cycle_config is not None:
        response = HttpResponseParser(get_bucket_life_cycle_config)
        if response.status_code == 200:
            log.info('bucket life cycle retrieved')
        else:
            raise TestExecError("bucket lifecycle config retrieval failed")
    else:
        raise TestExecError("bucket life cycle retrieved")
    objs_total = (config.test_ops['version_count']) * (config.objects_count)
    for rule in config.lifecycle_conf:
        if rule.get('Expiration', {}).get('Date', False):
            # todo: need to get the interval value from yaml file
            log.info("wait for 60 seconds")
            time.sleep(60)
        else:
            for time_interval in range(19):
                bucket_stats_op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
                json_doc1 = json.loads(bucket_stats_op)
                obj_pre_lc = json_doc1['usage']['rgw.main']['num_objects']
                if obj_pre_lc == objs_total:
                    time.sleep(30)
                else:
                    raise TestExecError("Objects expired before the expected days")
            time.sleep(60)

    log.info('testing if lc is applied via the radosgw-admin cli')
    op = utils.exec_shell_cmd("radosgw-admin lc list")
    json_doc = json.loads(op)
    for i, entry in enumerate(json_doc):
        print(i)
        print(entry['status'])
        if entry['status'] == 'COMPLETE' or entry['status'] == 'PROCESSING':
            log.info('LC is applied on the bucket')
        else:
            log.info('LC is not applied')
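A call-site sketch for this helper. The lifecycle rule follows the standard S3 lifecycle schema, the rule id and prefix are illustrative, and rgw_conn / rgw_conn2 are assumed to be the boto3 resource and client handles returned by the test's auth step; the helper also reads config.lifecycle_conf, config.objects_count and config.test_ops['version_count'].

life_cycle_rule = {
    'Rules': [{
        'ID': 'expire-current-versions',      # hypothetical rule id
        'Filter': {'Prefix': 'key.'},
        'Status': 'Enabled',
        'Expiration': {'Days': 1},
    }]
}
config.lifecycle_conf = life_cycle_rule['Rules']
put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)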
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()

    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    cmd = "radosgw-admin quota enable --quota-scope=user --uid={uid} --tenant={tenant}".format(
        uid=user_names[0], tenant=tenant)
    enable_user_quota = utils.exec_shell_cmd(cmd)
    cmd = "radosgw-admin quota set --quota-scope=user --uid={uid} --tenant={tenant} --max_buckets=2000".format(
        uid=user_names[0], tenant=tenant)
    max_bucket = utils.exec_shell_cmd(cmd)
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info["user_id"], rand_no=cc)
        container = swiftlib.resource_op({
            "obj": rgw,
            "resource": "put_container",
            "args": [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")

    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
    cmd = "swift -A http://{hostname}/auth/1.0 -U '{uid}' -K '{key}' stat".format(
        hostname=hostname, uid=user_info["user_id"], key=user_info["key"])
    swift_cmd = utils.exec_shell_cmd(cmd)
    swift_cmd = swift_cmd.replace(" ", "")
    swift_cmd = swift_cmd.replace("\n", ":")
    li = list(swift_cmd.split(":"))
    res_dct = {li[i]: li[i + 1] for i in range(0, len(li) - 1, 2)}

    if int(res_dct["Containers"]) == config.container_count:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.success_status("test passed")
        sys.exit(0)
    else:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.failed_status("test failed")
        sys.exit(1)
Example #20
    def ganesha_restart(self):

        log.info('restarting ganesha services')

        log.info('restarting services using systemctl')

        cmd = 'sudo systemctl restart nfs-ganesha'
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #21
def set_gc_conf(ceph_conf, conf):
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.bluestore_block_size,
        str(conf.get("bluestore_block_size", 1549267441664)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_max_queue_size,
        str(conf.get("rgw_gc_max_queue_size", 367788)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_processor_max_time,
        str(conf.get("rgw_gc_processor_max_time", 3600)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_max_concurrent_io,
        str(conf.get("rgw_gc_max_concurrent_io", 10)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_objexp_gc_interval,
        str(conf.get("rgw_objexp_gc_interval", 10)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_max_trim_chunk,
        str(conf.get("rgw_gc_max_trim_chunk", 32)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_obj_min_wait,
        str(conf.get("rgw_gc_obj_min_wait", 10)),
    )
    ceph_conf.set_to_ceph_conf(
        "global",
        ConfigOpts.rgw_gc_processor_period,
        str(conf.get("rgw_gc_processor_period", 10)),
    )
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")
    # Delete gc queue
    pool_name = utils.exec_shell_cmd(
        "ceph df |awk '{ print $1 }'| grep rgw.log")
    pool_name = pool_name.replace("\n", "")
    for i in range(0, 32):
        utils.exec_shell_cmd("rados rm gc.%d -p %s -N gc" % (i, pool_name))
Example #22
def add_caps(user_info, caps="roles=*"):
    """for RGW STS, we need to enable caps on user_1

    Args:
        user_info (dict): user info dict
        caps (str, optional): Defaults to "roles=*".
    """
    log.info("adding caps to user info")
    add_caps_cmd = 'sudo radosgw-admin caps add --uid="{user_id}" --caps={caps}'.format(
        user_id=user_info["user_id"], caps=caps)
    utils.exec_shell_cmd(add_caps_cmd)
def get_svc_time():

    cmd = "pidof radosgw"
    pid = utils.exec_shell_cmd(cmd)
    pid = pid.strip()
    cmd = "ps -p " + pid + " -o etimes"
    srv_time = utils.exec_shell_cmd(cmd)
    srv_time = srv_time.replace("\n", "")
    srv_time = srv_time.replace(" ", "")
    srv_time = int(srv_time[7:])
    return srv_time
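A sketch of one way get_svc_time() can be used: compare the RGW daemon's elapsed time before and after a restart to confirm the process actually bounced (rgw_service is the framework's RGW service handle, as used in the GC example above).

svc_time_before = get_svc_time()
rgw_service.restart()
time.sleep(30)
svc_time_after = get_svc_time()
if svc_time_after >= svc_time_before:
    raise TestExecError("RGW service does not appear to have restarted")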
Example #24
    def ganesha_start(self):

        log.info('starting nfs-ganesha services')

        cmd = 'sudo systemctl enable nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        cmd = 'sudo systemctl start nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #25
    def kernel_stop(self):

        log.info('stopping nfs kernel services')

        cmd = 'systemctl stop nfs-server.service'
        utils.exec_shell_cmd(cmd)

        cmd = 'systemctl disable nfs-server.service'
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #26
    def ganesha_restart(self):
        """
             This function is to restart the nfs-ganesha service
        """
        log.info('restarting ganesha services')

        log.info('restarting services using systemctl')

        cmd = 'sudo systemctl restart nfs-ganesha'
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    user_info = resource_op.create_users(
        no_of_users_to_create=config.user_count)[0]
    user_name = user_info["user_id"]

    ip_and_port = s3cmd_reusable.get_rgw_ip_and_port()
    s3_auth.do_auth(user_info, ip_and_port)

    if config.bucket_stats:
        bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0)
        s3cmd_reusable.create_bucket(bucket_name)
        log.info(f"Bucket {bucket_name} created")
        utils.exec_shell_cmd(f"fallocate -l 25m obj25m")
        object_name = f"s3://{bucket_name}/encyclopedia/space & universe/.bkp/journal$i"
        range_val = f"1..{config.objects_count}"
        cmd = ("for i in {" + range_val +
               "}; do /home/cephuser/venv/bin/s3cmd put obj25m " +
               object_name + ";done;")

        rc = utils.exec_shell_cmd(cmd)

        if rc:
            raise AssertionError("expected scenario is not achieved!!!")

        bucket_stats = utils.exec_shell_cmd(
            f"radosgw-admin bucket stats --bucket {bucket_name}")
        log.info(f" bucket stats are :{bucket_stats}")

        data = json.loads(bucket_stats)

        num_objects = data["usage"]["rgw.main"]["num_objects"]
        log.info(f"num objects :{num_objects}")

        object_count = utils.exec_shell_cmd(
            f"/home/cephuser/venv/bin/s3cmd ls s3://{bucket_name} --recursive | wc -l"
        )
        log.info(f"object_count :{object_count}")

        if int(num_objects) != int(object_count):
            raise AssertionError("Inconsistency found in number of objects")

        if "rgw.none" in data["usage"].keys():
            raise AssertionError("inconsistency issue observed")
Example #28
    def ganesha_start(self):
        """
        This function is to start the nfs-ganesha service

        """
        log.info('starting nfs-ganesha services')

        cmd = 'sudo systemctl enable nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        cmd = 'sudo systemctl start nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #29
def sync_status(retry=10, delay=60):
    """
    verify multisite sync status
    """
    log.info("check sync status")
    cmd = "sudo radosgw-admin sync status"
    check_sync_status = utils.exec_shell_cmd(cmd)

    # check for 'failed' or 'ERROR' in sync status.
    if "failed" in check_sync_status or "ERROR" in check_sync_status:
        log.info("checking for any sync error")
        cmd = "sudo radosgw-admin sync error list"
        sync_error_list = utils.exec_shell_cmd(cmd)
        raise SyncFailedError("sync status is in failed or errored state!")
    else:
        log.info("No errors or failures in sync status")

    log.info(
        f"check if sync is in progress, if sync is in progress retry {retry} times with {delay}secs of sleep between each retry"
    )
    if "behind" in check_sync_status or "recovering" in check_sync_status:
        log.info("sync is in progress")
        log.info(f"sleep of {delay} secs for sync to complete")
        for retry_count in range(retry):
            time.sleep(delay)
            cmd = "sudo radosgw-admin sync status"
            check_sync_status = utils.exec_shell_cmd(cmd)
            if "behind" in check_sync_status or "recovering" in check_sync_status:
                log.info(
                    f"sync is still in progress. sleep for {delay}secs and retry"
                )
            else:
                log.info("sync completed")
                break

        if "behind" in check_sync_status or "recovering" in check_sync_status:
            raise SyncFailedError(
                f"sync looks slow or stuck. with {retry} retries and sleep of {delay}secs between each retry"
            )

    # check metadata sync status
    if "metadata is behind" in check_sync_status:
        raise Exception("metadata sync looks slow or stuck.")

    # check status for complete sync
    if "data is caught up with source" in check_sync_status:
        log.info("sync status complete")
    else:
        raise SyncFailedError("sync is either slow or stuck")
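A typical call-site sketch, for example right after objects are written on the primary site of a multisite setup:

sync_status()                      # defaults: 10 retries, 60 secs apart
sync_status(retry=25, delay=60)    # a larger retry budget for heavier workloads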
Example #30
def link_chown_to_nontenanted(new_uid, bucket, tenant):
    """"""
    cmd2 = 'radosgw-admin bucket link --bucket=%s --uid=%s' % (
        tenant + '/' + bucket, new_uid)
    out3 = utils.exec_shell_cmd(cmd2)
    if out3 is False:
        raise TestExecError("RGW Bucket link error")
    log.info('output :%s' % out3)
    cmd3 = 'radosgw-admin bucket chown --bucket=%s --uid=%s' % (bucket, new_uid)
    out4 = utils.exec_shell_cmd(cmd3)
    if out4 is False:
        raise TestExecError("RGW Bucket chown error")
    log.info('output :%s' % out4)
    return
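A hypothetical invocation, with all names illustrative: move a bucket owned by a tenanted user over to a non-tenanted user.

link_chown_to_nontenanted('newuser', 'mybucket', 'tenant1')   # links tenant1/mybucket to newuser, then chowns it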