def create_tenant_user(tenant_name, user_id, cluster_name='ceph'):
    # using userid as displayname
    admin_ops = UserMgmt()
    return admin_ops.create_tenant_user(user_id=user_id,
                                        displayname=user_id,
                                        cluster_name=cluster_name,
                                        tenant_name=tenant_name)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()

    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
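    # enable a user-scope quota and set max_buckets to 2000 for the tenant
    # user so the container_count loop below is not limited by bucket quota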
    cmd = "radosgw-admin quota enable --quota-scope=user --uid={uid} --tenant={tenant}".format(
        uid=user_names[0], tenant=tenant)
    enable_user_quota = utils.exec_shell_cmd(cmd)
    cmd = "radosgw-admin quota set --quota-scope=user --uid={uid} --tenant={tenant} --max_buckets=2000".format(
        uid=user_names[0], tenant=tenant)
    max_bucket = utils.exec_shell_cmd(cmd)
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info["user_id"], rand_no=cc)
        container = swiftlib.resource_op({
            "obj": rgw,
            "resource": "put_container",
            "args": [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")

    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
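    # `swift stat` prints "Key: Value" lines (e.g. "Containers: 5"); the
    # replace/split below flattens that output into res_dct so the reported
    # container count can be compared with config.container_count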
    cmd = "swift -A http://{hostname}/auth/1.0 -U '{uid}' -K '{key}' stat".format(
        hostname=hostname, uid=user_info["user_id"], key=user_info["key"])
    swift_cmd = utils.exec_shell_cmd(cmd)
    swift_cmd = swift_cmd.replace(" ", "")
    swift_cmd = swift_cmd.replace("\n", ":")
    li = list(swift_cmd.split(":"))
    res_dct = {li[i]: li[i + 1] for i in range(0, len(li) - 1, 2)}

    if int(res_dct["Containers"]) == config.container_count:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.success_status("test passed")
        sys.exit(0)
    else:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.failed_status("test failed")
        sys.exit(1)
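
# Hedged sketch, not part of the original tests: the `swift stat` flattening
# above could also be written as a small helper. Field names such as
# "Containers" are assumed to match the `swift stat` output format.
# Example: parse_swift_stat("Containers: 5\nObjects: 10")["Containers"] == "5"
def parse_swift_stat(output):
    """Turn 'Key: Value' lines from `swift stat` output into a dict."""
    result = {}
    for line in output.splitlines():
        if ":" in line:
            key, value = line.split(":", 1)
            result[key.strip()] = value.strip()
    return result
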
def create_users(no_of_users_to_create, user_names=None, cluster_name="ceph"):
    """
    This function is to create n users on the cluster

    Parameters:
        no_of_users_to_create(int): users to create
        cluster_name(char): Name of the ceph cluster. defaults to 'ceph'

    Returns:
        all_users_details
    """
    admin_ops = UserMgmt()
    all_users_details = []
    primary = utils.is_cluster_primary()
    user_detail_file = os.path.join(lib_dir, "user_details.json")
    if primary:
        for i in range(no_of_users_to_create):
            if user_names:
                user_details = admin_ops.create_admin_user(
                    user_id=user_names,
                    displayname=user_names,
                    cluster_name=cluster_name,
                )
                all_users_details.append(user_details)
            else:
                user_details = admin_ops.create_admin_user(
                    user_id=names.get_first_name().lower()
                    + random.choice(string.ascii_lowercase)
                    + "."
                    + str(random.randint(1, 1000)),
                    displayname=names.get_full_name().lower(),
                    cluster_name=cluster_name,
                )
                all_users_details.append(user_details)
        with open(user_detail_file, "w") as fout:
            json.dump(all_users_details, fout)
    elif not primary:
        if not os.path.exists(user_detail_file):
            raise FileNotFoundError(
                "user_details.json missing, this is needed in multisite setup"
            )
        with open(user_detail_file, "r") as fout:
            all_users_details = json.load(fout)
        for each_user_info in all_users_details:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            user_info = basic_io_structure.user(
                **{
                    "user_id": each_user_info["user_id"],
                    "access_key": each_user_info["access_key"],
                    "secret_key": each_user_info["secret_key"],
                }
            )
            write_user_info.add_user_info(user_info)
    return all_users_details
def create_users(no_of_users_to_create, cluster_name='ceph'):
    admin_ops = UserMgmt()
    all_users_details = []
    for i in range(no_of_users_to_create):
        user_details = admin_ops.create_admin_user(
            user_id=names.get_first_name().lower() + random.choice(string.ascii_lowercase) + "." + str(
                random.randint(1, 1000)),
            displayname=names.get_full_name().lower(),
            cluster_name=cluster_name)
        all_users_details.append(user_details)
    return all_users_details
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # preparing data
    user_name = resource_op.create_users(no_of_users_to_create=1)[0]["user_id"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_name,
                                                displayname=user_name)
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_name)

    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    port = utils.get_radosgw_port_no()

    ip_and_port = f"{ip}:{port}"
    s3_auth.do_auth(tenant_user_info, ip_and_port)

    bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0)

    # Create a bucket
    s3cmd_reusable.create_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} created")

    # Upload file to bucket
    uploaded_file_info = s3cmd_reusable.upload_file(
        bucket_name, test_data_path=TEST_DATA_PATH)
    uploaded_file = uploaded_file_info["name"]
    log.info(f"Uploaded file {uploaded_file} to bucket {bucket_name}")

    # Delete file from bucket
    s3cmd_reusable.delete_file(bucket_name, uploaded_file)
    log.info(f"Deleted file {uploaded_file} from bucket {bucket_name}")

    # Delete bucket
    s3cmd_reusable.delete_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} deleted")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def create_tenant_users(no_of_users_to_create, tenant_name, cluster_name='ceph'):
    """
        This function is to create n users with tenant on the cluster 

        Parameters:
            no_of_users_to_create(int): users to create with tenant
            cluster_name(char): Name of the ceph cluster. defaults to 'ceph'

        Returns:
            all_users_details 
    """
    admin_ops = UserMgmt()
    all_users_details = []
    primary = utils.is_cluster_primary()
    user_detail_file = os.path.join(lib_dir, 'user_details.json')
    if primary:
        for i in range(no_of_users_to_create):
            user_details = admin_ops.create_tenant_user(
                user_id=names.get_first_name().lower() + random.choice(string.ascii_lowercase) + "." + str(
                    random.randint(1, 1000)),
                displayname=names.get_full_name().lower(),
                cluster_name=cluster_name,
                tenant_name=tenant_name)
            all_users_details.append(user_details)
        with open(user_detail_file, 'w') as fout:
            json.dump(all_users_details, fout)
    elif not primary:
        if not os.path.exists(user_detail_file):
            raise FileNotFoundError("user_details.json missing, this is needed in multisite setup")
        with open(user_detail_file, 'r') as fout:
            all_users_details = json.load(fout)
        log.info('dump user_info into io_info.yaml')
        for each_user_info in all_users_details:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            user_info = basic_io_structure.user(**{'user_id': each_user_info['user_id'],
                                                   'access_key': each_user_info['access_key'],
                                                   'secret_key': each_user_info['secret_key']})
            write_user_info.add_user_info(dict(user_info, **tenant_info.tenant(each_user_info['tenant'])))
    return all_users_details
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    umgmt = UserMgmt()

    try:
        test_info.started_info()

        # preparing data

        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'

        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant,
            user_id=user_names[0],
            displayname=user_names[0],
            cluster_name=config.cluster_name)

        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)

        auth = Auth(user_info)

        rgw = auth.do_auth()

        for cc in range(config.container_count):

            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)

            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })

            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")

            for oc in range(config.objects_count):

                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)

                log.info('object name: %s' % swift_object_name)

                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)

                log.info('object path: %s' % object_path)

                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])

                data_info = manage_data.io_generator(object_path, object_size)

                # upload object

                if data_info is False:
                    TestExecError("data creation failed")

                log.info('uploading object: %s' % object_path)

                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # download object

                swift_object_download_fname = swift_object_name + ".download"

                log.info('download object name: %s' %
                         swift_object_download_fname)

                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)

                log.info('download object path: %s' %
                         swift_object_download_path)

                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)

                with open(swift_object_download_path, 'w') as fp:
                    fp.write(swift_object_downloaded[1])

                # modify and re-upload

                log.info('appending new message to test_data')

                message_to_append = 'adding new msg after download'

                fp = open(swift_object_download_path, 'a+')
                fp.write(message_to_append)
                fp.close()

                with open(swift_object_download_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # delete object

                log.info('deleting swift object')

                rgw.delete_object(container_name, swift_object_name)

            # delete container

            log.info('deleting swift container')

            rgw.delete_container(container_name)

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    # create pool
    pool_name = '.rgw.buckets.special'
    pg_num = '8'
    pgp_num = '8'
    pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
        pool_name, pg_num, pgp_num)
    pool_create_exec = utils.exec_shell_cmd(pool_create)
    if pool_create_exec is False:
        raise TestExecError("Pool creation failed")
    # create realm
    realm_name = 'buz-tickets'
    log.info('creating realm name')
    realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s' % realm_name
    realm_create_exec = utils.exec_shell_cmd(realm_create)
    if realm_create_exec is False:
        raise TestExecError("cmd execution failed")
    # sample output of create realm
    """
    {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
    """
    log.info('modify zonegroup ')
    modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master' % realm_name
    modify_exec = utils.exec_shell_cmd(modify)
    if modify_exec is False:
        raise TestExecError("cmd execution failed")
    # get the zonegroup
    zonegroup_file = 'zonegroup.json'
    get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file
    get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
    if get_zonegroup_exec is False:
        raise TestExecError("cmd execution failed")
    add_to_placement_targets = {"name": "special-placement", "tags": []}
    fp = open(zonegroup_file, 'r')
    zonegroup_txt = fp.read()
    fp.close()
    log.info('got zonegroup info: \n%s' % zonegroup_txt)
    zonegroup = json.loads(zonegroup_txt)
    log.info('adding placement targets')
    zonegroup['placement_targets'].append(add_to_placement_targets)
    with open(zonegroup_file, 'w') as fp:
        json.dump(zonegroup, fp)
    zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
    zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
    if zonegroup_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone group update completed')
    log.info('getting zone file')
    # get zone
    log.info('getting zone info')
    zone_file = 'zone.json'
    get_zone = 'sudo radosgw-admin zone --rgw-zone=default  get > zone.json'
    get_zone_exec = utils.exec_shell_cmd(get_zone)
    if get_zone_exec is False:
        raise TestExecError("cmd execution failed")
    fp = open(zone_file, 'r')
    zone_info = fp.read()
    fp.close()
    log.info('zone_info :\n%s' % zone_info)
    zone_info_cleaned = json.loads(zone_info)
    special_placement_info = {
        "key": "special-placement",
        "val": {
            "index_pool": ".rgw.buckets.index",
            "data_pool": ".rgw.buckets.special",
            "data_extra_pool": ".rgw.buckets.extra"
        }
    }
    log.info('adding  special placement info')
    zone_info_cleaned['placement_pools'].append(special_placement_info)
    with open(zone_file, 'w+') as fp:
        json.dump(zone_info_cleaned, fp)
    zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
    zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
    if zone_file_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone info updated ')
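    # commit the period so the realm/zonegroup/zone changes take effect,
    # then restart RGW to pick up the new placement target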
    zone_group_update_set = 'radosgw-admin period update --commit'
    zone_group_update_set_exec = utils.exec_shell_cmd(zone_group_update_set)
    log.info(zone_group_update_set_exec)
    restarted = rgw_service.restart()
    if restarted is False:
        raise TestExecError("service restart failed")
    if config.rgw_client == 'rgw':
        log.info('client type is rgw')
        rgw_user_info = s3_swift_lib.create_users(1)
        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()
        # create bucket
        bucket_name = utils.gen_bucket_name_from_userid(
            rgw_user_info['user_id'], 0)
        bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
        # create object
        s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                rgw_user_info)
    if config.rgw_client == 'swift':
        log.info('client type is swift')
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        umgmt = UserMgmt()
        umgmt.create_tenant_user(tenant_name=tenant,
                                 user_id=user_names[0],
                                 displayname=user_names[0])
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0])
        auth = Auth(user_info)
        rgw = auth.do_auth()
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=0)
        container = s3_swift_lib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")

        swift_object_name = utils.gen_s3_object_name(
            '%s.container.%s' % (user_names[0], 0), 0)
        log.info('object name: %s' % swift_object_name)
        object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
        log.info('object path: %s' % object_path)
        object_size = utils.get_file_size(config.objects_size_range['min'],
                                          config.objects_size_range['max'])
        data_info = manage_data.io_generator(object_path, object_size)
        # upload object
        if data_info is False:
            TestExecError("data creation failed")
        log.info('uploading object: %s' % object_path)
        with open(object_path, 'r') as fp:
            rgw.put_object(container_name,
                           swift_object_name,
                           contents=fp.read(),
                           content_type='text/plain')
def test_exec(config):
    test_info = AddTestInfo("test swift user key gen")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    try:
        test_info.started_info()
        # preparing data
        user_names = ["tuffy", "scooby", "max"]
        tenant = "tenant"
        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant,
            user_id=user_names[0],
            displayname=user_names[0],
            cluster_name=config.cluster_name,
        )
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    "%s.container.%s" % (user_names[0], cc), oc)
                log.info("object name: %s" % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info("object path: %s" % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range["min"],
                    config.objects_size_range["max"])
                data_info = manage_data.io_generator(object_path, object_size)
                # upload object
                if data_info is False:
                    TestExecError("data creation failed")
                log.info("uploading object: %s" % object_path)
                with open(object_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info("download object name: %s" %
                         swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)
                log.info("download object path: %s" %
                         swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)
                with open(swift_object_download_path, "w") as fp:
                    fp.write(swift_object_downloaded[1])
                # modify and re-upload
                log.info("appending new message to test_data")
                message_to_append = "adding new msg after download"
                fp = open(swift_object_download_path, "a+")
                fp.write(message_to_append)
                fp.close()
                with open(swift_object_download_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # delete object
                log.info("deleting swift object")
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info("deleting swift container")
            rgw.delete_container(container_name)
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # preparing data
    user_name = resource_op.create_users(no_of_users_to_create=1)[0]["user_id"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_name,
                                                displayname=user_name)
    umgmt.create_subuser(tenant_name=tenant, user_id=user_name)

    ip_and_port = s3cmd_reusable.get_rgw_ip_and_port()
    s3_auth.do_auth(tenant_user_info, ip_and_port)

    bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0)

    # Create a bucket
    s3cmd_reusable.create_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} created")

    # Upload a 2GB file to bucket
    uploaded_file_info = s3cmd_reusable.upload_file(
        bucket_name, file_size=2147483648, test_data_path=TEST_DATA_PATH)
    uploaded_file = uploaded_file_info["name"]
    uploaded_file_md5 = uploaded_file_info["md5"]
    log.info(f"Uploaded file {uploaded_file} to bucket {bucket_name}")

    if config.gc_verification is True:
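        # lower rgw_gc_obj_min_wait to 5 seconds so shadow/tail objects of the
        # large upload enter the gc list quickly; after `gc process` runs, the
        # object must still download without a 404 NoSuchKey error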
        log.info("making changes to ceph.conf")
        config.rgw_gc_obj_min_wait = 5
        ceph_conf.set_to_ceph_conf(
            "global",
            ConfigOpts.rgw_gc_obj_min_wait,
            str(config.rgw_gc_obj_min_wait),
        )
        log.info("trying to restart services")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
        log.info(
            "download large object again to make gc list with shadow entries")

        downloaded_file1 = s3cmd_reusable.download_file(
            bucket_name,
            uploaded_file,
            local_file_name="download1.img",
            test_data_path=TEST_DATA_PATH,
        )
        time.sleep(5)
        downloaded_file1_md5 = utils.get_md5(downloaded_file1)
        assert uploaded_file_md5 == downloaded_file1_md5
        gc_list_output = json.loads(
            utils.exec_shell_cmd("radosgw-admin gc list --include-all"))

        log.info(gc_list_output)

        if gc_list_output:
            log.info(
                "Shadow obj found after setting rgw_gc_obj_min_wait to 5 sec")
            utils.exec_shell_cmd("radosgw-admin gc process --include-all")
            log.info(
                "Object download should not error out in 404 NoSuchKey error")
            downloaded_file2 = s3cmd_reusable.download_file(
                bucket_name,
                uploaded_file,
                local_file_name="download2.img",
                test_data_path=TEST_DATA_PATH,
            )
            downloaded_file2_md5 = utils.get_md5(downloaded_file2)
            assert uploaded_file_md5 == downloaded_file2_md5

    # Delete file from bucket
    s3cmd_reusable.delete_file(bucket_name, uploaded_file)
    log.info(f"Deleted file {uploaded_file} from bucket {bucket_name}")

    # Delete bucket
    s3cmd_reusable.delete_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} deleted")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    test_info = AddTestInfo("storage_policy for %s" % config.rgw_client)
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    try:
        # create pool
        pool_name = ".rgw.buckets.special"
        pg_num = "8"
        pgp_num = "8"
        pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
            pool_name,
            pg_num,
            pgp_num,
        )
        pool_create_exec = utils.exec_shell_cmd(pool_create)
        if pool_create_exec is False:
            raise TestExecError("Pool creation failed")
        # create realm
        realm_name = "buz-tickets"
        log.info("creating realm name")
        realm_create = (
            "sudo radosgw-admin realm create --rgw-realm=%s --default" % realm_name
        )
        realm_create_exec = utils.exec_shell_cmd(realm_create)
        if realm_create_exec is False:
            raise TestExecError("cmd execution failed")
        # sample output of create realm
        """
        {
            "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
            "name": "buz-tickets",
            "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
            "epoch": 1
        }
        """
        log.info("modify zonegroup ")
        modify = (
            "sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default"
            % realm_name
        )
        modify_exec = utils.exec_shell_cmd(modify)
        if modify_exec is False:
            raise TestExecError("cmd execution failed")
        # get the zonegroup
        zonegroup_file = "zonegroup.json"
        get_zonegroup = (
            "sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s"
            % zonegroup_file
        )
        get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
        if get_zonegroup_exec is False:
            raise TestExecError("cmd execution failed")
        add_to_placement_targets = {"name": "special-placement", "tags": []}
        fp = open(zonegroup_file, "r")
        zonegroup_txt = fp.read()
        fp.close()
        log.info("got zonegroup info: \n%s" % zonegroup_txt)
        zonegroup = json.loads(zonegroup_txt)
        log.info("adding placement targets")
        zonegroup["placement_targets"].append(add_to_placement_targets)
        with open(zonegroup_file, "w") as fp:
            json.dump(zonegroup, fp)
        zonegroup_set = "sudo radosgw-admin zonegroup set < %s" % zonegroup_file
        zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
        if zonegroup_set_exec is False:
            raise TestExecError("cmd execution failed")
        log.info("zone group update completed")
        log.info("getting zone file")
        # get zone
        log.info("getting zone info")
        zone_file = "zone.json"
        get_zone = "sudo radosgw-admin zone --rgw-zone=default  get > zone.json"
        get_zone_exec = utils.exec_shell_cmd(get_zone)
        if get_zone_exec is False:
            raise TestExecError("cmd execution failed")
        fp = open(zone_file, "r")
        zone_info = fp.read()
        fp.close()
        log.info("zone_info :\n%s" % zone_info)
        zone_info_cleaned = json.loads(zone_info)
        special_placement_info = {
            "key": "special-placement",
            "val": {
                "index_pool": ".rgw.buckets.index",
                "data_pool": ".rgw.buckets.special",
                "data_extra_pool": ".rgw.buckets.extra",
            },
        }
        log.info("adding  special placement info")
        zone_info_cleaned["placement_pools"].append(special_placement_info)
        print(zone_info_cleaned)
        with open(zone_file, "w+") as fp:
            json.dump(zone_info_cleaned, fp)
        zone_file_set = "sudo radosgw-admin zone set < %s" % zone_file
        zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
        if zone_file_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info("zone info updated ")
        restarted = rgw_service.restart()
        if restarted is False:
            raise TestExecError("service restart failed")
        if config.rgw_client == "rgw":
            log.info("client type is rgw")
            rgw_user_info = s3_swift_lib.create_users(1)
            auth = Auth(rgw_user_info)
            rgw_conn = auth.do_auth()
            # create bucket
            bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info["user_id"], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(
                s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info
            )

        if config.rgw_client == "swift":
            log.info("client type is swift")

            user_names = ["tuffy", "scooby", "max"]
            tenant = "tenant"

            umgmt = UserMgmt()
            umgmt.create_tenant_user(
                tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]
            )

            user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])

            auth = Auth(user_info)
            rgw = auth.do_auth()
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=0
            )
            container = s3_swift_lib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield"
                )
            swift_object_name = utils.gen_s3_object_name(
                "%s.container.%s" % (user_names[0], 0), 0
            )
            log.info("object name: %s" % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info("object path: %s" % object_path)
            object_size = utils.get_file_size(
                config.objects_size_range["min"], config.objects_size_range["max"]
            )
            data_info = manage_data.io_generator(object_path, object_size)
            # upload object
            if data_info is False:
                TestExecError("data creation failed")
            log.info("uploading object: %s" % object_path)
            with open(object_path, "r") as fp:
                rgw.put_object(
                    container_name,
                    swift_object_name,
                    contents=fp.read(),
                    content_type="text/plain",
                )
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    umgmt = UserMgmt()

    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
    log.info(hostname)

    # preparing data
    admin_api_user = "******" + randomString()
    log.info(admin_api_user)
    user_info = umgmt.create_rest_admin_user(user_id=admin_api_user,
                                             displayname=admin_api_user)

    rgw = RGWAdmin(access_key=user_info['access_key'],
                   secret_key=user_info['secret_key'],
                   server=hostname,
                   secure=False,
                   verify=False)

    api_user = "******" + randomString()
    log.info(api_user)
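    # each iteration exercises the RGW admin REST API through the RGWAdmin
    # client (create, modify, remove user) and cross-checks the result
    # against radosgw-admin CLI output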
    for uc in range(config.user_count):
        # Create user
        data = rgw.create_user(uid=api_user,
                               display_name=api_user,
                               email=api_user + '@abc.xyz')
        log.info("User created successfully")
        log.info(data)
        log.info('verification starts')
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" %
                                  api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v = verify_user(data, json_doc)
        if v is False:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for create operation completed")

        # Update user
        data = rgw.modify_user(uid=api_user,
                               display_name=api_user + "_11",
                               email=api_user + '*****@*****.**')
        log.info("User Updated successfully")
        log.info(data)
        log.info('verification starts')
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" %
                                  api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v = verify_user(data, json_doc)
        if v is False:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for Update operation completed")

        # Delete user
        data = rgw.remove_user(uid=api_user, purge_data=False)
        log.info(data)
        log.info("User removed")
        op = utils.exec_shell_cmd("radosgw-admin user list")
        json_doc = json.loads(op)
        if api_user in json_doc:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for Delete operation completed")
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # preparing data
    user_names = ["tom", "ram", "sam"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(
        tenant_name=tenant, user_id=user_names[1], displayname=user_names[1]
    )
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[1])
    auth = Auth(user_info, config.ssl)
    rgw = auth.do_auth()

    container_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=0)
    container = swiftlib.resource_op(
        {"obj": rgw, "resource": "put_container", "args": [container_name]}
    )
    if container is False:
        raise TestExecError("Resource execution failed: container creation faield")
    for oc, size in list(config.mapped_sizes.items()):
        # upload objects to the container
        swift_object_name = fill_container(
            rgw, container_name, user_names[1], oc, 0, size
        )
    # delete all uploaded objects
    log.info("deleting all swift objects")
    auth_response = rgw.get_auth()
    token = auth_response[1]
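    # Swift bulk delete: a request to the ?bulk-delete endpoint with a
    # newline-separated list of container/object names in the body; here
    # test.txt carries the container name whose contents are to be removed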
    # test.txt file should contain container_name
    with open("test.txt", "w") as f:
        f.write(container_name)
    ip_and_port = rgw.authurl.split("/")[2]
    proto = "https" if config.ssl else "http"
    url = f"{proto}://{ip_and_port}/swift/v1/?bulk-delete"
    test_file = open("test.txt", "r")
    headers = {
        "Accept": "application/json",
        "Content-Type": "text/plain",
        "X-Auth-Token": token,
    }
    response = requests.delete(
        url, headers=headers, verify=False, files={"form_field_name": test_file}
    )
    if response.status_code == 200:
        log.info("Bulk delete succeeded")
    else:
        raise TestExecError(
            "Bulk delete failed with status code: %d" % response.status_code
        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    log.info(type(ceph_conf))
    rgw_service = RGWService()
    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    auth = Auth(user_info)
    rgw = auth.do_auth()

    for cc in range(config.container_count):
        if config.version_enable is True:
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf("global",
                                       ConfigOpts.rgw_swift_versioning_enabled,
                                       "True")
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
            container_name_old = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "old")
            log.info(container_name_old)
            container = swiftlib.resource_op({
                "obj":
                rgw,
                "resource":
                "put_container",
                "kwargs":
                dict(container=container_name_old),
            })
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "new")
            log.info(container_name)
            container = swiftlib.resource_op({
                "obj":
                rgw,
                "resource":
                "put_container",
                "args": [
                    container_name,
                    {
                        "X-Versions-Location": container_name_old
                    },
                ],
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            ls = []
            swift_object_name = ""
            for version_count in range(config.version_count):
                for oc, size in list(config.mapped_sizes.items()):
                    swift_object_name = fill_container(rgw, container_name,
                                                       user_names[0], oc, cc,
                                                       size)
                ls = rgw.get_container(container_name_old)
                ls = list(ls)
            if config.copy_version_object is True:
                old_obj_name = ls[1][config.version_count - 2]["name"]
                log.info(old_obj_name)
                container = swiftlib.resource_op({
                    "obj":
                    rgw,
                    "resource":
                    "copy_object",
                    "kwargs":
                    dict(
                        container=container_name_old,
                        obj=old_obj_name,
                        destination=container_name + "/" + swift_object_name,
                    ),
                })
                if container is False:
                    raise TestExecError("Resource execution failed")
                log.info("Successfully copied item")
            else:
                current_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0], tenant=tenant, bucket=container_name)
                num_obj_current = utils.exec_shell_cmd(current_count)
                num_obj_current = json.loads(num_obj_current)
                num_obj_current = (num_obj_current[0].get("usage").get(
                    "rgw.main").get("num_objects"))
                old_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0],
                    tenant=tenant,
                    bucket=container_name_old)
                num_obj_old = utils.exec_shell_cmd(old_count)
                num_obj_old = json.loads(num_obj_old)
                num_obj_old = (num_obj_old[0].get("usage").get("rgw.main").get(
                    "num_objects"))
                version_count_from_config = (
                    config.objects_count *
                    config.version_count) - config.objects_count
                if (num_obj_current == config.objects_count) and (
                        num_obj_old == version_count_from_config):
                    log.info("objects and versioned obbjects are correct")
                else:
                    test_info.failed_status("test failed")

        elif config.object_expire is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    header={"X-Delete-After": 5},
                )
                time.sleep(7)
                container_exists = swiftlib.resource_op({
                    "obj":
                    rgw,
                    "resource":
                    "get_object",
                    "args": [container_name, swift_object_name],
                })
                log.info(container_exists)
                if container_exists:
                    msg = "test failed as the objects are still present"
                    test_info.failed_status(msg)
                    raise TestExecError(msg)

        elif config.large_object_upload is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    multipart=True,
                    split_size=config.split_size,
                )
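                # large-object (DLO) upload: the segments were written under a
                # prefix in container_name; a zero-byte manifest object with
                # X-Object-Manifest pointing at that prefix is created in the
                # new container so GETs return the concatenated segments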
                container_name_new = utils.gen_bucket_name_from_userid(
                    user_info["user_id"], rand_no=str(cc) + "New")
                container = swiftlib.resource_op({
                    "obj":
                    rgw,
                    "resource":
                    "put_container",
                    "kwargs":
                    dict(container=container_name_new),
                })
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed")
                container = swiftlib.resource_op({
                    "obj":
                    rgw,
                    "resource":
                    "put_object",
                    "kwargs":
                    dict(
                        container=container_name_new,
                        obj=swift_object_name,
                        contents=None,
                        headers={
                            "X-Object-Manifest":
                            container_name + "/" + swift_object_name + "/"
                        },
                    ),
                })
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed")
                if config.large_object_download is True:
                    swift_old_object_path = os.path.join(
                        TEST_DATA_PATH, swift_object_name)
                    swift_object_download_fname = swift_object_name + ".download"
                    log.info("download object name: %s" %
                             swift_object_download_fname)
                    swift_object_download_path = os.path.join(
                        TEST_DATA_PATH, swift_object_download_fname)
                    log.info("download object path: %s" %
                             swift_object_download_path)
                    swift_object_downloaded = rgw.get_object(
                        container_name_new, swift_object_name)
                    with open(swift_object_download_path, "wb") as fp:
                        fp.write(swift_object_downloaded[1])
                    old_object = utils.get_md5(swift_old_object_path)
                    downloaded_obj = utils.get_md5(swift_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" % old_object)
                    log.info("s3_object_uploaded_md5: %s" % downloaded_obj)
                    if str(old_object) == str(downloaded_obj):
                        log.info("md5 match")
                        utils.exec_shell_cmd("rm -rf %s" %
                                             swift_object_download_path)
                    else:
                        raise TestExecError("md5 mismatch")

        else:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(rgw, container_name,
                                                   user_names[0], oc, cc, size)
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info("download object name: %s" %
                         swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)
                log.info("download object path: %s" %
                         swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)
                with open(swift_object_download_path, "w") as fp:
                    fp.write(str(swift_object_downloaded[1]))
                # modify and re-upload
                log.info("appending new message to test_data")
                message_to_append = "adding new msg after download"
                fp = open(swift_object_download_path, "a+")
                fp.write(message_to_append)
                fp.close()
                with open(swift_object_download_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # delete object
                log.info("deleting swift object")
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info("deleting swift container")
            rgw.delete_container(container_name)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    reusable.remove_user(tenant_user_info, tenant=tenant)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()

    # preparing data
    user_names = ['tuffy', 'scooby', 'max']
    tenant = 'tenant'
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=cc)
        container = swiftlib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name(
                '%s.container.%s' % (user_names[0], cc), oc)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            data_info = manage_data.io_generator(object_path, size)
            # upload object
            if data_info is False:
                TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name,
                               swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
            # download object
            swift_object_download_fname = swift_object_name + ".download"
            log.info('download object name: %s' % swift_object_download_fname)
            swift_object_download_path = os.path.join(
                TEST_DATA_PATH, swift_object_download_fname)
            log.info('download object path: %s' % swift_object_download_path)
            swift_object_downloaded = rgw.get_object(container_name,
                                                     swift_object_name)
            with open(swift_object_download_path, 'w') as fp:
                fp.write(str(swift_object_downloaded[1]))
            # modify and re-upload
            log.info('appending new message to test_data')
            message_to_append = 'adding new msg after download'
            fp = open(swift_object_download_path, 'a+')
            fp.write(message_to_append)
            fp.close()
            with open(swift_object_download_path, 'r') as fp:
                rgw.put_object(container_name,
                               swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
            # delete object
            log.info('deleting swift object')
            rgw.delete_object(container_name, swift_object_name)
        # delete container
        log.info('deleting swift container')
        rgw.delete_container(container_name)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")