Example #1
def upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, append_data=False, append_msg=None):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = config.obj_size
    if append_data is True:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size, op='append',
                                             **{'message': '\n%s' % append_msg})
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': user_info['access_key']}, **data_info)
    s3_obj = s3lib.resource_op({'obj': bucket,
                                'resource': 'Object',
                                'args': [s3_object_name],
                                })
    object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                'resource': 'upload_file',
                                                'args': [s3_object_path],
                                                'extra_info': upload_info})
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
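A minimal usage sketch for the helper above, assuming a boto3 S3 Bucket resource and stand-ins for the suite's config and user_info fixtures; the endpoint, credentials, bucket name, and object size are hypothetical:

import boto3

# Hypothetical endpoint and credentials; the real suite builds the resource via its Auth helper.
s3 = boto3.resource(
    "s3",
    endpoint_url="http://rgw-node:8080",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)
bucket = s3.Bucket("test-bucket")

class FakeConfig:  # stand-in for the suite's config object
    obj_size = 1024  # bytes

user_info = {"access_key": "ACCESS_KEY"}
upload_object("key-0", bucket, "/tmp/test_data", FakeConfig(), user_info)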
Example #2
def upload_object_with_tagging(
    s3_object_name,
    bucket,
    TEST_DATA_PATH,
    config,
    user_info,
    obj_tag,
    append_data=False,
    append_msg=None,
):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = config.obj_size
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op="append",
            **{"message": "\n%s" % append_msg},
        )
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info("uploading s3 object with object tagging enabled: %s" %
             s3_object_path)
    # read back the generated file; passing the path string as Body would upload the literal path
    with open(s3_object_path, "rb") as fp:
        bucket.put_object(Key=s3_object_name, Body=fp.read(), Tagging=obj_tag)
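Worth noting: boto3's put_object expects Tagging as a URL-encoded query string of the form "key1=value1&key2=value2". A small sketch of building such a string for obj_tag (the tag keys and values are made up):

from urllib.parse import urlencode

# Hypothetical tags; Tagging takes the "k1=v1&k2=v2" form.
obj_tag = urlencode({"project": "rgw-tests", "tier": "hot"})
# obj_tag == "project=rgw-tests&tier=hot"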
Example #3
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # preparing data
    user_names = ['tuffy', 'scooby', 'max']
    tenant1 = 'tenant'
    cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s' % (
        user_names[0], user_names[0], tenant1)
    out = utils.exec_shell_cmd(cmd)
    if out is False:
        raise TestExecError("RGW User creation error")
    log.info('output: %s' % out)
    v1_as_json = json.loads(out)
    log.info('created user_id: %s' % v1_as_json['user_id'])
    cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full' % (
        tenant1, user_names[0], user_names[0], tenant1)
    out2 = utils.exec_shell_cmd(cmd2)
    if out2 is False:
        raise TestExecError("sub-user creation error")
    v2_as_json = json.loads(out2)
    log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])
    cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret' % (
        user_names[0], user_names[0], tenant1, tenant1)
    out3 = utils.exec_shell_cmd(cmd3)
    if out3 is False:
        raise TestExecError("secret_key gen error")
    v3_as_json = json.loads(out3)
    log.info('created subuser: %s\nsecret_key generated: %s' % (
        v3_as_json['swift_keys'][0]['user'], v3_as_json['swift_keys'][0]['secret_key']))
    user_info = {'user_id': v3_as_json['swift_keys'][0]['user'],
                 'key': v3_as_json['swift_keys'][0]['secret_key']}
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=cc)
        container = swiftlib.resource_op({'obj': rgw,
                                          'resource': 'put_container',
                                          'args': [container_name]})
        if container is False:
            raise TestExecError("Resource execution failed: container creation failed")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name('%s.container.%s' % (user_names[0], cc), oc)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            data_info = manage_data.io_generator(object_path, size)
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name, swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #4
def upload_file(bucket_name,
                file_name=None,
                file_size=1024,
                test_data_path=None):
    """
    Uploads file to the bucket
    Args:
        bucket_name(str): Name of the bucket
        file_name(str): Name of the file to be uploaded
        file_size(int): Size of the file to be uploaded, defaults to 1024
        test_data_path(str): Local test data path
    Returns: File information
    """
    # If no file_name passed, it generates file_name and returns in file information
    # It is to have support for file which is already created
    if file_name is None:
        file_name = utils.gen_s3_object_name(bucket_name, 1)

    local_file_path = os.path.join(test_data_path, file_name)
    file_info = io_generator(local_file_path, file_size)
    file_info["name"] = file_name

    upload_file_method = S3CMD(operation="put")
    remote_s3_path = f"s3://{bucket_name}/{file_name}"
    command = upload_file_method.command(
        params=[local_file_path, remote_s3_path])
    try:
        upload_file_response = exec_shell_cmd(command)
        log.debug(f"Response for upload file command: {upload_file_response}")
    except Exception as e:
        raise S3CommandExecError(message=str(e))
    assert "100%" in str(upload_file_response), "upload file operation did not succeed"
    return file_info
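A hedged usage sketch for upload_file, assuming s3cmd is configured for the target RGW and that io_generator returns a dict containing at least an "md5" key (as the other examples on this page suggest); the bucket name and data path are hypothetical:

# The bucket must already exist; s3cmd configuration is handled elsewhere in the suite.
file_info = upload_file("test-bucket",
                        file_size=2048,
                        test_data_path="/tmp/test_data")
log.info("uploaded %s with md5 %s" % (file_info["name"], file_info.get("md5")))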
Example #5
def fill_container(rgw, container_name, user_id, oc, cc, size):
    """
    Uploads objects to the container
    Args:
        rgw(object): RGW object
        container_name(str): Container name
        user_id(str): User ID
        oc(int): object count
        cc(int): container count
        size(int): Object size
    Returns: Swift object name
    """
    swift_object_name = utils.gen_s3_object_name("%s.container.%s" % (user_id, cc), oc)
    log.info("object name: %s" % swift_object_name)
    object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
    log.info("object path: %s" % object_path)
    data_info = manage_data.io_generator(object_path, size)
    # upload object
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info("uploading object: %s" % object_path)
    with open(object_path, "r") as fp:
        rgw.put_object(
            container_name,
            swift_object_name,
            contents=fp.read(),
            content_type="text/plain",
        )
    return swift_object_name
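For context, rgw here behaves like a python-swiftclient Connection (put_object with contents and content_type). A hedged sketch of wiring one up directly, with a made-up auth URL and credentials (the suite normally obtains this from its Auth helper):

import swiftclient

# Hypothetical Swift auth endpoint and subuser credentials.
rgw = swiftclient.Connection(
    authurl="http://rgw-node:8080/auth/1.0",
    user="tenant$tuffy:swift",
    key="SWIFT_SECRET_KEY",
)
swift_object_name = fill_container(rgw, "container-0", "tuffy", oc=0, cc=0, size=1024)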
Example #6
def upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                         config.objects_size_range['max'])
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': user_info['access_key']}, **data_info)
    object_uploaded_status = s3lib.resource_op({
        'obj': bucket,
        'resource': 'upload_file',
        'args': [s3_object_path, s3_object_name],
        'extra_info': upload_info
    })
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
Example #7
def upload_object(
    s3_object_name,
    bucket,
    TEST_DATA_PATH,
    config,
    user_info,
    append_data=False,
    append_msg=None,
):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = config.obj_size
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op="append",
            **{"message": "\n%s" % append_msg},
        )
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict({"access_key": user_info["access_key"]}, **data_info)
    s3_obj = s3lib.resource_op(
        {
            "obj": bucket,
            "resource": "Object",
            "args": [s3_object_name],
        }
    )
    object_uploaded_status = s3lib.resource_op(
        {
            "obj": s3_obj,
            "resource": "upload_file",
            "args": [s3_object_path],
            "extra_info": upload_info,
        }
    )
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info("object uploaded")
Example #8
def fill_container(
    rgw,
    container_name,
    user_id,
    oc,
    cc,
    size,
    multipart=False,
    split_size=0,
    header=None,
):
    swift_object_name = utils.gen_s3_object_name(
        "%s.container.%s" % (user_id, cc), oc)
    log.info("object name: %s" % swift_object_name)
    object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
    log.info("object path: %s" % object_path)
    data_info = manage_data.io_generator(object_path, size)
    # upload object
    if multipart is True:
        mp_dir = os.path.join(TEST_DATA_PATH, swift_object_name + ".mp.parts")
        log.info(f"mp part dir: {mp_dir}")
        log.info("making multipart object part dir")
        mkdir = utils.exec_shell_cmd("sudo mkdir %s" % mp_dir)
        if mkdir is False:
            raise TestExecError("mkdir failed creating mp_dir_name")
        utils.split_file(object_path, split_size, mp_dir + "/")
        parts_list = sorted(glob.glob(mp_dir + "/" + "*"))
        log.info("parts_list: %s" % parts_list)
        log.info("no of parts: %s" % len(parts_list))
        for each_part in parts_list:
            log.info("trying to upload part: %s" % each_part)
            with open(each_part, "r") as fp:
                etag = rgw.put_object(
                    container_name,
                    swift_object_name + "/" + each_part,
                    contents=fp.read(),
                    content_type="text/plain",
                    headers=header,
                )
        return swift_object_name
    else:
        if data_info is False:
            raise TestExecError("data creation failed")
        log.info("uploading object: %s" % object_path)
        with open(object_path, "r") as fp:
            rgw.put_object(
                container_name,
                swift_object_name,
                contents=fp.read(),
                content_type="text/plain",
                headers=header,
            )
        return swift_object_name
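A usage note for the multipart path above: the object is generated locally, split into chunks with utils.split_file, and each chunk is uploaded as a separate Swift object named <object>/<part>. A hedged call sketch (sizes are hypothetical, and the units of split_size follow whatever utils.split_file expects):

# `rgw` is an authenticated swiftclient Connection, as in the earlier examples.
name = fill_container(
    rgw,
    "container-0",
    "tuffy",
    oc=0,
    cc=0,
    size=10485760,       # hypothetical total object size
    multipart=True,
    split_size=1048576,  # hypothetical chunk size; units per utils.split_file
)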
Example #9
def test_acls_public_read(u1_rgw_conn, u1, u2_rgw_conn, u1_bucket, u2_bucket):
    # test for public_read
    s3_ops = ResourceOps()
    u1_bucket_acl = s3_ops.resource_op(u1_rgw_conn, 'BucketAcl',
                                       u1_bucket.name)
    log.info('setting bucket acl: %s' % ACLS[1])
    u1_bucket_acl.put(ACL=ACLS[1])
    # access bucket_info of u1_bucket from u2
    log.info('u1 bucket info')
    u1_bucket_info = s3_ops.resource_op(u1_rgw_conn, 'Bucket', u1_bucket.name)
    log.info(u1_bucket_info.name)
    log.info(u1_bucket_info.creation_date)
    log.info(u1_bucket_info.load())
    s3_object_name = utils.gen_s3_object_name(u1_bucket.name, rand_no=0)
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                         config.objects_size_range['max'])
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': u1['access_key']}, **data_info)
    object_uploaded_status = s3_ops.resource_op(u1_bucket, 'upload_file',
                                                s3_object_path, s3_object_name,
                                                **upload_info)
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
    log.info(
        'trying to access u1 bucket and its objects info from u2 after setting u1 bucket acls to public read'
    )
    access_u1_bucket_from_u2 = s3_ops.resource_op(u2_rgw_conn, 'Bucket',
                                                  u1_bucket.name)
    try:
        all_objects = access_u1_bucket_from_u2.objects.all()
        for obj in all_objects:
            log.info('obj name: %s' % obj.key)
    except Exception as e:
        msg = 'access given to read, but still failing to read'
        raise TestExecError(msg)
    log.info('trying to delete u1_bucket from u2')
    try:
        u1_bucket_deleted_response = access_u1_bucket_from_u2.delete()
        response = HttpResponseParser(u1_bucket_deleted_response)
        log.info(response)
    except Exception as e:
        msg = 'access not given to write, hence failing'
        log.info(msg)
    else:
        raise TestExecError("access not given, but bucket still got deleted")
Example #10
def upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = utils.get_file_size(
        config.objects_size_range["min"], config.objects_size_range["max"]
    )
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict({"access_key": user_info["access_key"]}, **data_info)
    object_uploaded_status = s3lib.resource_op(
        {
            "obj": bucket,
            "resource": "upload_file",
            "args": [s3_object_path, s3_object_name],
            "extra_info": upload_info,
        }
    )
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info("object uploaded")
Example #11
    def write(self, io_type, fname, size=0):
        """
        This function is to write IO on the mount point

        Parameters:
            io_type: basedir | subdir | file
            fname(str): file name
            size(int): size

        Returns:
            False if the write fails, otherwise None
        """
        # io_type should be: basedir | subdir | file

        log.info("io_type: %s" % io_type)
        log.info("fname: %s" % fname)
        log.info("size: %s" % size)

        s3_conv = NFS_CONVENTIONS.get(io_type)
        ioinfo = IOInfo()

        path = os.path.abspath(self.mnt_point)
        full_path = os.path.join(path, fname)
        log.info("abs_path: %s" % full_path)
        try:

            if io_type == "basedir" or io_type == "subdir":

                log.info("creating dir, type: %s" % io_type)

                os.makedirs(full_path)

                io_info = {
                    "name": os.path.basename(fname),
                    "type": "dir",
                    "s3_convention": s3_conv,
                    "bucket": "self" if s3_conv == "bucket" else fname.split("/")[0],
                    "md5": None,
                }

                log.info("io_info: %s" % io_info)

                ioinfo.add_io_info(self.rgw_user_info["access_key"], io_info)

            if io_type == "file":

                log.info("io_type is file: %s" % io_type)

                log.info("creating file with size: %s" % size)

                finfo = manage_data.io_generator(full_path, size)

                io_info = {
                    "name": os.path.basename(fname),
                    "type": "file",
                    "s3_convention": s3_conv,
                    "bucket": fname.split("/")[0],
                    "md5": finfo["md5"],
                }

                log.info("io_info: %s" % io_info)

                ioinfo.add_io_info(self.rgw_user_info["access_key"], io_info)

        except Exception as e:
            log.error("Write IO Execution failed")
            log.error(e)
            return False
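Based on the docstring above, a hedged sketch of how this method might be driven, assuming nfs_io is an instance of the surrounding class with mnt_point and rgw_user_info already set by the framework; the directory and file names are hypothetical:

# A base directory maps to a bucket under the NFS-to-S3 naming convention,
# and a file beneath it maps to an object in that bucket.
nfs_io.write("basedir", "bucket1")
nfs_io.write("file", "bucket1/object1", size=1024)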
Example #12
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({'obj': rgw_conn,
                                        'resource': 'Bucket',
                                        'args': [bucket_name_to_create]})
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({'obj': bucket,
                                         'resource': 'create',
                                         'args': None,
                                         'extra_info': {'access_key': each_user['access_key']}})
            if created is False:
                raise TestExecError("Resource execution failed: bucket creation failed")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info('bucket created')
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops['enable_version'] is True:
                log.info('bucket versioning test on bucket: %s' % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                       'resource': 'BucketVersioning',
                                                       'args': [bucket.name]})
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                    'resource': 'status',
                                                    'args': None
                                                    })
                if version_status is None:
                    log.info('bucket versioning still not enabled')
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                           'resource': 'enable',
                                                           'args': None,
                                                           })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info('version enabled')
                    write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                               VERSIONING_STATUS['ENABLED'])

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, s3_object_size in list(config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info('s3 object name: %s' % s3_object_name)
                        log.info('versioning count: %s' % config.version_count)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        original_data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if original_data_info is False:
                            raise TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                            log.info('modifying data: %s' % s3_object_name)
                            modified_data_info = manage_data.io_generator(s3_object_path, s3_object_size,
                                                                          op='append',
                                                                          **{'message': '\nhello for version: %s\n'
                                                                                        % str(vc)})
                            if modified_data_info is False:
                                raise TestExecError("data modification failed")
                            log.info('uploading s3 object: %s' % s3_object_path)
                            upload_info = dict({'access_key': each_user['access_key'],
                                                'versioning_status': VERSIONING_STATUS['ENABLED'],
                                                'version_count_no': vc}, **modified_data_info)
                            s3_obj = s3lib.resource_op({'obj': bucket,
                                                        'resource': 'Object',
                                                        'args': [s3_object_name],
                                                        'extra_info': upload_info, })
                            object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                        'resource': 'upload_file',
                                                                        'args': [modified_data_info['name']],
                                                                        'extra_info': upload_info})
                            if object_uploaded_status is False:
                                raise TestExecError("Resource execution failed: object upload failed")
                            if object_uploaded_status is None:
                                log.info('object uploaded')
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info('current_version_id: %s' % s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{'version_id': s3_obj.version_id,
                                       'md5_local': upload_info['md5'],
                                       'count_no': vc,
                                       'size': upload_info['size']})
                                log.info('key_version_info: %s' % key_version_info)
                                write_key_io_info.add_versioning_info(each_user['access_key'], bucket.name,
                                                                      s3_object_path, key_version_info)
                                created_versions_count += 1
                                log.info('created_versions_count: %s' % created_versions_count)
                                log.info('adding metadata')
                                metadata1 = {"m_data1": "this is the meta1 for this obj"}
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {"m_data2": "this is the meta2 for this obj"}
                                s3_obj.metadata.update(metadata2)
                                log.info('metadata for this object: %s' % s3_obj.metadata)
                                log.info('metadata count for object: %s' % (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError('metadata not set even after adding metadata')
                                versions = bucket.object_versions.filter(Prefix=s3_object_name)
                                created_versions_count_from_s3 = len([v.version_id for v in versions])
                                log.info('created versions count on s3: %s' % created_versions_count_from_s3)
                                if created_versions_count == created_versions_count_from_s3:
                                    log.info('no new versions are created when adding metadata')
                                else:
                                    raise TestExecError("version count mismatch, "
                                                        "possible creation of version on adding metadata")
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                            log.info('downloaded_md5: %s' % s3_object_downloaded_md5)
                            log.info('uploaded_md5: %s' % modified_data_info['md5'])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info('all versions for the object: %s\n' % s3_object_name)
                        versions = bucket.object_versions.filter(Prefix=s3_object_name)
                        for version in versions:
                            log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                        if config.test_ops.get('set_acl', None) is True:
                            s3_obj_acl = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'ObjectAcl',
                                                            'args': [bucket.name, s3_object_name]})
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL='private')
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info('ACLs set')
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info('getting info for version id: %s' % version.version_id)
                                obj = s3lib.resource_op({'obj': rgw_conn,
                                                         'resource': 'Object',
                                                         'args': [bucket.name, s3_object_name]})
                                log.info('obj get details: %s\n' % (obj.get(VersionId=version.version_id)))
                        if config.test_ops['copy_to_version'] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice([v.version_id for v in versions])
                            log.info('version_id_to_copy: %s' % version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                            log.info('current version_id: %s' % s3_obj.version_id)
                            copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name,
                                                                         'Key': s3_object_name,
                                                                         'VersionId': version_id_to_copy})
                            log.info('copy_response: %s' % copy_response)
                            if copy_response is None:
                                raise TestExecError("copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info('current_version_id: %s' % s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info('all versions for the object after the copy operation: %s\n' % s3_object_name)
                            for version in versions:
                                log.info(
                                    'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops['delete_object_versions'] is True:
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'], bucket.name,
                                                                              s3_object_path, version.version_id)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                            log.info('available versions for the object')
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info('key_name: %s --> version_id: %s' % (
                                    version.object_key, version.version_id))
                        if config.test_ops.get('delete_from_extra_user') is True:
                            log.info('trying to delete objects from extra user')
                            s3_obj = s3lib.resource_op({'obj': extra_user_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(
                                                                         VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'],
                                                                              bucket.name,
                                                                              s3_object_path,
                                                                              version.version_id)
                                        raise TestExecError("version got deleted, this should not happen")
                                    else:
                                        log.info('version did not delete, expected behaviour')
                                else:
                                    log.info('version did not delete, expected behaviour')
                        if config.local_file_delete is True:
                            log.info('deleting local file')
                            utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
                if config.test_ops['suspend_version'] is True:
                    log.info('suspending versioning')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info('versioning suspended')
                        write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                   VERSIONING_STATUS['SUSPENDED'])
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info('getting all objects in the bucket')
                    objects = s3lib.resource_op({'obj': bucket,
                                                 'resource': 'objects',
                                                 'args': None})
                    log.info('objects: %s' % objects)
                    all_objects = s3lib.resource_op({'obj': objects,
                                                     'resource': 'all',
                                                     'args': None})
                    log.info('all objects: %s' % all_objects)
                    log.info('all objects2: %s' % bucket.objects.all())
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                        versions = bucket.object_versions.filter(Prefix=obj.key)
                        log.info('displaying all versions of the object')
                        for version in versions:
                            log.info(
                                'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                if config.test_ops.get('suspend_from_extra_user') is True:
                    log.info('suspending versioning from extra user')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({'obj': extra_user_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})

                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info('versioning suspended')
                            write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                       VERSIONING_STATUS['SUSPENDED'])
                            raise TestExecError('version suspended, this should not happen')
                    else:
                        log.info('versioning not suspended, expected behaviour')
            if config.test_ops.get('upload_after_suspend') is True:
                log.info('trying to upload after suspending versioning on bucket')
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[oc] + ".after_version_suspending"
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append",
                                                                     **{
                                                                         'message': '\nhello for non version\n'})
                    if non_version_data_info is False:
                        raise TestExecError("data creation failed")
                    log.info('uploading s3 object: %s' % s3_object_path)
                    upload_info = dict({'access_key': each_user['access_key'],
                                        'versioning_status': 'suspended'}, **non_version_data_info)
                    s3_obj = s3lib.resource_op({'obj': bucket,
                                                'resource': 'Object',
                                                'args': [s3_object_name],
                                                'extra_info': upload_info})
                    object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                'resource': 'upload_file',
                                                                'args': [non_version_data_info['name']],
                                                                'extra_info': upload_info})

                    if object_uploaded_status is False:
                        raise TestExecError("Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info('object uploaded')
                    s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                'resource': 'Object',
                                                'args': [bucket.name, s3_object_name]})
                    log.info('version_id: %s' % s3_obj.version_id)
                    if s3_obj.version_id is None:
                        log.info('Versions are not created after suspending')
                    else:
                        raise TestExecError('Versions are created even after suspending')
                    s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                  'resource': 'download_file',
                                                                  'args': [s3_object_name,
                                                                           s3_object_download_path],
                                                                  })
                    if object_downloaded_status is False:
                        raise TestExecError("Resource execution failed: object download failed")
                    if object_downloaded_status is None:
                        log.info('object downloaded')
                    # checking md5 of the downloaded file
                    s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                    log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                    log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
Example #13
def test_exec(config):
    test_info = AddTestInfo('create m buckets with n objects')
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth_using_client()
            rgw = auth.do_auth()
            bucket_list = []
            buckets = rgw_conn.list_buckets()
            log.info('buckets are %s' % buckets)
            for each_bucket in buckets['Buckets']:
                bucket_list.append(each_bucket['Name'])
            for bucket_name in bucket_list:
                # create 'bucket' resource object
                bucket = rgw.Bucket(bucket_name)
                log.info('In bucket: %s' % bucket_name)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                             config.objects_size_range['max'])
                        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']}, **data_info)
                        # object_uploaded_status = bucket.upload_file(s3_object_path, s3_object_name)
                        object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                    'resource': 'upload_file',
                                                                    'args': [s3_object_path, s3_object_name],
                                                                    'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            # object_downloaded_status = bucket.download_file(s3_object_path, s3_object_name)
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                    if config.test_ops['delete_bucket_object'] is True:
                        log.info('listing all objects in bucket: %s' % bucket.name)
                        # objects = s3_ops.resource_op(bucket, 'objects', None)
                        objects = s3lib.resource_op({'obj': bucket,
                                                     'resource': 'objects',
                                                     'args': None})
                        log.info('objects: %s' % objects)
                        # all_objects = s3_ops.resource_op(objects, 'all')
                        all_objects = s3lib.resource_op({'obj': objects,
                                                         'resource': 'all',
                                                         'args': None})
                        log.info('all objects: %s' % all_objects)
                        for obj in all_objects:
                            log.info('object_name: %s' % obj.key)
                        log.info('deleting all objects in bucket')
                        # objects_deleted = s3_ops.resource_op(objects, 'delete')
                        objects_deleted = s3lib.resource_op({'obj': objects,
                                                             'resource': 'delete',
                                                             'args': None})
                        log.info('objects_deleted: %s' % objects_deleted)
                        if objects_deleted is False:
                            raise TestExecError('Resource execution failed: Object deletion failed')
                        if objects_deleted is not None:
                            response = HttpResponseParser(objects_deleted[0])
                            if response.status_code == 200:
                                log.info('objects deleted ')
                            else:
                                raise TestExecError("objects deletion failed")
                        else:
                            raise TestExecError("objects deletion failed")
                        # wait for object delete info to sync
                        time.sleep(60)
                        log.info('deleting bucket: %s' % bucket.name)
                        # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                        bucket_deleted_status = s3lib.resource_op({'obj': bucket,
                                                                   'resource': 'delete',
                                                                   'args': None})
                        log.info('bucket_deleted_status: %s' % bucket_deleted_status)
                        if bucket_deleted_status is not None:
                            response = HttpResponseParser(bucket_deleted_status)
                            if response.status_code == 204:
                                log.info('bucket deleted ')
                            else:
                                raise TestExecError("bucket deletion failed")
                        else:
                            raise TestExecError("bucket deletion failed")
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #14
def test_exec(config):
    test_info = AddTestInfo("test swift user key gen")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ["tuffy", "scooby", "max"]
        tenant1 = "tenant"
        cmd = (
            'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s'
            % (user_names[0], user_names[0], tenant1, config.cluster_name)
        )
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise TestExecError("RGW User creation error")
        log.info("output: %s" % out)
        v1_as_json = json.loads(out)
        log.info("created user_id: %s" % v1_as_json["user_id"])
        cmd2 = (
            "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s"
            % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name)
        )
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise TestExecError("sub-user creation error")
        v2_as_json = json.loads(out2)
        log.info("created subuser: %s" % v2_as_json["subusers"][0]["id"])
        cmd3 = (
            "radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret "
            "--cluster %s"
            % (user_names[0], user_names[0], tenant1, tenant1, config.cluster_name)
        )
        out3 = utils.exec_shell_cmd(cmd3)
        if out3 is False:
            raise TestExecError("secret_key gen error")
        v3_as_json = json.loads(out3)
        log.info(
            "created subuser: %s\nsecret_key generated: %s"
            % (
                v3_as_json["swift_keys"][0]["user"],
                v3_as_json["swift_keys"][0]["secret_key"],
            )
        )
        user_info = {
            "user_id": v3_as_json["swift_keys"][0]["user"],
            "key": v3_as_json["swift_keys"][0]["secret_key"],
        }
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc
            )
            container = swiftlib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    "%s.container.%s" % (user_names[0], cc), oc
                )
                log.info("object name: %s" % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info("object path: %s" % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range["min"], config.objects_size_range["max"]
                )
                data_info = manage_data.io_generator(object_path, object_size)
                if data_info is False:
                    raise TestExecError("data creation failed")
                log.info("uploading object: %s" % object_path)
                with open(object_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
Example #15
def test_exec(config):

    test_info = AddTestInfo('test versioning with objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        # create user

        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)

        for each_user in all_users_info:

            # authenticate

            auth = Auth(each_user)
            rgw_conn = auth.do_auth()

            s3_object_names = []

            # create buckets

            log.info('no of buckets to create: %s' % config.bucket_count)

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)

                log.info('creating bucket with name: %s' % bucket_name_to_create)

                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)

                bucket = s3lib.resource_op({'obj': rgw_conn,
                                            'resource': 'Bucket',
                                            'args': [bucket_name_to_create]})

                # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})

                created = s3lib.resource_op({'obj': bucket,
                                             'resource': 'create',
                                             'args': None,
                                             'extra_info': {'access_key': each_user['access_key']}})

                if created is False:
                    raise TestExecError("Resource execution failed: bucket creation faield")

                if created is not None:

                    response = HttpResponseParser(created)

                    if response.status_code == 200:
                        log.info('bucket created')

                    else:
                        raise TestExecError("bucket creation failed")

                else:
                    raise TestExecError("bucket creation failed")

                # getting bucket version object

                if config.test_ops['enable_version'] is True:

                    log.info('bucket versioning test on bucket: %s' % bucket.name)

                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)

                    bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})

                    # checking the versioning status

                    # version_status = s3_ops.resource_op(bucket_versioning, 'status')

                    version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                        'resource': 'status',
                                                        'args': None
                                                        })

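                    # note: boto3 reports the status as None for a bucket where
                    # versioning was never configured; 'Enabled' or 'Suspended'
                    # appears only after it has been set at least once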
                    if version_status is None:

                        log.info('bucket versioning still not enabled')

                    # enabling bucket versioning

                    # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')

                    version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                               'resource': 'enable',
                                                               'args': None})

                    response = HttpResponseParser(version_enable_status)

                    if response.status_code == 200:
                        log.info('version enabled')

                    else:
                        raise TestExecError("version enable failed")

                    if config.objects_count > 0:

                        log.info('s3 objects to create: %s' % config.objects_count)

                        for oc in range(config.objects_count):

                            # versioning upload

                            s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc))

                            s3_object_names.append(s3_object_name)

                            log.info('s3 object name: %s' % s3_object_name)

                            log.info('versioning count: %s' % config.version_count)

                            s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                                 config.objects_size_range['max'])

                            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)

                            original_data_info = manage_data.io_generator(s3_object_path, s3_object_size)

                            if original_data_info is False:
                                TestExecError("data creation failed")

                            for vc in range(config.version_count):

                                log.info('version count for %s is %s' % (s3_object_name, str(vc)))

                                log.info('modifying data: %s' % s3_object_name)

                                modified_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op='append',
                                                                     **{'message': '\nhello object for version: %s\n' % str(vc)})

                                if modified_data_info is False:
                                    TestExecError("data modification failed")

                                log.info('uploading s3 object: %s' % s3_object_path)

                                upload_info = dict({'access_key': each_user['access_key']}, **modified_data_info)

                                object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                            'resource': 'upload_file',
                                                                            'args': [modified_data_info['name'], s3_object_name],
                                                                            'extra_info': upload_info})

                                if object_uploaded_status is False:
                                    raise TestExecError("Resource execution failed: object upload failed")

                                if object_uploaded_status is None:
                                    log.info('object uploaded')

                            log.info('all versions for the object: %s\n' % s3_object_name)

                            versions = bucket.object_versions.filter(Prefix=s3_object_name)

                            for version in versions:
                                log.info('key_name: %s --> version_id: %s' %(version.object_key, version.version_id))

                            if config.test_ops['copy_to_version'] is True:

                                # reverting object to one of the versions ( randomly chosen )

                                version_id_to_copy = random.choice([v.version_id for v in versions])

                                log.info('version_id_to_copy: %s' % version_id_to_copy)

                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)

                                log.info('current version_id: %s' % s3_obj.version_id)

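                                # copying with a CopySource that pins VersionId lays
                                # that older version on top of the key, making it the
                                # current version - the usual S3 way to revert an object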
                                copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name,
                                                                             'Key': s3_object_name,
                                                                             'VersionId': version_id_to_copy})

                                log.info('copy_response: %s' % copy_response)

                                if copy_response is None:
                                    raise TestExecError("copy object from version id failed")

                                # current_version_id = copy_response['VersionID']

                                log.info('current_version_id: %s' % s3_obj.version_id )

                                # delete the version_id_to_copy object

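                                # a delete that names an explicit VersionId removes that
                                # version permanently instead of leaving a delete marker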
                                s3_obj.delete(VersionId=version_id_to_copy)

                                log.info('all versions for the object after the copy operation: %s\n' % s3_object_name)

                                for version in versions:
                                    log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id))

                                # log.info('downloading current s3object: %s' % s3_object_name)

                                # s3_obj.download_file(s3_object_name + ".download")

                            if config.test_ops['delete_object_versions'] is True:

                                log.info('deleting s3_obj keys and its versions')

                                s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'Object',
                                                            'args': [bucket.name, s3_object_name]})

                                log.info('deleting versions for s3 obj: %s' % s3_object_name)

                                for version in versions:

                                    log.info('trying to delete obj version: %s' % version.version_id)

                                    del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                         'resource': 'delete',
                                                                         'kwargs': dict(VersionId=version.version_id)})

                                    log.info('response:\n%s' % del_obj_version)

                                    if del_obj_version is not None:

                                        response = HttpResponseParser(del_obj_version)

                                        if response.status_code == 204:
                                            log.info('version deleted')

                                        else:
                                            raise TestExecError("version deletion failed")

                                    else:
                                        raise TestExecError("version deletion failed")

                    if config.test_ops['suspend_version'] is True:

                        # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                        suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                    'resource': 'suspend',
                                                                    'args': None})

                        response = HttpResponseParser(suspend_version_status)

                        if response.status_code == 200:
                            log.info('versioning suspended')

                        else:
                            raise TestExecError("version suspend failed")

                if config.test_ops['upload_after_suspend'] is True:


                    log.info('trying to upload after suspending versioning on bucket')

                    for s3_object_name in s3_object_names:

                        # non versioning upload

                        log.info('s3 object name: %s' % s3_object_name)

                        s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                             config.objects_size_range['max'])

                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)

                        non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append",
                                                                    **{'message': '\nhello object for non version\n'})

                        if non_version_data_info is False:
                            TestExecError("data creation failed")

                        log.info('uploading s3 object: %s' % s3_object_path)

                        upload_info = dict({'access_key': each_user['access_key']}, **non_version_data_info)

                        object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                    'resource': 'upload_file',
                                                                    'args': [non_version_data_info['name'],
                                                                             s3_object_name],
                                                                    'extra_info': upload_info})

                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")

                        if object_uploaded_status is None:
                            log.info('object uploaded')

                        s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name+".download")

                        object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                      'resource': 'download_file',
                                                                      'args': [s3_object_name,
                                                                               s3_object_download_path],
                                                                      })

                        if object_downloaded_status is False:
                            raise TestExecError("Resource execution failed: object download failed")

                        if object_downloaded_status is None:
                            log.info('object downloaded')

                        # checking md5 of the downloaded file

                        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)

                        log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                        log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])


        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Beispiel #16
0
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    # create pool
    pool_name = '.rgw.buckets.special'
    pg_num = '8'
    pgp_num = '8'
    pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
        pool_name, pg_num, pgp_num)
    pool_create_exec = utils.exec_shell_cmd(pool_create)
    if pool_create_exec is False:
        raise TestExecError("Pool creation failed")
    # create realm
    realm_name = 'buz-tickets'
    log.info('creating realm: %s' % realm_name)
    realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s' % realm_name
    realm_create_exec = utils.exec_shell_cmd(realm_create)
    if realm_create_exec is False:
        raise TestExecError("cmd execution failed")
    # sample output of create realm
    """
    {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
    """
    log.info('modifying zonegroup')
    modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master' % realm_name
    modify_exec = utils.exec_shell_cmd(modify)
    if modify_exec is False:
        raise TestExecError("cmd execution failed")
    # get the zonegroup
    zonegroup_file = 'zonegroup.json'
    get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file
    get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
    if get_zonegroup_exec is False:
        raise TestExecError("cmd execution failed")
    add_to_placement_targets = {"name": "special-placement", "tags": []}
    fp = open(zonegroup_file, 'r')
    zonegroup_txt = fp.read()
    fp.close()
    log.info('got zonegroup info: \n%s' % zonegroup_txt)
    zonegroup = json.loads(zonegroup_txt)
    log.info('adding placement targets')
    zonegroup['placement_targets'].append(add_to_placement_targets)
    with open(zonegroup_file, 'w') as fp:
        json.dump(zonegroup, fp)
    zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
    zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
    if zonegroup_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone group update completed')
    # get zone
    log.info('getting zone info')
    zone_file = 'zone.json'
    get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > %s' % zone_file
    get_zone_exec = utils.exec_shell_cmd(get_zone)
    if get_zone_exec is False:
        raise TestExecError("cmd execution failed")
    fp = open(zone_file, 'r')
    zone_info = fp.read()
    fp.close()
    log.info('zone_info:\n%s' % zone_info)
    zone_info_cleaned = json.loads(zone_info)
    special_placement_info = {
        "key": "special-placement",
        "val": {
            "index_pool": ".rgw.buckets.index",
            "data_pool": ".rgw.buckets.special",
            "data_extra_pool": ".rgw.buckets.extra"
        }
    }
    log.info('adding special placement info')
    zone_info_cleaned['placement_pools'].append(special_placement_info)
    with open(zone_file, 'w+') as fp:
        json.dump(zone_info_cleaned, fp)
    zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
    zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
    if zone_file_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone info updated')
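    # committing a period update publishes the realm/zonegroup/zone changes made
    # above; without the commit, running gateways keep serving the old period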
    zone_group_update_set = 'radosgw-admin period update --commit'
    zone_group_update_set_exec = utils.exec_shell_cmd(zone_group_update_set)
    if zone_group_update_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info(zone_group_update_set_exec)
    restarted = rgw_service.restart()
    if restarted is False:
        raise TestExecError("service restart failed")
    if config.rgw_client == 'rgw':
        log.info('client type is rgw')
        rgw_user_info = s3_swift_lib.create_users(1)[0]
        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()
        # create bucket
        bucket_name = utils.gen_bucket_name_from_userid(
            rgw_user_info['user_id'], 0)
        bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
        # create object
        s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                rgw_user_info)
    if config.rgw_client == 'swift':
        log.info('client type is swift')
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        umgmt = UserMgmt()
        umgmt.create_tenant_user(tenant_name=tenant,
                                 user_id=user_names[0],
                                 displayname=user_names[0])
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0])
        auth = Auth(user_info)
        rgw = auth.do_auth()
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=0)
        container = s3_swift_lib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation failed")

        swift_object_name = utils.gen_s3_object_name(
            '%s.container.%s' % (user_names[0], 0), 0)
        log.info('object name: %s' % swift_object_name)
        object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
        log.info('object path: %s' % object_path)
        object_size = utils.get_file_size(config.objects_size_range['min'],
                                          config.objects_size_range['max'])
        data_info = manage_data.io_generator(object_path, object_size)
        if data_info is False:
            raise TestExecError("data creation failed")
        # upload object
        log.info('uploading object: %s' % object_path)
        with open(object_path, 'r') as fp:
            rgw.put_object(container_name,
                           swift_object_name,
                           contents=fp.read(),
                           content_type='text/plain')
Beispiel #17
0
def upload_mutipart_object(
    s3_object_name,
    bucket,
    TEST_DATA_PATH,
    config,
    user_info,
    append_data=False,
    append_msg=None,
):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, "split_size") else 5
    log.info("split size: %s" % split_size)
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op="append",
            **{"message": "\n%s" % append_msg},
        )
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        TestExecError("data creation failed")
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + ".mp.parts")
    log.info("mp part dir: %s" % mp_dir)
    log.info("making multipart object part dir")
    mkdir = utils.exec_shell_cmd("sudo mkdir %s" % mp_dir)
    if mkdir is False:
        raise TestExecError("mkdir failed creating mp_dir_name")
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
    parts_list = sorted(glob.glob(mp_dir + "/" + "*"))
    log.info("parts_list: %s" % parts_list)
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict(
        {
            "access_key": user_info["access_key"],
            "upload_type": "multipart"
        }, **data_info)
    s3_obj = s3lib.resource_op({
        "obj": bucket,
        "resource": "Object",
        "args": [s3_object_name],
    })
    log.info("initiating multipart upload")
    mpu = s3lib.resource_op({
        "obj": s3_obj,
        "resource": "initiate_multipart_upload",
        "args": None,
        "extra_info": upload_info,
    })
    part_number = 1
    parts_info = {"Parts": []}
    log.info("no of parts: %s" % len(parts_list))
    for each_part in parts_list:
        log.info("trying to upload part: %s" % each_part)
        part = mpu.Part(part_number)
        # part_upload_response = part.upload(Body=open(each_part))
        part_upload_response = s3lib.resource_op({
            "obj": part,
            "resource": "upload",
            "kwargs": dict(Body=open(each_part, mode="rb")),
        })
        if part_upload_response is not False:
            response = HttpResponseParser(part_upload_response)
            if response.status_code == 200:
                log.info("part uploaded")
                if config.local_file_delete is True:
                    log.info("deleting local file part")
                    utils.exec_shell_cmd("sudo rm -rf %s" % each_part)
            else:
                raise TestExecError("part uploading failed")
        else:
            raise TestExecError("Resource execution failed: part upload failed")
        part_info = {
            "PartNumber": part_number,
            "ETag": part_upload_response["ETag"]
        }
        parts_info["Parts"].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info("curr part_number: %s" % part_number)
    # log.info('parts_info so far: %s'% parts_info)
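    # note: since the last part never bumps part_number, the equality below holds
    # only when every entry in parts_list was iterated and uploaded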
    if len(parts_list) == part_number:
        log.info("all parts upload completed")
        mpu.complete(MultipartUpload=parts_info)
        log.info("multipart upload complete for key: %s" % s3_object_name)
Beispiel #18
0
def upload_version_object(config, user_info, rgw_conn, s3_object_name,
                          object_size, bucket, TEST_DATA_PATH):
    # versioning upload
    log.info("versioning count: %s" % config.version_count)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    original_data_info = manage_data.io_generator(s3_object_path, object_size)
    if original_data_info is False:
        TestExecError("data creation failed")
    created_versions_count = 0
    for vc in range(config.version_count):
        log.info("version count for %s is %s" % (s3_object_name, str(vc)))
        log.info("modifying data: %s" % s3_object_name)
        modified_data_info = manage_data.io_generator(
            s3_object_path,
            object_size,
            op="append",
            **{"message": "\nhello for version: %s\n" % str(vc)},
        )
        if modified_data_info is False:
            TestExecError("data modification failed")
        log.info("uploading s3 object: %s" % s3_object_path)
        upload_info = dict(
            {
                "access_key": user_info["access_key"],
                "versioning_status": "enabled",
                "version_count_no": vc,
            },
            **modified_data_info,
        )
        s3_obj = s3lib.resource_op({
            "obj": bucket,
            "resource": "Object",
            "args": [s3_object_name],
            "extra_info": upload_info,
        })
        object_uploaded_status = s3lib.resource_op({
            "obj": s3_obj,
            "resource": "upload_file",
            "args": [modified_data_info["name"]],
            "extra_info": upload_info,
        })
        if object_uploaded_status is False:
            raise TestExecError(
                "Resource execution failed: object upload failed")
        if object_uploaded_status is None:
            log.info("object uploaded")
            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
            log.info("current_version_id: %s" % s3_obj.version_id)
            key_version_info = basic_io_structure.version_info(
                **{
                    "version_id": s3_obj.version_id,
                    "md5_local": upload_info["md5"],
                    "count_no": vc,
                    "size": upload_info["size"],
                })
            log.info("key_version_info: %s" % key_version_info)
            write_key_io_info.add_versioning_info(user_info["access_key"],
                                                  bucket.name, s3_object_path,
                                                  key_version_info)
            created_versions_count += 1
            log.info("created_versions_count: %s" % created_versions_count)
            log.info("adding metadata")
            metadata1 = {"m_data1": "this is the meta1 for this obj"}
            s3_obj.metadata.update(metadata1)
            metadata2 = {"m_data2": "this is the meta2 for this obj"}
            s3_obj.metadata.update(metadata2)
            log.info("metadata for this object: %s" % s3_obj.metadata)
            log.info("metadata count for object: %s" % (len(s3_obj.metadata)))
            if not s3_obj.metadata:
                raise TestExecError(
                    "metadata not created even after adding metadata")
            versions = bucket.object_versions.filter(Prefix=s3_object_name)
            created_versions_count_from_s3 = len(
                [v.version_id for v in versions])
            log.info("created versions count on s3: %s" %
                     created_versions_count_from_s3)
            if created_versions_count == created_versions_count_from_s3:
                log.info("no new versions are created when metadata is added")
            else:
                raise TestExecError(
                    "version count mismatch, "
                    "possible creation of version on adding metadata")
        s3_object_download_path = os.path.join(TEST_DATA_PATH,
                                               s3_object_name + ".download")
        object_downloaded_status = s3lib.resource_op({
            "obj": bucket,
            "resource": "download_file",
            "args": [s3_object_name, s3_object_download_path],
        })
        if object_downloaded_status is False:
            raise TestExecError(
                "Resource execution failed: object download failed")
        if object_downloaded_status is None:
            log.info("object downloaded")
        # checking md5 of the downloaded file
        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
        log.info("downloaded_md5: %s" % s3_object_downloaded_md5)
        log.info("uploaded_md5: %s" % modified_data_info["md5"])
        log.info("deleting downloaded version file")
        utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_download_path)
    log.info("all versions for the object: %s\n" % s3_object_name)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({
                "obj": rgw_conn,
                "resource": "Bucket",
                "args": [bucket_name_to_create]
            })
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({
                "obj": bucket,
                "resource": "create",
                "args": None,
                "extra_info": {
                    "access_key": each_user["access_key"]
                },
            })
            if created is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation failed")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info("bucket created")
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops["enable_version"] is True:
                log.info("bucket versionig test on bucket: %s" % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketVersioning",
                    "args": [bucket.name],
                })
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "status",
                    "args": None
                })
                if version_status is None:
                    log.info("bucket versioning still not enabled")
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "enable",
                    "args": None,
                })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info("version enabled")
                    write_bucket_io_info.add_versioning_status(
                        each_user["access_key"],
                        bucket.name,
                        VERSIONING_STATUS["ENABLED"],
                    )

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, s3_object_size in list(
                            config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info("s3 object name: %s" % s3_object_name)
                        log.info("versioning count: %s" % config.version_count)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        original_data_info = manage_data.io_generator(
                            s3_object_path, s3_object_size)
                        if original_data_info is False:
                            TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info("version count for %s is %s" %
                                     (s3_object_name, str(vc)))
                            log.info("modifying data: %s" % s3_object_name)
                            modified_data_info = manage_data.io_generator(
                                s3_object_path,
                                s3_object_size,
                                op="append",
                                **{
                                    "message":
                                    "\nhello for version: %s\n" % str(vc)
                                })
                            if modified_data_info is False:
                                TestExecError("data modification failed")
                            log.info("uploading s3 object: %s" %
                                     s3_object_path)
                            upload_info = dict(
                                {
                                    "access_key":
                                    each_user["access_key"],
                                    "versioning_status":
                                    VERSIONING_STATUS["ENABLED"],
                                    "version_count_no":
                                    vc,
                                }, **modified_data_info)
                            s3_obj = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "Object",
                                "args": [s3_object_name],
                                "extra_info": upload_info,
                            })
                            object_uploaded_status = s3lib.resource_op({
                                "obj": s3_obj,
                                "resource": "upload_file",
                                "args": [modified_data_info["name"]],
                                "extra_info": upload_info,
                            })
                            if object_uploaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object upload failed"
                                )
                            if object_uploaded_status is None:
                                log.info("object uploaded")
                                s3_obj = rgw_conn.Object(
                                    bucket.name, s3_object_name)
                                log.info("current_version_id: %s" %
                                         s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{
                                        "version_id": s3_obj.version_id,
                                        "md5_local": upload_info["md5"],
                                        "count_no": vc,
                                        "size": upload_info["size"],
                                    })
                                log.info("key_version_info: %s" %
                                         key_version_info)
                                write_key_io_info.add_versioning_info(
                                    each_user["access_key"],
                                    bucket.name,
                                    s3_object_path,
                                    key_version_info,
                                )
                                created_versions_count += 1
                                log.info("created_versions_count: %s" %
                                         created_versions_count)
                                log.info("adding metadata")
                                metadata1 = {
                                    "m_data1": "this is the meta1 for this obj"
                                }
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {
                                    "m_data2": "this is the meta2 for this obj"
                                }
                                s3_obj.metadata.update(metadata2)
                                log.info("metadata for this object: %s" %
                                         s3_obj.metadata)
                                log.info("metadata count for object: %s" %
                                         (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError(
                                        "metadata not created even after adding metadata"
                                    )
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                created_versions_count_from_s3 = len(
                                    [v.version_id for v in versions])
                                log.info("created versions count on s3: %s" %
                                         created_versions_count_from_s3)
                                if (created_versions_count ==
                                        created_versions_count_from_s3):
                                    log.info(
                                        "no new versions are created when metadata is added"
                                    )
                                else:
                                    raise TestExecError(
                                        "version count missmatch, "
                                        "possible creation of version on adding metadata"
                                    )
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "download_file",
                                "args": [s3_object_name, s3_object_download_path],
                            })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            log.info("downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("uploaded_md5: %s" %
                                     modified_data_info["md5"])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info("all versions for the object: %s\n" %
                                 s3_object_name)
                        versions = bucket.object_versions.filter(
                            Prefix=s3_object_name)
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                        if config.test_ops.get("set_acl", None) is True:
                            s3_obj_acl = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "ObjectAcl",
                                "args": [bucket.name, s3_object_name],
                            })
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL="private")
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info("ACLs set")
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info("getting info for version id: %s" %
                                         version.version_id)
                                obj = s3lib.resource_op({
                                    "obj": rgw_conn,
                                    "resource": "Object",
                                    "args": [bucket.name, s3_object_name],
                                })
                                log.info(
                                    "obj get details: %s\n" %
                                    (obj.get(VersionId=version.version_id)))
                        if config.test_ops["copy_to_version"] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice(
                                [v.version_id for v in versions])
                            log.info("version_id_to_copy: %s" %
                                     version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name,
                                                     s3_object_name)
                            log.info("current version_id: %s" %
                                     s3_obj.version_id)
                            copy_response = s3_obj.copy_from(
                                CopySource={
                                    "Bucket": bucket.name,
                                    "Key": s3_object_name,
                                    "VersionId": version_id_to_copy,
                                })
                            log.info("copy_response: %s" % copy_response)
                            if copy_response is None:
                                raise TestExecError(
                                    "copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info("current_version_id: %s" %
                                     s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info(
                                "all versions for the object after the copy operation: %s\n"
                                % s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops["delete_object_versions"] is True:
                            log.info("deleting s3_obj keys and its versions")
                            s3_obj = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        reusable.delete_version_object(
                                            bucket,
                                            version.version_id,
                                            s3_object_path,
                                            rgw_conn,
                                            each_user,
                                        )
                                    else:
                                        raise TestExecError(
                                            "version deletion failed")
                                else:
                                    raise TestExecError(
                                        "version deletion failed")
                            log.info("available versions for the object")
                            versions = bucket.object_versions.filter(
                                Prefix=s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                        if config.test_ops.get(
                                "delete_from_extra_user") is True:
                            log.info(
                                "trying to delete objects from extra user")
                            s3_obj = s3lib.resource_op({
                                "obj": extra_user_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        write_key_io_info.delete_version_info(
                                            each_user["access_key"],
                                            bucket.name,
                                            s3_object_path,
                                            version.version_id,
                                        )
                                        raise TestExecError(
                                            "version got deleted, this should not happen"
                                        )
                                    else:
                                        log.info(
                                            "version did not delete, expected behaviour"
                                        )
                                else:
                                    log.info(
                                        "version did not delete, expected behaviour"
                                    )
                        if config.local_file_delete is True:
                            log.info("deleting local file")
                            utils.exec_shell_cmd("sudo rm -rf %s" %
                                                 s3_object_path)
                if config.test_ops["suspend_version"] is True:
                    log.info("suspending versioning")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info("versioning suspended")
                        write_bucket_io_info.add_versioning_status(
                            each_user["access_key"],
                            bucket.name,
                            VERSIONING_STATUS["SUSPENDED"],
                        )
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info("getting all objects in the bucket")
                    objects = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "objects",
                        "args": None
                    })
                    log.info("objects :%s" % objects)
                    all_objects = s3lib.resource_op({
                        "obj": objects,
                        "resource": "all",
                        "args": None
                    })
                    log.info("all objects: %s" % all_objects)
                    log.info("all objects2 :%s " % bucket.objects.all())
                    for obj in all_objects:
                        log.info("object_name: %s" % obj.key)
                        versions = bucket.object_versions.filter(
                            Prefix=obj.key)
                        log.info("displaying all versions of the object")
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                if config.test_ops.get("suspend_from_extra_user") is True:
                    log.info("suspending versioning from extra user")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({
                        "obj": extra_user_conn,
                        "resource": "BucketVersioning",
                        "args": [bucket.name],
                    })

                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info("versioning suspended")
                            write_bucket_io_info.add_versioning_status(
                                each_user["access_key"],
                                bucket.name,
                                VERSIONING_STATUS["SUSPENDED"],
                            )
                            raise TestExecError(
                                "version suspended, this should not happen")
                    else:
                        log.info(
                            "versioning not suspended, expected behaviour")
            if config.test_ops.get("upload_after_suspend") is True:
                log.info(
                    "trying to upload after suspending versioning on bucket")
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[
                        oc] + ".after_version_suspending"
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    non_version_data_info = manage_data.io_generator(
                        s3_object_path,
                        s3_object_size,
                        op="append",
                        **{"message": "\nhello for non version\n"})
                    if non_version_data_info is False:
                        TestExecError("data creation failed")
                    log.info("uploading s3 object: %s" % s3_object_path)
                    upload_info = dict(
                        {
                            "access_key": each_user["access_key"],
                            "versioning_status": "suspended",
                        }, **non_version_data_info)
                    s3_obj = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "Object",
                        "args": [s3_object_name],
                        "extra_info": upload_info,
                    })
                    object_uploaded_status = s3lib.resource_op({
                        "obj": s3_obj,
                        "resource": "upload_file",
                        "args": [non_version_data_info["name"]],
                        "extra_info": upload_info,
                    })

                    if object_uploaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info("object uploaded")
                    s3_obj = s3lib.resource_op({
                        "obj": rgw_conn,
                        "resource": "Object",
                        "args": [bucket.name, s3_object_name],
                    })
                    log.info("version_id: %s" % s3_obj.version_id)
                    if s3_obj.version_id is None:
                        log.info("Versions are not created after suspending")
                    else:
                        raise TestExecError(
                            "Versions are created even after suspending")
                    s3_object_download_path = os.path.join(
                        TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "download_file",
                        "args": [s3_object_name, s3_object_download_path],
                    })
                    if object_downloaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object download failed"
                        )
                    if object_downloaded_status is None:
                        log.info("object downloaded")
                    # checking md5 of the downloaded file against the uploaded data
                    s3_object_downloaded_md5 = utils.get_md5(
                        s3_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" %
                             s3_object_downloaded_md5)
                    log.info("s3_object_uploaded_md5: %s" %
                             non_version_data_info["md5"])
                    if s3_object_downloaded_md5 != non_version_data_info["md5"]:
                        raise TestExecError("md5 mismatch after download")
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_path)
            if config.test_ops.get("delete_bucket") is True:
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authentication
        auth = Auth(each_user, ssl=config.ssl)
        s3_conn_client = auth.do_auth_using_client()
        # create buckets with object lock configuration
        if config.test_ops["create_bucket"] is True:
            log.info(f"no of buckets to create: {config.bucket_count}")
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info(f"creating bucket with name: {bucket_name_to_create}")
                rgw_ip_and_port = get_rgw_ip_and_port()
                s3_conn_client.create_bucket(
                    Bucket=bucket_name_to_create, ObjectLockEnabledForBucket=True
                )
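                # enabling object lock at bucket creation implicitly turns on
                # versioning, which object lock requires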
                # put object lock configuration for bucket
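                # COMPLIANCE mode: protected object versions cannot be overwritten
                # or deleted by any user until the 1-day default retention expires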
                s3_conn_client.put_object_lock_configuration(
                    Bucket=bucket_name_to_create,
                    ObjectLockConfiguration={
                        "ObjectLockEnabled": "Enabled",
                        "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
                    },
                )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(f"s3 objects to create: {config.objects_count}")
                    for oc, size in list(config.mapped_sizes.items()):
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, 0
                        )
                        log.info(f"s3 object name: {s3_object_name}")
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info(f"s3 object path: {s3_object_path}")
                        log.info("upload type: normal")
                        io_generator(s3_object_path, size)
                        # upload the generated file contents rather than the path string
                        with open(s3_object_path, "rb") as fp:
                            s3_conn_client.put_object(
                                Body=fp,
                                Bucket=bucket_name_to_create,
                                Key=s3_object_name,
                            )
                    log.info("Verify version count")
                    # Verify version count
                    versions = s3_conn_client.list_object_versions(
                        Bucket=bucket_name_to_create
                    )
                    versions_count = len(versions["Versions"])
                    error_message = (
                        f"Expected: {config.objects_count}, Actual: {versions_count}"
                    )
                    if versions_count == config.objects_count:
                        log.info("Expected and actual version count is same")
                    else:
                        raise ObjectVersionCountMismatch(error_message)
                    # Verify delete disabled for object
                    log.info("Verify delete disabled for object")
                    for version_dict in versions["Versions"]:
                        try:
                            s3_conn_client.delete_object(
                                Bucket=bucket_name_to_create,
                                Key=s3_object_name,
                                VersionId=version_dict["VersionId"],
                            )
                            raise AccessDeniedObjectDeleted(
                                "Access denied object deleted"
                            )
                        except boto3exception.ClientError as e:
                            expected_code = "AccessDenied"
                            actual_code = e.response["Error"]["Code"]
                            assert (
                                actual_code == expected_code
                            ), f"Expected: {expected_code}, Actual: {actual_code}"

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #21
0
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ['tuffy', 'scooby', 'max']
        tenant1 = 'tenant'
        cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s' \
              % (user_names[0], user_names[0], tenant1, config.cluster_name)
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise TestExecError("RGW User creation error")
        log.info('output :%s' % out)
        v1_as_json = json.loads(out)
        log.info('created user_id: %s' % v1_as_json['user_id'])
        cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
               % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name)
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise TestExecError("sub-user creation error")
        v2_as_json = json.loads(out2)
        log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])
        cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret ' \
               '--cluster %s' % (user_names[0], tenant1, user_names[0], tenant1, config.cluster_name)
        out3 = utils.exec_shell_cmd(cmd3)
        if out3 is False:
            raise TestExecError("secret_key gen error")
        v3_as_json = json.loads(out3)
        log.info('created subuser: %s\nsecret_key generated: %s' %
                 (v3_as_json['swift_keys'][0]['user'],
                  v3_as_json['swift_keys'][0]['secret_key']))
        user_info = {
            'user_id': v3_as_json['swift_keys'][0]['user'],
            'key': v3_as_json['swift_keys'][0]['secret_key']
        }
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)
            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)
                log.info('object name: %s' % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info('object path: %s' % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])
                data_info = manage_data.io_generator(object_path, object_size)
                if data_info is False:
                    raise TestExecError("data creation failed")
                log.info('uploading object: %s' % object_path)
                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #22
0
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    umgmt = UserMgmt()

    try:
        test_info.started_info()

        # preparing data

        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'

        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant,
            user_id=user_names[0],
            displayname=user_names[0],
            cluster_name=config.cluster_name)

        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)

        auth = Auth(user_info)

        rgw = auth.do_auth()

        for cc in range(config.container_count):

            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)

            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })

            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")

            for oc in range(config.objects_count):

                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)

                log.info('object name: %s' % swift_object_name)

                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)

                log.info('object path: %s' % object_path)

                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])

                data_info = manage_data.io_generator(object_path, object_size)

                # upload object

                if data_info is False:
                    raise TestExecError("data creation failed")

                log.info('uploading object: %s' % object_path)

                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # download object

                swift_object_download_fname = swift_object_name + ".download"

                log.info('download object name: %s' %
                         swift_object_download_fname)

                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)

                log.info('download object path: %s' %
                         swift_object_download_path)

                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)

                with open(swift_object_download_path, 'w') as fp:
                    fp.write(swift_object_downloaded[1])

                # modify and re-upload

                log.info('appending new message to test_data')

                message_to_append = 'adding new msg after download'

                with open(swift_object_download_path, 'a+') as fp:
                    fp.write(message_to_append)

                with open(swift_object_download_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # delete object

                log.info('deleting swift object')

                rgw.delete_object(container_name, swift_object_name)

            # delete container

            log.info('deleting swift container')

            rgw.delete_container(container_name)

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo("test swift user key gen")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    try:
        test_info.started_info()
        # preparing data
        user_names = ["tuffy", "scooby", "max"]
        tenant = "tenant"
        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant,
            user_id=user_names[0],
            displayname=user_names[0],
            cluster_name=config.cluster_name,
        )
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    "%s.container.%s" % (user_names[0], cc), oc)
                log.info("object name: %s" % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info("object path: %s" % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range["min"],
                    config.objects_size_range["max"])
                data_info = manage_data.io_generator(object_path, object_size)
                # upload object
                if data_info is False:
                    raise TestExecError("data creation failed")
                log.info("uploading object: %s" % object_path)
                with open(object_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info("download object name: %s" %
                         swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)
                log.info("download object path: %s" %
                         swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)
                with open(swift_object_download_path, "w") as fp:
                    fp.write(swift_object_downloaded[1])
                # modify and re-upload
                log.info("appending new message to test_data")
                message_to_append = "adding new msg after download"
                fp = open(swift_object_download_path, "a+")
                fp.write(message_to_append)
                fp.close()
                with open(swift_object_download_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # delete object
                log.info("deleting swift object")
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info("deleting swift container")
            rgw.delete_container(container_name)
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
Example #24
0
    def write(self, io_type, fname, size=0):
        """
            This function is to write IO on the mount point

            Parameters:
                io_type: basedir | subdir | file
                fname(char): file name
                size(int): size

            Returns:

        """

        log.info('io_type: %s' % io_type)
        log.info('fname: %s' % fname)
        log.info('size: %s' % size)

        s3_conv = NFS_CONVENTIONS.get(io_type)
        ioinfo = IOInfo()

        path = os.path.abspath(self.mnt_point)
        full_path = os.path.join(path, fname)
        log.info('abs_path: %s' % full_path)
        try:

            if io_type == 'basedir' or io_type == 'subdir':

                log.info('creating dir, type: %s' % io_type)

                os.makedirs(full_path)

                io_info = {
                    'name': os.path.basename(fname),
                    'type': 'dir',
                    's3_convention': s3_conv,
                    'bucket': 'self' if s3_conv == 'bucket' else fname.split('/')[0],
                    'md5': None
                }

                log.info('io_info: %s' % io_info)

                ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)

            if io_type == 'file':

                log.info('io_type is file: %s' % io_type)

                log.info('creating file with size: %s' % size)

                finfo = manage_data.io_generator(full_path, size)

                io_info = {
                    'name': os.path.basename(fname),
                    'type': 'file',
                    's3_convention': s3_conv,
                    'bucket': fname.split('/')[0],
                    'md5': finfo['md5']
                }

                log.info('io_info: %s' % io_info)

                ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)

        except Exception as e:
            log.error('Write IO Execution failed')
            log.error(e)
            return False
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant1 = "tenant" + "_" + str(random.randrange(1, 100))
    cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s' % (
        user_names[0],
        user_names[0],
        tenant1,
    )
    out = utils.exec_shell_cmd(cmd)
    if out is False:
        raise TestExecError("RGW User creation error")
    log.info("output :%s" % out)
    v1_as_json = json.loads(out)
    log.info("creted user_id: %s" % v1_as_json["user_id"])
    cmd2 = (
        "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full"
        % (tenant1, user_names[0], user_names[0], tenant1))
    out2 = utils.exec_shell_cmd(cmd2)
    if out2 is False:
        raise TestExecError("sub-user creation error")
    v2_as_json = json.loads(out2)
    log.info("created subuser: %s" % v2_as_json["subusers"][0]["id"])
    cmd3 = (
        "radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret"
        % (user_names[0], tenant1, user_names[0], tenant1))
    out3 = utils.exec_shell_cmd(cmd3)
    if out3 is False:
        raise TestExecError("secret_key gen error")
    v3_as_json = json.loads(out3)
    log.info("created subuser: %s\nsecret_key generated: %s" % (
        v3_as_json["swift_keys"][0]["user"],
        v3_as_json["swift_keys"][0]["secret_key"],
    ))
    user_info = {
        "user_id": v3_as_json["swift_keys"][0]["user"],
        "key": v3_as_json["swift_keys"][0]["secret_key"],
    }
    auth = Auth(user_info, is_secure=config.ssl)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info["user_id"], rand_no=cc)
        container = swiftlib.resource_op({
            "obj": rgw,
            "resource": "put_container",
            "args": [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name(
                "%s.container.%s" % (user_names[0], cc), oc)
            log.info("object name: %s" % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info("object path: %s" % object_path)
            data_info = manage_data.io_generator(object_path, size)
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info("uploading object: %s" % object_path)
            with open(object_path, "r") as fp:
                rgw.put_object(
                    container_name,
                    swift_object_name,
                    contents=fp.read(),
                    content_type="text/plain",
                )
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()

    # preparing data
    user_names = ['tuffy', 'scooby', 'max']
    tenant = 'tenant'
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=cc)
        container = swiftlib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name(
                '%s.container.%s' % (user_names[0], cc), oc)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            data_info = manage_data.io_generator(object_path, size)
            # upload object
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name,
                               swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
            # download object
            swift_object_download_fname = swift_object_name + ".download"
            log.info('download object name: %s' % swift_object_download_fname)
            swift_object_download_path = os.path.join(
                TEST_DATA_PATH, swift_object_download_fname)
            log.info('download object path: %s' % swift_object_download_path)
            swift_object_downloaded = rgw.get_object(container_name,
                                                     swift_object_name)
            with open(swift_object_download_path, 'w') as fp:
                fp.write(str(swift_object_downloaded[1]))
            # modify and re-upload
            log.info('appending new message to test_data')
            message_to_append = 'adding new msg after download'
            with open(swift_object_download_path, 'a+') as fp:
                fp.write(message_to_append)
            with open(swift_object_download_path, 'r') as fp:
                rgw.put_object(container_name,
                               swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
            # delete object
            log.info('deleting swift object')
            rgw.delete_object(container_name, swift_object_name)
        # delete container
        log.info('deleting swift container')
        rgw.delete_container(container_name)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    test_info = AddTestInfo("storage_policy for %s" % config.rgw_client)
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    try:
        # create pool
        pool_name = ".rgw.buckets.special"
        pg_num = "8"
        pgp_num = "8"
        pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
            pool_name,
            pg_num,
            pgp_num,
        )
        pool_create_exec = utils.exec_shell_cmd(pool_create)
        if pool_create_exec is False:
            raise TestExecError("Pool creation failed")
        # create realm
        realm_name = "buz-tickets"
        log.info("creating realm name")
        realm_create = (
            "sudo radosgw-admin realm create --rgw-realm=%s --default" % realm_name
        )
        realm_create_exec = utils.exec_shell_cmd(realm_create)
        if realm_create_exec is False:
            raise TestExecError("cmd execution failed")
        # sample output of create realm
        """
        {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
        
        """
        log.info("modify zonegroup ")
        modify = (
            "sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default"
            % realm_name
        )
        modify_exec = utils.exec_shell_cmd(modify)
        if modify_exec is False:
            raise TestExecError("cmd execution failed")
        # get the zonegroup
        zonegroup_file = "zonegroup.json"
        get_zonegroup = (
            "sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s"
            % zonegroup_file
        )
        get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
        if get_zonegroup_exec is False:
            raise TestExecError("cmd execution failed")
        add_to_placement_targets = {"name": "special-placement", "tags": []}
        fp = open(zonegroup_file, "r")
        zonegroup_txt = fp.read()
        fp.close()
        log.info("got zonegroup info: \n%s" % zonegroup_txt)
        zonegroup = json.loads(zonegroup_txt)
        log.info("adding placement targets")
        zonegroup["placement_targets"].append(add_to_placement_targets)
        with open(zonegroup_file, "w") as fp:
            json.dump(zonegroup, fp)
        zonegroup_set = "sudo radosgw-admin zonegroup set < %s" % zonegroup_file
        zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
        if zonegroup_set_exec is False:
            raise TestExecError("cmd execution failed")
        log.info("zone group update completed")
        log.info("getting zone file")
        # get zone
        log.info("getting zone info")
        zone_file = "zone.json"
        get_zone = "sudo radosgw-admin zone --rgw-zone=default  get > zone.json"
        get_zone_exec = utils.exec_shell_cmd(get_zone)
        if get_zone_exec is False:
            raise TestExecError("cmd execution failed")
        fp = open(zone_file, "r")
        zone_info = fp.read()
        fp.close()
        log.info("zone_info :\n%s" % zone_info)
        zone_info_cleaned = json.loads(zone_info)
        special_placement_info = {
            "key": "special-placement",
            "val": {
                "index_pool": ".rgw.buckets.index",
                "data_pool": ".rgw.buckets.special",
                "data_extra_pool": ".rgw.buckets.extra",
            },
        }
        log.info("adding  special placement info")
        zone_info_cleaned["placement_pools"].append(special_placement_info)
        log.info(zone_info_cleaned)
        with open(zone_file, "w+") as fp:
            json.dump(zone_info_cleaned, fp)
        zone_file_set = "sudo radosgw-admin zone set < %s" % zone_file
        zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
        if zone_file_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info("zone info updated ")
        restarted = rgw_service.restart()
        if restarted is False:
            raise TestExecError("service restart failed")
        if config.rgw_client == "rgw":
            log.info("client type is rgw")
            rgw_user_info = s3_swift_lib.create_users(1)
            auth = Auth(rgw_user_info)
            rgw_conn = auth.do_auth()
            # create bucket
            bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info["user_id"], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(
                s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info
            )

        if config.rgw_client == "swift":
            log.info("client type is swift")

            user_names = ["tuffy", "scooby", "max"]
            tenant = "tenant"

            umgmt = UserMgmt()
            umgmt.create_tenant_user(
                tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]
            )

            user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])

            auth = Auth(user_info)
            rgw = auth.do_auth()
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=0
            )
            container = s3_swift_lib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield"
                )
            swift_object_name = utils.gen_s3_object_name(
                "%s.container.%s" % (user_names[0], 0), 0
            )
            log.info("object name: %s" % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info("object path: %s" % object_path)
            object_size = utils.get_file_size(
                config.objects_size_range["min"], config.objects_size_range["max"]
            )
            data_info = manage_data.io_generator(object_path, object_size)
            # upload object
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info("uploading object: %s" % object_path)
            with open(object_path, "r") as fp:
                rgw.put_object(
                    container_name,
                    swift_object_name,
                    contents=fp.read(),
                    content_type="text/plain",
                )
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
def upload_mutipart_object(s3_object_name,
                           bucket,
                           TEST_DATA_PATH,
                           config,
                           user_info,
                           append_data=False,
                           append_msg=None):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, 'split_size') else 5
    log.info('split size: %s' % split_size)
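    # note: S3 multipart uploads require every part except the last to be at
    # least 5 MB, so split_size is assumed here to be expressed in MB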
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op='append',
            **{'message': '\n%s' % append_msg})
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + '.mp.parts')
    log.info('mp part dir: %s' % mp_dir)
    log.info('making multipart object part dir')
    mkdir = utils.exec_shell_cmd('sudo mkdir %s' % mp_dir)
    if mkdir is False:
        raise TestExecError('mkdir failed creating mp_dir_name')
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
    parts_list = sorted(glob.glob(mp_dir + '/' + '*'))
    log.info('parts_list: %s' % parts_list)
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict(
        {
            'access_key': user_info['access_key'],
            'upload_type': 'multipart'
        }, **data_info)
    s3_obj = s3lib.resource_op({
        'obj': bucket,
        'resource': 'Object',
        'args': [s3_object_name],
    })
    log.info('initiating multipart upload')
    mpu = s3lib.resource_op({
        'obj': s3_obj,
        'resource': 'initiate_multipart_upload',
        'args': None,
        'extra_info': upload_info
    })
    part_number = 1
    parts_info = {'Parts': []}
    log.info('no of parts: %s' % len(parts_list))
    for each_part in parts_list:
        log.info('trying to upload part: %s' % each_part)
        part = mpu.Part(part_number)
        # part_upload_response = part.upload(Body=open(each_part))
        part_upload_response = s3lib.resource_op({
            'obj': part,
            'resource': 'upload',
            'kwargs': dict(Body=open(each_part, mode="rb"))
        })
        if part_upload_response is not False:
            response = HttpResponseParser(part_upload_response)
            if response.status_code == 200:
                log.info('part uploaded')
                if config.local_file_delete is True:
                    log.info('deleting local file part')
                    utils.exec_shell_cmd('sudo rm -rf %s' % each_part)
            else:
                raise TestExecError("part uploading failed")
        part_info = {
            'PartNumber': part_number,
            'ETag': part_upload_response['ETag']
        }
        parts_info['Parts'].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info('curr part_number: %s' % part_number)
    # log.info('parts_info so far: %s'% parts_info)
    if len(parts_list) == part_number:
        log.info('all parts upload completed')
        mpu.complete(MultipartUpload=parts_info)
        log.info('multipart upload complete for key: %s' % s3_object_name)
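
Stripped of the s3lib.resource_op wrapper, the calls above follow the standard boto3 resource-level multipart flow. A minimal sketch with placeholder names:

# assuming `bucket` is a boto3 Bucket resource and `parts_list` holds the split part files
s3_obj = bucket.Object("my-key")  # placeholder key
mpu = s3_obj.initiate_multipart_upload()
parts_info = {"Parts": []}
for part_number, each_part in enumerate(parts_list, start=1):
    with open(each_part, "rb") as fp:
        response = mpu.Part(part_number).upload(Body=fp)
    # CompleteMultipartUpload needs the PartNumber/ETag pair for every part
    parts_info["Parts"].append({"PartNumber": part_number, "ETag": response["ETag"]})
mpu.complete(MultipartUpload=parts_info)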
Example #29
0
def upload_version_object(config, user_info, rgw_conn, s3_object_name,
                          object_size, bucket, TEST_DATA_PATH):
    # versioning upload
    log.info('versioning count: %s' % config.version_count)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    original_data_info = manage_data.io_generator(s3_object_path, object_size)
    if original_data_info is False:
        raise TestExecError("data creation failed")
    created_versions_count = 0
    for vc in range(config.version_count):
        log.info('version count for %s is %s' % (s3_object_name, str(vc)))
        log.info('modifying data: %s' % s3_object_name)
        modified_data_info = manage_data.io_generator(
            s3_object_path,
            object_size,
            op='append',
            **{'message': '\nhello for version: %s\n' % str(vc)})
        if modified_data_info is False:
            raise TestExecError("data modification failed")
        log.info('uploading s3 object: %s' % s3_object_path)
        upload_info = dict(
            {
                'access_key': user_info['access_key'],
                'versioning_status': 'enabled',
                'version_count_no': vc
            }, **modified_data_info)
        s3_obj = s3lib.resource_op({
            'obj': bucket,
            'resource': 'Object',
            'args': [s3_object_name],
            'extra_info': upload_info,
        })
        object_uploaded_status = s3lib.resource_op({
            'obj': s3_obj,
            'resource': 'upload_file',
            'args': [modified_data_info['name']],
            'extra_info': upload_info
        })
        if object_uploaded_status is False:
            raise TestExecError(
                "Resource execution failed: object upload failed")
        if object_uploaded_status is None:
            log.info('object uploaded')
            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
            log.info('current_version_id: %s' % s3_obj.version_id)
            key_version_info = basic_io_structure.version_info(
                **{
                    'version_id': s3_obj.version_id,
                    'md5_local': upload_info['md5'],
                    'count_no': vc,
                    'size': upload_info['size']
                })
            log.info('key_version_info: %s' % key_version_info)
            write_key_io_info.add_versioning_info(user_info['access_key'],
                                                  bucket.name, s3_object_path,
                                                  key_version_info)
            created_versions_count += 1
            log.info('created_versions_count: %s' % created_versions_count)
            log.info('adding metadata')
            metadata1 = {"m_data1": "this is the meta1 for this obj"}
            s3_obj.metadata.update(metadata1)
            metadata2 = {"m_data2": "this is the meta2 for this obj"}
            s3_obj.metadata.update(metadata2)
            log.info('metadata for this object: %s' % s3_obj.metadata)
            log.info('metadata count for object: %s' % (len(s3_obj.metadata)))
            if not s3_obj.metadata:
                raise TestExecError(
                    'metadata not created even adding metadata')
            versions = bucket.object_versions.filter(Prefix=s3_object_name)
            created_versions_count_from_s3 = len(
                [v.version_id for v in versions])
            log.info('created versions count on s3: %s' %
                     created_versions_count_from_s3)
            if created_versions_count == created_versions_count_from_s3:
                log.info('no new versions are created when adding metadata')
            else:
                raise TestExecError(
                    "version count mismatch, "
                    "possible creation of version on adding metadata")
        s3_object_download_path = os.path.join(TEST_DATA_PATH,
                                               s3_object_name + ".download")
        object_downloaded_status = s3lib.resource_op({
            'obj': bucket,
            'resource': 'download_file',
            'args': [s3_object_name, s3_object_download_path],
        })
        if object_downloaded_status is False:
            raise TestExecError(
                "Resource execution failed: object download failed")
        if object_downloaded_status is None:
            log.info('object downloaded')
        # checking md5 of the downloaded file
        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
        log.info('downloaded_md5: %s' % s3_object_downloaded_md5)
        log.info('uploaded_md5: %s' % modified_data_info['md5'])
        log.info('deleting downloaded version file')
        utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_download_path)
    log.info('all versions for the object: %s\n' % s3_object_name)
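
A quick way to confirm the final state is to list what RGW reports for the key, mirroring the object_versions filter used earlier (a sketch; the prefix is the object name already in scope):

for version in bucket.object_versions.filter(Prefix=s3_object_name):
    log.info('key_name: %s --> version_id: %s' %
             (version.object_key, version.version_id))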