Example #1
class ReadIOInfo(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")

    def verify_io(self):
        """
        This function to verify the data of buckets owned by a user

        Data verification happens to all the buckets of a particular user for both versioned and normal buckets
        Parameters:

        Returns:

        """
        log.info("***************Starting Verification*****************")
        data = self.file_op.get_data()
        users = data["users"]
        is_secure = bool(utils.is_rgw_secure())
        host = socket.gethostbyname(socket.gethostname())

        endpoint_proto = "https" if is_secure else "http"
        endpoint_port = utils.get_radosgw_port_no()
        endpoint_url = f"{endpoint_proto}://{host}:{endpoint_port}"

        for each_user in users:
            log.info("verifying data for the user: \n")
            log.info("user_id: %s" % each_user["user_id"])
            log.info("access_key: %s" % each_user["access_key"])
            log.info("secret_key: %s" % each_user["secret_key"])
            conn = boto3.resource(
                "s3",
                aws_access_key_id=each_user["access_key"],
                aws_secret_access_key=each_user["secret_key"],
                endpoint_url=endpoint_url,
                use_ssl=is_secure,
                verify=False,
            )

            for each_bucket in each_user["bucket"]:
                log.info("verifying data for bucket: %s" % each_bucket["name"])
                bucket_from_s3 = conn.Bucket(each_bucket["name"])
                curr_versioning_status = each_bucket["curr_versioning_status"]
                log.info("curr_versioning_status: %s" % curr_versioning_status)
                if not each_bucket["keys"]:
                    log.info("keys are not created")
                else:
                    no_of_keys = len(each_bucket["keys"])
                    log.info("no_of_keys: %s" % no_of_keys)
                    for each_key in each_bucket["keys"]:
                        versioned_keys = len(each_key["versioning_info"])
                        log.info("versioned_keys: %s" % versioned_keys)
                        if not each_key["versioning_info"]:
                            log.info("not versioned key")
                            verify_key(each_key, bucket_from_s3)
                        else:
                            log.info("versioned key")
                            verify_key_with_version(each_key, bucket_from_s3)
        log.info("verification of data completed")
Example #2
class ReadIOInfo(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type='yaml')

    def verify_io(self):
        """
            This function to verify the data of buckets owned by a user

            Data verification happens to all the buckets of a particular user for both versioned and normal buckets
            Parameters:

            Returns:

        """
        log.info('***************Starting Verification*****************')
        data = self.file_op.get_data()
        users = data['users']
        for each_user in users:
            log.info('verifying data for the user:')
            log.info('user_id: %s' % each_user['user_id'])
            log.info('access_key: %s' % each_user['access_key'])
            log.info('secret_key: %s' % each_user['secret_key'])
            conn = boto3.resource(
                's3',
                aws_access_key_id=each_user['access_key'],
                aws_secret_access_key=each_user['secret_key'],
                endpoint_url='http://%s:%s' % (socket.gethostbyname(
                    socket.gethostname()), int(utils.get_radosgw_port_no())),
                use_ssl=False)
            for each_bucket in each_user['bucket']:
                log.info('verifying data for bucket: %s' % each_bucket['name'])
                bucket_from_s3 = conn.Bucket(each_bucket['name'])
                curr_versioning_status = each_bucket['curr_versioning_status']
                log.info('curr_versioning_status: %s' % curr_versioning_status)
                if not each_bucket['keys']:
                    log.info('no keys were created')
                else:
                    no_of_keys = len(each_bucket['keys'])
                    log.info('no_of_keys: %s' % no_of_keys)
                    for each_key in each_bucket['keys']:
                        versioned_keys = len(each_key['versioning_info'])
                        log.info('versioned_keys: %s' % versioned_keys)
                        if not each_key['versioning_info']:
                            log.info('key is not versioned')
                            verify_key(each_key, bucket_from_s3)
                        else:
                            log.info('versioned key')
                            verify_key_with_version(each_key, bucket_from_s3)
        log.info('verification of data completed')
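Both variants walk the same YAML layout. A sketch of the expected structure, reconstructed only from the keys the code accesses; every field value below is an illustrative placeholder:

# Assumed shape of the IO-info data consumed by verify_io();
# the keys come from the code above, the values are placeholders.
data = {
    "users": [
        {
            "user_id": "test-user-1",
            "access_key": "ACCESS_KEY_PLACEHOLDER",
            "secret_key": "SECRET_KEY_PLACEHOLDER",
            "bucket": [
                {
                    "name": "bucket-1",
                    "curr_versioning_status": "enabled",
                    "keys": [
                        # an empty versioning_info means the key is not versioned
                        {"name": "key-1", "versioning_info": []},
                    ],
                },
            ],
        },
    ],
}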
Example #3
class ReadIOInfoOnS3(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type='yaml')
        self.rgw_conn = None
        self.rgw_conn2 = None
        self.buckets = []
        self.objects = []
        self.io = None

    def initialize_verify_io(self):
        log.info('***************Starting Verification*****************')
        data = self.file_op.get_data()
        rgw_user_info = data['users'][0]
        log.info('verifying data for the user:')
        auth = Auth(rgw_user_info)
        self.rgw_conn = auth.do_auth()
        self.rgw_conn2 = auth.do_auth_using_client()
        self.io = rgw_user_info['io']

        for each_io in self.io:
            if each_io['s3_convention'] == 'bucket':
                self.buckets.append(each_io['name'])
            if each_io['s3_convention'] == 'object':
                temp = {
                    'name': each_io['name'],
                    'md5': each_io['md5'],
                    'bucket': each_io['bucket'],
                    'type': each_io['type']
                }
                self.objects.append(temp)

        log.info('buckets:\n%s' % self.buckets)
        for obj in self.objects:
            log.info('object: %s' % obj)
        log.info('verification of buckets starting')

    def verify_if_bucket_created(self):
        # get the list of buckets owned by the rgw user
        buckets_from_s3 = self.rgw_conn2.list_buckets()
        log.info(buckets_from_s3)
        buckets_info = buckets_from_s3['Buckets']
        bucket_names_from_s3 = [x['Name'] for x in buckets_info]
        log.info('bucket names from s3: %s' % bucket_names_from_s3)
        log.info('bucket names from yaml: %s' % self.buckets)
        comp_val = set(self.buckets) == set(bucket_names_from_s3)
        return comp_val

    def verify_if_objects_created(self):
        log.info('verification of s3 objects')
        for each_key in self.objects:
            log.info('verifying data for key: %s' %
                     os.path.basename(each_key['name']))
            log.info('bucket: %s' % each_key['bucket'])
            key_from_s3 = self.rgw_conn.Object(
                each_key['bucket'], os.path.basename(each_key['name']))
            log.info('got key name from s3: %s' % key_from_s3.key)

            if each_key['type'] == 'file':
                log.info('verifying md5')
                log.info('md5_local: %s' % each_key['md5'])
                key_from_s3.download_file('download.temp')
                downloaded_md5 = utils.get_md5('download.temp')
                log.info('md5_from_s3: %s' % downloaded_md5)
                if each_key['md5'] != downloaded_md5:
                    raise TestExecError("md5 not matched")
                utils.exec_shell_cmd('sudo rm -rf download.temp')
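A minimal sketch of how the three methods above would be chained, assuming the framework context (Auth, FileOps, utils, and TestExecError come from the test harness):

# Hypothetical driver for the S3-side verification.
reader = ReadIOInfoOnS3()
reader.initialize_verify_io()              # load YAML, authenticate, build lists
if not reader.verify_if_bucket_created():  # True when bucket sets match
    raise TestExecError("bucket verification failed")
reader.verify_if_objects_created()         # raises TestExecError on md5 mismatch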
Example #4
class ReadIOInfoOnS3(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")
        self.rgw_conn = None
        self.rgw_conn2 = None
        self.buckets = []
        self.objects = []
        self.io = None

    def initialize_verify_io(self):
        log.info("***************Starting Verification*****************")
        data = self.file_op.get_data()
        rgw_user_info = data["users"][0]
        log.info("verifying data for the user: \n")
        auth = Auth(rgw_user_info)
        self.rgw_conn = auth.do_auth()
        self.rgw_conn2 = auth.do_auth_using_client()
        self.io = rgw_user_info["io"]

        for each_io in self.io:
            if each_io["s3_convention"] == "bucket":
                self.buckets.append(each_io["name"])
            if each_io["s3_convention"] == "object":
                temp = {
                    "name": each_io["name"],
                    "md5": each_io["md5"],
                    "bucket": each_io["bucket"],
                    "type": each_io["type"],
                }
                self.objects.append(temp)

        log.info("buckets:\n%s" % self.buckets)
        for obj in self.objects:
            log.info("object: %s" % obj)
        log.info("verification of buckets starting")

    def verify_if_bucket_created(self):
        # get the list of buckets owned by the rgw user
        buckets_from_s3 = self.rgw_conn2.list_buckets()
        log.info(buckets_from_s3)
        buckets_info = buckets_from_s3["Buckets"]
        bucket_names_from_s3 = [x["Name"] for x in buckets_info]
        log.info("bucket names from s3: %s" % bucket_names_from_s3)
        log.info("bucket names from yaml: %s" % self.buckets)
        comp_val = set(self.buckets) == set(bucket_names_from_s3)
        return comp_val

    def verify_if_objects_created(self):
        log.info("verification of s3 objects")
        for each_key in self.objects:
            log.info("verifying data for key: %s" % os.path.basename(each_key["name"]))
            log.info("bucket: %s" % each_key["bucket"])
            key_from_s3 = self.rgw_conn.Object(
                each_key["bucket"], os.path.basename(each_key["name"])
            )
            log.info("got key name from s3: %s" % key_from_s3.key)

            if each_key["type"] == "file":
                log.info("verifying md5")
                log.info("md5_local: %s" % each_key["md5"])
                key_from_s3.download_file("download.temp")
                downloaded_md5 = utils.get_md5("download.temp")
                log.info("md5_from_s3: %s" % downloaded_md5)
                if each_key["md5"] != downloaded_md5:
                    raise TestExecError("md5 not matched")
                utils.exec_shell_cmd("sudo rm -rf download.temp")
Example #5
class ReadIOInfoOnNFS(object):
    def __init__(
        self,
        mount_point,
        yaml_fname=IO_INFO_FNAME,
    ):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type='yaml')
        self.mount_point = mount_point
        self.base_dirs = []
        self.files = []

    def initialize_verify_io(self):
        log.info('***************Starting Verification*****************')
        data = self.file_op.get_data()
        user_info = data['users'][0]

        for each_bucket in user_info['bucket']:
            path = os.path.join(self.mount_point,
                                os.path.basename(each_bucket['name']))
            base_dir_full_path = os.path.abspath(path)
            self.base_dirs.append(base_dir_full_path)
            if not each_bucket['keys']:
                log.info('no keys were created')
            else:
                for each_key in each_bucket['keys']:
                    path = os.path.join(self.mount_point,
                                        os.path.basename(each_bucket['name']),
                                        os.path.basename(each_key['name']))
                    files_full_path = os.path.abspath(path)
                    temp = {
                        'file': files_full_path,
                        'md5': each_key['md5_local'],
                        'bucket': each_bucket['name']
                    }
                    self.files.append(temp)
        log.info('basedirs:\n%s' % self.base_dirs)
        log.info('files:\n%s' % self.files)

    def verify_if_basedir_created(self):
        # verify that the base directories were created
        log.info('verifying basedir')
        for basedir in self.base_dirs:
            log.info('verifying existence for: %s' % basedir)
            created = os.path.exists(basedir)
            if not created:
                raise TestExecError("basedir not exists")
            log.info('basedir created')
        log.info('basedir verification complete, basedirs exists')

    def verify_if_files_created(self):
        if not self.files:
            log.info('no files were created')
        else:
            log.info('verifying files')
            for each_file in self.files:
                log.info('verifying existence for: %s' % each_file['file'])
                created = os.path.exists(each_file['file'])
                if not created:
                    raise TestExecError("files not created")
                log.info('file created')
                md5 = utils.get_md5(each_file['file'])
                log.info('md5 on nfs mount point: %s' % md5)
                log.info('md5 on rgw_client: %s' % each_file['md5'])
                if md5 != each_file['md5']:
                    raise TestExecError("md5 not matched")
                log.info('md5 matched')
            log.info(
                'verification of files complete, files exist and data is intact')
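A minimal driver sketch, assuming an NFS export of the RGW user's namespace is already mounted; the mount path below is an assumption:

# Hypothetical driver for the NFS-side verification.
nfs_reader = ReadIOInfoOnNFS(mount_point="/mnt/nfs-ganesha")  # assumed mount path
nfs_reader.initialize_verify_io()       # map buckets/keys to paths under the mount
nfs_reader.verify_if_basedir_created()  # raises TestExecError if a dir is missing
nfs_reader.verify_if_files_created()    # checks existence and md5 of each file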
Example #6
class ReadIOInfoOnNFS(object):
    def __init__(
        self,
        mount_point,
        yaml_fname=IO_INFO_FNAME,
    ):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")
        self.mount_point = mount_point
        self.base_dirs = []
        self.files = []

    def initialize_verify_io(self):
        log.info("***************Starting Verification*****************")
        data = self.file_op.get_data()
        user_info = data["users"][0]

        for each_bucket in user_info["bucket"]:
            path = os.path.join(self.mount_point, os.path.basename(each_bucket["name"]))
            base_dir_full_path = os.path.abspath(path)
            self.base_dirs.append(base_dir_full_path)
            if not each_bucket["keys"]:
                log.info("keys are not created")
            else:
                for each_key in each_bucket["keys"]:
                    path = os.path.join(
                        self.mount_point,
                        os.path.basename(each_bucket["name"]),
                        os.path.basename(each_key["name"]),
                    )
                    files_full_path = os.path.abspath(path)
                    temp = {
                        "file": files_full_path,
                        "md5": each_key["md5_local"],
                        "bucket": each_bucket["name"],
                    }
                    self.files.append(temp)
        log.info("basedirs:\n%s" % self.base_dirs)
        log.info("files:\n%s" % self.files)

    def verify_if_basedir_created(self):
        # verify that the base directories were created
        log.info("verifying basedir")
        for basedir in self.base_dirs:
            log.info("verifying existence for: %s" % basedir)
            created = os.path.exists(basedir)
            if not created:
                raise TestExecError("basedir not exists")
            log.info("basedir created")
        log.info("basedir verification complete, basedirs exists")

    def verify_if_files_created(self):
        if not self.files:
            log.info("no files are created")
        else:
            log.info("verifying files")
            for each_file in self.files:
                log.info("verifying existence for: %s" % each_file["file"])
                created = os.path.exists(each_file["file"])
                if not created:
                    raise TestExecError("files not created")
                log.info("file created")
                md5 = utils.get_md5(each_file["file"])
                log.info("md5 on nfs mount point: %s" % md5)
                log.info("md5 on rgw_client: %s" % each_file["md5"])
                if md5 != each_file["md5"]:
                    raise TestExecError("md5 not matched")
                log.info("md5 matched")
            log.info("verification of files complete, files exists and data intact")