Example #1
0
    def operation_on_nfs(self, mount_point, op_code):
        """Apply *op_code* ('move', 'edit' or 'delete') to every file-type
        key found under the NFS *mount_point*.

        The bucket/key layout is read from the JSON state file
        (``self.json_fname``); the state file is re-written after each
        successful 'move' or 'delete' so later verification steps see the
        new names.  A per-key status dict is collected in ``opstatus``.
        """

        time.sleep(300)  # sleep for 300 secs before operations start

        opstatus = []

        log.info('operation started-------------- : %s' % op_code)

        fp = FileOps(self.json_fname, type='json')
        json_data = fp.get_data()
        buckets_info = json_data['buckets']

        for bucket_name, key in buckets_info.items():

            log.info('got bucket_name: %s' % bucket_name)

            local_bucket = os.path.abspath(
                os.path.join(mount_point, bucket_name))

            # py3 fix: print is a function, not a statement
            print('local bucket: --------------- %s' % local_bucket)

            local_keys = utils.get_all_in_dir(local_bucket)

            log.info('local key: %s' % local_bucket)
            log.info('local keys: %s' % local_keys)

            for key_info in key['keys']:

                # bug fix: the original reused ONE shared ``status`` dict
                # across all iterations, so every entry of ``opstatus``
                # aliased the same (final) dict.  A fresh dict per key keeps
                # each recorded status independent.
                status = {}

                local_key = os.path.join(local_bucket, key_info['key_name'])

                if key_info['is_type'] == 'file':

                    log.info('operation on  key: %s' % key_info['key_name'])
                    log.info('local key: ------------------ %s' % local_key)

                    if op_code == 'move':

                        status['bucket_name'] = bucket_name
                        status['key_name'] = key_info['key_name']
                        status['op_code'] = op_code

                        new_key_path = local_key + ".moved"
                        new_name = key_info['key_name'] + ".moved"

                        cmd = 'sudo mv %s %s' % (os.path.abspath(local_key),
                                                 os.path.abspath(new_key_path))

                        log.info('cmd_to_move: %s' % cmd)

                        time.sleep(5)

                        ret_val = os.system(cmd)

                        if ret_val == 0:
                            # remember the old name, then track the new one
                            # in the state file
                            key_info['opcode']['move']['old_name'] = key_info[
                                'key_name']
                            key_info['key_name'] = new_name
                            fp.add_data(json_data)
                            status['op_code_status'] = True
                        else:
                            log.info('move failed: %s' % local_key)
                            status['op_code_status'] = False

                    if op_code == 'edit':
                        try:
                            log.info('editing file: %s' % local_key)

                            # context manager guarantees the handle is closed
                            # even if the write raises
                            with open(local_key, 'a+') as key_modify:
                                key_modify.write(
                                    'file opened from NFS and added this messages')

                            key_info['opcode']['edit'][
                                'new_md5'] = utils.get_md5(
                                    os.path.abspath(local_key))
                            key_info['md5_local'] = utils.get_md5(
                                os.path.abspath(local_key))
                            # s3-side md5 is now stale; cleared until re-synced
                            key_info['md5_on_s3'] = None

                            status['op_code_status'] = True

                        except Exception as e:  # py3 fix: 'as' syntax
                            log.info('could not edit')
                            log.error(e)
                            status['op_code_status'] = False

                    if op_code == 'delete':

                        status['bucket_name'] = bucket_name
                        status['key_name'] = key_info['key_name']
                        status['op_code'] = op_code

                        log.info('deleting key: %s' % key_info['key_name'])

                        try:
                            os.unlink(local_key)

                            key_info['opcode']['delete']['deleted'] = True
                            fp.add_data(json_data)
                            status['op_code_status'] = True
                            log.info('deleted key: %s' % key_info['key_name'])

                        # OSError is already an Exception subclass, so the
                        # original (Exception, OSError) tuple was redundant
                        except Exception as e:

                            log.error('deleting key: %s failed' %
                                      key_info['key_name'])

                            key_info['opcode']['delete']['deleted'] = False
                            log.error('delete failed: %s' % local_key)
                            log.error(e)
                            status['op_code_status'] = False

                opstatus.append(status)
Example #2
0
    def verify_s3(self, op_type=None):
        """Verify every key recorded in the JSON state file against S3.

        For each key a status dict is built: existence on S3 and, for
        file-type keys, md5/size comparison against the live object.  When
        *op_type* == 'edit' the locally recorded md5 is compared to the
        recorded ``md5_on_s3`` instead of the live etag.  Returns the list
        of status dicts.
        """

        time.sleep(300)  # sleep for 300 secs

        kstatus = []

        fp = FileOps(self.json_fname, type='json')
        json_data = fp.get_data()
        buckets_info = json_data['buckets']

        for bucket_name, key in buckets_info.items():

            log.info('got bucket_name: %s' % bucket_name)
            bucket = self.bucket_conn.get(bucket_name)

            for key_info in key['keys']:

                key_name_to_find = key_info['key_name']

                log.info('verifying key: %s' % key_name_to_find)

                status = {}
                status['bucket_name'] = bucket_name

                keyop = KeyOp(bucket['bucket'])
                info = keyop.get(key_name_to_find)

                status['key_name'] = key_name_to_find
                status['type'] = key_info['is_type']
                md5_on_s3 = key_info['md5_on_s3']

                if info is None:
                    status['exists'] = False
                else:
                    status['exists'] = True

                    if key_info['is_type'] == 'file':

                        if op_type == 'edit':
                            # after an edit, compare against the md5 recorded
                            # when the edit was pushed to s3
                            if key_info['md5_local'] == md5_on_s3:
                                status['md5_matched'] = True
                            else:
                                status['md5_matched'] = False

                        else:

                            # py3 fix: print is a function, not a statement
                            print(key_info['md5_local'])
                            print(md5_on_s3)

                            # etag comes back quoted; [1:-1] strips the quotes
                            if key_info['md5_local'] == info.etag[1:-1]:
                                status['md5_matched'] = True
                            else:
                                status['md5_matched'] = False

                            if key_info['size'] == info.size:
                                status['size_matched'] = True
                            else:
                                status['size_matched'] = False

                kstatus.append(status)

        log.info('keys verification status :\n')

        # plain loop instead of a side-effect-only list comprehension
        for ks in kstatus:
            log.info('%s \n' % ks)

        return kstatus
Example #3
0
    def verify_nfs(self, mount_point, op_type=None):
        """Verify keys recorded in the JSON state file against the NFS mount.

        For each key a status dict records local existence and, unless
        *op_type* == 'edit' (where recorded md5s are compared instead),
        size and md5 comparisons against the file on disk under
        *mount_point*.  Returns the list of status dicts.
        """

        time.sleep(300)  # sleep for 300 secs

        kstatus = []

        fp = FileOps(self.json_fname, type='json')
        json_data = fp.get_data()
        buckets_info = json_data['buckets']

        for bucket_name, key in buckets_info.items():

            log.info('got bucket_name: %s' % bucket_name)

            local_bucket = os.path.abspath(
                os.path.join(mount_point, bucket_name))

            # py3 fix: print is a function, not a statement
            print('local bucket: --------------- %s' % local_bucket)

            for key_info in key['keys']:

                log.info('verifying key: %s' % key_info['key_name'])

                status = {}

                local_key = os.path.join(local_bucket, key_info['key_name'])

                if key_info['key_name'] in os.path.basename(local_key):

                    status['key_name'] = key_info['key_name']
                    status['exists'] = os.path.exists(local_key)

                    log.info('local key: %s' % local_key)

                    if op_type == 'edit':

                        log.info('in operation: -----> edit')

                        # edits cleared md5_on_s3 until re-synced; compare the
                        # two recorded values rather than hashing the file
                        md5_local = key_info['md5_local']
                        md5_on_s3 = key_info['md5_on_s3']

                        if md5_local == md5_on_s3:
                            status['md5_matched'] = True
                        else:
                            status['md5_matched'] = False

                    else:

                        if status['exists']:

                            size = os.path.getsize(local_key)
                            md5 = utils.get_md5(local_key)

                            if size == key_info['size']:
                                status['size_matched'] = True
                            else:
                                status['size_matched'] = False

                            if md5 == key_info['md5_on_s3']:
                                status['md5_matched'] = True
                                log.info(key_info['md5_on_s3'])
                                log.info(md5)
                            else:
                                status['md5_matched'] = False

                log.info('status of this key: %s' % status)

                kstatus.append(status)

        # plain loop instead of a side-effect-only list comprehension
        for ks in kstatus:
            log.info('%s \n' % ks)

        return kstatus
Example #4
0
 def __init__(self, yaml_fname=IO_INFO_FNAME):
     """Remember the YAML state-file name and open a YAML FileOps handle."""
     self.yaml_fname = yaml_fname
     self.file_op = FileOps(yaml_fname, type="yaml")
Example #5
0
class ReadIOInfo(object):
    """Read the io-info YAML state file and verify its records against S3."""

    def __init__(self, yaml_fname=IO_INFO_FNAME):
        # Path of the YAML state file plus a ready-to-use FileOps handle.
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")

    def verify_io(self):
        """Verify every user's buckets and keys recorded in the YAML file.

        Connects to the local radosgw with each user's credentials and
        checks, per the recorded ``test_op_code``: bucket deletion, key
        creation (size and md5 must match) and key deletion.  Terminates
        the process with exit status 1 on any verification failure.
        """
        data = self.file_op.get_data()
        users = data["users"]
        try:
            for each_user in users:
                log.info("verifying data for the user: \n")
                log.info("user_id: %s" % each_user["user_id"])
                log.info("access_key: %s" % each_user["access_key"])
                log.info("secret_key: %s" % each_user["secret_key"])
                conn = boto.connect_s3(
                    aws_access_key_id=each_user["access_key"],
                    aws_secret_access_key=each_user["secret_key"],
                    host=socket.gethostname(),
                    port=int(utils.get_radosgw_port_no()),
                    is_secure=False,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                for each_bucket in each_user["bucket"]:
                    log.info("verifying data for bucket: %s" %
                             each_bucket["name"])
                    if each_bucket["test_op_code"] == "delete":
                        # lookup() returns None when the bucket is gone,
                        # which is the expected outcome here
                        bucket_from_s3 = conn.lookup(each_bucket["name"])
                        if bucket_from_s3 is None:
                            log.info("bucket deleted")
                            log.info(
                                "cannot verify objects as objects will be deleted since bucket does not exist"
                            )
                        if bucket_from_s3 is not None:
                            log.info("Bucket exists")
                            raise Exception("Bucket exists")
                    else:
                        bucket_from_s3 = conn.get_bucket(each_bucket["name"])
                        if not each_bucket["keys"]:
                            log.info("keys are not created")
                        else:
                            for each_key in each_bucket["keys"]:
                                log.info("verifying data for key: %s" %
                                         each_key["name"])
                                if each_key["test_op_code"] == "create":
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key["name"])
                                    log.info("verifying size")
                                    log.info("size from yaml: %s" %
                                             each_key["size"])
                                    log.info("size from s3: %s" %
                                             key_from_s3.size)
                                    if int(each_key["size"]) != int(
                                            key_from_s3.size):
                                        raise Exception("Size not matched")
                                    log.info("verifying md5")
                                    log.info("md5_on_s3_from yaml: %s" %
                                             each_key["md5_on_s3"])
                                    # etag comes back quoted; strip the quotes
                                    log.info("md5_on_s3: %s" %
                                             key_from_s3.etag.replace('"', ""))
                                    if each_key[
                                            "md5_on_s3"] != key_from_s3.etag.replace(
                                                '"', ""):
                                        raise Exception("Md5 not matched")
                                    log.info(
                                        "verification complete for the key: %s"
                                        % key_from_s3.name)
                                if each_key["test_op_code"] == "delete":
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key["name"])
                                    if key_from_s3 is None:
                                        log.info("key deleted")
                                    if key_from_s3 is not None:
                                        log.info("key exists")
                                        raise Exception("Key is not deleted")
            log.info("verification of data completed, data intact")
        except Exception as e:
            log.error(e)
            log.error("verification failed")
            # fix: raise SystemExit instead of the interactive-only exit()
            # helper, which the site module is not guaranteed to install
            raise SystemExit(1)
    def __init__(self, ceph_conf_path='/etc/ceph/ceph.conf'):
        # Path of the cluster configuration file this object will work on.
        self.ceph_conf_path = ceph_conf_path

        # Explicit base-class initialisation: this class appears to inherit
        # from both FileOps and ConfigParse (multiple inheritance, bases
        # defined outside this view — confirm against the class header);
        # each base is initialised directly with the same config path.
        FileOps.__init__(self, self.ceph_conf_path, type='ceph.conf')
        ConfigParse.__init__(self, self.ceph_conf_path)
Example #7
0
    def operation_on_s3(self, op_code=None):
        """Apply *op_code* ('move', 'edit' or 'delete') to every key on S3.

        Keys are enumerated from the JSON state file; 'move' and 'delete'
        re-write the state file on success so later verification matches.
        'edit' downloads the object, appends a marker line and uploads it
        back.  Returns a list of per-key status dicts.
        """
        time.sleep(300)  # sleep for 300 secs before operation starts
        log.info("operation on s3 started with opcode: %s" % op_code)
        ks_op_status = []
        fp = FileOps(self.json_fname, type="json")
        json_data = fp.get_data()
        buckets_info = json_data["buckets"]
        for bucket_name, key in list(buckets_info.items()):
            log.info("got bucket_name: %s" % bucket_name)
            bucket = self.bucket_conn.get(bucket_name)
            for key_info in key["keys"]:
                key_name = key_info["key_name"]
                log.info("verifying key: %s" % key_name)
                status = dict()
                status["op_code"] = op_code
                status["bucket_name"] = bucket_name
                keyop = KeyOp(bucket["bucket"])
                kinfo = keyop.get(key_name)
                print("got key_info -------------------------- from s3 :%s" % kinfo)
                if op_code == "move":
                    try:
                        log.info("in move operation")
                        # s3 has no rename: copy to the new name, delete old
                        new_key_name = key_name + ".moved"
                        kinfo.copy(bucket_name, new_key_name)
                        kinfo.delete()
                        key_info["opcode"]["move"]["old_name"] = key_name
                        key_info["key_name"] = new_key_name
                        fp.add_data(json_data)
                        status["op_code_status"] = True
                    except Exception as e:
                        log.error(e)
                        status["op_code_status"] = False
                if op_code == "delete":
                    try:
                        log.info("in delete operation")
                        kinfo.delete()
                        key_info["opcode"]["delete"]["deleted"] = True
                        fp.add_data(json_data)
                        status["op_code_status"] = True
                    except Exception as e:
                        log.error(e)
                        status["op_code_status"] = False
                if op_code == "edit":
                    try:
                        put_contents_or_download = PutContentsFromFile(
                            kinfo, self.json_fname
                        )
                        log.info("in edit or modify file")
                        # download the file from s3
                        download_fname = key_name + ".downloaded"
                        downloaded_f = put_contents_or_download.get(download_fname)
                        print("-------------------------------%s" % downloaded_f)
                        if not downloaded_f["status"]:
                            raise Exception("download failed")
                        new_text = (
                            "downloded from s3 and uploading back with this message"
                        )
                        log.info("file downloaded, string to add: %s" % new_text)
                        # context manager guarantees the handle is closed
                        # even if the write raises
                        with open(download_fname, "a") as f:
                            f.write(new_text)
                        put_contents_or_download.put(download_fname)
                        log.info("file uploaded")
                        status["op_code_status"] = True
                    except Exception as e:
                        log.info("operation could not complete")
                        log.error(e)
                        status["op_code_status"] = False
                ks_op_status.append(status)
        # plain loop instead of a side-effect-only list comprehension
        for st in ks_op_status:
            log.info(st)
        return ks_op_status
Example #8
0
 def operation_on_nfs(self, mount_point, op_code):
     """Apply *op_code* ('move', 'edit' or 'delete') to every file-type key
     found under the NFS *mount_point*.

     The bucket/key layout is read from the JSON state file
     (``self.json_fname``); the state file is re-written after each
     successful 'move' or 'delete'.  Returns a list of per-key status dicts.
     """
     time.sleep(300)  # sleep for 300 secs before operations start
     opstatus = []
     log.info("operation started-------------- : %s" % op_code)
     fp = FileOps(self.json_fname, type="json")
     json_data = fp.get_data()
     buckets_info = json_data["buckets"]
     for bucket_name, key in list(buckets_info.items()):
         log.info("got bucket_name: %s" % bucket_name)
         local_bucket = os.path.abspath(os.path.join(mount_point, bucket_name))
         print("local bucket: --------------- %s" % local_bucket)
         local_keys = utils.get_all_in_dir(local_bucket)
         log.info("local key: %s" % local_bucket)
         log.info("local keys: %s" % local_keys)
         for key_info in key["keys"]:
             # bug fix: the original reused ONE shared ``status`` dict across
             # all iterations, so every entry of ``opstatus`` aliased the
             # same (final) dict.  A fresh dict per key keeps each recorded
             # status independent.
             status = {}
             local_key = os.path.join(local_bucket, key_info["key_name"])
             if key_info["is_type"] == "file":
                 log.info("operation on  key: %s" % key_info["key_name"])
                 log.info("local key: ------------------ %s" % local_key)
                 if op_code == "move":
                     status["bucket_name"] = bucket_name
                     status["key_name"] = key_info["key_name"]
                     status["op_code"] = op_code
                     new_key_path = local_key + ".moved"
                     new_name = key_info["key_name"] + ".moved"
                     cmd = "sudo mv %s %s" % (
                         os.path.abspath(local_key),
                         os.path.abspath(new_key_path),
                     )
                     log.info("cmd_to_move: %s" % cmd)
                     time.sleep(5)
                     ret_val = os.system(cmd)
                     if ret_val == 0:
                         # remember the old name, then track the new one in
                         # the state file
                         key_info["opcode"]["move"]["old_name"] = key_info[
                             "key_name"
                         ]
                         key_info["key_name"] = new_name
                         fp.add_data(json_data)
                         status["op_code_status"] = True
                     else:
                         log.info("move failed: %s" % local_key)
                         status["op_code_status"] = False
                 if op_code == "edit":
                     try:
                         log.info("editing file: %s" % local_key)
                         # context manager guarantees the handle is closed
                         # even if the write raises
                         with open(local_key, "a+") as key_modify:
                             key_modify.write(
                                 "file opened from NFS and added this messages"
                             )
                         key_info["opcode"]["edit"]["new_md5"] = utils.get_md5(
                             os.path.abspath(local_key)
                         )
                         key_info["md5_local"] = utils.get_md5(
                             os.path.abspath(local_key)
                         )
                         # s3-side md5 is now stale; cleared until re-synced
                         key_info["md5_on_s3"] = None
                         status["op_code_status"] = True
                     except Exception as e:
                         log.info("could not edit")
                         log.error(e)
                         status["op_code_status"] = False
                 if op_code == "delete":
                     status["bucket_name"] = bucket_name
                     status["key_name"] = key_info["key_name"]
                     status["op_code"] = op_code
                     log.info("deleting key: %s" % key_info["key_name"])
                     try:
                         os.unlink(local_key)
                         key_info["opcode"]["delete"]["deleted"] = True
                         fp.add_data(json_data)
                         status["op_code_status"] = True
                         log.info("deleted key: %s" % key_info["key_name"])
                     # OSError is already an Exception subclass, so the
                     # original (Exception, OSError) tuple was redundant
                     except Exception as e:
                         log.error("deleting key: %s failed" % key_info["key_name"])
                         key_info["opcode"]["delete"]["deleted"] = False
                         log.error("delete failed: %s" % local_key)
                         log.error(e)
                         status["op_code_status"] = False
             opstatus.append(status)
     for st in opstatus:
         log.info(st)
     return opstatus