Code Example #1
    def verify_s3(self):
        fp = FileOps(self.json_fname, type='json')
        json_data = fp.get_data()
        buckets_list = list(json_data['buckets'].keys())
        bstatus = []
        for each_bucket in buckets_list:
            log.info('getting bucket info for base dir: %s' % each_bucket)
            status = {}
            info = self.bucket.get(each_bucket)
            if not info['status']:
                status['exists'] = False
            else:
                status['exists'] = True
                status['bucket_name'] = info['bucket']
            bstatus.append(status)
        log.info('bucket verification status:\n')
        for bs in bstatus:
            log.info('%s \n' % bs)
        return bstatus
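
All of these examples lean on a FileOps helper from the surrounding test framework. Its implementation is not shown here; a minimal sketch of the interface the snippets assume (a whole-file reader/writer for JSON or YAML, not the framework's actual code) might look like this:

import json

import yaml  # PyYAML


class FileOps(object):
    # minimal sketch of the assumed interface: get_data() loads the
    # whole file, add_data() rewrites it with the given structure
    def __init__(self, fname, type='json'):
        self.fname = fname
        self.type = type

    def get_data(self):
        with open(self.fname) as f:
            if self.type == 'json':
                return json.load(f)
            return yaml.safe_load(f)

    def add_data(self, data):
        with open(self.fname, 'w') as f:
            if self.type == 'json':
                json.dump(data, f, indent=2)
            else:
                yaml.safe_dump(data, f, default_flow_style=False)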
Code Example #2
class AddIOInfo(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")
        self.io_structure = IOInfoStructure()

    def initialize(self):
        initial_data = self.io_structure.initial()
        log.info("initial_data: %s" % (initial_data))
        self.file_op.add_data(initial_data)

    def add_user_info(self, **user):
        user_info = self.io_structure.user(**user)
        log.info("got user info structure: %s" % user_info)
        yaml_data = self.file_op.get_data()
        log.info("got yaml data %s" % yaml_data)
        yaml_data["users"].append(user_info)
        log.info("data to add: %s" % yaml_data)
        self.file_op.add_data(yaml_data)

    def add_bucket_info(self, access_key, **bucket):
        bucket_info = self.io_structure.bucket(**bucket)
        yaml_data = self.file_op.get_data()
        indx = None
        for i, k in enumerate(yaml_data["users"]):
            if k["access_key"] == access_key:
                indx = i
                break
        yaml_data["users"][indx]["bucket"].append(bucket_info)
        self.file_op.add_data(yaml_data)

    def add_keys_info(self, access_key, bucket_name, **key):
        yaml_data = self.file_op.get_data()
        access_key_indx = None
        bucket_indx = None
        for i, k in enumerate(yaml_data["users"]):
            if k["access_key"] == access_key:
                access_key_indx = i
                break
        for i, k in enumerate(yaml_data["users"][access_key_indx]["bucket"]):
            if k["name"] == bucket_name:
                bucket_indx = i
                break
        key_info = self.io_structure.key(**key)
        yaml_data["users"][access_key_indx]["bucket"][bucket_indx][
            "keys"].append(key_info)
        self.file_op.add_data(yaml_data)
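
A minimal usage sketch of the class above. The keyword arguments are assumptions reconstructed from the fields that ReadIOInfo.verify_io (Code Example #11/#12) later reads back, and the concrete values are placeholders; the IOInfoStructure methods are assumed to create the 'users', 'bucket', and 'keys' container lists:

# hypothetical driver code; field names mirror what verify_io expects
io_info = AddIOInfo(yaml_fname='io_info.yaml')
io_info.initialize()
io_info.add_user_info(user_id='test-user1',
                      access_key='ACCESSKEY',
                      secret_key='SECRETKEY')
io_info.add_bucket_info('ACCESSKEY',
                        name='bucket1',
                        test_op_code='create')
io_info.add_keys_info('ACCESSKEY', 'bucket1',
                      name='key1',
                      size=1024,
                      md5_on_s3=None,
                      test_op_code='create')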
Code Example #3
 def verify_nfs(self):
     base_dirs = os.listdir(os.path.abspath(self.mount_point))
     for dir_name in base_dirs:
         log.info('base_dirs: %s' % dir_name)
     fp = FileOps(self.json_fname, type='json')
     json_data = fp.get_data()
     buckets_list = list(json_data['buckets'].keys())
     for bucket in buckets_list:
         log.info('bucket list: %s' % bucket)
     # order-insensitive comparison of mount-point dirs vs recorded buckets
     bstatus = collections.Counter(base_dirs) == collections.Counter(
         buckets_list)
     log.info('bucket comparison status: %s' % bstatus)
     return bstatus
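
The Counter comparison makes the directory-versus-bucket check order-insensitive while still catching missing or duplicated entries, for example:

import collections

# order does not matter to the comparison...
assert collections.Counter(['b1', 'b2']) == collections.Counter(['b2', 'b1'])
# ...but a missing or repeated entry does
assert collections.Counter(['b1', 'b1']) != collections.Counter(['b1'])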
Code Example #4
 def verify_nfs(self, mount_point, op_type=None):
     time.sleep(300)  # sleep for 300 secs
     kstatus = []
     fp = FileOps(self.json_fname, type='json')
     json_data = fp.get_data()
     buckets_info = json_data['buckets']
     for bucket_name, key in list(buckets_info.items()):
         log.info('got bucket_name: %s' % bucket_name)
         local_bucket = os.path.abspath(
             os.path.join(mount_point, bucket_name))
         print('local bucket: --------------- %s' % local_bucket)
         for key_info in key['keys']:
             log.info('verifying key: %s' % key_info['key_name'])
             status = {}
             # status['bucket_name'] = bucket_name
             local_key = os.path.join(local_bucket, key_info['key_name'])
             if key_info['key_name'] in os.path.basename(local_key):
                 status['key_name'] = key_info['key_name']
                 status['exists'] = os.path.exists(local_key)
                 log.info('local key: %s' % local_key)
                 if op_type == 'edit':
                     log.info('in operation: -----> edit')
                     # size = os.path.getsize(local_key)
                     # md5 = utils.get_md5(local_key)
                     md5_local = key_info['md5_local']
                     md5_on_s3 = key_info['md5_on_s3']
                     if md5_local == md5_on_s3:
                         status['md5_matched'] = True
                     else:
                         status['md5_matched'] = False
                 else:
                     if status['exists']:
                         size = os.path.getsize(local_key)
                         md5 = utils.get_md5(local_key)
                         if size == key_info['size']:
                             status['size_matched'] = True
                         else:
                             status['size_matched'] = False
                         if md5 == key_info['md5_on_s3']:
                             status['md5_matched'] = True
                             log.info(key_info['md5_on_s3'])
                             log.info(md5)
                         else:
                             status['md5_matched'] = False
             log.info('status of this key: %s' % status)
             kstatus.append(status)
     for ks in kstatus:
         log.info('%s \n' % ks)
     return kstatus
Code Example #5
 def verify_nfs(self, mount_point, op_type=None):
     time.sleep(300)  # sleep for 300 secs
     kstatus = []
     fp = FileOps(self.json_fname, type="json")
     json_data = fp.get_data()
     buckets_info = json_data["buckets"]
     for bucket_name, key in list(buckets_info.items()):
         log.info("got bucket_name: %s" % bucket_name)
         local_bucket = os.path.abspath(os.path.join(mount_point, bucket_name))
         print("local bucket: --------------- %s" % local_bucket)
         for key_info in key["keys"]:
             log.info("verifying key: %s" % key_info["key_name"])
             status = {}
             # status['bucket_name'] = bucket_name
             local_key = os.path.join(local_bucket, key_info["key_name"])
             if key_info["key_name"] in os.path.basename(local_key):
                 status["key_name"] = key_info["key_name"]
                 status["exists"] = os.path.exists(local_key)
                 log.info("local key: %s" % local_key)
                 if op_type == "edit":
                     log.info("in operation: -----> edit")
                     # size = os.path.getsize(local_key)
                     # md5 = utils.get_md5(local_key)
                     md5_local = key_info["md5_local"]
                     md5_on_s3 = key_info["md5_on_s3"]
                     if md5_local == md5_on_s3:
                         status["md5_matched"] = True
                     else:
                         status["md5_matched"] = False
                 else:
                     if status["exists"]:
                         size = os.path.getsize(local_key)
                         md5 = utils.get_md5(local_key)
                         if size == key_info["size"]:
                             status["size_matched"] = True
                         else:
                             status["size_matched"] = False
                         if md5 == key_info["md5_on_s3"]:
                             status["md5_matched"] = True
                             log.info(key_info["md5_on_s3"])
                             log.info(md5)
                         else:
                             status["md5_matched"] = False
             log.info("status of this key: %s" % status)
             kstatus.append(status)
     for ks in kstatus:
         log.info("%s \n" % ks)
     return kstatus
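
The method returns one status dict per key rather than failing fast, so the caller can aggregate the results. A sketch of consuming that list (the literal entries below are hypothetical):

# hypothetical result entries, shaped like the dicts built above
kstatus = [
    {'key_name': 'key1', 'exists': True, 'size_matched': True,
     'md5_matched': True},
    {'key_name': 'key2', 'exists': False},
]
failed = [ks for ks in kstatus
          if not ks.get('exists') or ks.get('md5_matched') is False]
assert failed == [{'key_name': 'key2', 'exists': False}]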
Code Example #6
 def verify_s3(self):
     fp = FileOps(self.json_fname, type="json")
     json_data = fp.get_data()
     buckets_list = list(json_data["buckets"].keys())
     bstatus = []
     for each_bucket in buckets_list:
         log.info("getting bucket info for base dir: %s" % each_bucket)
         status = {}
         info = self.bucket.get(each_bucket)
         if not info["status"]:
             status["exists"] = False
         else:
             status["exists"] = True
             status["bucket_name"] = info["bucket"]
         bstatus.append(status)
     log.info("bucket verification status:\n")
     for bs in bstatus:
         log.info("%s \n" % bs)
     return bstatus
Code Example #7
 def verify_s3(self, op_type=None):
     time.sleep(300)  # sleep for 300 secs
     kstatus = []
     fp = FileOps(self.json_fname, type='json')
     json_data = fp.get_data()
     buckets_info = json_data['buckets']
     for bucket_name, key in list(buckets_info.items()):
         log.info('got bucket_name: %s' % bucket_name)
         bucket = self.bucket_conn.get(bucket_name)
         for key_info in key['keys']:
             key_name_to_find = key_info['key_name']
             log.info('verifying key: %s' % key_name_to_find)
             status = {}
             status['bucket_name'] = bucket_name
             keyop = KeyOp(bucket['bucket'])
             info = keyop.get(key_name_to_find)
             status['key_name'] = key_name_to_find
             status['type'] = key_info['is_type']
             md5_on_s3 = key_info['md5_on_s3']
             if info is None:
                 status['exists'] = False
             else:
                 status['exists'] = True
                 if key_info['is_type'] == 'file':
                     if op_type == 'edit':
                         if key_info['md5_local'] == md5_on_s3:
                             status['md5_matched'] = True
                         else:
                             status['md5_matched'] = False
                     else:
                         print(key_info['md5_local'])
                         print(md5_on_s3)
                         if key_info['md5_local'] == info.etag[1:-1]:
                             status['md5_matched'] = True
                         else:
                             status['md5_matched'] = False
                         if key_info['size'] == info.size:
                             status['size_matched'] = True
                         else:
                             status['size_matched'] = False
             kstatus.append(status)
     log.info('keys verification status:\n')
     for ks in kstatus:
         log.info('%s \n' % ks)
     return kstatus
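
The info.etag[1:-1] comparison relies on S3 returning the ETag wrapped in literal double quotes; for objects uploaded in a single part, the quoted value is the object's MD5 digest, so stripping the first and last characters yields something comparable to the stored md5_local:

# sample quoted ETag as S3/boto returns it for a single-part upload
etag = '"9e107d9d372bb6826bd81d3542a419d6"'  # placeholder digest
assert etag[1:-1] == '9e107d9d372bb6826bd81d3542a419d6'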
Code Example #8
 def verify_s3(self, op_type=None):
     time.sleep(300)  # sleep for 300 secs
     kstatus = []
     fp = FileOps(self.json_fname, type="json")
     json_data = fp.get_data()
     buckets_info = json_data["buckets"]
     for bucket_name, key in list(buckets_info.items()):
         log.info("got bucket_name: %s" % bucket_name)
         bucket = self.bucket_conn.get(bucket_name)
         for key_info in key["keys"]:
             key_name_to_find = key_info["key_name"]
             log.info("verifying key: %s" % key_name_to_find)
             status = {}
             status["bucket_name"] = bucket_name
             keyop = KeyOp(bucket["bucket"])
             info = keyop.get(key_name_to_find)
             status["key_name"] = key_name_to_find
             status["type"] = key_info["is_type"]
             md5_on_s3 = key_info["md5_on_s3"]
             if info is None:
                 status["exists"] = False
             else:
                 status["exists"] = True
                 if key_info["is_type"] == "file":
                     if op_type == "edit":
                         if key_info["md5_local"] == md5_on_s3:
                             status["md5_matched"] = True
                         else:
                             status["md5_matched"] = False
                     else:
                         print(key_info["md5_local"])
                         print(md5_on_s3)
                         if key_info["md5_local"] == info.etag[1:-1]:
                             status["md5_matched"] = True
                         else:
                             status["md5_matched"] = False
                         if key_info["size"] == info.size:
                             status["size_matched"] = True
                         else:
                             status["size_matched"] = False
             kstatus.append(status)
     log.info("keys verification status:\n")
     for ks in kstatus:
         log.info("%s \n" % ks)
     return kstatus
Code Example #9
    def operation_on_s3(self, op_code=None):
        time.sleep(300)  # sleep for 300 secs before operation starts
        log.info('operation on s3 started with opcode: %s' % op_code)
        ks_op_status = []
        fp = FileOps(self.json_fname, type='json')
        json_data = fp.get_data()
        buckets_info = json_data['buckets']
        for bucket_name, key in list(buckets_info.items()):
            log.info('got bucket_name: %s' % bucket_name)
            bucket = self.bucket_conn.get(bucket_name)
            for key_info in key['keys']:
                key_name = key_info['key_name']
                log.info('verifying key: %s' % key_name)
                status = dict()
                status['op_code'] = op_code
                status['bucket_name'] = bucket_name
                keyop = KeyOp(bucket['bucket'])
                kinfo = keyop.get(key_name)
                print('got key_info -------------------------- from s3 :%s' %
                      kinfo)
                if op_code == 'move':
                    try:
                        log.info('in move operation')
                        new_key_name = key_name + ".moved"
                        kinfo.copy(bucket_name, new_key_name)
                        kinfo.delete()
                        key_info['opcode']['move']['old_name'] = key_name
                        key_info['key_name'] = new_key_name
                        fp.add_data(json_data)
                        status['op_code_status'] = True
                    except Exception as e:
                        log.error(e)
                        status['op_code_status'] = False
                if op_code == 'delete':
                    try:
                        log.info('in delete operation')
                        kinfo.delete()
                        key_info['opcode']['delete']['deleted'] = True
                        fp.add_data(json_data)
                        status['op_code_status'] = True
                    except Exception as e:
                        log.error(e)
                        status['op_code_status'] = False
                if op_code == 'edit':
                    try:
                        put_contents_or_download = PutContentsFromFile(
                            kinfo, self.json_fname)
                        log.info('in edit or modify file')
                        # download the file from s3
                        download_fname = key_name + ".downloaded"
                        downloaded_f = put_contents_or_download.get(
                            download_fname)
                        print('-------------------------------%s' %
                              downloaded_f)
                        if not downloaded_f['status']:
                            raise Exception("download failed")
                        new_text = 'downloaded from s3 and uploading back with this message'
                        log.info('file downloaded, string to add: %s' %
                                 new_text)
                        with open(download_fname, 'a') as f:
                            f.write(new_text)
                        put_contents_or_download.put(download_fname)
                        log.info('file uploaded')
                        status['op_code_status'] = True
                    except Exception as e:
                        log.info('operation could not complete')
                        log.error(e)
                        status['op_code_status'] = False
                ks_op_status.append(status)
        for st in ks_op_status:
            log.info(st)
        return ks_op_status
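
The move branch renames the key on S3 and then rewrites the json file, so a later verify_s3 pass looks for the new name while the old one is kept under opcode.move.old_name. The same bookkeeping on a bare dict (record shape inferred from the code above, values are placeholders):

# key record shape as used by operation_on_s3
key_info = {'key_name': 'key1', 'opcode': {'move': {'old_name': None}}}
new_key_name = key_info['key_name'] + '.moved'
key_info['opcode']['move']['old_name'] = key_info['key_name']
key_info['key_name'] = new_key_name
assert key_info == {'key_name': 'key1.moved',
                    'opcode': {'move': {'old_name': 'key1'}}}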
Code Example #10
 def operation_on_nfs(self, mount_point, op_code):
     time.sleep(300)  # sleep for 300 secs before operations start
     opstatus = []
     log.info('operation started-------------- : %s' % op_code)
     fp = FileOps(self.json_fname, type='json')
     json_data = fp.get_data()
     buckets_info = json_data['buckets']
     for bucket_name, key in list(buckets_info.items()):
         log.info('got bucket_name: %s' % bucket_name)
         local_bucket = os.path.abspath(
             os.path.join(mount_point, bucket_name))
         print('local bucket: --------------- %s' % local_bucket)
         local_keys = utils.get_all_in_dir(local_bucket)
         log.info('local key: %s' % local_bucket)
         log.info('local keys: %s' % local_keys)
          for key_info in key['keys']:
              status = {}
              local_key = os.path.join(local_bucket, key_info['key_name'])
             if key_info['is_type'] == 'file':
                 log.info('operation on  key: %s' % key_info['key_name'])
                 log.info('local key: ------------------ %s' % local_key)
                 if op_code == 'move':
                     status['bucket_name'] = bucket_name
                     status['key_name'] = key_info['key_name']
                     status['op_code'] = op_code
                     new_key_path = local_key + ".moved"
                     new_name = key_info['key_name'] + ".moved"
                     cmd = 'sudo mv %s %s' % (os.path.abspath(local_key),
                                              os.path.abspath(new_key_path))
                     log.info('cmd_to_move: %s' % cmd)
                     time.sleep(5)
                     ret_val = os.system(cmd)
                     if ret_val == 0:
                         key_info['opcode']['move']['old_name'] = key_info[
                             'key_name']
                         key_info['key_name'] = new_name
                         fp.add_data(json_data)
                         status['op_code_status'] = True
                     else:
                         log.info('move failed: %s' % local_key)
                         status['op_code_status'] = False
                 if op_code == 'edit':
                     try:
                         log.info('editing file: %s' % local_key)
                          with open(local_key, 'a+') as key_modify:
                              key_modify.write(
                                  'file opened from NFS and added this message')
                         key_info['opcode']['edit'][
                             'new_md5'] = utils.get_md5(
                                 os.path.abspath(local_key))
                         key_info['md5_local'] = utils.get_md5(
                             os.path.abspath(local_key))
                         key_info['md5_on_s3'] = None
                         status['op_code_status'] = True
                     except Exception as e:
                         log.info('could not edit')
                         log.error(e)
                         status['op_code_status'] = False
                 if op_code == 'delete':
                     status['bucket_name'] = bucket_name
                     status['key_name'] = key_info['key_name']
                     status['op_code'] = op_code
                     log.info('deleting key: %s' % key_info['key_name'])
                     # ret_val = os.system('sudo rm -rf %s' % (local_key))
                     try:
                         os.unlink(local_key)
                         key_info['opcode']['delete']['deleted'] = True
                         fp.add_data(json_data)
                         status['op_code_status'] = True
                         log.info('deleted key: %s' % key_info['key_name'])
                     except (Exception, OSError) as e:
                         log.error('deleting key: %s failed' %
                                   key_info['key_name'])
                         key_info['opcode']['delete']['deleted'] = False
                         log.error('delete failed: %s' % local_key)
                         log.error(e)
                         status['op_code_status'] = False
             opstatus.append(status)
     for st in opstatus:
         log.info(st)
     return opstatus
Code Example #11
class ReadIOInfo(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type='yaml')

    def verify_io(self):
        data = self.file_op.get_data()
        users = data['users']
        try:
            for each_user in users:
                log.info('verifying data for the user: \n')
                log.info('user_id: %s' % each_user['user_id'])
                log.info('access_key: %s' % each_user['access_key'])
                log.info('secret_key: %s' % each_user['secret_key'])
                conn = boto.connect_s3(
                    aws_access_key_id=each_user['access_key'],
                    aws_secret_access_key=each_user['secret_key'],
                    host=socket.gethostname(),
                    port=int(utils.get_radosgw_port_no()),
                    is_secure=False,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat())
                for each_bucket in each_user['bucket']:
                    log.info('verifying data for bucket: %s' %
                             each_bucket['name'])
                    if each_bucket['test_op_code'] == 'delete':
                        bucket_from_s3 = conn.lookup(each_bucket['name'])
                        if bucket_from_s3 is None:
                            log.info('bucket deleted')
                            log.info(
                                'cannot verify objects as objects will be deleted since bucket does not exist'
                            )
                        if bucket_from_s3 is not None:
                            log.info('Bucket exists')
                            raise Exception("Bucket exists")
                    else:
                        bucket_from_s3 = conn.get_bucket(each_bucket['name'])
                        if not each_bucket['keys']:
                            log.info('keys are not created')
                        else:
                            for each_key in each_bucket['keys']:
                                log.info('verifying data for key: %s' %
                                         each_key['name'])
                                if each_key['test_op_code'] == 'create':
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key['name'])
                                    log.info('verifying size')
                                    log.info('size from yaml: %s' %
                                             each_key['size'])
                                    log.info('size from s3: %s' %
                                             key_from_s3.size)
                                    if int(each_key['size']) != int(
                                            key_from_s3.size):
                                        raise Exception("Size not matched")
                                    log.info('verifying md5')
                                    log.info('md5_on_s3_from yaml: %s' %
                                             each_key['md5_on_s3'])
                                    log.info('md5_on_s3: %s' %
                                             key_from_s3.etag.replace('"', ''))
                                    if each_key[
                                            'md5_on_s3'] != key_from_s3.etag.replace(
                                                '"', ''):
                                        raise Exception("Md5 not matched")
                                    log.info(
                                        'verification complete for the key: %s'
                                        % key_from_s3.name)
                                if each_key['test_op_code'] == 'delete':
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key['name'])
                                    if key_from_s3 is None:
                                        log.info('key deleted')
                                    if key_from_s3 is not None:
                                        log.info('key exists')
                                        raise Exception("Key is not deleted")
            log.info('verification of data completed, data intact')
        except Exception as e:
            log.error(e)
            log.error('verification failed')
            exit(1)
Code Example #12
class ReadIOInfo(object):
    def __init__(self, yaml_fname=IO_INFO_FNAME):
        self.yaml_fname = yaml_fname
        self.file_op = FileOps(self.yaml_fname, type="yaml")

    def verify_io(self):
        data = self.file_op.get_data()
        users = data["users"]
        try:
            for each_user in users:
                log.info("verifying data for the user: \n")
                log.info("user_id: %s" % each_user["user_id"])
                log.info("access_key: %s" % each_user["access_key"])
                log.info("secret_key: %s" % each_user["secret_key"])
                conn = boto.connect_s3(
                    aws_access_key_id=each_user["access_key"],
                    aws_secret_access_key=each_user["secret_key"],
                    host=socket.gethostname(),
                    port=int(utils.get_radosgw_port_no()),
                    is_secure=False,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                for each_bucket in each_user["bucket"]:
                    log.info("verifying data for bucket: %s" %
                             each_bucket["name"])
                    if each_bucket["test_op_code"] == "delete":
                        bucket_from_s3 = conn.lookup(each_bucket["name"])
                        if bucket_from_s3 is None:
                            log.info("bucket deleted")
                            log.info(
                                "cannot verify objects as objects will be deleted since bucket does not exist"
                            )
                        if bucket_from_s3 is not None:
                            log.info("Bucket exists")
                            raise Exception("Bucket exists")
                    else:
                        bucket_from_s3 = conn.get_bucket(each_bucket["name"])
                        if not each_bucket["keys"]:
                            log.info("keys are not created")
                        else:
                            for each_key in each_bucket["keys"]:
                                log.info("verifying data for key: %s" %
                                         each_key["name"])
                                if each_key["test_op_code"] == "create":
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key["name"])
                                    log.info("verifying size")
                                    log.info("size from yaml: %s" %
                                             each_key["size"])
                                    log.info("size from s3: %s" %
                                             key_from_s3.size)
                                    if int(each_key["size"]) != int(
                                            key_from_s3.size):
                                        raise Exception("Size not matched")
                                    log.info("verifying md5")
                                    log.info("md5_on_s3_from yaml: %s" %
                                             each_key["md5_on_s3"])
                                    log.info("md5_on_s3: %s" %
                                             key_from_s3.etag.replace('"', ""))
                                    if each_key[
                                            "md5_on_s3"] != key_from_s3.etag.replace(
                                                '"', ""):
                                        raise Exception("Md5 not matched")
                                    log.info(
                                        "verification complete for the key: %s"
                                        % key_from_s3.name)
                                if each_key["test_op_code"] == "delete":
                                    key_from_s3 = bucket_from_s3.get_key(
                                        each_key["name"])
                                    if key_from_s3 is None:
                                        log.info("key deleted")
                                    if key_from_s3 is not None:
                                        log.info("key exists")
                                        raise Exception("Key is not deleted")
            log.info("verification of data completed, data intact")
        except Exception as e:
            log.error(e)
            log.error("verification failed")
            exit(1)
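
For reference, the YAML document verify_io walks has roughly this shape, reconstructed from the lookups above (every value is a placeholder):

# equivalent Python structure of the io-info YAML file, as implied by
# the lookups in verify_io
data = {
    'users': [{
        'user_id': 'test-user1',
        'access_key': 'ACCESSKEY',
        'secret_key': 'SECRETKEY',
        'bucket': [{
            'name': 'bucket1',
            'test_op_code': 'create',
            'keys': [{
                'name': 'key1',
                'size': 1024,
                'md5_on_s3': '9e107d9d372bb6826bd81d3542a419d6',
                'test_op_code': 'create',
            }],
        }],
    }],
}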
Code Example #13
    def operation_on_s3(self, op_code=None):
        time.sleep(300)  # sleep for 300 secs before operation starts
        log.info("operation on s3 started with opcode: %s" % op_code)
        ks_op_status = []
        fp = FileOps(self.json_fname, type="json")
        json_data = fp.get_data()
        buckets_info = json_data["buckets"]
        for bucket_name, key in list(buckets_info.items()):
            log.info("got bucket_name: %s" % bucket_name)
            bucket = self.bucket_conn.get(bucket_name)
            for key_info in key["keys"]:
                key_name = key_info["key_name"]
                log.info("verifying key: %s" % key_name)
                status = dict()
                status["op_code"] = op_code
                status["bucket_name"] = bucket_name
                keyop = KeyOp(bucket["bucket"])
                kinfo = keyop.get(key_name)
                print("got key_info -------------------------- from s3 :%s" % kinfo)
                if op_code == "move":
                    try:
                        log.info("in move operation")
                        new_key_name = key_name + ".moved"
                        kinfo.copy(bucket_name, new_key_name)
                        kinfo.delete()
                        key_info["opcode"]["move"]["old_name"] = key_name
                        key_info["key_name"] = new_key_name
                        fp.add_data(json_data)
                        status["op_code_status"] = True
                    except Exception as e:
                        log.error(e)
                        status["op_code_status"] = False
                if op_code == "delete":
                    try:
                        log.info("in delete operation")
                        kinfo.delete()
                        key_info["opcode"]["delete"]["deleted"] = True
                        fp.add_data(json_data)
                        status["op_code_status"] = True
                    except Exception as e:
                        log.error(e)
                        status["op_code_status"] = False
                if op_code == "edit":
                    try:
                        put_contents_or_download = PutContentsFromFile(
                            kinfo, self.json_fname
                        )
                        log.info("in edit or modify file")
                        # download the file from s3
                        download_fname = key_name + ".downloaded"
                        downloaded_f = put_contents_or_download.get(download_fname)
                        print("-------------------------------%s" % downloaded_f)
                        if not downloaded_f["status"]:
                            raise Exception("download failed")
                        new_text = (
                            "downloaded from s3 and uploading back with this message"
                        )
                        log.info("file downloaded, string to add: %s" % new_text)
                        with open(download_fname, "a") as f:
                            f.write(new_text)
                        put_contents_or_download.put(download_fname)
                        log.info("file uploaded")
                        status["op_code_status"] = True
                    except Exception as e:
                        log.info("operation could not complete")
                        log.error(e)
                        status["op_code_status"] = False
                ks_op_status.append(status)
        for st in ks_op_status:
            log.info(st)
        return ks_op_status
Code Example #14
 def operation_on_nfs(self, mount_point, op_code):
     time.sleep(300)  # sleep for 300 secs before operations start
     opstatus = []
     log.info("operation started-------------- : %s" % op_code)
     fp = FileOps(self.json_fname, type="json")
     json_data = fp.get_data()
     buckets_info = json_data["buckets"]
     for bucket_name, key in list(buckets_info.items()):
         log.info("got bucket_name: %s" % bucket_name)
         local_bucket = os.path.abspath(os.path.join(mount_point, bucket_name))
         print("local bucket: --------------- %s" % local_bucket)
         local_keys = utils.get_all_in_dir(local_bucket)
         log.info("local key: %s" % local_bucket)
         log.info("local keys: %s" % local_keys)
          for key_info in key["keys"]:
              status = {}
              local_key = os.path.join(local_bucket, key_info["key_name"])
             if key_info["is_type"] == "file":
                 log.info("operation on  key: %s" % key_info["key_name"])
                 log.info("local key: ------------------ %s" % local_key)
                 if op_code == "move":
                     status["bucket_name"] = bucket_name
                     status["key_name"] = key_info["key_name"]
                     status["op_code"] = op_code
                     new_key_path = local_key + ".moved"
                     new_name = key_info["key_name"] + ".moved"
                     cmd = "sudo mv %s %s" % (
                         os.path.abspath(local_key),
                         os.path.abspath(new_key_path),
                     )
                     log.info("cmd_to_move: %s" % cmd)
                     time.sleep(5)
                     ret_val = os.system(cmd)
                     if ret_val == 0:
                         key_info["opcode"]["move"]["old_name"] = key_info[
                             "key_name"
                         ]
                         key_info["key_name"] = new_name
                         fp.add_data(json_data)
                         status["op_code_status"] = True
                     else:
                         log.info("move failed: %s" % local_key)
                         status["op_code_status"] = False
                 if op_code == "edit":
                     try:
                         log.info("editing file: %s" % local_key)
                          with open(local_key, "a+") as key_modify:
                              key_modify.write(
                                  "file opened from NFS and added this message"
                              )
                         key_info["opcode"]["edit"]["new_md5"] = utils.get_md5(
                             os.path.abspath(local_key)
                         )
                         key_info["md5_local"] = utils.get_md5(
                             os.path.abspath(local_key)
                         )
                         key_info["md5_on_s3"] = None
                         status["op_code_status"] = True
                     except Exception as e:
                         log.info("could not edit")
                         log.error(e)
                         status["op_code_status"] = False
                 if op_code == "delete":
                     status["bucket_name"] = bucket_name
                     status["key_name"] = key_info["key_name"]
                     status["op_code"] = op_code
                     log.info("deleting key: %s" % key_info["key_name"])
                     # ret_val = os.system('sudo rm -rf %s' % (local_key))
                     try:
                         os.unlink(local_key)
                         key_info["opcode"]["delete"]["deleted"] = True
                         fp.add_data(json_data)
                         status["op_code_status"] = True
                         log.info("deleted key: %s" % key_info["key_name"])
                     except (Exception, OSError) as e:
                         log.error("deleting key: %s failed" % key_info["key_name"])
                         key_info["opcode"]["delete"]["deleted"] = False
                         log.error("delete failed: %s" % local_key)
                         log.error(e)
                         status["op_code_status"] = False
             opstatus.append(status)
     for st in opstatus:
         log.info(st)
     return opstatus