# NOTE: these helpers are methods lifted from the test framework's classes.
# The standard-library and boto imports they rely on are listed here; utils,
# FileOps, JKeys and MultipartPut come from the framework's own modules, whose
# import paths are not shown in this excerpt.
import logging
import os
import time

from boto import exception

log = logging.getLogger(__name__)


def multipart_upload(self, buckets_created):
    object_size = self.objects_size_range
    min_object_size = object_size['min']
    max_object_size = object_size['max']
    for bucket in buckets_created:
        for object_count in range(self.objects_count):
            key_name = bucket.name + "." + str(object_count) + ".key" + ".mpFile"
            if not os.path.exists(key_name):
                size = utils.get_file_size(min_object_size, max_object_size)
                log.info('size of the file to create: %s' % size)
                log.info('file does not exist, so creating the file')
                filename = utils.create_file(key_name, size)
            else:
                log.info('file exists')
                filename = os.path.abspath(key_name)
            md5 = utils.get_md5(filename)
            log.info('got filename %s' % filename)
            log.debug('got file dirname %s' % os.path.dirname(filename))
            json_file = os.path.join(os.path.dirname(filename),
                                     os.path.basename(filename) + ".json")
            log.info('json_file_name %s' % json_file)
            multipart = MultipartPut(bucket, filename)
            multipart.break_at_part_no = self.break_upload_at_part_no
            multipart.cancel_multpart = self.set_cancel_multipart
            multipart.iniate_multipart(json_file)
            put = multipart.put()
            print(put['status'])
            if not put['status']:
                raise AssertionError(put['msgs'])
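
# Usage sketch for the multipart flow above. It reuses only names that already
# appear in this module (MultipartPut, utils); treat it as an assumption-laden
# example, not part of the test suite itself.
def upload_one_multipart(bucket, filename, json_file):
    # drive a single multipart upload without interrupting or cancelling it
    multipart = MultipartPut(bucket, filename)
    multipart.break_at_part_no = 0        # assumed semantics: falsy means never break mid-upload
    multipart.cancel_multpart = False     # attribute name (sic) matches MultipartPut
    multipart.iniate_multipart(json_file)  # method name (sic) as defined on MultipartPut
    put = multipart.put()
    if not put['status']:
        raise AssertionError(put['msgs'])
    return put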
def get(self, filename):
    """
    :param filename: filename which will receive the contents fetched from
                     s3; can differ from the original key name
    :return: dictionary, args:
             1. status: True for successful download or False for failed download,
             2. msgs: error messages
    """
    log.debug('function: %s' % self.get.__name__)
    log.info('getting the contents of key: %s' % self.key)
    log.info('download or get the file to filename: %s' % filename)
    try:
        self.key.get_contents_to_filename(filename)
        md5_on_s3 = self.key.etag.replace('"', '')
        md5_local = utils.get_md5(filename)
        if md5_on_s3 == md5_local:
            md5_match = "match"
        else:
            md5_match = "no match"
        key_details = {
            'key_name': os.path.basename(filename),
            'key_name_os_s3': self.key.name,
            'size': os.stat(filename).st_size,
            'md5_local': md5_local,
            'md5_on_s3': md5_on_s3,
            'md5_match': md5_match,
            'opcode': {
                "edit": {"new_md5": None},
                "move": {"new_name": None},
                "delete": {"deleted": None}
            }
        }
        self.jkey.add(self.key.bucket.name, **key_details)
        download_status = {'status': True}
    except (exception.BotoClientError, exception.S3ResponseError, Exception) as e:
        log.error(e)
        download_status = {'status': False, 'msgs': e}
    return download_status
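
# A note on the md5 comparison in get(): for plain (non-multipart) puts the S3
# ETag is the object's MD5, so stripping the quotes and comparing works.
# Multipart ETags instead look like '<md5-of-part-md5s>-<part_count>' and will
# never equal a local md5. A minimal standalone check, under that assumption:
def etag_is_plain_md5(key):
    # multipart ETags carry a '-<parts>' suffix; plain ones do not
    return '-' not in key.etag.replace('"', '')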
def put(self, filename, test_op_code='create'):
    """
    :param filename: filename, i.e. the full path of the file to upload
    :return: dictionary, args:
             1. status: True for successful upload or False for failed upload,
             2. msgs: error messages
    """
    log.debug('function: %s' % self.put.__name__)
    log.info('upload of file: %s' % filename)
    try:
        self.key.set_contents_from_filename(filename)
        md5_on_s3 = self.key.etag.replace('"', '')
        key_details = {
            'key_name': self.key.key,
            'size': os.stat(filename).st_size,
            'md5_local': utils.get_md5(filename),
            'md5_on_s3': md5_on_s3,
            'opcode': {
                "edit": {"new_md5": None},
                "move": {"new_name": None},
                "delete": {"deleted": None}
            }
        }
        self.jkey.add(self.key.bucket.name, **key_details)
        self.add_io_info.add_keys_info(
            self.key.bucket.connection.access_key,
            self.key.bucket.name,
            **{
                'key_name': self.key.key,
                'size': os.stat(filename).st_size,
                'md5_on_s3': md5_on_s3,
                'upload_type': 'normal',
                'test_op_code': test_op_code
            })
        upload_status = {'status': True}
    except (exception.BotoClientError, exception.S3ResponseError) as e:
        log.error(e)
        upload_status = {'status': False, 'msgs': e}
    return upload_status
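
# Round-trip sketch for put()/get(). `KeyOp` is a hypothetical name for the
# class these two methods belong to (the real class name is not shown in this
# excerpt); `key` is assumed to be a boto Key bound to an existing bucket.
#
#   op = KeyOp(key)
#   assert op.put('/tmp/sample.txt')['status']
#   assert op.get('/tmp/sample.txt.downloaded')['status']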
def verify_nfs(self, mount_point, op_type=None):
    time.sleep(300)  # sleep for 300 secs so the NFS view can catch up
    kstatus = []
    fp = FileOps(self.json_fname, type='json')
    json_data = fp.get_data()
    buckets_info = json_data['buckets']
    for bucket_name, key in list(buckets_info.items()):
        log.info('got bucket_name: %s' % bucket_name)
        local_bucket = os.path.abspath(os.path.join(mount_point, bucket_name))
        print('local bucket: --------------- %s' % local_bucket)
        for key_info in key['keys']:
            log.info('verifying key: %s' % key_info['key_name'])
            status = {}
            # status['bucket_name'] = bucket_name
            local_key = os.path.join(local_bucket, key_info['key_name'])
            if key_info['key_name'] in os.path.basename(local_key):
                status['key_name'] = key_info['key_name']
                status['exists'] = os.path.exists(local_key)
                log.info('local key: %s' % local_key)
                if op_type == 'edit':
                    log.info('in operation: -----> edit')
                    # size = os.path.getsize(local_key)
                    # md5 = utils.get_md5(local_key)
                    md5_local = key_info['md5_local']
                    md5_on_s3 = key_info['md5_on_s3']
                    if md5_local == md5_on_s3:
                        status['md5_matched'] = True
                    else:
                        status['md5_matched'] = False
                else:
                    if status['exists']:
                        size = os.path.getsize(local_key)
                        md5 = utils.get_md5(local_key)
                        if size == key_info['size']:
                            status['size_matched'] = True
                        else:
                            status['size_matched'] = False
                        if md5 == key_info['md5_on_s3']:
                            status['md5_matched'] = True
                            log.info(key_info['md5_on_s3'])
                            log.info(md5)
                        else:
                            status['md5_matched'] = False
            log.info('status of this key: %s' % status)
            kstatus.append(status)
    [log.info('%s \n' % ks) for ks in kstatus]
    return kstatus
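
# Sketch: consuming the report returned by verify_nfs(). Fails fast when a key
# is missing on the NFS mount or its checksum diverged; the field names are
# taken from the status dicts built above.
def assert_nfs_consistent(kstatus):
    bad = [ks for ks in kstatus
           if ks.get('exists') is False or ks.get('md5_matched') is False]
    if bad:
        raise AssertionError('NFS verification failed for: %s' % bad)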
def put(self, filename, test_op_code="create"): log.debug("function: %s" % self.put.__name__) log.info("upload of file: %s" % filename) """ :param filename: filename i.e along with location :return: dictionary, args: 1. status: True for successful upload or False for failed upload, 2. msgs : error messages """ try: self.key.set_contents_from_filename(filename) md5_on_s3 = self.key.etag.replace('"', "") key_details = { "key_name": self.key.key, "size": os.stat(filename).st_size, "md5_local": utils.get_md5(filename), "md5_on_s3": md5_on_s3, "opcode": { "edit": {"new_md5": None}, "move": {"new_name": None}, "delete": {"deleted": None}, }, } self.jkey.add(self.key.bucket.name, **key_details) self.add_io_info.add_keys_info( self.key.bucket.connection.access_key, self.key.bucket.name, **{ "key_name": self.key.key, "size": os.stat(filename).st_size, "md5_on_s3": md5_on_s3, "upload_type": "normal", "test_op_code": test_op_code, } ) upload_status = {"status": True} except (exception.BotoClientError, exception.S3ResponseError) as e: log.error(e) upload_status = {"status": False, "msgs": e} return upload_status
def operation_on_nfs(self, mount_point, op_code):
    time.sleep(300)  # sleep for 300 secs before operations start
    opstatus = []
    log.info('operation started-------------- : %s' % op_code)
    fp = FileOps(self.json_fname, type='json')
    json_data = fp.get_data()
    buckets_info = json_data['buckets']
    for bucket_name, key in list(buckets_info.items()):
        log.info('got bucket_name: %s' % bucket_name)
        local_bucket = os.path.abspath(os.path.join(mount_point, bucket_name))
        print('local bucket: --------------- %s' % local_bucket)
        local_keys = utils.get_all_in_dir(local_bucket)
        log.info('local bucket: %s' % local_bucket)
        log.info('local keys: %s' % local_keys)
        for key_info in key['keys']:
            local_key = os.path.join(local_bucket, key_info['key_name'])
            if key_info['is_type'] == 'file':
                # fresh dict per key; reusing one dict across iterations would
                # make every opstatus entry alias the same object
                status = {}
                log.info('operation on key: %s' % key_info['key_name'])
                log.info('local key: ------------------ %s' % local_key)
                if op_code == 'move':
                    status['bucket_name'] = bucket_name
                    status['key_name'] = key_info['key_name']
                    status['op_code'] = op_code
                    new_key_path = local_key + ".moved"
                    new_name = key_info['key_name'] + ".moved"
                    cmd = 'sudo mv %s %s' % (os.path.abspath(local_key),
                                             os.path.abspath(new_key_path))
                    log.info('cmd_to_move: %s' % cmd)
                    time.sleep(5)
                    ret_val = os.system(cmd)
                    if ret_val == 0:
                        key_info['opcode']['move']['old_name'] = key_info['key_name']
                        key_info['key_name'] = new_name
                        fp.add_data(json_data)
                        status['op_code_status'] = True
                    else:
                        log.info('move failed: %s' % local_key)
                        status['op_code_status'] = False
                if op_code == 'edit':
                    try:
                        log.info('editing file: %s' % local_key)
                        with open(local_key, 'a+') as key_modify:
                            key_modify.write(
                                'file opened from NFS and added this message')
                        key_info['opcode']['edit']['new_md5'] = utils.get_md5(
                            os.path.abspath(local_key))
                        key_info['md5_local'] = utils.get_md5(
                            os.path.abspath(local_key))
                        key_info['md5_on_s3'] = None
                        status['op_code_status'] = True
                    except Exception as e:
                        log.info('could not edit')
                        log.error(e)
                        status['op_code_status'] = False
                if op_code == 'delete':
                    status['bucket_name'] = bucket_name
                    status['key_name'] = key_info['key_name']
                    status['op_code'] = op_code
                    log.info('deleting key: %s' % key_info['key_name'])
                    # ret_val = os.system('sudo rm -rf %s' % (local_key))
                    try:
                        os.unlink(local_key)
                        key_info['opcode']['delete']['deleted'] = True
                        fp.add_data(json_data)
                        status['op_code_status'] = True
                        log.info('deleted key: %s' % key_info['key_name'])
                    except (Exception, OSError) as e:
                        log.error('deleting key: %s failed' % key_info['key_name'])
                        key_info['opcode']['delete']['deleted'] = False
                        log.error('delete failed: %s' % local_key)
                        log.error(e)
                        status['op_code_status'] = False
                opstatus.append(status)
    [log.info(st) for st in opstatus]
    return opstatus
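
# Sketch: summarising the per-key report returned by operation_on_nfs(); uses
# only the 'op_code_status' field populated above.
def failed_ops(opstatus):
    return [st for st in opstatus if not st.get('op_code_status')]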
def create(self, file_type=None):
    log.info('in sub dir create')
    nest_level = self.config['sub_dir_count']
    files = self.config['Files']
    jkeys = JKeys(self.json_fname)
    for base_dir in self.base_dir_list:
        log.info('base_dir name: %s' % base_dir)
        subdirs = ['dir' + str(i) for i in range(nest_level)]
        log.info('subdirs to create: %s' % subdirs)
        for subdir in subdirs:  # renamed from `dir` to avoid shadowing the builtin
            nest = os.path.join(base_dir, subdir)
            log.info('creating dir: %s' % nest)
            os.makedirs(nest)
            self.created.append(nest)
            key_name = subdir + '/'
            dir_info = {
                'key_name': key_name,
                'size': 0,
                'md5_matched': None,
                'md5_on_s3': None,
                'md5_local': None,
                'is_type': 'dir',
                'opcode': {
                    'move': {'old_name': None},
                    'delete': {'deleted': None},
                    'edit': {'new_md5': 0}
                }
            }
            log.info('sub dir info -------------------------------- \n%s' % dir_info)
            jkeys.add(os.path.basename(base_dir), **dir_info)
            for no in range(files['files_in_dir']):
                fname = os.path.join(nest, 'file' + str(no))
                log.info('creating file: %s' % fname)
                if file_type == 'text':
                    fcreate = 'base64 /dev/urandom | head -c %sM > %s' % (
                        files['size'], fname)
                else:
                    fcreate = 'sudo dd if=/dev/urandom of=%s bs=%sM count=1' % (
                        fname, files['size'])
                log.info('fcreate command: %s' % fcreate)
                os.system(fcreate)
                fname_created = fname.split(base_dir)[1].lstrip('/')
                file_info = {
                    'key_name': fname_created,
                    'size': os.stat(fname).st_size,
                    'md5_local': utils.get_md5(fname),
                    'md5_on_s3': None,
                    'is_type': 'file',
                    'opcode': {
                        'move': {'old_name': None},
                        'delete': {'deleted': None},
                        'edit': {'new_md5': 0}
                    }
                }
                log.info('file info -------------------------------- \n%s' % file_info)
                jkeys.add(os.path.basename(base_dir), **file_info)
                self.created.append(fname)
    [log.info('created: %s' % d) for d in self.created]
    return self.created
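
# Example of the config shape create() reads (field names taken from the
# lookups above; the values are illustrative only):
#
#   self.config = {
#       'sub_dir_count': 2,          # number of sub dirs per base dir
#       'Files': {
#           'files_in_dir': 5,       # files created in each sub dir
#           'size': 10               # size in MB, fed to dd/head above
#       }
#   }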