def set_bucket_quota(self, uid, max_objects):
    cmd = 'radosgw-admin quota set --uid=%s --quota-scope=bucket --max-objects=%s' % (
        uid, max_objects)
    status = utils.exec_shell_cmd(cmd)
    if not status[0]:
        raise AssertionError(status[1])
    log.info('quota set complete')
def create_admin_user(self, username, displayname, cluster_name='ceph'):
    try:
        add_io_info = AddIOInfo()
        cmd = 'radosgw-admin user create --uid=%s --display-name=%s --cluster %s' % (
            username, displayname, cluster_name)
        log.info('cmd: %s' % cmd)
        # check_output raises CalledProcessError on a non-zero exit, which
        # the handler below relies on (Popen alone would never raise it)
        v = subprocess.check_output(cmd, shell=True)
        v_as_json = json.loads(v)
        # log.info(v_as_json)
        user_details = {}
        user_details['user_id'] = v_as_json['user_id']
        user_details['display_name'] = v_as_json['display_name']
        user_details['access_key'] = v_as_json['keys'][0]['access_key']
        user_details['secret_key'] = v_as_json['keys'][0]['secret_key']
        add_io_info.add_user_info(
            **{
                'user_id': user_details['user_id'],
                'access_key': user_details['access_key'],
                'secret_key': user_details['secret_key']
            })
        return user_details
    except subprocess.CalledProcessError as e:
        error = '%s, return code: %s' % (e.output, e.returncode)
        log.error(error)
        return False
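# A minimal usage sketch for create_admin_user. 'RGWAdmin' is a hypothetical
# host class for the method and the uid/display-name values are placeholders;
# the keys of the returned dict come from the function above.
def _example_create_admin_user():
    admin_ops = RGWAdmin()  # hypothetical host class
    user = admin_ops.create_admin_user('qe_user', 'qe-user')
    if user:
        log.info('access_key: %s' % user['access_key'])
        log.info('secret_key: %s' % user['secret_key'])
    else:
        log.error('user creation failed')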
def verify_s3(self):
    fp = FileOps(self.json_fname, type='json')
    json_data = fp.get_data()
    buckets_list = json_data['buckets'].keys()
    bstatus = []
    for each_bucket in buckets_list:
        log.info('getting bucket info for base dir: %s' % each_bucket)
        status = {}
        info = self.bucket.get(each_bucket)
        if not info['status']:
            status['exists'] = False
        else:
            status['exists'] = True
            status['bucket_name'] = info['bucket']
        bstatus.append(status)
    log.info('bucket verification status:')
    for bs in bstatus:
        log.info('%s' % bs)
    return bstatus
def delete_keys(self, delete_bucket=True):
    log.info('deleting buckets with keys')
    for bucket_name in self.bucket_names:
        log.info('ops on bucket name: %s' % bucket_name)
        bucket = self.bucket_ops.get(bucket_name)
        all_keys_in_bucket = bucket['bucket'].list()
        if all_keys_in_bucket:
            log.info('got all keys in bucket: %s' % all_keys_in_bucket)
            key_op = KeyOp(bucket['bucket'])
            log.info('deleting all keys')
            keys_deleted = key_op.multidelete_keys(all_keys_in_bucket)
            if keys_deleted is None:
                log.error('keys not deleted')
                raise AssertionError('keys not deleted')
            log.info('all keys deleted')
def get(self, bucket_name, json_file=None):
    """
    :param bucket_name: string
    :rtype: dict
    :return: get_bucket_stack:
        status: True if the bucket was fetched, False otherwise
        bucket: bucket object
        msgs: error messages
    """
    log.debug("function: %s" % self.get.__name__)
    log.info("in get bucket")
    try:
        bucket = self.connection.get_bucket(bucket_name)
        if json_file is not None:
            add_bucket_to_json = JBucket(json_file)
            add_bucket_to_json.add(bucket_name)
        get_bucket_stack = {"status": True, "bucket": bucket}
    except (exception.S3ResponseError, exception.AWSConnectionError) as e:
        log.error(e)
        get_bucket_stack = {"status": False, "msgs": e}
    return get_bucket_stack
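# Usage sketch for get(): the caller checks the 'status' flag before touching
# the bucket object. 'BucketOps' is a hypothetical host class and the bucket
# name is a placeholder; the dict keys come from the function above.
def _example_get_bucket(connection):
    bucket_ops = BucketOps(connection)  # hypothetical constructor
    stack = bucket_ops.get('my-test-bucket')
    if stack['status']:
        log.info('got bucket: %s' % stack['bucket'].name)
    else:
        log.error('get bucket failed: %s' % stack['msgs'])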
def refresh_json_data(self):
    log.info("loading / refreshing json file")
    json_data = self.get_data()
    self.total_parts_count = json_data["total_parts"]
    self.remaining_file_parts = json_data["remaining_parts"]
    self.key_name = json_data["key_name"]
    self.mp_id = json_data["mp_id"]
def check_if_bucket_empty(bucket):
    """
    :param bucket: bucket object
    :rtype: dict
    :return: check_for_empty_stack:
        contents: empty list ([]) or list of keys in the bucket
        msgs: error messages
    """
    log.debug("function: %s" % check_if_bucket_empty.__name__)
    log.info("checking if bucket is empty")
    try:
        bucket_contents = bucket.list()
        check_for_empty_stack = {"contents": bucket_contents}
    except (exception.S3ResponseError, exception.AWSConnectionError) as e:
        log.error(e)
        check_for_empty_stack = {"contents": [], "msgs": e}
    return check_for_empty_stack
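# Usage sketch: bucket.list() in boto2 returns a lazy iterator, so this
# sketch materializes it before testing for emptiness. The bucket object is
# assumed to come from a successful get() call above.
def _example_check_empty(bucket):
    stack = check_if_bucket_empty(bucket)
    keys = list(stack['contents'])
    if not keys:
        log.info('bucket is empty')
    else:
        log.info('bucket has %s keys' % len(keys))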
def set_user_grant(self, bucket, grants):
    """
    :param bucket: bucket object
    :param grants: grants in the form
        {'permission': <permission type>,
         'user_id': canonical_user_id,
         'recursive': bool}
        permission type: READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL
    """
    if grants is not None:
        try:
            log.debug('setting grants %s' % grants)
            bucket.add_user_grant(permission=grants['permission'],
                                  user_id=grants['user_id'],
                                  recursive=grants['recursive'])
            acp = bucket.get_acl()
            for grant in acp.acl.grants:
                log.info('grants set: %s on %s' % (grant.permission, grant.id))
            return True
        except (exception.S3ResponseError, exception.BotoClientError) as e:
            log.error(e)
            return False
    else:
        log.info('not setting any acls')
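# Usage sketch building the grants dict in the shape set_user_grant expects.
# 'acl_ops' is a hypothetical instance of the class hosting the method and
# the canonical user id is a placeholder.
def _example_set_grant(acl_ops, bucket):
    grants = {
        'permission': 'READ',  # READ, WRITE, READ_ACP, WRITE_ACP or FULL_CONTROL
        'user_id': 'canonical-id-of-grantee',  # placeholder canonical id
        'recursive': True
    }
    if not acl_ops.set_user_grant(bucket, grants):
        raise AssertionError('failed to set grant on %s' % bucket.name)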
def test_exec_read(config):
    grants = {'permission': 'READ', 'user_id': None, 'recursive': True}
    test_info = AddTestInfo('Test with read permission on buckets for all users')
    add_io_info = AddIOInfo()
    add_io_info.initialize()
    try:
        # test case starts
        test_info.started_info()
        all_user_details = rgw_lib.create_users(config.user_count)
        user1 = all_user_details[0]
        log.info('user1: %s' % user1)
        all_user_details.pop(0)
        u1 = ObjectOps(config, user1)
        for each_user in all_user_details:
            u2 = ObjectOps(config, each_user)
            u2_canonical_id = u2.canonical_id
            log.info('canonical id of u2: %s' % u2_canonical_id)
            grants['user_id'] = u2_canonical_id
            # create buckets as u1 with no grants, then read them as u2
            u1.grants = None
            u1.create_bucket()
            u1.set_bucket_properties()
            u2.bucket_names = u1.bucket_names
            u2.buckets_created = u1.buckets_created
            u2.grants = None
            u2.set_bucket_properties()
            # set read permission for u2 and read again
            u1.grants = grants
            u1.set_bucket_properties()
            u2.bucket_names = u1.bucket_names
            u2.buckets_created = u1.buckets_created
            u2.grants = None
            u2.set_bucket_properties()
        test_info.success_status('test completed')
    except AssertionError as e:
        log.error(e)
        test_info.failed_status('test failed: %s' % e)
        sys.exit(1)
def __init__(self, name):
    log.debug('class: %s' % self.__class__.__name__)
    self.name = name
    log.info('process_name: %s' % self.name)
    self.process = None
def list_all_buckets(connection):
    """
    :param connection: AWS authentication connection
    :rtype: dict
    :return: list_buckets_stack:
        all_buckets: list of all buckets or None
        msgs: error messages
    """
    log.debug("function: %s" % list_all_buckets.__name__)
    log.info("listing all buckets")
    try:
        all_buckets = connection.get_all_buckets()
        list_buckets_stack = {"all_buckets": all_buckets}
    except (exception.S3ResponseError, exception.AWSConnectionError) as e:
        log.error(e)
        list_buckets_stack = {"all_buckets": None, "msgs": e}
    return list_buckets_stack
def delete(self, key_name, version_id=None):
    """
    :param key_name: string
    :return: deleted key object, or None.
        Check whether a delete_marker was created for this delete.
    """
    log.debug('function: %s' % self.delete.__name__)
    log.debug('in delete key %s:' % key_name)
    try:
        key_deleted = self.bucket.delete_key(key_name, version_id=version_id)
        log.info('key_name: %s' % key_name)
        log.info('version_id: %s' % version_id)
        return key_deleted
    except (exception.BotoClientError, exception.S3ResponseError) as e:
        log.error(e)
        return None
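# Usage sketch for delete(): on a versioned bucket, deleting without a
# version_id creates a delete marker, while passing a version_id removes that
# specific version. 'key_ops' is a hypothetical instance of the hosting class
# and the key name is a placeholder; the delete_marker attribute is read
# defensively since it is only populated on versioned buckets.
def _example_delete_key(key_ops):
    deleted = key_ops.delete('file.1.key')
    if deleted is None:
        raise AssertionError('delete failed')
    log.info('delete_marker: %s' % getattr(deleted, 'delete_marker', None))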
def multipart_upload(self, buckets_created):
    object_size = self.objects_size_range
    min_object_size = object_size['min']
    max_object_size = object_size['max']
    for bucket in buckets_created:
        for object_count in range(self.objects_count):
            key_name = bucket.name + "." + str(object_count) + ".key" + ".mpFile"
            if not os.path.exists(key_name):
                size = utils.get_file_size(min_object_size, max_object_size)
                log.info('size of the file to create: %s' % size)
                log.info('file does not exist, creating it')
                filename = utils.create_file(key_name, size)
            else:
                log.info('file exists')
                filename = os.path.abspath(key_name)
            md5 = utils.get_md5(filename)
            log.info('got filename %s' % filename)
            log.debug('got file dirname %s' % os.path.dirname(filename))
            json_file = os.path.join(os.path.dirname(filename),
                                     os.path.basename(filename) + ".json")
            log.info('json_file_name %s' % json_file)
            multipart = MultipartPut(bucket, filename)
            multipart.break_at_part_no = self.break_upload_at_part_no
            multipart.cancel_multpart = self.set_cancel_multipart
            multipart.iniate_multipart(json_file)
            put = multipart.put()
            log.info(put['status'])
            if not put['status']:
                raise AssertionError(put['msgs'])
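# Usage sketch wiring multipart_upload to the attributes it reads from self:
# objects_size_range, objects_count, break_upload_at_part_no and
# set_cancel_multipart. 'rgw' is a hypothetical ObjectOps-style instance and
# the knob values are assumptions (e.g. 0 taken to mean "do not break").
def _example_multipart(rgw):
    rgw.objects_count = 1
    rgw.objects_size_range = {'min': 5, 'max': 10}  # sizes as consumed by utils.get_file_size
    rgw.break_upload_at_part_no = 0                 # assumption: 0 disables the break
    rgw.set_cancel_multipart = False
    buckets = rgw.create_bucket()
    rgw.multipart_upload(buckets)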
def ganesha_restart(self):
    log.info('restarting ganesha services')
    # self.ganesha_stop()
    # self.ganesha_start()
    log.info('restarting services using systemctl')
    cmd = 'sudo systemctl restart nfs-ganesha'
    utils.exec_shell_cmd(cmd)
def check_contents(self):
    """
    Can also be used for getting (downloading) the contents.
    :return: string_exists_status (dictionary):
        status: True or False
        contents: contents of the string
        msgs: error messages
    """
    log.debug('function: %s' % self.check_contents.__name__)
    log.info('checking contents or getting the string val')
    try:
        string_contents = self.key.get_contents_as_string()
        string_exists_status = {
            'status': True,
            'contents': string_contents
        }
    except exception.BotoClientError as e:
        log.error(e)
        string_exists_status = {'status': False, 'msgs': e}
    return string_exists_status
def test_exec(config):
    test_info = AddTestInfo('multipart Upload')
    add_io_info = AddIOInfo()
    add_io_info.initialize()
    try:
        # test case starts
        test_info.started_info()
        all_user_details = rgw_lib.create_users(config.user_count)
        log.info('multipart upload enabled')
        for each_user in all_user_details:
            config.objects_count = 1
            rgw = ObjectOps(config, each_user)
            buckets = rgw.create_bucket()
            rgw.multipart_upload(buckets)
        test_info.success_status('test completed')
        sys.exit(0)
    except AssertionError as e:
        log.error(e)
        test_info.failed_status('test failed: %s' % e)
        sys.exit(1)
def do_auth(self):
    log.debug('function: %s' % self.do_auth.__name__)
    try:
        log.info('got the credentials')
        # conn = S3Connection(self.ak, self.sk)
        self.dump_to_json_upload()
        self.dump_to_json_download()
        conn = boto.connect_s3(
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key,
            host=self.hostname,
            port=self.port,
            is_secure=self.is_secure,
            calling_format=boto.s3.connection.OrdinaryCallingFormat())
        log.info('access_key %s\nsecret_key %s' %
                 (self.access_key, self.secret_key))
        auth_stack = {
            'status': True,
            'conn': conn,
            'upload_json_file': self.json_file_upload,
            'download_json_file': self.json_file_download
        }
    except (boto.s3.connection.HostRequiredError,
            exception.AWSConnectionError, Exception) as e:
        log.error('connection failed')
        log.error(e)
        auth_stack = {'status': False, 'msgs': e}
    return auth_stack
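# Usage sketch for do_auth(): the returned stack carries the live boto
# connection plus the two json bookkeeping files. 'auth' is a hypothetical
# instance of the class hosting do_auth.
def _example_do_auth(auth):
    stack = auth.do_auth()
    if not stack['status']:
        raise AssertionError(stack['msgs'])
    conn = stack['conn']
    log.info('buckets visible: %s' % conn.get_all_buckets())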
def delete(self, bucket_name):
    """
    :param bucket_name: string
    :rtype: dict
    :return: delete_bucket_stack:
        status: True if the bucket was deleted, False otherwise
        msgs: error messages
    """
    log.debug("function: %s" % self.delete.__name__)
    log.info("in delete bucket")
    try:
        self.connection.delete_bucket(bucket_name)
        delete_bucket_stack = {"status": True}
    except exception.S3ResponseError as e:
        log.error(e)
        delete_bucket_stack = {"status": False, "msgs": e}
    return delete_bucket_stack
def ganesha_start(self):
    log.info('starting nfs-ganesha services')
    # cmd = 'sudo /usr/bin/ganesha.nfsd -f /etc/ganesha/ganesha.conf'
    # utils.exec_shell_cmd(cmd)
    cmd = 'sudo systemctl enable nfs-ganesha'
    utils.exec_shell_cmd(cmd)
    cmd = 'sudo systemctl start nfs-ganesha'
    utils.exec_shell_cmd(cmd)
def get(self, filename):
    """
    :param filename: the local filename to which the contents from s3 are
        downloaded; can be different from the original key name
    :return: dictionary:
        status: True for a successful download, False for a failed download
        msgs: error messages
    """
    log.debug('function: %s' % self.get.__name__)
    log.info('getting the contents of file %s:' % self.key)
    log.info('download or get the file to filename: %s' % filename)
    try:
        self.key.get_contents_to_filename(filename)
        md5_on_s3 = self.key.etag.replace('"', '')
        md5_local = utils.get_md5(filename)
        if md5_on_s3 == md5_local:
            md5_match = "match"
        else:
            md5_match = "no match"
        key_details = {
            'key_name': os.path.basename(filename),
            'key_name_os_s3': self.key.name,
            'size': os.stat(filename).st_size,
            'md5_local': md5_local,
            'md5_on_s3': md5_on_s3,
            'md5_match': md5_match,
            'opcode': {
                "edit": {"new_md5": None},
                "move": {"new_name": None},
                "delete": {"deleted": None}
            }
        }
        self.jkey.add(self.key.bucket.name, **key_details)
        download_status = {'status': True}
    except (exception.BotoClientError, exception.S3ResponseError,
            Exception) as e:
        log.error(e)
        download_status = {'status': False, 'msgs': e}
    return download_status
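# Usage sketch for get(): download to a local filename and rely on the
# md5_match bookkeeping recorded above for integrity checking. 'key_ops' is a
# hypothetical instance of the class hosting get() and the filename is a
# placeholder.
def _example_download(key_ops):
    status = key_ops.get('downloaded.file.1.key')
    if not status['status']:
        raise AssertionError(status['msgs'])
    log.info('download complete')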
def kernel_stop(self):
    log.info('stopping nfs kernel services')
    cmd = 'systemctl stop nfs-server.service'
    utils.exec_shell_cmd(cmd)
    cmd = 'systemctl disable nfs-server.service'
    utils.exec_shell_cmd(cmd)
def enable_bucket_quota(self, uid):
    cmd = 'radosgw-admin quota enable --quota-scope=bucket --uid=%s' % uid
    status = utils.exec_shell_cmd(cmd)
    if not status[0]:
        raise AssertionError(status[1])
    log.info('quota enable complete')
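# Usage sketch: a bucket quota only takes effect once it is both set and
# enabled, so the two admin calls are typically run back to back. 'admin_ops'
# is a hypothetical instance of the class hosting these methods and the uid
# is a placeholder.
def _example_bucket_quota(admin_ops):
    admin_ops.set_bucket_quota('qe_user', max_objects=5)
    admin_ops.enable_bucket_quota('qe_user')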
def create_json_data(self):
    log.info("creating json data")
    json_data = {
        "mp_id": self.mp_id,
        "key_name": self.key_name,
        "total_parts": self.total_parts_count,
        "bucket_name": self.bucket_name,
        "remaining_parts": self.remaining_file_parts,
    }
    return json_data
def delete_bucket(self):
    for bucket_name in self.bucket_names:
        bucket_deleted = self.bucket_ops.delete(bucket_name)
        if not bucket_deleted['status']:
            raise AssertionError(bucket_deleted['msgs'])
        log.info('bucket deleted')
def backup(self, uname):
    """
    back up the existing config
    """
    original_fname = os.path.join(self.conf_path, self.fname)
    log.info('original file name: %s' % original_fname)
    backup_fname = os.path.join(str(self.conf_path),
                                str(self.fname) + '.%s' % uname + '.bkp')
    log.info('backup file name: %s' % backup_fname)
    cmd = 'sudo mv %s %s' % (original_fname, backup_fname)
    utils.exec_shell_cmd(cmd)