def test_load_settings(self, Settings_mock):
    RGWClientTestCase._mock_settings(Settings_mock)
    RGWClient._load_settings()  # Also test import of awsauth.S3Auth
    self.assertEqual(RGWClient._host, 'host')
    self.assertEqual(RGWClient._port, 42)
    self.assertEqual(RGWClient._ssl, True)
    self.assertEqual(RGWClient._ADMIN_PATH, 'ADMIN_RESOURCE')
    self.assertEqual(RGWClient._SYSTEM_USERID, 'USER_ID')
    instance = RGWClient._user_instances[RGWClient._SYSTEM_USERID]
    self.assertEqual(instance.userid, 'USER_ID')
def bucket_get(request):
    params = request.GET.copy()
    if 'bucket' not in params:
        raise ValidationError('No bucket parameter provided')
    response = proxy_view(request, 'bucket')
    # Get the response content.
    content = json.loads(response.content)
    # Check if the bucket is shared via NFS and append the
    # 'is_referenced' attribute.
    query = GaneshaExport.objects.filter(path=params['bucket'])
    content['is_referenced'] = bool(query)
    # Append the 'endpoint' attribute.
    instance = RGWClient.admin_instance()
    scheme = 'https' if instance._ssl else 'http'
    content['endpoint'] = {
        's3': '{}://{}.{}'.format(scheme, content['bucket'],
                                  instance.service_url),
        'swift': '{}://{}/v1/{}/{}'.format(scheme, instance.service_url,
                                           content['owner'],
                                           content['bucket'])
    }
    # Update the response content.
    response.content = json.dumps(content)
    return response
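# Illustrative sketch (not part of the original code): the shape of the JSON
# body returned by bucket_get() after the 'is_referenced' and 'endpoint'
# attributes have been appended. Values are placeholders; the remaining keys
# come from the RGW admin API response forwarded by proxy_view().
EXAMPLE_BUCKET_GET_RESPONSE = {
    'bucket': 'mybucket',
    'owner': 'myuser',
    'is_referenced': False,
    'endpoint': {
        's3': 'https://mybucket.rgw.example.com:7480',
        'swift': 'https://rgw.example.com:7480/v1/myuser/mybucket'
    }
}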
def buckets(request):
    if 'userid' not in request.GET:
        raise ValidationError('No userid parameter provided')
    try:
        return Response({
            'buckets': RGWClient.instance(request.GET['userid']).get_buckets()
        })
    except RequestException as e:
        logger.error(e)
        return Response({'buckets': []})
def check_rgw_admin_permissions():
    try:
        if not RGWClient.admin_instance().is_system_user():
            raise UnavailableModule(Reason.RGW_NOT_SYSTEM_USER, None)
    except RequestException as ex:
        if ex.status_code and ex.status_code in [401, 403]:
            raise UnavailableModule(Reason.RGW_NOT_SYSTEM_USER, None)
        else:
            raise UnavailableModule(Reason.RGW_HTTP_PROBLEM, str(ex))
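# Illustrative sketch (assumption, not from the original code): exercising
# check_rgw_admin_permissions() with a mocked admin instance. It assumes
# RGWClient, UnavailableModule and check_rgw_admin_permissions are importable
# in the test module; the patch target may need adjusting to the real import
# path.
from unittest import mock

def test_non_system_user_raises_unavailable_module():
    with mock.patch.object(RGWClient, 'admin_instance') as admin_mock:
        # The mocked admin user reports that it is not a system user.
        admin_mock.return_value.is_system_user.return_value = False
        try:
            check_rgw_admin_permissions()
            assert False, 'expected UnavailableModule'
        except UnavailableModule:
            pass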
def get(self, request):
    if 'user_id' not in request.GET or \
            'admin_path' not in request.GET or \
            'access_key' not in request.GET or \
            'secret_key' not in request.GET or \
            'host' not in request.GET or \
            'port' not in request.GET or \
            'use_ssl' not in request.GET:
        raise ValidationError(
            '"host", "port", "user_id", "admin_path", "access_key", '
            '"secret_key", and "use_ssl" params are required')
    if not all((request.GET['user_id'], request.GET['admin_path'],
                request.GET['access_key'], request.GET['secret_key'],
                request.GET['host'], request.GET['port'],
                request.GET['use_ssl'])):
        return Response({
            'success': False,
            'message': 'Configuration incomplete'
        })
    rgwclient = RGWClient(request.GET['user_id'],
                          request.GET['access_key'],
                          request.GET['secret_key'],
                          request.GET['host'],
                          request.GET['port'],
                          request.GET['admin_path'],
                          request.GET['use_ssl'] == 'true')
    response = _check_rest_client_connection(rgwclient)
    if not response['success']:
        return Response(response)
    try:
        if rgwclient.is_system_user():
            return Response({'success': True})
    except RequestException:
        pass
    return Response({
        'success': False,
        'message': 'Non system user capabilities, or wrong admin resource path'
    })
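# Illustrative sketch (assumption, not from the original code): probing this
# configuration-check view with Django's test client. Only the parameter names
# are taken from the view above; the URL path and all values are hypothetical
# placeholders.
from django.test import Client

def check_rgw_configuration_example():
    client = Client()
    return client.get('/api/ceph_rgw/configuration/check', {  # hypothetical URL
        'host': 'rgw.example.com',
        'port': '7480',
        'user_id': 'admin',
        'access_key': 'ACCESS_KEY',
        'secret_key': 'SECRET_KEY',
        'admin_path': 'admin',
        'use_ssl': 'true',  # the view compares this string literally to 'true'
    })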
def proxy_view(request, path):
    try:
        result = RGWClient.admin_instance().proxy(request.method, path,
                                                  request.GET.copy(),
                                                  request.body)
        return HttpResponse(result, status=200)
    except RequestException as e:
        if not e.status_code:
            raise Exception(str(e))
        return HttpResponse(e.content, status=e.status_code)
    except RGWClient.NoCredentialsException:
        return NoCredentialsResponse()
def fsals(request):
    res = DeepSea.instance().nfs_get_fsals_available()
    if 'CEPH' in res:
        if not CephFSUtil:
            res = [f for f in res if f != 'CEPH']
        else:
            cluster = FsidContext(request=request,
                                  module_name='ceph_nfs').cluster
            try:
                if not CephFSUtil.instance(cluster).status():
                    res = [f for f in res if f != 'CEPH']
            except libcephfs.PermissionError:
                res = [f for f in res if f != 'CEPH']
    if 'RGW' in res:
        try:
            if not RGWClient.admin_instance().is_service_online():
                res = [f for f in res if f != 'RGW']
            if not RGWClient.admin_instance().is_system_user():
                res = [f for f in res if f != 'RGW']
        except (RGWClient.NoCredentialsException, RequestException):
            res = [f for f in res if f != 'RGW']
    return Response({'fsals': res})
def check_deepsea_nfs_api(fsid):
    def map_status_code(status_code, resp_content):
        _table = {
            '401': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '403': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '500': (Reason.DEEPSEA_NFS_RUNNER_ERROR, resp_content)
        }
        if str(status_code) not in _table:
            raise UnavailableModule(
                Reason.DEEPSEA_NFS_UNKNOWN_PROBLEM,
                "DeepSea server returned status_code={}, content=\n{}".format(
                    status_code, resp_content))
        raise UnavailableModule(*_table[str(status_code)])

    try:
        hosts = DeepSea.instance().nfs_get_hosts()
        if not hosts:
            raise UnavailableModule(Reason.DEEPSEA_NFS_NO_HOSTS,
                                    "No hosts found with ganesha role")
        fsals = DeepSea.instance().nfs_get_fsals_available()
        if not fsals:
            raise UnavailableModule(Reason.DEEPSEA_NFS_NO_FSALS,
                                    "No fsals supported by this cluster")
        if fsals == ['CEPH']:
            check_cephfs_api(fsid)
        elif fsals == ['RGW']:
            try:
                if not RGWClient.admin_instance().is_service_online():
                    raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, None)
                if not RGWClient.admin_instance().is_system_user():
                    raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, None)
            except (RGWClient.NoCredentialsException, RequestException) as e:
                raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, str(e))
    except RequestException as e:
        if e.status_code:
            return map_status_code(e.status_code, e.content)
        raise UnavailableModule(Reason.DEEPSEA_NFS_UNKNOWN_PROBLEM, str(e))
def bucket_create(request):
    try:
        params = request.GET.copy()
        if 'bucket' not in params:
            raise ValidationError('No bucket parameter provided')
        if 'uid' not in params:
            raise ValidationError('No uid parameter provided')
        result = RGWClient.instance(params['uid']).create_bucket(
            params['bucket'])
        return HttpResponse(result, status=200)
    except RequestException as e:
        if not e.status_code:
            raise Exception(str(e))
        return HttpResponse(e.content, status=e.status_code)
    except RGWClient.NoCredentialsException:
        return NoCredentialsResponse()
def check_rgw_connection():
    def raise_for_error_number(errno):
        table = {
            '111': Reason.RGW_CONNECTION_REFUSED,
            '-2': Reason.RGW_UNKNOWN_HOST,
            '110': Reason.RGW_CONNECTION_TIMEOUT,
            '113': Reason.RGW_NO_ROUTE_TO_HOST,
        }
        raise UnavailableModule(
            table[errno] if errno in table else Reason.RGW_CONN_UNKNOWN_PROBLEM,
            None)

    def raise_for_status_code(status_code, message=None):
        table = {
            '401': Reason.RGW_FAILED_AUTHENTICATION,
            '403': Reason.RGW_FAILED_AUTHENTICATION,
            '500': Reason.RGW_INTERNAL_SERVER_ERROR,
        }
        if str(status_code) not in table:
            raise UnavailableModule(
                Reason.RGW_HTTP_PROBLEM,
                "RGW server returned status_code={}".format(status_code))
        raise UnavailableModule(table[str(status_code)], message)

    try:
        online = RGWClient.admin_instance().is_service_online()
        if not online:
            raise UnavailableModule(Reason.RGW_HTTP_PROBLEM,
                                    "Unexpected RGW response output")
    except RequestException as ex:
        if ex.conn_errno and ex.conn_strerror:
            raise_for_error_number(ex.conn_errno)
        elif ex.status_code:
            raise_for_status_code(
                ex.status_code,
                ex.content if ex.status_code == 500 else None)
        raise UnavailableModule(Reason.RGW_HTTP_PROBLEM, str(ex))
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    context = GaneshaExport.objects.nodb_context
    cluster = context.cluster

    self.path = self.path.strip()
    if self.fsal == 'CEPH' and len(self.path) > 1 and self.path[-1] == '/':
        self.path = self.path[:-1]

    export_models = [e for e in self.__class__.objects.all()]
    if hasattr(self, 'id'):
        export_models = [e for e in export_models if e.id != self.id]

    self._validate([e for e in export_models if e.host == self.host], cluster)

    try:
        rgw_is_online = RGWClient.admin_instance().is_service_online()
    except (RGWClient.NoCredentialsException, RequestException):
        rgw_is_online = False
    if self.fsal == 'RGW' and not rgw_is_online:
        raise Exception(
            "RGW REST service is not online, please check if service is "
            "running and openATTIC configuration settings")

    # Make sure the backing storage exists: a CephFS directory for CEPH
    # exports, or an RGW bucket for RGW exports.
    if self.fsal == 'CEPH' and not CephFSUtil.instance(cluster).dir_exists(
            self.path):
        cephfs = get_dbus_object("/cephfs")
        cephfs.cephfs_mkdirs(cluster.fsid, self.path)
    elif self.fsal == 'RGW':
        rgw = RGWClient.instance(self.rgwUserId)
        try:
            exists = rgw.bucket_exists(self.path, self.rgwUserId)
            logger.debug(
                'Checking existence of RGW bucket "%s" for user "%s": %s',
                self.path, self.rgwUserId, exists)
        except RequestException as exp:
            if exp.status_code == 403:
                raise Exception(
                    'Bucket "{}" already exists, and belongs to other user.'
                    .format(self.path))
            else:
                raise exp
        if not exists:
            logger.info('Creating new RGW bucket "%s" for user "%s"',
                        self.path, self.rgwUserId)
            rgw.create_bucket(self.path)

    if not hasattr(self, 'exportId'):
        # new export case
        setattr(self, 'exportId', GaneshaExport._gen_export_id(
            [e for e in export_models if e.host == self.host]))
        setattr(self, 'id', '{}:{}'.format(self.host, self.exportId))
        logger.info("Creating new export id=%s exportId=%s path=%s",
                    self.id, self.exportId, self.path)
    else:
        # update export case
        # update id and exportId due to host change
        if self.id.split(':')[0] != self.host:
            self.exportId = GaneshaExport._gen_export_id(
                [e for e in export_models if e.host == self.host])
            self.id = '{}:{}'.format(self.host, self.exportId)

    status = DeepSea.instance().nfs_status_exports()
    GaneshaExport.save_exports(export_models + [self], [])
    if status[self.host]['active']:
        async_deploy_exports.delay(self.host)

    super(GaneshaExport, self).save(force_insert, force_update, using,
                                    update_fields)
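# Illustrative sketch (not from the original code): creating an RGW-backed
# export and saving it. Field names are inferred from the attributes used in
# save() and save_exports(); the real model may define additional or
# differently named fields, so treat this as an assumption.
export = GaneshaExport(
    host='nfs-host-1',
    path='mybucket',          # RGW bucket name
    pseudo='/mybucket',
    tag=None,
    fsal='RGW',
    rgwUserId='admin',
    accessType='RW',
    squash='None',
    protocols=['NFSv4'],      # converted to protocol number 4 by save_exports()
    transports=['TCP'],
    clientBlocks=[],
)
export.save()  # creates the bucket if missing and pushes the export via DeepSea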
def save_exports(export_models, empty_hosts):
    try:
        rgw_is_online = RGWClient.admin_instance().is_service_online()
    except (RGWClient.NoCredentialsException, RequestException):
        rgw_is_online = False

    for e in export_models:
        # fetch keys for all RGW exports
        if e.fsal == 'RGW' and rgw_is_online:
            keys = RGWClient.admin_instance().get_user_keys(e.rgwUserId)
            setattr(e, 'rgwAccessKey', keys['access_key'])
            setattr(e, 'rgwSecretKey', keys['secret_key'])

    hosts_exports = []
    export_ids = []
    for e in export_models:
        export_ids.append(e.id)
        host_exports_list = [ht for ht in hosts_exports
                             if ht['host'] == e.host]
        if not host_exports_list:
            host_exports_list = [{'host': e.host, 'exports': []}]
            hosts_exports.append(host_exports_list[0])
        exports = host_exports_list[0]['exports']

        export = {
            'export_id': e.exportId,
            'path': e.path,
            'pseudo': e.pseudo,
            'tag': e.tag,
            'access_type': e.accessType,
            'squash': e.squash,
            'protocols': [int(p[4:]) for p in e.protocols],
            'transports': e.transports,
            'fsal': {
                'name': e.fsal
            },
            'client_blocks': e.clientBlocks if e.clientBlocks else []
        }
        for client_block in export['client_blocks']:
            if 'accessType' in client_block:
                client_block['access_type'] = client_block['accessType']
                client_block.pop('accessType')
        if e.fsal == 'RGW':
            # get userId RGW credentials from rgw module
            export['fsal']['user_id'] = e.rgwUserId
            if hasattr(e, 'rgwAccessKey') and hasattr(e, 'rgwSecretKey'):
                export['fsal']['access_key_id'] = e.rgwAccessKey
                export['fsal']['secret_access_key'] = e.rgwSecretKey
        exports.append(export)

    for host in empty_hosts:
        hosts_exports.append({'host': host, 'exports': []})

    result = DeepSea.instance().nfs_save_exports(json.dumps(hosts_exports))
    if not result['success']:
        logger.error("Error saving NFS-Ganesha exports: %s",
                     result['message'])
        raise Exception(
            'DeepSea Error: saving NFS-Ganesha exports\n{}'.format(
                result['message']))
    else:
        logger.info("NFS Ganesha exports saved successfully: %s", export_ids)
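# Illustrative sketch (not part of the original code): the structure of the
# payload that save_exports() serializes and passes to
# DeepSea.instance().nfs_save_exports(). Values are placeholders; the keys
# mirror the dictionary built in the loop above.
EXAMPLE_HOSTS_EXPORTS = [{
    'host': 'nfs-host-1',
    'exports': [{
        'export_id': 1,
        'path': 'mybucket',
        'pseudo': '/mybucket',
        'tag': None,
        'access_type': 'RW',
        'squash': 'None',
        'protocols': [3, 4],
        'transports': ['TCP'],
        'fsal': {
            'name': 'RGW',
            'user_id': 'admin',
            'access_key_id': 'ACCESS_KEY',
            'secret_access_key': 'SECRET_KEY'
        },
        'client_blocks': []
    }]
}]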