def test_deepsea_pillar_items_success(self):
    """pillar_items() must unwrap the 'return' envelope into the minion map."""
    def _pillar():
        # Build a fresh copy each time so the expected value is an
        # independent object from the mocked response payload.
        return {
            'minion1': {
                'roles': ['storage', 'mon', 'igw'],
                'public_network': '10.1.0.0/24',
                'cluster_network': '10.1.0.0/24',
                'fsid': 'aaabbb',
            },
            'minion2': {
                'roles': ['storage', 'rgw'],
                'public_network': '10.1.0.0/24',
                'cluster_network': '10.1.0.0/24',
                'fsid': 'aaabbb',
            }
        }

    with mock.patch("requests.Session") as mock_requests_session:
        pillar_resp = mock.MagicMock()
        pillar_resp.ok = True
        pillar_resp.status_code = 200
        pillar_resp.json.return_value = {'return': [_pillar()]}
        # First POST performs the login, second POST fetches the pillar.
        mock_requests_session().post.side_effect = [self._login_resp,
                                                    pillar_resp]

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        res = api.pillar_items()

        self.assertTrue(mock_requests_session().post.called)
        self.assertTrue(api._is_logged_in())
        self.assertEqual(res, _pillar())
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Validate and persist this iSCSI target through DeepSea's lrbd config.

    Replaces any stored target with the same ``targetId``, honours a pending
    rename via ``newTargetId`` (rejecting duplicates), saves the resulting
    target set through DeepSea, and — when the iSCSI service is running —
    schedules a redeploy of exports on every affected minion.
    """
    self._validate()
    targets = []
    old_target = None
    # Collect all other targets; remember (and drop) the previous version
    # of this one so it is replaced by `self` below.
    for target in self.__class__.objects.all():
        if target.targetId == self.targetId:
            old_target = target
            continue
        targets.append(target)
    if self.newTargetId and self.newTargetId != self.targetId:
        same_target_id = [
            t for t in targets if t.targetId == self.newTargetId
        ]
        if same_target_id:
            # BUG FIX: was `self.target_id`, an attribute that does not
            # exist on this model (attributes are camelCase), so hitting
            # this branch raised AttributeError instead of the intended
            # message. The duplicate ID is the requested new one.
            raise Exception('Target ID: "{}" already exists'.format(
                self.newTargetId))
        self.targetId = self.newTargetId
    targets.append(self)
    lrbd = LRBDUi(targets)
    if DeepSea.instance().iscsi_save(lrbd.lrbd_conf_json()):
        logger.info("Saved iSCSI Targets:\n%s",
                    [t.targetId for t in targets])
    else:
        logger.info("Failed to save iSCSI Targets")
    status = DeepSea.instance().iscsi_status()
    if status:
        # Redeploy on the minions hosting the new portals, plus the ones
        # that hosted the previous version of this target (if any).
        minions = iSCSITarget.extract_hostnames(self.portals)
        if old_target:
            minions = minions.union(
                iSCSITarget.extract_hostnames(old_target.portals))
        task = tasks.async_deploy_exports.delay(list(minions))
        logger.info("Scheduled deploy of iSCSI exports: taskqueue_id=%s",
                    task.id)
    super(iSCSITarget, self).save(force_insert, force_update, using,
                                  update_fields)
def test_deepsea_service_offline_request_error(self):
    """A failing GET (404) must surface as RequestException."""
    with mock.patch("requests.Session") as mock_requests_session:
        login_resp = mock.MagicMock()
        login_resp.ok = True
        login_resp.status_code = 200
        login_resp.json.return_value = {'return': [{'token': 'validtoken'}]}
        mock_requests_session().post.return_value = login_resp

        offline_resp = mock.MagicMock()
        offline_resp.ok = False
        offline_resp.status_code = 404
        mock_requests_session().get.return_value = offline_resp

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        with self.assertRaises(RequestException):
            api.is_service_online()
        self.assertTrue(mock_requests_session().get.called)
        self.assertTrue(api._is_logged_in())
def get_all_objects(context, query):
    # context.fsid is ignored for now: DeepSea does not yet support
    # multiple Ceph clusters.
    models = []
    for iface in DeepSea.instance().iscsi_interfaces():
        models.append(iSCSIInterface(**iSCSIInterface.make_model_args(iface)))
    return models
def test_deepsea_login_response_format_error(self):
    """A login payload without a 'token' key raises BadResponseFormatException."""
    with mock.patch(
            "rest_client.TimeoutRequestsSession") as mock_requests_session:
        bad_resp = mock.MagicMock()
        bad_resp.ok = True
        bad_resp.status_code = 200
        bad_resp.json.return_value = {'return': {'invalidtoken': 'validtoken'}}
        mock_requests_session().post.return_value = bad_resp

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        with self.assertRaises(BadResponseFormatException):
            api._login()

        self.assertTrue(mock_requests_session().post.called)
        self.assertEqual(
            mock_requests_session().post.call_args[1]['data'],
            {'username': '******', 'password': '******', 'eauth': 'auto'})
        self.assertFalse(api._is_logged_in())
        self.assertEqual(api.token, None)
def test_deepsea_login_success(self):
    """A valid login response stores the token and marks the client logged in."""
    with mock.patch("requests.Session") as mock_requests_session:
        login_resp = mock.MagicMock()
        login_resp.ok = True
        login_resp.status_code = 200
        login_resp.json.return_value = {'return': [{'token': 'validtoken'}]}
        mock_requests_session().post.return_value = login_resp

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        api._login()

        self.assertTrue(mock_requests_session().post.called)
        self.assertEqual(
            mock_requests_session().post.call_args[1]['data'],
            {'username': '******', 'password': '******', 'eauth': 'auto'})
        self.assertTrue(api._is_logged_in())
        self.assertEqual(api.token, 'validtoken')
def bulk_delete(self, request, *args, **kwargs):
    """Delete the iSCSI targets listed in ``request.DATA['targetIds']``.

    Saves the remaining targets through DeepSea, then — if the iSCSI
    service is running — schedules a redeploy on the minions that hosted
    deleted targets, or stops the service entirely when no target is left.
    """
    targets = iSCSITarget.objects.all()
    status = DeepSea.instance().iscsi_status()
    if 'targetIds' not in request.DATA or not isinstance(
            request.DATA['targetIds'], list):
        logger.error("JSON input is not an array")
        # FIX: carry the reason in the exception instead of raising a bare
        # Exception(); matches the NFS bulk_delete, which raises with this
        # same message.
        raise Exception("JSON input is not an array")
    targets_to_delete = [tid.strip() for tid in request.DATA['targetIds']]
    new_targets = [
        t for t in targets if t.targetId not in targets_to_delete
    ]
    if len(new_targets) == len(targets):
        # Nothing matched the requested IDs; no state change needed.
        logger.info("No iSCSI target deleted")
        return Response()
    lrbd = LRBDUi(new_targets)
    if DeepSea.instance().iscsi_save(lrbd.lrbd_conf_json()):
        logger.info("Deleted iSCSI targets %s", targets_to_delete)
    else:
        logger.info("Failed to delete iSCSI targets %s", targets_to_delete)
        raise Exception('Failed to delete iSCSI targets')
    if new_targets and status:
        # Redeploy only on the minions that hosted a deleted target.
        portals_to_deploy_lists = [
            t.portals for t in targets if t.targetId in targets_to_delete
        ]
        portals_to_deploy = [
            val for sublist in portals_to_deploy_lists for val in sublist
        ]
        minions = iSCSITarget.extract_hostnames(portals_to_deploy)
        task = tasks.async_deploy_exports.delay(list(minions))
        logger.info("Scheduled deploy of iSCSI exports: taskqueue_id=%s",
                    task.id)
    elif status:
        # No targets remain: stop the iSCSI service altogether.
        task = tasks.async_stop_exports.delay()
        logger.info("Scheduled stop of iSCSI: taskqueue_id=%s", task.id)
    return Response()
def test_deepsea_service_online(self):
    """is_service_online() returns True when DeepSea answers with 'Welcome'."""
    with mock.patch(
            "rest_client.TimeoutRequestsSession") as mock_requests_session:
        login_resp = mock.MagicMock()
        login_resp.ok = True
        login_resp.status_code = 200
        login_resp.json.return_value = {'return': [{'token': 'validtoken'}]}
        mock_requests_session().post.return_value = login_resp

        welcome_resp = mock.MagicMock()
        welcome_resp.ok = True
        welcome_resp.status_code = 200
        welcome_resp.json.return_value = {'return': 'Welcome'}
        mock_requests_session().get.return_value = welcome_resp

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        self.assertTrue(api.is_service_online())
        self.assertTrue(mock_requests_session().get.called)
        self.assertTrue(api._is_logged_in())
def test_deepsea_login_fail_401(self):
    """A 401 from the login endpoint raises RequestException; no token is kept."""
    with mock.patch("requests.Session") as mock_requests_session:
        unauthorized = mock.MagicMock()
        unauthorized.ok = False
        unauthorized.status_code = 401
        mock_requests_session().post.return_value = unauthorized

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        with self.assertRaises(RequestException) as context:
            api._login()

        self.assertEqual(context.exception.status_code, 401)
        self.assertTrue(mock_requests_session().post.called)
        self.assertEqual(
            mock_requests_session().post.call_args[1]['data'],
            {'username': '******', 'password': '******', 'eauth': 'auto'})
        self.assertFalse(api._is_logged_in())
        self.assertEqual(api.token, None)
def check_deepsea_version(min_version):
    """Raise UnavailableModule unless DeepSea reports a version >= min_version.

    Any request failure or a missing/empty/too-old version is reported as
    Reason.DEEPSEA_OLD_VERSION with a uniform message.
    """
    message = "Minimum DeepSea version required is {}".format(min_version)
    try:
        deepsea_version = DeepSea.instance().get_deepsea_version()
        # FIX: idiomatic membership test ('not in' instead of 'not ... in').
        if 'version' not in deepsea_version:
            raise UnavailableModule(Reason.DEEPSEA_OLD_VERSION, message)
        version = deepsea_version['version']
        if not version:
            raise UnavailableModule(Reason.DEEPSEA_OLD_VERSION, message)
        if StrictVersion(version) < StrictVersion(min_version):
            raise UnavailableModule(Reason.DEEPSEA_OLD_VERSION, message)
    except RequestException:
        raise UnavailableModule(Reason.DEEPSEA_OLD_VERSION, message)
def get_all_objects(context, query):
    # context.fsid is currently ignored: DeepSea does not yet support
    # multiple Ceph clusters.
    config = DeepSea.instance().iscsi_config()
    ui_dicts = []
    for parsed_target in LRBDConf(config).targets():
        ui_dict = parsed_target.to_ui_dict()
        # The UI model expects the rename field to be present but unset.
        ui_dict['newTargetId'] = None
        ui_dicts.append(ui_dict)
    return [iSCSITarget(**iSCSITarget.make_model_args(d)) for d in ui_dicts]
def check_deepsea_connection():
    """Verify openATTIC can reach and authenticate against the DeepSea API.

    Raises UnavailableModule with a specific Reason for connection-level
    errors (refused/unknown host/timeout/no route), HTTP-level errors
    (auth failure, server error) or an unexpected response body.
    """
    def map_errno_to_reason(errno):
        # Translate a socket errno (as a string) into an unavailable reason.
        _table = {
            '111': Reason.DEEPSEA_CONNECTION_REFUSED,
            '-2': Reason.DEEPSEA_UNKNOWN_HOST,
            '110': Reason.DEEPSEA_CONNECTION_TIMEOUT,
            '113': Reason.DEEPSEA_NO_ROUTE_TO_HOST,
        }
        # FIX: dict.get() with a default replaces the conditional-expression
        # membership test + lookup (one lookup instead of two).
        raise UnavailableModule(
            _table.get(errno, Reason.DEEPSEA_CONN_UNKNOWN_PROBLEM), None)

    def map_status_code(status_code, message=None):
        # Translate an HTTP status code into an unavailable reason.
        _table = {
            '401': Reason.DEEPSEA_FAILED_AUTHENTICATION,
            '403': Reason.DEEPSEA_FAILED_AUTHENTICATION,
            '500': Reason.DEEPSEA_INTERNAL_SERVER_ERROR,
        }
        code = str(status_code)  # hoisted: was computed twice
        if code not in _table:
            raise UnavailableModule(
                Reason.DEEPSEA_HTTP_PROBLEM,
                "DeepSea server returned status_code={}".format(status_code))
        raise UnavailableModule(_table[code], message)

    if not DeepSea.instance().is_configured():
        raise UnavailableModule(Reason.DEEPSEA_INCOMPLETE_CONFIGURATION)
    try:
        online = DeepSea.instance().is_service_online()
        if not online:
            raise UnavailableModule(Reason.DEEPSEA_HTTP_PROBLEM,
                                    "Unexpected DeepSea response output")
    except RequestException as ex:
        if ex.conn_errno and ex.conn_strerror:
            return map_errno_to_reason(ex.conn_errno)
        elif ex.status_code:
            # Only 500 responses carry the body text as the message.
            return map_status_code(
                ex.status_code, ex.content if ex.status_code == 500 else None)
        raise UnavailableModule(Reason.DEEPSEA_HTTP_PROBLEM, str(ex))
def check_deepsea_nfs_api(fsid):
    """Probe DeepSea's NFS (Ganesha) API and raise UnavailableModule on problems.

    Checks that hosts with the ganesha role exist, that at least one FSAL is
    available, and — when exactly one FSAL is offered — that the backing
    service (CephFS or RGW) is actually usable.
    """
    def map_status_code(status_code, resp_content):
        # Translate an HTTP status code from DeepSea into an unavailable
        # reason; 500 responses carry the runner error text as the message.
        _table = {
            '401': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '403': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '500': (Reason.DEEPSEA_NFS_RUNNER_ERROR, resp_content)
        }
        if str(status_code) not in _table:
            raise UnavailableModule(
                Reason.DEEPSEA_NFS_UNKNOWN_PROBLEM,
                "DeepSea server returned status_code={}, content=\n{}".format(
                    status_code, resp_content))
        raise UnavailableModule(*_table[str(status_code)])

    try:
        hosts = DeepSea.instance().nfs_get_hosts()
        if not hosts:
            raise UnavailableModule(Reason.DEEPSEA_NFS_NO_HOSTS,
                                    "No hosts found with ganesha role")
        fsals = DeepSea.instance().nfs_get_fsals_available()
        if not fsals:
            raise UnavailableModule(Reason.DEEPSEA_NFS_NO_FSALS,
                                    "No fsals supported by this cluster")
        # Only validate the backing service when it is the sole FSAL —
        # with both available, a broken one does not block the module.
        if fsals == ['CEPH']:
            check_cephfs_api(fsid)
        elif fsals == ['RGW']:
            try:
                if not RGWClient.admin_instance().is_service_online():
                    raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, None)
                if not RGWClient.admin_instance().is_system_user():
                    raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, None)
            except (RGWClient.NoCredentialsException, RequestException) as e:
                raise UnavailableModule(Reason.OPENATTIC_NFS_NO_RGW, str(e))
    except RequestException as e:
        if e.status_code:
            return map_status_code(e.status_code, e.content)
        raise UnavailableModule(Reason.DEEPSEA_NFS_UNKNOWN_PROBLEM, str(e))
def test_deepsea_keys_login_error(self):
    """A 503 on re-login while listing keys propagates as RequestException."""
    with mock.patch("requests.Session") as mock_requests_session:
        failed_login = mock.MagicMock()
        failed_login.ok = False
        failed_login.status_code = 503
        # First POST succeeds (initial login), second POST (re-login) fails.
        mock_requests_session().post.side_effect = [
            self._login_resp, failed_login
        ]

        unauthorized = mock.MagicMock()
        unauthorized.ok = False
        unauthorized.status_code = 401
        mock_requests_session().get.side_effect = [unauthorized]

        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        with self.assertRaises(RequestException) as context:
            api.key_list()

        self.assertTrue(mock_requests_session().post.called)
        self.assertTrue(mock_requests_session().get.called)
        self.assertEqual(context.exception.status_code, 503)
def test_deepsea_iscsi_interfaces(self):
    """iscsi_interfaces() reshapes the minion->addresses map into a list."""
    interfaces_by_minion = {
        'minion1': ['192.168.10.101', '192.168.121.41'],
        'minion2': ['192.168.10.102', '192.168.121.42']
    }
    with mock.patch(
            "rest_client.TimeoutRequestsSession") as mock_requests_session:
        login_resp = mock.MagicMock()
        login_resp.ok = True
        login_resp.status_code = 200
        login_resp.json.return_value = {'return': [{'token': 'validtoken'}]}

        iscsi_resp = mock.MagicMock()
        iscsi_resp.ok = True
        iscsi_resp.status_code = 200
        iscsi_resp.json.return_value = {'return': [interfaces_by_minion]}

        # First POST logs in, second POST runs the interfaces query.
        mock_requests_session().post.side_effect = [login_resp, iscsi_resp]

        api = DeepSea('localhost', 8000, 'auto', 'admin', 'admin')
        res = api.iscsi_interfaces()

        self.assertTrue(mock_requests_session().post.called)
        self.assertTrue(api._is_logged_in())
        self.assertEqual(res, [
            {'hostname': 'minion1',
             'interfaces': ['192.168.10.101', '192.168.121.41']},
            {'hostname': 'minion2',
             'interfaces': ['192.168.10.102', '192.168.121.42']}
        ])
def fsals(request):
    """Return the FSALs usable by this deployment.

    Starts from DeepSea's list and drops 'CEPH' or 'RGW' when the backing
    service is missing, unreachable or misconfigured.
    """
    def _without(values, name):
        # Remove every occurrence of `name` from the FSAL list.
        return [f for f in values if f != name]

    res = DeepSea.instance().nfs_get_fsals_available()
    if 'CEPH' in res:
        if not CephFSUtil:
            res = _without(res, 'CEPH')
        else:
            cluster = FsidContext(request=request,
                                  module_name='ceph_nfs').cluster
            try:
                if not CephFSUtil.instance(cluster).status():
                    res = _without(res, 'CEPH')
            except libcephfs.PermissionError:
                res = _without(res, 'CEPH')
    if 'RGW' in res:
        try:
            if not RGWClient.admin_instance().is_service_online():
                res = _without(res, 'RGW')
            if not RGWClient.admin_instance().is_system_user():
                res = _without(res, 'RGW')
        except (RGWClient.NoCredentialsException, RequestException):
            res = _without(res, 'RGW')
    return Response({'fsals': res})
def bulk_delete(self, request, *args, **kwargs):
    """Delete the Ganesha exports listed in ``request.DATA['exportIds']``.

    Requires the 'ceph_nfs.delete_ganeshaexport' permission; without it a
    403 response is returned.  After saving the reduced export set, exports
    are stopped on hosts left without any export and redeployed on hosts
    that still carry some.
    """
    delper = 'ceph_nfs.delete_ganeshaexport'
    allper = User.objects.get(username=request.user).get_all_permissions()
    if delper in allper:
        exports = GaneshaExport.objects.all()
        # Hosts that currently carry at least one export.
        hosts = set([e.host for e in exports])
        if 'exportIds' not in request.DATA or not isinstance(
                request.DATA['exportIds'], list):
            logger.error("JSON input is not an array")
            raise ValidationError("JSON input is not an array")
        exports_to_delete = [
            eid.strip() for eid in request.DATA['exportIds']
        ]
        logger.info("Deleting exports: %s", exports_to_delete)
        new_exports = [e for e in exports if e.id not in exports_to_delete]
        if len(new_exports) == len(exports):
            # Nothing matched; no state change or redeploy needed.
            logger.info("No Ganesha export deleted")
            return Response()
        status = DeepSea.instance().nfs_status_exports()
        # Hosts whose last export was just removed.
        empty_hosts = hosts - (set(e.host for e in new_exports))
        GaneshaExport.save_exports(new_exports, empty_hosts)
        for host in empty_hosts:
            if status[host]['active']:
                async_stop_exports.delay(host)
        for host in set(e.host for e in new_exports):
            if status[host]['active']:
                async_deploy_exports.delay(host)
        return Response()
    else:
        return Response(
            status=403,
            data={
                "detail":
                "You do not have permission to perform this action."
            })
def _load_settings():
    """Initialise RGWClient's class-level connection settings.

    Prefers locally configured ``RGW_API_*`` settings; falls back to asking
    DeepSea for the RGW credentials.  Raises
    ``RGWClient.NoCredentialsException`` when neither source yields usable
    credentials.
    """
    # All seven local settings must be present to bypass DeepSea.
    if all((Settings.RGW_API_HOST, Settings.RGW_API_PORT,
            Settings.RGW_API_SCHEME, Settings.RGW_API_ADMIN_RESOURCE,
            Settings.RGW_API_USER_ID, Settings.RGW_API_ACCESS_KEY,
            Settings.RGW_API_SECRET_KEY)):
        logger.info("Using local RGW settings to connect to RGW REST API")
        credentials = {
            'host': Settings.RGW_API_HOST,
            'port': Settings.RGW_API_PORT,
            'scheme': Settings.RGW_API_SCHEME,
            'admin_path': Settings.RGW_API_ADMIN_RESOURCE,
            'user_id': Settings.RGW_API_USER_ID,
            'access_key': Settings.RGW_API_ACCESS_KEY,
            'secret_key': Settings.RGW_API_SECRET_KEY
        }
    else:
        try:
            credentials = DeepSea.instance().get_rgw_api_credentials()
            if not credentials:
                logger.error(
                    "DeepSea failed to give the credentials info.")
                raise RGWClient.NoCredentialsException()
            logger.info(
                "Using DeepSea RGW settings to connect to RGW REST API")
        except RequestException as e:
            logger.exception(e)
            raise RGWClient.NoCredentialsException()
    RGWClient._host = credentials['host']
    RGWClient._port = credentials['port']
    RGWClient._ssl = credentials['scheme'] == 'https'
    logger.info("Creating new connection for user: %s",
                credentials['user_id'])
    RGWClient._ADMIN_PATH = credentials['admin_path']
    RGWClient._SYSTEM_USERID = credentials['user_id']
    # Cache the system-user client instance for reuse by later callers.
    RGWClient._user_instances[RGWClient._SYSTEM_USERID] = RGWClient(
        credentials['user_id'], credentials['access_key'],
        credentials['secret_key'])
def get(self, request):
    """Validate connection parameters and test a DeepSea REST connection.

    Requires 'host', 'port' and 'eauth'; depending on the auth scheme also
    'username' plus either 'password' ('auto') or 'shared_secret'
    ('sharedsecret').
    """
    params = request.GET
    if any(key not in params for key in ('host', 'port', 'eauth')):
        raise ValidationError(
            '"host", "port", and "eauth" params are required')
    eauth = params['eauth']
    if eauth == 'auto' and ('username' not in params
                            or 'password' not in params):
        raise ValidationError(
            '"username", and "password" params are required')
    if eauth == 'sharedsecret' and ('username' not in params
                                    or 'shared_secret' not in params):
        raise ValidationError(
            '"username", and "shared_secret" params are required')
    if eauth == 'sharedsecret':
        password = params['shared_secret']
    else:
        password = params['password']
    deepsea = DeepSea(params['host'], params['port'], eauth,
                      params['username'], password)
    return Response(_check_rest_client_connection(deepsea))
def check_deepsea_iscsi_api():
    """Check that DeepSea's iSCSI API is usable; raise UnavailableModule if not."""
    def map_status_code(status_code, resp_content):
        # Map an HTTP status code from DeepSea onto an unavailable reason;
        # 500 responses carry the runner error text as the message.
        code = str(status_code)
        _table = {
            '401': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '403': (Reason.DEEPSEA_FAILED_AUTHENTICATION, None),
            '500': (Reason.DEEPSEA_ISCSI_RUNNER_ERROR, resp_content)
        }
        if code not in _table:
            raise UnavailableModule(
                Reason.DEEPSEA_ISCSI_UNKNOWN_PROBLEM,
                "DeepSea server returned status_code={}, content=\n{}".format(
                    status_code, resp_content))
        reason, message = _table[code]
        raise UnavailableModule(reason, message)

    try:
        interfaces = DeepSea.instance().iscsi_interfaces()
        if not interfaces:
            raise UnavailableModule(Reason.DEEPSEA_ISCSI_NO_INTERFACES,
                                    "No hosts found with igw role")
    except RequestException as e:
        if e.status_code:
            return map_status_code(e.status_code, e.content)
        raise UnavailableModule(Reason.DEEPSEA_ISCSI_UNKNOWN_PROBLEM, str(e))
def check_rgw_credentials():
    """Ensure RGW credentials are available, locally or via DeepSea.

    Does nothing when all local ``RGW_API_*`` settings are set; otherwise
    raises UnavailableModule when DeepSea is unreachable or returns no
    credentials.
    """
    # Only consult DeepSea when the local settings are incomplete.
    if not all((Settings.RGW_API_HOST, Settings.RGW_API_PORT,
                Settings.RGW_API_SCHEME, Settings.RGW_API_ADMIN_RESOURCE,
                Settings.RGW_API_ACCESS_KEY, Settings.RGW_API_SECRET_KEY)):
        try:
            check_deepsea_connection()
        except UnavailableModule as ex:
            # NOTE(review): 'messsage' looks like a typo for 'message' —
            # confirm no consumer depends on this key before renaming it.
            raise UnavailableModule(Reason.OPENATTIC_RGW_NO_DEEPSEA_CONN, {
                'deepsea_status': {
                    'reason': ex.reason,
                    'messsage': ex.message
                }
            })
        try:
            credentials = DeepSea.instance().get_rgw_api_credentials()
            if not credentials:
                raise UnavailableModule(Reason.OPENATTIC_RGW_NO_DEEPSEA_CRED,
                                        None)
        except RequestException as ex:
            raise UnavailableModule(Reason.OPENATTIC_RGW_NO_DEEPSEA_CRED,
                                    str(ex))
def test_deepsea_singleton(self):
    """DeepSea.instance() must always hand back one shared client object."""
    first = DeepSea.instance()
    second = DeepSea.instance()
    self.assertEqual(first, second)
def async_deploy_exports(minion=None):
    """Task wrapper: deploy NFS exports via DeepSea for the given minion."""
    deepsea = DeepSea.instance()
    return deepsea.nfs_deploy_exports(minion)
def iscsi_status(request):
    """REST view: current iSCSI deployment status reported by DeepSea."""
    deepsea = DeepSea.instance()
    return Response(deepsea.iscsi_status())
def test_deepsea_get_minions_no_role(self):
    """Regression for OP-2507: DeepSea: "roles" is missing from response"""
    with mock.patch("requests.Session") as mock_requests_session:
        # Pillar response for minion1 deliberately omits the 'roles' key.
        resp_pillar = mock.MagicMock()
        resp_pillar.ok = True
        resp_pillar.status_code = 200
        resp_pillar.json.return_value = {
            'return': [{
                'minion1': {
                    "time_init": "ntp",
                    "rgw_configurations": {
                        "rgw": {
                            "users": {
                                "email": "*****@*****.**",
                                "system": True,
                                "name": "Admin",
                                "uid": "admin"
                            }
                        }
                    },
                    "available_roles": [
                        "storage", "admin", "mon", "mds", "mgr", "igw",
                        "openattic", "rgw", "ganesha", "client-cephfs",
                        "client-radosgw", "client-iscsi", "client-nfs",
                        "master"
                    ],
                    "benchmark": {
                        "log-file-directory": "/var/log/cephfs_bench_logs",
                        "job-file-directory": "/run/cephfs_bench_jobs",
                        "default-collection": "simple.yml",
                        "work-directory": "/run/cephfs_bench"
                    },
                    "master_minion": "master_minion.openattic.org",
                    "time_server": "master_minion.openattic.org",
                    "igw_config": "default-ui",
                    "cluster": "ceph",
                    "public_network": "10.0.0.0/19",
                    "cluster_network": "10.0.0.0/19",
                    "stage_prep_master": "default",
                    "fsid": "c0f85b6a-70d7-4c49-81fa-64ed80069e24"
                },
            }]
        }
        # First POST logs in, second POST fetches the pillar data.
        mock_requests_session().post.side_effect = [
            self._login_resp, resp_pillar
        ]
        # GET returns the salt key listing; minion1's key is accepted.
        resp = mock.MagicMock()
        resp.ok = True
        resp.status_code = 200
        resp.json.return_value = {
            'return': {
                'minions': ['minion1'],
                'minions_pre': [],
                'minions_denied': [],
                'minions_rejected': []
            }
        }
        mock_requests_session().get.return_value = resp
        api = DeepSea('localhost', 8000, 'auto', 'hello', 'world')
        res = api.get_minions()
        # Expect the pillar data enriched with hostname/key_status, and no
        # crash despite the missing 'roles' entry.
        self.assertEqual(res, [{
            'benchmark': {
                'log-file-directory': '/var/log/cephfs_bench_logs',
                'job-file-directory': '/run/cephfs_bench_jobs',
                'default-collection': 'simple.yml',
                'work-directory': '/run/cephfs_bench'
            },
            'master_minion': 'master_minion.openattic.org',
            'time_server': 'master_minion.openattic.org',
            'igw_config': 'default-ui',
            'cluster': 'ceph',
            'fsid': 'c0f85b6a-70d7-4c49-81fa-64ed80069e24',
            'time_init': 'ntp',
            'rgw_configurations': {
                'rgw': {
                    'users': {
                        'uid': 'admin',
                        'email': '*****@*****.**',
                        'name': 'Admin',
                        'system': True
                    }
                }
            },
            'available_roles': [
                'storage', 'admin', 'mon', 'mds', 'mgr', 'igw',
                'openattic', 'rgw', 'ganesha', 'client-cephfs',
                'client-radosgw', 'client-iscsi', 'client-nfs', 'master'
            ],
            'hostname': 'minion1',
            'key_status': 'accepted',
            'public_network': '10.0.0.0/19',
            'cluster_network': '10.0.0.0/19',
            'stage_prep_master': 'default'
        }])
def async_status_exports():
    """Task wrapper: query the NFS export status from DeepSea."""
    deepsea = DeepSea.instance()
    return deepsea.nfs_status_exports()
def async_stop_exports(minion=None):
    """Task wrapper: stop NFS exports via DeepSea for the given minion."""
    deepsea = DeepSea.instance()
    return deepsea.nfs_stop_exports(minion)
def status(request):
    """REST view: current NFS export status as reported by DeepSea."""
    deepsea = DeepSea.instance()
    return Response(deepsea.nfs_status_exports())
def hosts(request):
    """REST view: hosts carrying the ganesha role, wrapped in a dict."""
    ganesha_hosts = DeepSea.instance().nfs_get_hosts()
    return Response({'hosts': ganesha_hosts})
def async_stop_exports(minions=None):
    """Task wrapper: undeploy iSCSI exports on the given minions.

    A ``None`` argument is treated as an empty minion list.
    """
    target_minions = [] if minions is None else minions
    return DeepSea.instance().iscsi_undeploy(target_minions)