def _mock_orchestrator(self, enable):
    # mock nfs services
    orch_nfs_services = [
        ServiceDescription(spec=NFSServiceSpec(service_id='foo'))
    ] if enable else []
    # pylint: disable=protected-access
    ganesha.Ganesha._get_orch_nfs_services = Mock(
        return_value=orch_nfs_services)

    # mock nfs daemons
    def _get_nfs_instances(service_name=None):
        if not enable:
            return []
        instances = {
            'nfs.foo': [
                DaemonDescription(daemon_id='foo.host_a', status=1),
                DaemonDescription(daemon_id='foo.host_b', status=1)
            ],
            'nfs.bar': [
                DaemonDescription(daemon_id='bar.host_c', status=1)
            ]
        }
        if service_name is not None:
            return instances[service_name]
        result = []
        for _, daemons in instances.items():
            result.extend(daemons)
        return result

    ganesha.GaneshaConfOrchestrator._get_orch_nfs_instances = Mock(
        side_effect=_get_nfs_instances)
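
# Self-contained sketch (plain strings instead of DaemonDescription) of the
# dispatch rule the _get_nfs_instances side_effect above implements: a
# specific service_name selects that service's daemons; no argument flattens
# every service's daemons into one list.
sketch_instances = {
    'nfs.foo': ['foo.host_a', 'foo.host_b'],
    'nfs.bar': ['bar.host_c'],
}

def get_instances_sketch(service_name=None):
    if service_name is not None:
        return sketch_instances[service_name]
    return [d for daemons in sketch_instances.values() for d in daemons]

assert get_instances_sketch('nfs.bar') == ['bar.host_c']
assert len(get_instances_sketch()) == 3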
def test_get_unique_name(self, ssh_module):
    existing = [
        ServiceDescription(service_instance='mon.a')
    ]
    new_mon = ssh_module.get_unique_name(existing, 'mon')
    assert new_mon.startswith('mon.')
    assert new_mon != 'mon.a'
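
# Hypothetical sketch of the contract the test checks, assuming nothing about
# the real get_unique_name() beyond "<type>.<suffix>" naming: keep drawing
# random suffixes until the candidate is not already taken.
import random
import string

def unique_name_sketch(existing_names, daemon_type):
    while True:
        suffix = ''.join(random.choices(string.ascii_lowercase, k=6))
        candidate = f'{daemon_type}.{suffix}'
        if candidate not in existing_names:
            return candidate

assert unique_name_sketch({'mon.a'}, 'mon').startswith('mon.')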
def test_service_ls(self, cephadm_module):
    with self._with_host(cephadm_module, 'test'):
        c = cephadm_module.list_daemons(refresh=True)
        assert wait(cephadm_module, c) == []

        ps = PlacementSpec(hosts=['test'], count=1)
        c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
        [out] = wait(cephadm_module, c)
        match_glob(out, "Deployed mds.name.* on host 'test'")

        c = cephadm_module.list_daemons()

        def remove_id(dd):
            out = dd.to_json()
            del out['daemon_id']
            return out

        assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
            {
                'daemon_type': 'mds',
                'hostname': 'test',
                'status': 1,
                'status_desc': 'starting'
            }
        ]

        ps = PlacementSpec(hosts=['test'], count=1)
        spec = ServiceSpec('rgw', 'r.z', placement=ps)
        c = cephadm_module.apply_rgw(spec)
        assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

        c = cephadm_module.describe_service()
        out = [o.to_json() for o in wait(cephadm_module, c)]
        expected = [
            {
                'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                'service_id': 'name',
                'service_name': 'mds.name',
                'service_type': 'mds',
                'status': {'running': 1, 'size': 0},
                'unmanaged': True
            },
            {
                'placement': {
                    'count': 1,
                    'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                },
                'rgw_realm': 'r',
                'rgw_zone': 'z',
                'service_id': 'r.z',
                'service_name': 'rgw.r.z',
                'service_type': 'rgw',
                'status': {'running': 0, 'size': 1}
            }
        ]
        assert out == expected
        assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected

        assert_rm_service(cephadm_module, 'rgw.r.z')
        assert_rm_daemon(cephadm_module, 'mds.name', 'test')
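
# Sketch of the remove_id() normalization pattern used above: daemon ids end
# in a random suffix, so the test strips them before comparing. This
# standalone version works on plain dicts rather than DaemonDescription.
def remove_id_sketch(dd_json):
    out = dict(dd_json)
    out.pop('daemon_id', None)
    return out

assert remove_id_sketch({'daemon_type': 'mds', 'daemon_id': 'name.test.abcdef'}) == \
    {'daemon_type': 'mds'}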
def _list_services(service_type=None, service_id=None, node_name=None):
    # Stub: only the node_name filter is honored; service_type and
    # service_id are accepted for signature compatibility but ignored.
    service_descs = []
    for service in services:
        if node_name is None or service['nodename'] == node_name:
            desc = ServiceDescription(nodename=service['nodename'],
                                      service_type=service['service_type'],
                                      service_instance=service['service_instance'])
            service_descs.append(desc)
    return service_descs
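
# Usage sketch for the stub above, under the assumption stated in its
# comment: only node_name narrows the result. Plain dicts model the backing
# 'services' list.
services_sketch = [
    {'nodename': 'host_a', 'service_type': 'mon', 'service_instance': 'a'},
    {'nodename': 'host_b', 'service_type': 'mgr', 'service_instance': 'x'},
]
matching = [s for s in services_sketch if s['nodename'] == 'host_a']
assert [s['service_instance'] for s in matching] == ['a']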
def test_scale_up(self, _apply_mds, _describe_service, _list_daemons, _get,
                  mds_autoscaler_module: MDSAutoscaler):
    daemons = Completion(value=[
        DaemonDescription(hostname='myhost', daemon_type='mds',
                          daemon_id='fs_name.myhost.a'),
        DaemonDescription(hostname='myhost', daemon_type='mds',
                          daemon_id='fs_name.myhost.b'),
    ])
    daemons.finalize()
    _list_daemons.return_value = daemons

    services = Completion(value=[
        ServiceDescription(spec=ServiceSpec(service_type='mds',
                                            service_id='fs_name',
                                            placement=PlacementSpec(count=2)))
    ])
    services.finalize()
    _describe_service.return_value = services

    apply = Completion(value='')
    apply.finalize()
    _apply_mds.return_value = apply

    _get.return_value = {
        'filesystems': [{
            'mdsmap': {
                'fs_name': 'fs_name',
                'in': [{
                    'name': 'mds.fs_name.myhost.a',
                }],
                'standby_count_wanted': 2,
            }
        }],
        'standbys': [{
            'name': 'mds.fs_name.myhost.b',
        }],
    }

    mds_autoscaler_module.notify('fs_map', None)
    _apply_mds.assert_called_with(
        ServiceSpec(service_type='mds',
                    service_id='fs_name',
                    placement=PlacementSpec(count=3)))
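
# Hedged sketch of the arithmetic the final assertion encodes: with one
# active rank and standby_count_wanted=2, the autoscaler should apply a
# placement of count=3 (actives plus wanted standbys). The helper below is
# illustrative, not the module's real logic.
def desired_mds_count(active_ranks, standby_count_wanted):
    return active_ranks + standby_count_wanted

assert desired_mds_count(active_ranks=1, standby_count_wanted=2) == 3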
def _mock_orchestrator(self, enable: bool) -> Iterator:
    self.io_mock = MagicMock()
    self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
    self.io_mock.read = self._ioctl_read_mock
    self.io_mock.stat = self._ioctl_stat_mock
    self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
    self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
    self.io_mock.remove_object.side_effect = self._ioctx_remove_mock

    # mock nfs services
    orch_nfs_services = [
        ServiceDescription(spec=NFSServiceSpec(service_id=self.cluster_id))
    ] if enable else []

    def mock_exec(cls, args):
        u = {
            "user_id": "abc",
            "display_name": "foo",
            "email": "",
            "suspended": 0,
            "max_buckets": 1000,
            "subusers": [],
            "keys": [{
                "user": "******",
                "access_key": "the_access_key",
                "secret_key": "the_secret_key"
            }],
            "swift_keys": [],
            "caps": [],
            "op_mask": "read, write, delete",
            "default_placement": "",
            "default_storage_class": "",
            "placement_tags": [],
            "bucket_quota": {
                "enabled": False,
                "check_on_raw": False,
                "max_size": -1,
                "max_size_kb": 0,
                "max_objects": -1
            },
            "user_quota": {
                "enabled": False,
                "check_on_raw": False,
                "max_size": -1,
                "max_size_kb": 0,
                "max_objects": -1
            },
            "temp_url_keys": [],
            "type": "rgw",
            "mfa_ids": []
        }
        if args[2] == 'list':
            return 0, json.dumps([u]), ''
        return 0, json.dumps(u), ''

    with mock.patch('nfs.module.Module.describe_service') as describe_service, \
            mock.patch('nfs.module.Module.rados') as rados, \
            mock.patch('nfs.export.available_clusters',
                       return_value=[self.cluster_id]), \
            mock.patch('nfs.export.restart_nfs_service'), \
            mock.patch('nfs.export.ExportMgr._exec', mock_exec), \
            mock.patch('nfs.export.check_fs', return_value=True), \
            mock.patch('nfs.export_utils.check_fs', return_value=True), \
            mock.patch('nfs.export.ExportMgr._create_user_key',
                       return_value='thekeyforclientabc'):
        rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
        rados.open_ioctx.return_value.__exit__ = mock.Mock(return_value=None)
        describe_service.return_value = OrchResult(orch_nfs_services)
        self._reset_temp_store()
        yield
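
# Self-contained sketch of the argv dispatch in mock_exec above: the stub
# mimics a radosgw-admin-style helper that returns a JSON list for a 'list'
# subcommand and a single JSON object otherwise. The payload here is
# abbreviated; field names follow the full dict above.
import json

def exec_stub(args):
    user = {'user_id': 'abc', 'display_name': 'foo'}
    if args[2] == 'list':
        return 0, json.dumps([user]), ''
    return 0, json.dumps(user), ''

code, out, err = exec_stub(['user', '--format=json', 'list'])
assert code == 0 and json.loads(out)[0]['user_id'] == 'abc'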
def test_get_unique_name(self, cephadm_module):
    existing = [
        ServiceDescription(service_instance='mon.a')
    ]
    new_mon = cephadm_module.get_unique_name('myhost', existing, 'mon')
    assert new_mon.startswith('mon.')
    assert new_mon != 'mon.a'
    assert '.myhost.' in new_mon
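
# Hypothetical host-qualified variant of the naming sketch, matching the
# extra assertion above that the host appears in the generated name.
import random
import string

def unique_host_name_sketch(host, existing_names, daemon_type):
    while True:
        suffix = ''.join(random.choices(string.ascii_lowercase, k=6))
        candidate = f'{daemon_type}.{host}.{suffix}'
        if candidate not in existing_names:
            return candidate

name = unique_host_name_sketch('myhost', {'mon.a'}, 'mon')
assert name.startswith('mon.') and '.myhost.' in name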
    assert OrchestratorEvent.from_json(e.to_json()) == e


def test_handle_command():
    cmd = {
        'prefix': 'orch daemon add',
        'daemon_type': 'mon',
        'placement': 'smithi044:[v2:172.21.15.44:3301,v1:172.21.15.44:6790]=c',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    assert r == HandleCommandResult(
        retval=-2, stdout='',
        stderr='No orchestrator configured (try `ceph orch set backend`)')


r = OrchResult([ServiceDescription(spec=ServiceSpec(service_type='osd'),
                                   running=123)])


@mock.patch("orchestrator.OrchestratorCli.describe_service", return_value=r)
def test_orch_ls(_describe_service):
    cmd = {
        'prefix': 'orch ls',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = 'NAME PORTS RUNNING REFRESHED AGE PLACEMENT \n' \
          'osd 123 - - '
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')


def test_preview_table_osd_smoke():
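
# Sketch of padding-based table rendering like the 'orch ls' output asserted
# above; the column widths here are illustrative only, not the real mgr
# table code.
def render_row(cells, widths):
    return ''.join(cell.ljust(width) for cell, width in zip(cells, widths))

widths = [6, 7, 9, 11, 5, 10]
header = render_row(['NAME', 'PORTS', 'RUNNING', 'REFRESHED', 'AGE', 'PLACEMENT'], widths)
row = render_row(['osd', '', '123', '-', '-', ''], widths)
assert header.startswith('NAME') and row.startswith('osd')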
def test_get_unique_name():
    o = SSHOrchestrator('module_name', 0, 0)
    existing = [ServiceDescription(service_instance='mon.a')]
    new_mon = o.get_unique_name(existing, 'mon')
    assert new_mon.startswith('mon.')
    assert new_mon != 'mon.a'
def _mock_orchestrator(self, enable: bool) -> Iterator:
    self.io_mock = MagicMock()
    self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
    self.io_mock.read = self._ioctl_read_mock
    self.io_mock.stat = self._ioctl_stat_mock
    self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
    self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
    self.io_mock.remove_object.side_effect = self._ioctx_remove_mock

    # mock nfs services
    cluster_info = self.clusters['foo']
    orch_nfs_services = [
        ServiceDescription(
            spec=NFSServiceSpec(service_id='foo',
                                pool=cluster_info['pool'],
                                namespace=cluster_info['namespace']))
    ] if enable else []

    # NOTE: the daemon mocks below are currently disabled; they are kept as
    # an inert string literal rather than executable code.
    """
    # mock nfs daemons
    def _get_nfs_instances(service_name=None):
        if not enable:
            return []
        instances = {
            'nfs.foo': [
                DaemonDescription(daemon_id='foo.host_a', status=1),
                DaemonDescription(daemon_id='foo.host_b', status=1)
            ],
            'nfs.bar': [
                DaemonDescription(daemon_id='bar.host_c', status=1)
            ]
        }
        if service_name is not None:
            return instances[service_name]
        result = []
        for _, daemons in instances.items():
            result.extend(daemons)
        return result
    """

    def mock_exec(cls, args):
        u = {
            "user_id": "abc",
            "display_name": "foo",
            "email": "",
            "suspended": 0,
            "max_buckets": 1000,
            "subusers": [],
            "keys": [{
                "user": "******",
                "access_key": "the_access_key",
                "secret_key": "the_secret_key"
            }],
            "swift_keys": [],
            "caps": [],
            "op_mask": "read, write, delete",
            "default_placement": "",
            "default_storage_class": "",
            "placement_tags": [],
            "bucket_quota": {
                "enabled": False,
                "check_on_raw": False,
                "max_size": -1,
                "max_size_kb": 0,
                "max_objects": -1
            },
            "user_quota": {
                "enabled": False,
                "check_on_raw": False,
                "max_size": -1,
                "max_size_kb": 0,
                "max_objects": -1
            },
            "temp_url_keys": [],
            "type": "rgw",
            "mfa_ids": []
        }
        if args[2] == 'list':
            return 0, json.dumps([u]), ''
        return 0, json.dumps(u), ''

    with mock.patch('nfs.module.Module.describe_service') as describe_service, \
            mock.patch('nfs.module.Module.rados') as rados, \
            mock.patch('nfs.export.available_clusters',
                       return_value=self.clusters.keys()), \
            mock.patch('nfs.export.restart_nfs_service'), \
            mock.patch('nfs.export.ExportMgr._exec', mock_exec), \
            mock.patch('nfs.export.check_fs', return_value=True), \
            mock.patch('nfs.export_utils.check_fs', return_value=True), \
            mock.patch('nfs.export.ExportMgr._create_user_key',
                       return_value='thekeyforclientabc'):
        rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
        rados.open_ioctx.return_value.__exit__ = mock.Mock(return_value=None)
        describe_service.return_value = OrchResult(orch_nfs_services)
        self._reset_temp_store()
        yield
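
# Sketch of the namespaced-ioctx mocking pattern set up above: the
# set_namespace side effect records the namespace so a read mock can key on
# it, much as the _ioctx_* helpers do against the temp store. The store
# contents here are made-up illustration data.
from unittest.mock import MagicMock

store = {'ns1': {'export-1': b'EXPORT { ... }'}}
state = {'ns': None}

io_mock = MagicMock()
io_mock.set_namespace.side_effect = lambda ns: state.update(ns=ns)
io_mock.read = lambda obj, *args: store[state['ns']][obj]

io_mock.set_namespace('ns1')
assert io_mock.read('export-1') == b'EXPORT { ... }'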