def _daemon_add_mon(self, placement=None):
    """CLI handler: add mon daemon(s) at the given placement.

    Parses and validates the placement string, submits an ``add_mon``
    request, and blocks until the orchestrator completes it.
    """
    mon_placement = PlacementSpec.from_string(placement)
    mon_placement.validate()
    completion = self.add_mon(ServiceSpec('mon', placement=mon_placement))
    # Block until the orchestrator finishes, surfacing any stored exception.
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _mds_add(self, fs_name, placement=None):
    """CLI handler: add an MDS daemon for filesystem *fs_name*.

    Parses the placement string, validates it, and submits an ``add_mds``
    request, blocking until the orchestrator completes it.
    """
    placement = PlacementSpec.from_string(placement)
    # Validate up front so a malformed placement is rejected immediately,
    # consistent with the other daemon-add/apply handlers in this module.
    placement.validate()
    spec = ServiceSpec(
        'mds',
        fs_name,
        placement=placement,
    )
    completion = self.add_mds(spec)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def test_osd_unmanaged():
    """An OSD spec with ``unmanaged: true`` round-trips through from_json."""
    osd_spec = {"placement": {"host_pattern": "*"},
                "service_id": "all-available-devices",
                "service_name": "osd.all-available-devices",
                "service_type": "osd",
                "spec": {"data_devices": {"all": True},
                         "filter_logic": "AND",
                         "objectstore": "bluestore"},
                "unmanaged": True}

    dg_spec = ServiceSpec.from_json(osd_spec)
    # `is True` (not `== True`): identity check is the idiomatic form (E712).
    assert dg_spec.unmanaged is True
def test_apply_prometheus_save(self, cephadm_module):
    """Applying a prometheus spec schedules it and records it in describe_service."""
    with self._with_host(cephadm_module, 'test'):
        placement = PlacementSpec(hosts=['test'], count=1)
        prom_spec = ServiceSpec('prometheus', placement=placement)
        completion = cephadm_module.apply_prometheus(prom_spec)
        assert wait(cephadm_module, completion) == 'Scheduled prometheus update...'
        described = wait(cephadm_module, cephadm_module.describe_service())
        assert [sd.spec for sd in described] == [prom_spec]
def test_node_assignment3(service_type, placement, hosts, daemons, expected_len, must_have):
    """Placement yields the expected host count and includes required hosts."""
    assignment = HostAssignment(
        spec=ServiceSpec(service_type, placement=placement),
        hosts=[HostSpec(name) for name in hosts],
        daemons=daemons,
    )
    hosts, to_add, to_remove = assignment.place()
    assert len(hosts) == expected_len
    placed_names = [spec.hostname for spec in hosts]
    for required in must_have:
        assert required in placed_names
def test_node_assignment2(service_type, placement, hosts, daemons, expected_len, in_set):
    """Placement yields the expected count and only draws from *in_set*."""
    assignment = HostAssignment(
        spec=ServiceSpec(service_type, placement=placement),
        # All candidate hosts carry the 'foo' label for label-based placements.
        hosts=[HostSpec(name, labels=['foo']) for name in hosts],
        daemons=daemons,
    )
    hosts, to_add, to_remove = assignment.place()
    assert len(hosts) == expected_len
    for placed in hosts:
        assert placed.hostname in in_set
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    # End-to-end upgrade flow: start an upgrade, drive _do_upgrade() twice
    # (once to resolve the image digest, once after the daemon inventory is
    # refreshed), then check the resulting 'container_image' config value.
    with with_host(cephadm_module, 'test'):
        cephadm_module.set_container_image('global', 'from_image')
        if use_repo_digest:
            # Parametrized: when set, the upgrade should pin the image by
            # its repo digest rather than by tag.
            cephadm_module.use_repo_digest = True
        with with_service(cephadm_module, ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr, 'test'):
            assert wait(cephadm_module, cephadm_module.upgrade_start(
                'to_image', None)) == 'Initiating upgrade to to_image'

            assert wait(cephadm_module,
                        cephadm_module.upgrade_status()).target_image == 'to_image'

            def _versions_mock(cmd):
                # Stub the `ceph versions` mon command: one mgr on 'myversion'.
                return json.dumps({
                    'mgr': {
                        'myversion': 1
                    }
                })

            cephadm_module._mon_command_mock_versions = _versions_mock

            # First pass: cephadm inspects the target image and reports its
            # image_id / repo_digest.
            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                            _run_cephadm(json.dumps({
                                'image_id': 'image_id',
                                'repo_digest': 'to_image@repo_digest',
                            }))):
                cephadm_module.upgrade._do_upgrade()

            # NOTE(review): `upgrade_status` is a method, so this assert is
            # always true (it checks the bound method, not its result) —
            # presumably meant to inspect upgrade state; confirm intent.
            assert cephadm_module.upgrade_status is not None

            # Second pass: the daemon inventory now reports the mgr daemon
            # running on the upgraded image_id.
            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                            _run_cephadm(
                                json.dumps([
                                    dict(
                                        name=list(cephadm_module.cache.daemons['test'].keys())[0],
                                        style='cephadm',
                                        fsid='fsid',
                                        container_id='container_id',
                                        container_image_id='image_id',
                                        version='version',
                                        state='running',
                                    )
                                ])
                            )):
                cephadm_module._refresh_hosts_and_daemons()
                cephadm_module.upgrade._do_upgrade()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'to_image@repo_digest'
            else:
                assert image == 'to_image'
def _apply_mds(self, fs_name, placement=None, unmanaged=False):
    """CLI handler: apply (declaratively schedule) an MDS service for *fs_name*."""
    mds_placement = PlacementSpec.from_string(placement)
    mds_placement.validate()
    completion = self.apply_mds(
        ServiceSpec('mds', fs_name, placement=mds_placement, unmanaged=unmanaged))
    # Block until the orchestrator finishes, surfacing any stored exception.
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
    """Apply *spec* via *meth*, verify it deploys and then remove it."""
    with with_host(cephadm_module, 'test'):
        # Default to a single daemon on the test host when unspecified.
        if not spec.placement:
            spec.placement = PlacementSpec(hosts=['test'], count=1)
        completion = meth(cephadm_module, spec)
        assert wait(cephadm_module, completion) == f'Scheduled {spec.service_name()} update...'
        described = [sd.spec for sd in wait(cephadm_module, cephadm_module.describe_service())]
        assert described == [spec]

        cephadm_module._apply_all_services()
        for daemon in wait(cephadm_module, cephadm_module.list_daemons()):
            assert daemon.service_name() == spec.service_name()

        assert_rm_service(cephadm_module, spec.service_name())
def test_bad_specs(service_type, placement, hosts, daemons, expected):
    """Invalid spec/placement combinations raise with the expected message."""
    def get_hosts_func(label=None, as_hostspec=False):
        # Mirror the orchestrator API: HostSpec objects or bare hostnames.
        return [HostSpec(name) for name in hosts] if as_hostspec else hosts

    with pytest.raises(OrchestratorValidationError) as e:
        hosts = HostAssignment(
            spec=ServiceSpec(service_type, placement=placement),
            get_hosts_func=get_hosts_func,
            get_daemons_func=lambda _: daemons,
        ).place()
    assert str(e.value) == expected
def test_odd_mons(service_type, placement, hosts, daemons, expected_count):
    """Scheduler places the expected number of daemons for the placement."""
    assignment = HostAssignment(
        spec=ServiceSpec(service_type=service_type,
                         service_id=None,
                         placement=placement),
        hosts=[HostSpec(name) for name in hosts],
        get_daemons_func=lambda _: daemons,
    )
    assert len(assignment.place()) == expected_count
def test_active_assignment(service_type, placement, hosts, daemons, expected):
    """Placement result is one of the acceptable host combinations."""
    placed = HostAssignment(
        spec=ServiceSpec(service_type=service_type,
                         service_id=None,
                         placement=placement),
        hosts=[HostSpec(name) for name in hosts],
        get_daemons_func=lambda _: daemons,
    ).place()
    assert sorted(h.hostname for h in placed) in expected
def _apply_misc(self, service_type=None, placement=None, unmanaged=False, inbuf=None):
    """CLI handler: apply one or more service specs.

    Accepts either an inline YAML document stream (``-i``) or a
    service_type + placement pair; the two forms are mutually exclusive.
    """
    usage = """Usage:
  ceph orch apply -i <yaml spec>
  ceph orch apply <service_type> <placement> [--unmanaged]
"""
    if inbuf:
        if service_type or placement or unmanaged:
            raise OrchestratorValidationError(usage)
        # safe_load_all: never execute arbitrary YAML tags from user input
        # (yaml.load_all without a Loader is deprecated and unsafe); this
        # also matches the safe_load used by the daemon-add handler.
        content: Iterator = yaml.safe_load_all(inbuf)
        specs = [ServiceSpec.from_json(s) for s in content]
    else:
        placement = PlacementSpec.from_string(placement)
        placement.validate()
        specs = [ServiceSpec(service_type, placement=placement, unmanaged=unmanaged)]
    completion = self.apply(specs)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def test_node_assignment(service_type, placement, hosts, daemons, expected):
    """Placement selects exactly the expected set of hosts."""
    def get_hosts_func(label=None, as_hostspec=False):
        if as_hostspec:
            return list(map(HostSpec, hosts))
        return hosts

    placed = HostAssignment(
        spec=ServiceSpec(service_type, placement=placement),
        get_hosts_func=get_hosts_func,
        get_daemons_func=lambda _: daemons,
    ).place()
    assert sorted(h.hostname for h in placed) == sorted(expected)
def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, host: str):
    """Context helper: apply *spec*, assert it deployed, yield, then remove it."""
    # Default to a single daemon on *host* when the spec has no placement.
    if spec.placement.is_empty():
        spec.placement = PlacementSpec(hosts=[host], count=1)
    completion = meth(cephadm_module, spec)
    assert wait(cephadm_module, completion) == f'Scheduled {spec.service_name()} update...'

    described = [sd.spec for sd in wait(cephadm_module, cephadm_module.describe_service())]
    assert spec in described

    cephadm_module._apply_all_services()
    daemons = wait(cephadm_module, cephadm_module.list_daemons())
    deployed = {dd.service_name() for dd in daemons}
    assert spec.service_name() in deployed, daemons

    yield

    assert_rm_service(cephadm_module, spec.service_name())
def from_json(cls, data: dict) -> 'ServiceDescription':
    """Build a ServiceDescription from its JSON dict representation."""
    payload = data.copy()
    raw_status = payload.pop('status', {})
    raw_events = payload.pop('events', [])
    spec = ServiceSpec.from_json(payload)

    status = raw_status.copy()
    # Timestamps are serialized as strings; convert back to datetimes.
    for key in ['last_refresh', 'created']:
        if key in status:
            status[key] = str_to_datetime(status[key])
    events = [OrchestratorEvent.from_json(raw) for raw in raw_events]
    return cls(spec=spec, events=events, **status)
def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, host: str) -> Iterator[List[str]]:
    """Context helper: apply *spec*, yield its daemon names, then remove it."""
    # Default to a single daemon on *host* when the spec has no placement.
    if spec.placement.is_empty():
        spec.placement = PlacementSpec(hosts=[host], count=1)
    completion = meth(cephadm_module, spec)
    assert wait(cephadm_module, completion) == f'Scheduled {spec.service_name()} update...'

    all_specs = [sd.spec for sd in wait(cephadm_module, cephadm_module.describe_service())]
    assert spec in all_specs

    CephadmServe(cephadm_module)._apply_all_services()
    daemons = wait(cephadm_module, cephadm_module.list_daemons())
    matching = [dd for dd in daemons if dd.service_name() == spec.service_name()]
    assert matching

    yield [dd.name() for dd in matching]

    assert_rm_service(cephadm_module, spec.service_name())
def mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count):
    """Build a (spec factory, get_hosts wrapper) pair for one placement style."""
    if spec_section == 'hosts':
        def mk_spec():
            return ServiceSpec('mon', placement=PlacementSpec(
                hosts=explicit,
                count=count,
            ))

        def mk_hosts(_):
            return hosts
    elif spec_section == 'label':
        def mk_spec():
            return ServiceSpec('mon', placement=PlacementSpec(
                label='mylabel',
                count=count,
            ))

        def mk_hosts(l):
            # Only the explicitly-named hosts carry 'mylabel'.
            if l == 'mylabel':
                return [e for e in explicit if e in hosts]
            return hosts
    elif spec_section == 'host_pattern':
        pattern = {
            'e': 'notfound',
            '1': '1',
            '12': '[1-2]',
            '123': '*',
        }[explicit_key]

        def mk_spec():
            return ServiceSpec('mon', placement=PlacementSpec(
                host_pattern=pattern,
                count=count,
            ))

        def mk_hosts(_):
            return hosts
    else:
        assert False

    def _get_hosts_wrapper(label=None, as_hostspec=False):
        resolved = mk_hosts(label)
        if as_hostspec:
            return list(map(HostSpec, resolved))
        return resolved

    return mk_spec, _get_hosts_wrapper
def test_service_ls(cephadm_module: CephadmOrchestrator):
    """Migration converts a host_pattern placement into an explicit host list."""
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):
            # emulate the old scheduler:
            completion = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, completion) == 'Scheduled rgw.r.z update...'
            cephadm_module._apply_all_services()
            placed_on = {dd.hostname for dd in wait(cephadm_module, cephadm_module.list_daemons())}
            assert placed_on == {'host1', 'host2'}

            completion = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, completion) == 'Scheduled rgw.r.z update...'

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # Migration must not advance while daemon info may be stale.
            assert cephadm_module.migration_current == 0

            # Pretend both hosts' daemon inventories were just refreshed so
            # the migration believes all daemons are already up to date.
            cephadm_module.cache.last_daemon_update['host1'] = datetime.now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime.now()
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current == 1

            placements = [sd.spec.placement
                          for sd in wait(cephadm_module, cephadm_module.describe_service())]
            assert placements == [PlacementSpec(count=2, hosts=[
                HostPlacementSpec(hostname='host1', network='', name=''),
                HostPlacementSpec(hostname='host2', network='', name=''),
            ])]
def fence_old_ranks(self,
                    spec: ServiceSpec,
                    rank_map: Dict[int, Dict[int, Optional[str]]],
                    num_ranks: int) -> None:
    # Fence daemons whose rank or generation is obsolete, pruning rank_map
    # in place and persisting it after each structural change.
    # rank_map maps rank -> {generation -> daemon_id or None}.
    for rank, m in list(rank_map.items()):
        if rank >= num_ranks:
            # The whole rank is gone (the service shrank below this rank):
            # fence every daemon of every generation, drop the rank, and
            # remove its node from the ganesha grace table.
            for daemon_id in m.values():
                if daemon_id is not None:
                    self.fence(daemon_id)
            del rank_map[rank]
            nodeid = f'{spec.service_name()}.{rank}'
            self.mgr.log.info(f'Removing {nodeid} from the ganesha grace table')
            self.run_grace_tool(cast(NFSServiceSpec, spec), 'remove', nodeid)
            # Persist immediately so a crash cannot resurrect fenced ranks.
            self.mgr.spec_store.save_rank_map(spec.service_name(), rank_map)
        else:
            # The rank survives: fence every generation older than the
            # newest one; only the max generation stays live.
            max_gen = max(m.keys())
            for gen, daemon_id in list(m.items()):
                if gen < max_gen:
                    if daemon_id is not None:
                        self.fence(daemon_id)
                    del rank_map[rank][gen]
                    self.mgr.spec_store.save_rank_map(spec.service_name(), rank_map)
def test_node_assignment(service_type, placement, hosts, daemons, expected):
    """Placement selects exactly the expected hosts (rgw needs a service_id)."""
    # rgw specs require a realm.zone service id; other services take None.
    service_id = 'realm.zone' if service_type == 'rgw' else None

    spec = ServiceSpec(service_type=service_type,
                       service_id=service_id,
                       placement=placement)

    placed = HostAssignment(
        spec=spec,
        hosts=[HostSpec(name, labels=['foo']) for name in hosts],
        get_daemons_func=lambda _: daemons,
    ).place()
    assert sorted(h.hostname for h in placed) == sorted(expected)
def _daemon_add_misc(self, daemon_type=None, placement=None, inbuf=None):
    """CLI handler: add a daemon of the given type at the given placement.

    Accepts either an inline spec file (``-i``) or a daemon_type +
    placement pair; the two forms are mutually exclusive.
    """
    usage = f"""Usage:
    ceph orch daemon add -i <json_file>
    ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
    if inbuf:
        if daemon_type or placement:
            raise OrchestratorValidationError(usage)
        spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
    else:
        placement = PlacementSpec.from_string(placement)
        placement.validate()

        spec = ServiceSpec(daemon_type, placement=placement)

    # Dispatch table replaces the long if/elif chain; one entry per
    # supported daemon type.
    add_fns = {
        'mon': self.add_mon,
        'mgr': self.add_mgr,
        'rbd-mirror': self.add_rbd_mirror,
        'crash': self.add_crash,
        'alertmanager': self.add_alertmanager,
        'grafana': self.add_grafana,
        'node-exporter': self.add_node_exporter,
        'prometheus': self.add_prometheus,
        'iscsi': self.add_iscsi,
    }
    add_fn = add_fns.get(daemon_type)
    if add_fn is None:
        raise OrchestratorValidationError(
            f'unknown daemon type `{daemon_type}`')
    completion = add_fn(spec)

    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def test_servicespec_map_test(s_type, o_spec, s_id):
    """from_json maps each service type to its spec class and parses placement."""
    dict_spec = {
        "service_id": s_id,
        "service_type": s_type,
        "placement": dict(hosts=["host1:1.1.1.1"]),
    }
    # nfs and iscsi specs require a pool; iscsi additionally needs API creds.
    if s_type in ('nfs', 'iscsi'):
        dict_spec['pool'] = 'pool'
    if s_type == 'iscsi':
        dict_spec['api_user'] = '******'
        dict_spec['api_password'] = '******'

    spec = ServiceSpec.from_json(dict_spec)
    assert isinstance(spec, o_spec)
    assert isinstance(spec.placement, PlacementSpec)

    first = spec.placement.hosts[0]
    assert isinstance(first, HostPlacementSpec)
    assert first.hostname == 'host1'
    assert first.network == '1.1.1.1'
    assert first.name == ''
    assert spec.validate() is None
    # Round trip: the serialized spec must parse back cleanly.
    ServiceSpec.from_json(spec.to_json())
def test_daemon_check(self, cephadm_module: CephadmOrchestrator, action):
    """A scheduled daemon action is consumed by the daemon-check pass."""
    with with_host(cephadm_module, 'test'):
        grafana = ServiceSpec(service_type='grafana')
        with with_service(cephadm_module, grafana,
                          CephadmOrchestrator.apply_grafana, 'test') as d_names:
            [daemon_name] = d_names

            cephadm_module._schedule_daemon_action(daemon_name, action)
            assert cephadm_module.cache.get_scheduled_daemon_action(
                'test', daemon_name) == action

            # Running the check consumes the scheduled action.
            CephadmServe(cephadm_module)._check_daemons()
            assert cephadm_module.cache.get_scheduled_daemon_action(
                'test', daemon_name) is None
def test_node_assignment3(service_type, placement, hosts, daemons, expected_len, must_have):
    """Placement yields the expected host count and includes required hosts."""
    def get_hosts_func(label=None, as_hostspec=False):
        # Mirror the orchestrator API: HostSpec objects or bare hostnames.
        return [HostSpec(name) for name in hosts] if as_hostspec else hosts

    hosts = HostAssignment(
        spec=ServiceSpec(service_type, placement=placement),
        get_hosts_func=get_hosts_func,
        get_daemons_func=lambda _: daemons,
    ).place()
    assert len(hosts) == expected_len
    placed_names = [spec.hostname for spec in hosts]
    for required in must_have:
        assert required in placed_names
def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
    # Generated grafana config must embed the initial admin password from
    # the GrafanaSpec; the full expected file contents are pinned below.
    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                with_service(cephadm_module, GrafanaSpec(initial_admin_password='******')):
            out = cephadm_module.cephadm_services['grafana'].generate_config(
                CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
            # Exact byte-for-byte comparison of every generated file; cert
            # contents are nondeterministic, hence ANY.
            assert out == ({
                'files': {
                    'grafana.ini':
                        '# This file is generated by cephadm.\n'
                        '[users]\n'
                        ' default_theme = light\n'
                        '[auth.anonymous]\n'
                        ' enabled = true\n'
                        " org_name = 'Main Org.'\n"
                        " org_role = 'Viewer'\n"
                        '[server]\n'
                        " domain = 'bootstrap.storage.lab'\n"
                        ' protocol = https\n'
                        ' cert_file = /etc/grafana/certs/cert_file\n'
                        ' cert_key = /etc/grafana/certs/cert_key\n'
                        ' http_port = 3000\n'
                        ' http_addr = \n'
                        '[security]\n'
                        ' admin_user = admin\n'
                        ' admin_password = secure\n'
                        ' cookie_secure = true\n'
                        ' cookie_samesite = none\n'
                        ' allow_embedding = true',
                    'provisioning/datasources/ceph-dashboard.yml':
                        "# This file is generated by cephadm.\n"
                        'deleteDatasources:\n\n'
                        "  - name: 'Loki'\n"
                        '    orgId: 2\n\n'
                        'datasources:\n\n'
                        "  - name: 'Loki'\n"
                        "    type: 'loki'\n"
                        "    access: 'proxy'\n"
                        '    orgId: 2\n'
                        "    url: 'http://[1::4]:3100'\n"
                        '    basicAuth: false\n'
                        '    isDefault: true\n'
                        '    editable: false',
                    'certs/cert_file': ANY,
                    'certs/cert_key': ANY
                }
            }, [])
def mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count):
    """Build a (spec factory, labelled host list) pair for one placement style."""
    if spec_section == 'hosts':
        def mk_spec():
            return ServiceSpec(
                'mgr', placement=PlacementSpec(
                    hosts=explicit,
                    count=count,
                ))
    elif spec_section == 'label':
        def mk_spec():
            return ServiceSpec(
                'mgr', placement=PlacementSpec(
                    label='mylabel',
                    count=count,
                ))
    elif spec_section == 'host_pattern':
        pattern = {
            'e': 'notfound',
            '1': '1',
            '12': '[1-2]',
            '123': '*',
        }[explicit_key]

        def mk_spec():
            return ServiceSpec(
                'mgr', placement=PlacementSpec(
                    host_pattern=pattern,
                    count=count,
                ))
    else:
        assert False

    # Hosts named in *explicit* carry the label used by the 'label' section.
    labelled_hosts = [
        HostSpec(name, labels=['mylabel']) if name in explicit else HostSpec(name)
        for name in hosts
    ]

    return mk_spec, labelled_hosts
def test_active_assignment(service_type, placement, hosts, daemons, expected, expected_add, expected_remove):
    """Placement, additions, and removals each match an acceptable outcome."""
    spec = ServiceSpec(service_type=service_type,
                       service_id=None,
                       placement=placement)

    placed, to_add, to_remove = HostAssignment(
        spec=spec,
        hosts=[HostSpec(name) for name in hosts],
        daemons=daemons,
    ).place()
    assert sorted(h.hostname for h in placed) in expected
    assert sorted(h.hostname for h in to_add) in expected_add
    assert sorted(d.name() for d in to_remove) in expected_remove
def save(self, spec: ServiceSpec, update_create: bool = True) -> None:
    """Persist *spec* to the mon store and record a creation event.

    Preview-only specs are staged in ``spec_preview`` and never written.
    When *update_create* is set, the creation timestamp is refreshed.
    """
    name = spec.service_name()
    if spec.preview_only:
        self.spec_preview[name] = spec
        return None
    self._specs[name] = spec

    if update_create:
        self.spec_created[name] = datetime_now()

    payload = {
        'spec': spec.to_json(),
        'created': datetime_to_str(self.spec_created[name]),
    }
    # Keep the deletion timestamp, if any, so soft-deletes survive restarts.
    if name in self.spec_deleted:
        payload['deleted'] = datetime_to_str(self.spec_deleted[name])

    self.mgr.set_store(
        SPEC_STORE_PREFIX + name,
        json.dumps(payload, sort_keys=True),
    )
    self.mgr.events.for_service(spec, OrchestratorEvent.INFO, 'service was created')
def save(
    self,
    spec: ServiceSpec,
    update_create: bool = True,
) -> None:
    """Stage *spec* and write it through to persistent storage via ``_save``.

    Preview-only specs are staged in ``spec_preview`` and never persisted.
    When *update_create* is set, the creation timestamp is refreshed.
    """
    name = spec.service_name()
    if spec.preview_only:
        # Previews are staged only; nothing reaches the store.
        self.spec_preview[name] = spec
        return None
    self._specs[name] = spec

    if update_create:
        self.spec_created[name] = datetime_now()

    self._save(name)