def test_servicespec_map_test(s_type, o_spec, s_id):
    """Round-trip a generated dict spec through ServiceSpec.from_json and
    check the resulting type, parsed placement, and validation."""
    spec = ServiceSpec.from_json(_get_dict_spec(s_type, s_id))
    assert isinstance(spec, o_spec)
    assert isinstance(spec.placement, PlacementSpec)
    placed = spec.placement.hosts[0]
    assert isinstance(placed, HostPlacementSpec)
    assert placed.hostname == 'host1'
    assert placed.network == '1.1.1.1'
    assert placed.name == ''
    assert spec.validate() is None
    # the serialized form must itself be parseable again
    ServiceSpec.from_json(spec.to_json())
def test_servicespec_map_test(s_type, o_spec, s_id):
    """Build a minimal spec dict by hand and verify ServiceSpec.from_json
    produces the expected subclass with a parsed HostPlacementSpec."""
    dict_spec = {
        "service_id": s_id,
        "service_type": s_type,
        "placement": {"hosts": ["host1:1.1.1.1"]},
    }
    spec = ServiceSpec.from_json(dict_spec)
    assert isinstance(spec, o_spec)
    assert isinstance(spec.placement, PlacementSpec)
    placed = spec.placement.hosts[0]
    assert isinstance(placed, HostPlacementSpec)
    assert placed.hostname == 'host1'
    assert placed.network == '1.1.1.1'
    assert placed.name == ''
    assert servicespec_validate_add(spec) is None
    # serialized form must round-trip
    ServiceSpec.from_json(spec.to_json())
def test_yaml():
    """Each YAML document below must survive a load -> ServiceSpec ->
    yaml.dump round trip byte-for-byte (and again via to_json)."""
    y = """service_type: crash
service_name: crash
placement:
  host_pattern: '*'
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
unmanaged: true
---
service_type: rgw
service_id: default-rgw-realm.eu-central-1.1
service_name: rgw.default-rgw-realm.eu-central-1.1
placement:
  hosts:
  - ceph-001
networks:
- 10.0.0.0/8
- 192.168.0.0/16
spec:
  rgw_frontend_type: civetweb
  rgw_realm: default-rgw-realm
  rgw_zone: eu-central-1
---
service_type: osd
service_id: osd_spec_default
service_name: osd.osd_spec_default
placement:
  host_pattern: '*'
spec:
  data_devices:
    model: MC-55-44-XZ
  db_devices:
    model: SSD-123-foo
  filter_logic: AND
  objectstore: bluestore
  wal_devices:
    model: NVME-QQQQ-987
"""
    for y in y.split('---\n'):
        data = yaml.safe_load(y)
        # NOTE: local renamed from `object` (shadowed the builtin)
        spec = ServiceSpec.from_json(data)
        assert yaml.dump(spec) == y
        assert yaml.dump(ServiceSpec.from_json(spec.to_json())) == y
def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
    """Add a daemon for *spec* on host 'test' while the owning service is
    unmanaged, so the scheduler cannot interfere with the manual add."""
    # Clone via JSON round-trip so the caller's spec object stays untouched.
    unmanaged_spec = ServiceSpec.from_json(spec.to_json())
    unmanaged_spec.unmanaged = True
    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, unmanaged_spec):
            with with_daemon(cephadm_module, spec, meth, 'test'):
                pass
def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """An alertmanager spec with an explicit port must be deployed with
    both that port and the default mesh port (9094) on reconfig."""
    _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
    with with_host(cephadm_module, 'test'):
        yaml_str = """service_type: alertmanager
service_name: alertmanager
placement:
  count: 1
spec:
  port: 4200
"""
        yaml_file = yaml.safe_load(yaml_str)
        spec = ServiceSpec.from_json(yaml_file)

        with patch("cephadm.services.monitoring.AlertmanagerService.generate_config",
                   return_value=({}, [])):
            with with_service(cephadm_module, spec):
                CephadmServe(cephadm_module)._check_daemons()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json', '{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null}',
                        '--config-json', '-',
                        '--tcp-ports', '4200 9094',
                        '--reconfig',
                    ],
                    stdin='{}',
                    image='',
                )
def test_spec_octopus(spec_json):
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.
    spec = ServiceSpec.from_json(spec_json)

    # just some verification that we can still read old octopus specs
    def convert_to_old_style_json(j):
        """Strip post-octopus fields from to_json() output so it can be
        compared against the original octopus-era JSON."""
        j_c = dict(j.copy())
        j_c.pop('service_name', None)
        if 'spec' in j_c:
            spec = j_c.pop('spec')
            j_c.update(spec)
        if 'placement' in j_c:
            if 'hosts' in j_c['placement']:
                # Parse each host string once instead of three times per
                # host (hostname/network/name all come from the same parse).
                j_c['placement']['hosts'] = [
                    {
                        'hostname': parsed.hostname,
                        'network': parsed.network,
                        'name': parsed.name,
                    }
                    for parsed in (HostPlacementSpec.parse(h)
                                   for h in j_c['placement']['hosts'])
                ]
        j_c.pop('objectstore', None)
        j_c.pop('filter_logic', None)
        return j_c

    assert spec_json == convert_to_old_style_json(spec.to_json())
def test_HA_RGW_spec():
    """Parse an ha-rgw YAML spec and verify every field lands on the
    resulting spec object."""
    yaml_str = """service_type: ha-rgw
service_id: haproxy_for_rgw
placement:
  hosts:
    - host1
    - host2
    - host3
spec:
  virtual_ip_interface: eth0
  virtual_ip_address: 192.168.20.1/24
  frontend_port: 8080
  ha_proxy_port: 1967
  ha_proxy_stats_enabled: true
  ha_proxy_stats_user: admin
  ha_proxy_stats_password: admin
  ha_proxy_enable_prometheus_exporter: true
  ha_proxy_monitor_uri: /haproxy_health
  keepalived_password: admin
"""
    yaml_file = yaml.safe_load(yaml_str)
    spec = ServiceSpec.from_json(yaml_file)
    assert spec.service_type == "ha-rgw"
    assert spec.service_id == "haproxy_for_rgw"
    assert spec.virtual_ip_interface == "eth0"
    assert spec.virtual_ip_address == "192.168.20.1/24"
    assert spec.frontend_port == 8080
    assert spec.ha_proxy_port == 1967
    assert spec.ha_proxy_stats_enabled is True
    assert spec.ha_proxy_stats_user == "admin"
    assert spec.ha_proxy_stats_password == "admin"
    assert spec.ha_proxy_enable_prometheus_exporter is True
    assert spec.ha_proxy_monitor_uri == "/haproxy_health"
    assert spec.keepalived_password == "admin"
def convert_to_explicit(spec: ServiceSpec) -> None:
    """Re-save *spec* with an explicit host list if the new scheduler
    would otherwise remove daemons that are currently deployed."""
    existing_daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
    placements, to_add, to_remove = HostAssignment(
        spec=spec,
        hosts=self.mgr.inventory.all_specs(),
        unreachable_hosts=self.mgr.cache.get_unreachable_hosts(),
        daemons=existing_daemons,
    ).place()

    # We have to migrate, only if the new scheduler would remove daemons
    if len(placements) >= len(existing_daemons):
        return

    old_hosts = {h.hostname: h for h in spec.placement.hosts}
    new_hosts = []
    for d in existing_daemons:
        placed = old_hosts.get(d.hostname)
        if placed is None:
            assert d.hostname
            placed = HostPlacementSpec(d.hostname, '', '')
        new_hosts.append(placed)

    new_placement = PlacementSpec(
        hosts=new_hosts,
        count=spec.placement.count,
    )

    # Clone the spec and swap in the explicit placement.
    new_spec = ServiceSpec.from_json(spec.to_json())
    new_spec.placement = new_placement
    logger.info(f"Migrating {spec.one_line_str()} to explicit placement")
    self.mgr.spec_store.save(new_spec)
def _apply_osd(self, all_available_devices=False, inbuf=None):
    # type: (bool, Optional[str]) -> HandleCommandResult
    """Apply DriveGroupSpecs to create OSDs"""
    usage = """
Usage:
  ceph orch apply osd -i <json_file/yaml_file>
  ceph orch apply osd --use-all-devices
"""
    if not inbuf and not all_available_devices:
        return HandleCommandResult(-errno.EINVAL, stderr=usage)
    if inbuf:
        if all_available_devices:
            raise OrchestratorError(
                '--all-available-devices cannot be combined with an osd spec'
            )
        try:
            # safe_load_all: operator-supplied YAML must never instantiate
            # arbitrary Python objects (yaml.load_all without an explicit
            # Loader is unsafe and deprecated).
            drivegroups = yaml.safe_load_all(inbuf)
            dg_specs = [ServiceSpec.from_json(dg) for dg in drivegroups]
        except (ValueError, yaml.YAMLError) as e:
            # yaml.YAMLError is what malformed YAML actually raises; the
            # original `except ValueError` alone never caught parse errors.
            msg = 'Failed to read JSON/YAML input: {}'.format(
                str(e)) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)
    else:
        dg_specs = [
            DriveGroupSpec(
                service_id='all-available-devices',
                placement=PlacementSpec(host_pattern='*'),
                data_devices=DeviceSelection(all=True),
            )
        ]
    completion = self.apply_drivegroups(dg_specs)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def convert_to_explicit(spec: ServiceSpec) -> None:
    """Re-save *spec* with an explicit host list when the new scheduler
    would otherwise drop some currently-deployed daemons."""
    placements = HostAssignment(
        spec=spec,
        get_hosts_func=self.mgr._get_hosts,
        get_daemons_func=self.mgr.cache.get_daemons_by_service,
    ).place()

    existing_daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())

    # We have to migrate, only if the new scheduler would remove daemons
    if len(placements) >= len(existing_daemons):
        return

    old_hosts = {h.hostname: h for h in spec.placement.hosts}
    new_hosts = []
    for d in existing_daemons:
        if d.hostname in old_hosts:
            new_hosts.append(old_hosts[d.hostname])
        else:
            new_hosts.append(
                HostPlacementSpec(hostname=d.hostname, network='', name=''))

    new_placement = PlacementSpec(hosts=new_hosts,
                                  count=spec.placement.count)

    # Clone the spec and attach the explicit placement.
    new_spec = ServiceSpec.from_json(spec.to_json())
    new_spec.placement = new_placement
    logger.info(f"Migrating {spec.one_line_str()} to explicit placement")
    self.mgr.spec_store.save(new_spec)
def _apply_misc(self, service_type=None, placement=None, unmanaged=False, inbuf=None):
    """Apply service specs, either from a YAML stream (-i) or from a
    service type and placement given on the command line.

    Returns a HandleCommandResult with the apply result string.
    """
    usage = """Usage:
  ceph orch apply -i <yaml spec>
  ceph orch apply <service_type> <placement> [--unmanaged]
"""
    if inbuf:
        if service_type or placement or unmanaged:
            raise OrchestratorValidationError(usage)
        # safe_load_all: operator-supplied YAML must never instantiate
        # arbitrary Python objects (yaml.load_all without an explicit
        # Loader is unsafe and deprecated).
        content: Iterator = yaml.safe_load_all(inbuf)
        specs = [ServiceSpec.from_json(s) for s in content]
    else:
        placement = PlacementSpec.from_string(placement)
        placement.validate()
        specs = [
            ServiceSpec(service_type,
                        placement=placement,
                        unmanaged=unmanaged)
        ]
    completion = self.apply(specs)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _apply_osd(self, inbuf=None):
    # type: (Optional[str]) -> HandleCommandResult
    """Apply DriveGroupSpecs to create OSDs"""
    usage = """
Usage:
  ceph orch apply osd -i <json_file/yaml_file>
"""
    if not inbuf:
        return HandleCommandResult(-errno.EINVAL, stderr=usage)
    try:
        # safe_load_all: operator-supplied YAML must never instantiate
        # arbitrary Python objects (yaml.load_all without an explicit
        # Loader is unsafe and deprecated).
        drivegroups = yaml.safe_load_all(inbuf)
        dg_specs = [ServiceSpec.from_json(dg) for dg in drivegroups]
    except (ValueError, yaml.YAMLError) as e:
        # yaml.YAMLError is what malformed YAML actually raises; a bare
        # `except ValueError` never caught parse errors.
        msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
        return HandleCommandResult(-errno.EINVAL, stderr=msg)
    completions = self.apply_drivegroups(dg_specs)
    # Plain loops instead of side-effect list comprehensions.
    for completion in completions:
        self._orchestrator_wait([completion])
    for completion in completions:
        raise_if_exception(completion)
    result_strings = [completion.result_str() for completion in completions]
    return HandleCommandResult(stdout=" ".join(result_strings))
def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
    """Applying a drive-group JSON spec must schedule an OSD update and
    persist the parsed spec via the (mocked) spec store."""
    with self._with_host(cephadm_module, 'test'):
        json_spec = {
            'service_type': 'osd',
            'placement': {'host_pattern': 'test'},
            'service_id': 'foo',
            'data_devices': {'all': True},
        }
        spec = ServiceSpec.from_json(json_spec)
        assert isinstance(spec, DriveGroupSpec)
        c = cephadm_module.apply_drivegroups([spec])
        assert wait(cephadm_module, c) == ['Scheduled osd update...']
        _save_spec.assert_called_with(spec)
def test_yaml():
    """Each YAML document below must survive a load -> ServiceSpec ->
    yaml.dump round trip byte-for-byte (and again via to_json)."""
    y = """service_type: crash
service_name: crash
placement:
  host_pattern: '*'
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
unmanaged: true
---
service_type: rgw
service_id: default-rgw-realm.eu-central-1.1
service_name: rgw.default-rgw-realm.eu-central-1.1
placement:
  hosts:
  - hostname: ceph-001
    name: ''
    network: ''
spec:
  rgw_realm: default-rgw-realm
  rgw_zone: eu-central-1
  subcluster: '1'
---
service_type: osd
service_id: osd_spec_default
service_name: osd.osd_spec_default
placement:
  host_pattern: '*'
spec:
  data_devices:
    model: MC-55-44-XZ
  db_devices:
    model: SSD-123-foo
  objectstore: bluestore
  wal_devices:
    model: NVME-QQQQ-987
"""
    for y in y.split('---\n'):
        data = yaml.safe_load(y)
        # NOTE: local renamed from `object` (shadowed the builtin)
        spec = ServiceSpec.from_json(data)
        assert yaml.dump(spec) == y
        assert yaml.dump(ServiceSpec.from_json(spec.to_json())) == y
def test_osd_unmanaged():
    """An OSD spec carrying "unmanaged": true must parse with the flag
    preserved as a genuine boolean."""
    osd_spec = {
        "placement": {"host_pattern": "*"},
        "service_id": "all-available-devices",
        "service_name": "osd.all-available-devices",
        "service_type": "osd",
        "spec": {
            "data_devices": {"all": True},
            "filter_logic": "AND",
            "objectstore": "bluestore",
        },
        "unmanaged": True,
    }
    dg_spec = ServiceSpec.from_json(osd_spec)
    # `is True` instead of `== True`: assert a real bool, not merely a
    # truthy value (1, "yes", ...) leaking through parsing.
    assert dg_spec.unmanaged is True
def from_json(cls, data: dict) -> 'ServiceDescription':
    """Alternate constructor: build a ServiceDescription from its
    to_json()-style dict, converting timestamp strings back to datetimes."""
    payload = data.copy()
    status = payload.pop('status', {})
    event_strs = payload.pop('events', [])

    spec = ServiceSpec.from_json(payload)

    c_status = status.copy()
    # These status fields are serialized as strings; restore datetimes.
    for key in ('last_refresh', 'created'):
        if key in c_status:
            c_status[key] = str_to_datetime(c_status[key])

    events = [OrchestratorEvent.from_json(e) for e in event_strs]
    return cls(spec=spec, events=events, **c_status)
def test_servicespec_map_test(s_type, o_spec, s_id):
    """Like the basic map test, but nfs/iscsi specs need extra required
    fields (pool, and API credentials for iscsi)."""
    dict_spec = {
        "service_id": s_id,
        "service_type": s_type,
        "placement": {"hosts": ["host1:1.1.1.1"]},
    }
    # Both nfs and iscsi require a pool; iscsi additionally needs creds.
    if s_type in ('nfs', 'iscsi'):
        dict_spec['pool'] = 'pool'
    if s_type == 'iscsi':
        dict_spec['api_user'] = '******'
        dict_spec['api_password'] = '******'
    spec = ServiceSpec.from_json(dict_spec)
    assert isinstance(spec, o_spec)
    assert isinstance(spec.placement, PlacementSpec)
    placed = spec.placement.hosts[0]
    assert isinstance(placed, HostPlacementSpec)
    assert placed.hostname == 'host1'
    assert placed.network == '1.1.1.1'
    assert placed.name == ''
    assert spec.validate() is None
    ServiceSpec.from_json(spec.to_json())
def _daemon_add_misc(self,
                     daemon_type: Optional[str] = None,
                     placement: Optional[str] = None,
                     inbuf: Optional[str] = None) -> HandleCommandResult:
    """Add a single daemon of the given type, from a JSON/YAML spec
    (-i) or from a daemon type + placement on the command line."""
    usage = f"""Usage:
    ceph orch daemon add -i <json_file>
    ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
    if inbuf:
        if daemon_type or placement:
            raise OrchestratorValidationError(usage)
        spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
    else:
        spec = PlacementSpec.from_string(placement)
        assert daemon_type
        spec = ServiceSpec(daemon_type, placement=spec)

    daemon_type = spec.service_type

    # Dispatch table in place of a long if/elif chain.
    adders = {
        'mon': self.add_mon,
        'mgr': self.add_mgr,
        'rbd-mirror': self.add_rbd_mirror,
        'crash': self.add_crash,
        'alertmanager': self.add_alertmanager,
        'grafana': self.add_grafana,
        'node-exporter': self.add_node_exporter,
        'prometheus': self.add_prometheus,
        'mds': self.add_mds,
        'rgw': self.add_rgw,
        'nfs': self.add_nfs,
        'iscsi': self.add_iscsi,
    }
    add_fn = adders.get(daemon_type)
    if add_fn is None:
        raise OrchestratorValidationError(f'unknown daemon type `{daemon_type}`')
    completion = add_fn(spec)

    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def load(self):
    # type: () -> None
    """Load all persisted service specs (plus created/deleted timestamps
    and rank maps) from the mon store into this SpecStore's maps.

    A spec that fails to parse is logged and skipped; one bad entry must
    not prevent the rest from loading.
    """
    for k, v in self.mgr.get_store_prefix(SPEC_STORE_PREFIX).items():
        service_name = k[len(SPEC_STORE_PREFIX):]
        try:
            j = cast(Dict[str, dict], json.loads(v))
            if (
                    (self.mgr.migration_current or 0) < 3
                    and j['spec'].get('service_type') == 'nfs'
            ):
                self.mgr.log.debug(f'found legacy nfs spec {j}')
                queue_migrate_nfs_spec(self.mgr, j)
            spec = ServiceSpec.from_json(j['spec'])
            created = str_to_datetime(cast(str, j['created']))
            self._specs[service_name] = spec
            self.spec_created[service_name] = created

            if 'deleted' in j:
                deleted = str_to_datetime(cast(str, j['deleted']))
                self.spec_deleted[service_name] = deleted

            if 'rank_map' in j and isinstance(j['rank_map'], dict):
                self._rank_maps[service_name] = {}
                for rank_str, m in j['rank_map'].items():
                    try:
                        rank = int(rank_str)
                    except ValueError:
                        logger.exception(f"failed to parse rank in {j['rank_map']}")
                        continue
                    if isinstance(m, dict):
                        self._rank_maps[service_name][rank] = {}
                        for gen_str, name in m.items():
                            try:
                                gen = int(gen_str)
                            except ValueError:
                                logger.exception(f"failed to parse gen in {j['rank_map']}")
                                continue
                            # BUGFIX: was `m is None`, which can never be
                            # true inside this `isinstance(m, dict)` branch
                            # and silently dropped null daemon names. A
                            # rank/gen entry may legitimately map to None.
                            if isinstance(name, str) or name is None:
                                self._rank_maps[service_name][rank][gen] = name

            self.mgr.log.debug('SpecStore: loaded spec for %s' % (service_name))
        except Exception as e:
            self.mgr.log.warning('unable to load spec for %s: %s' % (service_name, e))
            pass
def load(self):
    # type: () -> None
    """Populate self.specs / self.spec_created from the mon store.

    Entries that fail to parse are logged and skipped so that a single
    corrupt spec does not block loading the others.
    """
    for key, raw in self.mgr.get_store_prefix(SPEC_STORE_PREFIX).items():
        service_name = key[len(SPEC_STORE_PREFIX):]
        try:
            payload = cast(Dict[str, dict], json.loads(raw))
            self.specs[service_name] = ServiceSpec.from_json(payload['spec'])
            self.spec_created[service_name] = str_to_datetime(
                cast(str, payload['created']))
            self.mgr.log.debug('SpecStore: loaded spec for %s' % (service_name))
        except Exception as e:
            self.mgr.log.warning('unable to load spec for %s: %s' % (service_name, e))
            pass
def test_nfs(self, cephadm_module):
    """Manually add an nfs daemon while its service is unmanaged, then
    verify deployment and removal."""
    with with_host(cephadm_module, 'test'):
        ps = PlacementSpec(hosts=['test'], count=1)
        spec = NFSServiceSpec(
            service_id='name',
            pool='pool',
            namespace='namespace',
            placement=ps)

        # Clone so the original spec stays managed for the add call.
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_service(cephadm_module, unmanaged_spec):
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')
def load(self):
    # type: () -> None
    """Populate self.specs / self.spec_created from the mon store
    (py2/py3 compatible via six).

    Entries that fail to parse are logged and skipped.
    """
    for key, raw in six.iteritems(self.mgr.get_store_prefix(SPEC_STORE_PREFIX)):
        service_name = key[len(SPEC_STORE_PREFIX):]
        try:
            payload = json.loads(raw)
            self.specs[service_name] = ServiceSpec.from_json(payload['spec'])
            self.spec_created[service_name] = datetime.datetime.strptime(
                payload['created'], DATEFMT)
            self.mgr.log.debug('SpecStore: loaded spec for %s' % (
                service_name))
        except Exception as e:
            self.mgr.log.warning('unable to load spec for %s: %s' % (
                service_name, e))
            pass
def test_spec_octopus(spec_json):
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.
    spec = ServiceSpec.from_json(spec_json)

    # just some verification that we can still read old octopus specs
    def convert_to_old_style_json(j):
        """Flatten the nested 'spec' and drop post-octopus fields so the
        output is comparable to the original octopus JSON."""
        old = dict(j.copy())
        old.pop('service_name', None)
        if 'spec' in old:
            old.update(old.pop('spec'))
        old.pop('objectstore', None)
        return old

    assert spec_json == convert_to_old_style_json(spec.to_json())
def test_iscsi(self, cephadm_module):
    """Manually add an iscsi daemon while its service is unmanaged, then
    verify deployment and removal."""
    with with_host(cephadm_module, 'test'):
        ps = PlacementSpec(hosts=['test'], count=1)
        spec = IscsiServiceSpec(
            service_id='name',
            pool='pool',
            api_user='******',
            api_password='******',
            placement=ps)

        # Clone so the original spec stays managed for the add call.
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_service(cephadm_module, unmanaged_spec):
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')
def test_ingress_spec():
    """Parse an ingress YAML spec and verify the networking fields land
    on the resulting spec object."""
    yaml_str = """service_type: ingress
service_id: rgw.foo
placement:
  hosts:
    - host1
    - host2
    - host3
spec:
  virtual_ip: 192.168.20.1/24
  backend_service: rgw.foo
  frontend_port: 8080
  monitor_port: 8081
"""
    yaml_file = yaml.safe_load(yaml_str)
    spec = ServiceSpec.from_json(yaml_file)
    assert spec.service_type == "ingress"
    assert spec.service_id == "rgw.foo"
    assert spec.virtual_ip == "192.168.20.1/24"
    assert spec.frontend_port == 8080
    assert spec.monitor_port == 8081
def json_to_generic_spec(spec: dict) -> GenericSpec:
    """Parse a plain dict into a HostSpec when service_type is 'host',
    otherwise into a ServiceSpec."""
    # .get() is equivalent to the `in`-then-index check: a missing key
    # simply compares unequal to 'host'.
    if spec.get('service_type') == 'host':
        return HostSpec.from_json(spec)
    return ServiceSpec.from_json(spec)
def test_service_id_raises_invalid_char(s_type, s_id):
    """Validation must reject service ids containing invalid characters."""
    with pytest.raises(ServiceSpecValidationError):
        ServiceSpec.from_json(_get_dict_spec(s_type, s_id)).validate()
def test_service_name(s_type, s_id, s_name):
    """A valid spec's service_name() must match the expected name."""
    parsed = ServiceSpec.from_json(_get_dict_spec(s_type, s_id))
    parsed.validate()
    assert parsed.service_name() == s_name
def test_service_spec_validation_error(y, error_match):
    """Parsing an invalid YAML spec must raise SpecValidationError with a
    message matching *error_match*."""
    data = yaml.safe_load(y)
    with pytest.raises(SpecValidationError) as err:
        spec_obj = ServiceSpec.from_json(data)
    assert err.match(error_match)
def apply(self, service_spec: Dict) -> OrchResult[List[str]]:
    """Parse *service_spec* into a ServiceSpec and apply it through the
    orchestrator API."""
    parsed = ServiceSpec.from_json(service_spec)
    return self.api.apply([parsed])