def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon',
            json.dumps({
                'spec': {
                    'service_type': 'mon',
                    'placement': {
                        'count': 5,
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps({
                'spec': {
                    'service_type': 'mon',
                    'service_id': 'wrong',
                    'placement': {
                        'hosts': ['host1']
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5))
def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:

            c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
            assert wait(cephadm_module,
                        c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                assert wait(cephadm_module,
                            c) == f"Scheduled to {what} rgw.{daemon_id} on host 'test'"

            # Make sure _check_daemons does a redeploy due to a monmap change:
            cephadm_module._store['_ceph_get/mon_map'] = {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            }
            cephadm_module.notify('mon_map', None)

            CephadmServe(cephadm_module)._check_daemons()
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m
def load(self):
    # type: () -> None
    for k, v in self.mgr.get_store_prefix(AGENT_CACHE_PREFIX).items():
        host = k[len(AGENT_CACHE_PREFIX):]
        if host not in self.mgr.inventory:
            self.mgr.log.warning(
                'removing stray AgentCache record for agent on %s' % (host))
            self.mgr.set_store(k, None)
        try:
            j = json.loads(v)
            self.agent_config_deps[host] = {}
            conf_deps = j.get('agent_config_deps', {})
            if conf_deps:
                conf_deps['last_config'] = str_to_datetime(conf_deps['last_config'])
            self.agent_config_deps[host] = conf_deps
            self.agent_counter[host] = int(j.get('agent_counter', 1))
            self.agent_timestamp[host] = str_to_datetime(
                j.get('agent_timestamp', datetime_to_str(datetime_now())))
            self.agent_keys[host] = str(j.get('agent_keys', ''))
            agent_port = int(j.get('agent_ports', 0))
            if agent_port:
                self.agent_ports[host] = agent_port
        except Exception as e:
            self.mgr.log.warning(
                'unable to load cached state for agent on host %s: %s' % (host, e))
            pass
def _save(self, name: str) -> None:
    data: Dict[str, Any] = {
        'spec': self._specs[name].to_json(),
        'created': datetime_to_str(self.spec_created[name]),
    }
    if name in self._rank_maps:
        data['rank_map'] = self._rank_maps[name]
    if name in self.spec_deleted:
        data['deleted'] = datetime_to_str(self.spec_deleted[name])

    self.mgr.set_store(
        SPEC_STORE_PREFIX + name,
        json.dumps(data, sort_keys=True),
    )
    self.mgr.events.for_service(self._specs[name],
                                OrchestratorEvent.INFO,
                                'service was created')
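For orientation, the record written under SPEC_STORE_PREFIX + name is a small JSON object; the sketch below shows its shape with illustrative values only (the contents of 'spec' depend on the ServiceSpec being saved, and 'rank_map'/'deleted' are present only when that state exists).

# Illustrative only: the shape of the blob _save() persists for a spec named 'mon'.
example_record = {
    'spec': {'service_type': 'mon', 'placement': {'count': 5}},
    'created': '2021-01-01T00:00:00.000000Z',   # datetime_to_str() output
    'rank_map': {},                             # optional, only for ranked services
    'deleted': '2021-01-02T00:00:00.000000Z',   # optional, only after deletion
}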
def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, ServiceSpec(service_type='grafana'),
                          CephadmOrchestrator.apply_grafana, 'test'):

            # Make sure _check_daemons does a redeploy due to a monmap change:
            cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
            cephadm_module.notify('mon_map', None)
            cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {'modules': ['dashboard']})

            with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                CephadmServe(cephadm_module)._check_daemons()
                _mon_cmd.assert_any_call(
                    {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'},
                    None)
def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 3
def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                _ceph_send_command.side_effect = Exception("myerror")

                # Make sure _check_daemons does a redeploy due to a monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                evs = [e.message for e in cephadm_module.events.get_for_daemon(
                    f'rgw.{daemon_id}')]

                assert 'myerror' in ''.join(evs)
def to_json(self) -> dict:
    out: Dict[str, Any] = OrderedDict()
    out['daemon_type'] = self.daemon_type
    out['daemon_id'] = self.daemon_id
    out['hostname'] = self.hostname
    out['container_id'] = self.container_id
    out['container_image_id'] = self.container_image_id
    out['container_image_name'] = self.container_image_name
    out['container_image_digests'] = self.container_image_digests
    out['memory_usage'] = self.memory_usage
    out['memory_request'] = self.memory_request
    out['memory_limit'] = self.memory_limit
    out['version'] = self.version
    out['status'] = self.status.value if self.status is not None else None
    out['status_desc'] = self.status_desc
    if self.daemon_type == 'osd':
        out['osdspec_affinity'] = self.osdspec_affinity
    out['is_active'] = self.is_active
    out['ports'] = self.ports
    out['ip'] = self.ip

    for k in ['last_refresh', 'created', 'started', 'last_deployed',
              'last_configured']:
        if getattr(self, k):
            out[k] = datetime_to_str(getattr(self, k))

    if self.events:
        out['events'] = [e.to_json() for e in self.events]

    empty = [k for k, v in out.items() if v is None]
    for e in empty:
        del out[e]
    return out
def to_json(self):
    # type: () -> dict
    # Serialize the 'created' timestamp as a string; every other report
    # field is passed through unchanged.
    return {
        k: (getattr(self, k)
            if k != 'created' or not isinstance(getattr(self, k), datetime.datetime)
            else datetime_to_str(getattr(self, k)))
        for k in self.report_fields
    }
def save_agent(self, host: str) -> None:
    j: Dict[str, Any] = {}
    if host in self.agent_config_deps:
        j['agent_config_deps'] = {
            'deps': self.agent_config_deps[host].get('deps', []),
            'last_config': datetime_to_str(self.agent_config_deps[host]['last_config']),
        }
    if host in self.agent_counter:
        j['agent_counter'] = self.agent_counter[host]
    if host in self.agent_keys:
        j['agent_keys'] = self.agent_keys[host]
    if host in self.agent_ports:
        j['agent_ports'] = self.agent_ports[host]
    if host in self.agent_timestamp:
        j['agent_timestamp'] = datetime_to_str(self.agent_timestamp[host])

    self.mgr.set_store(AGENT_CACHE_PREFIX + host, json.dumps(j))
def save_host(self, host: str) -> None:
    j: Dict[str, Any] = {
        'daemons': {},
        'devices': [],
        'osdspec_previews': [],
        'osdspec_last_applied': {},
        'daemon_config_deps': {},
    }
    if host in self.last_daemon_update:
        j['last_daemon_update'] = datetime_to_str(self.last_daemon_update[host])
    if host in self.last_device_update:
        j['last_device_update'] = datetime_to_str(self.last_device_update[host])
    if host in self.last_network_update:
        j['last_network_update'] = datetime_to_str(self.last_network_update[host])
    if host in self.last_device_change:
        j['last_device_change'] = datetime_to_str(self.last_device_change[host])
    if host in self.daemons:
        for name, dd in self.daemons[host].items():
            j['daemons'][name] = dd.to_json()
    if host in self.devices:
        for d in self.devices[host]:
            j['devices'].append(d.to_json())
    if host in self.networks:
        j['networks_and_interfaces'] = self.networks[host]
    if host in self.daemon_config_deps:
        for name, depi in self.daemon_config_deps[host].items():
            j['daemon_config_deps'][name] = {
                'deps': depi.get('deps', []),
                'last_config': datetime_to_str(depi['last_config']),
            }
    if host in self.osdspec_previews and self.osdspec_previews[host]:
        j['osdspec_previews'] = self.osdspec_previews[host]
    if host in self.osdspec_last_applied:
        for name, ts in self.osdspec_last_applied[host].items():
            j['osdspec_last_applied'][name] = datetime_to_str(ts)
    if host in self.last_host_check:
        j['last_host_check'] = datetime_to_str(self.last_host_check[host])
    if host in self.last_client_files:
        j['last_client_files'] = self.last_client_files[host]
    if host in self.scheduled_daemon_actions:
        j['scheduled_daemon_actions'] = self.scheduled_daemon_actions[host]
    if host in self.agent_counter:
        j['agent_counter'] = self.agent_counter[host]
    if host in self.agent_keys:
        j['agent_keys'] = self.agent_keys[host]
    if host in self.agent_ports:
        j['agent_ports'] = self.agent_ports[host]

    self.mgr.set_store(HOST_CACHE_PREFIX + host, json.dumps(j))
def save_host(self, host: str) -> None:
    j: Dict[str, Any] = {
        'daemons': {},
        'devices': [],
        'osdspec_previews': [],
        'daemon_config_deps': {},
    }
    if host in self.last_daemon_update:
        j['last_daemon_update'] = datetime_to_str(self.last_daemon_update[host])
    if host in self.last_device_update:
        j['last_device_update'] = datetime_to_str(self.last_device_update[host])
    if host in self.daemons:
        for name, dd in self.daemons[host].items():
            j['daemons'][name] = dd.to_json()
    if host in self.devices:
        for d in self.devices[host]:
            j['devices'].append(d.to_json())
    if host in self.networks:
        j['networks'] = self.networks[host]
    if host in self.daemon_config_deps:
        for name, depi in self.daemon_config_deps[host].items():
            j['daemon_config_deps'][name] = {
                'deps': depi.get('deps', []),
                'last_config': datetime_to_str(depi['last_config']),
            }
    if host in self.osdspec_previews and self.osdspec_previews[host]:
        j['osdspec_previews'] = self.osdspec_previews[host]
    if host in self.last_host_check:
        j['last_host_check'] = datetime_to_str(self.last_host_check[host])
    if host in self.last_etc_ceph_ceph_conf:
        j['last_etc_ceph_ceph_conf'] = datetime_to_str(
            self.last_etc_ceph_ceph_conf[host])
    if host in self.scheduled_daemon_actions:
        j['scheduled_daemon_actions'] = self.scheduled_daemon_actions[host]

    self.mgr.set_store(HOST_CACHE_PREFIX + host, json.dumps(j))
def save(self, spec: ServiceSpec, update_create: bool = True) -> None:
    name = spec.service_name()
    if spec.preview_only:
        self.spec_preview[name] = spec
        return None
    self._specs[name] = spec

    if update_create:
        self.spec_created[name] = datetime_now()

    data = {
        'spec': spec.to_json(),
        'created': datetime_to_str(self.spec_created[name]),
    }
    if name in self.spec_deleted:
        data['deleted'] = datetime_to_str(self.spec_deleted[name])

    self.mgr.set_store(
        SPEC_STORE_PREFIX + name,
        json.dumps(data, sort_keys=True),
    )
    self.mgr.events.for_service(spec, OrchestratorEvent.INFO, 'service was created')
def save(self, spec):
    # type: (ServiceSpec) -> None
    if spec.preview_only:
        self.spec_preview[spec.service_name()] = spec
        return None
    self.specs[spec.service_name()] = spec
    self.spec_created[spec.service_name()] = datetime_now()
    self.mgr.set_store(
        SPEC_STORE_PREFIX + spec.service_name(),
        json.dumps({
            'spec': spec.to_json(),
            'created': datetime_to_str(self.spec_created[spec.service_name()]),
        }, sort_keys=True),
    )
    self.mgr.events.for_service(spec, OrchestratorEvent.INFO, 'service was created')
def test_etc_ceph(self, _check, _get_connection, cephadm_module):
    _get_connection.return_value = mock.Mock(), mock.Mock()
    _check.return_value = '{}', '', 0

    assert cephadm_module.manage_etc_ceph_ceph_conf is False

    with with_host(cephadm_module, 'test'):
        assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

    with with_host(cephadm_module, 'test'):
        cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
        cephadm_module.config_notify()
        assert cephadm_module.manage_etc_ceph_ceph_conf is True

        CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
        _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'')

        assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        # set extra config and expect that we deploy another ceph.conf
        cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
        CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
        _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'],
                                  stdin=b'\n\n[mon]\nk=v\n')

        # reload
        cephadm_module.cache.last_etc_ceph_ceph_conf = {}
        cephadm_module.cache.load()

        assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        # Make sure _check_daemons does a redeploy due to a monmap change:
        cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
            'modified': datetime_to_str(datetime_now()),
            'fsid': 'foobar',
        })
        cephadm_module.notify('mon_map', mock.MagicMock())

        assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        cephadm_module.cache.last_etc_ceph_ceph_conf = {}
        cephadm_module.cache.load()
        assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
def _set(_, severity: MotdSeverity, expires: str, message: str):
    if expires != '0':
        delta = parse_timedelta(expires)
        if not delta:
            return 1, '', 'Invalid expires format, use "2h", "10d" or "30s"'
        expires = datetime_to_str(datetime_now() + delta)
    else:
        expires = ''
    value: str = json.dumps({
        'message': message,
        'md5': hashlib.md5(message.encode()).hexdigest(),
        'severity': severity.value,
        'expires': expires
    })
    self.set_option(self.NAME, value)
    return 0, 'Message of the day has been set.', ''
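Note the convention above: an empty 'expires' string means the message never expires, otherwise the value is a datetime_to_str() timestamp. A hypothetical helper for checking expiry on the reading side might look like the sketch below; it is not part of the module, and it assumes the same str_to_datetime/datetime_now utilities used in the other snippets in this section.

def motd_expired(stored_value: str) -> bool:
    # stored_value is the JSON blob produced by _set() above (hypothetical reader).
    motd = json.loads(stored_value)
    if not motd['expires']:
        return False  # '' means the message never expires
    # Compare the stored UTC timestamp against "now" in UTC.
    return str_to_datetime(motd['expires']) < datetime_now()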
def to_json(self) -> dict:
    out: Dict[str, Any] = dict()
    out['osd_id'] = self.osd_id
    out['started'] = self.started
    out['draining'] = self.draining
    out['stopped'] = self.stopped
    out['replace'] = self.replace
    out['force'] = self.force
    out['hostname'] = self.hostname  # type: ignore

    for k in ['drain_started_at', 'drain_stopped_at', 'drain_done_at',
              'process_started_at']:
        if getattr(self, k):
            out[k] = datetime_to_str(getattr(self, k))
        else:
            out[k] = getattr(self, k)
    return out
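A deserializer is not shown in this section; presumably it reverses the timestamp handling with str_to_datetime before reconstructing the object. A hypothetical sketch of just that step (the helper name is made up for illustration):

def deserialize_osd_timestamps(data: dict) -> dict:
    # Hypothetical helper: convert the serialized timestamp strings produced by
    # to_json() back into datetime objects; all other fields are left untouched.
    out = dict(data)
    for k in ['drain_started_at', 'drain_stopped_at', 'drain_done_at',
              'process_started_at']:
        if out.get(k):
            out[k] = str_to_datetime(out[k])
    return out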
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch('cephadm.module.CephadmOrchestrator.get_module_option_ex', get_module_option_ex),\
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \
            mock.patch('cephadm.agent.CherryPyThread.run'), \
            mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'), \
            mock.patch('cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles'):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        if '_ceph_get/mgr_map' not in store:
            m.mock_store_set('_ceph_get', 'mgr_map', {
                'services': {
                    'dashboard': 'http://[::1]:8080',
                    'prometheus': 'http://[::1]:8081'
                },
                'modules': ['dashboard', 'prometheus'],
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"

        m.event_loop = MockEventLoopThread()
        m.tkey = NamedTemporaryFile(prefix='test-cephadm-identity-')

        yield m
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'mds',
                    'placement': {
                        'hosts': ['host1']
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.all_specs) == 0
def to_json(self) -> OrderedDict:
    out = self.spec.to_json()
    status = {
        'container_image_id': self.container_image_id,
        'container_image_name': self.container_image_name,
        'rados_config_location': self.rados_config_location,
        'service_url': self.service_url,
        'size': self.size,
        'running': self.running,
        'last_refresh': self.last_refresh,
        'created': self.created,
    }
    for k in ['last_refresh', 'created']:
        if getattr(self, k):
            status[k] = datetime_to_str(getattr(self, k))
    status = {k: v for (k, v) in status.items() if v is not None}
    out['status'] = status
    if self.events:
        out['events'] = [e.to_json() for e in self.events]
    return out
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \
            mock.patch('cephadm.agent.CherryPyThread.run'):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"

        m.event_loop = MockEventLoopThread()
        m.tkey = NamedTemporaryFile(prefix='test-cephadm-identity-')

        yield m
def to_json(self) -> str:
    # Make a long list of events readable.
    created = datetime_to_str(self.created)
    return f'{created} {self.kind_subject()} [{self.level}] "{self.message}"'
def test_datetime_to_str_1():
    dt = datetime.datetime.now()
    assert type(datetime_to_str(dt)) is str
def test_datetime_to_str_3():
    dt = datetime.datetime.strptime('2020-11-02T04:40:12.748172-0800',
                                    '%Y-%m-%dT%H:%M:%S.%f%z')
    assert datetime_to_str(dt) == '2020-11-02T12:40:12.748172Z'
def test_datetime_to_str_2():
    dt = datetime.datetime.strptime('2019-04-24T17:06:53.039991',
                                    '%Y-%m-%dT%H:%M:%S.%f')
    assert datetime_to_str(dt) == '2019-04-24T17:06:53.039991Z'
def test_datetime_to_str_2():
    # note: tz isn't specified in the string, so explicitly store this as UTC
    dt = datetime.datetime.strptime(
        '2019-04-24T17:06:53.039991',
        '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=datetime.timezone.utc)
    assert datetime_to_str(dt) == '2019-04-24T17:06:53.039991Z'
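Taken together, these tests pin datetime_to_str to an ISO-8601, microsecond-precision UTC string with a literal 'Z' suffix, with timezone-aware inputs converted to UTC first (the -0800 case above). A minimal sketch consistent with that asserted behavior follows; it is an assumption for illustration, not necessarily the actual ceph utility implementation.

import datetime

DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'  # assumed format string; matches the asserted output

def datetime_to_str(dt: datetime.datetime) -> str:
    # Normalize to UTC so offsets such as -0800 collapse into the trailing 'Z'.
    return dt.astimezone(tz=datetime.timezone.utc).strftime(DATEFMT)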