def test_event_multiline():
    from .._interface import OrchestratorEvent
    e = OrchestratorEvent(datetime_now(), 'service', 'subject', 'ERROR', 'message')
    assert OrchestratorEvent.from_json(e.to_json()) == e

    e = OrchestratorEvent(datetime_now(), 'service', 'subject', 'ERROR', 'multiline\nmessage')
    assert OrchestratorEvent.from_json(e.to_json()) == e

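# These snippets all lean on datetime_now() from ceph.utils. A minimal sketch
# of the expected behavior, assuming the helper returns a timezone-aware UTC
# timestamp (which the comparisons and serialization below require):
import datetime

def datetime_now() -> datetime.datetime:
    # Aware UTC "now"; naive datetimes would make comparisons against
    # parsed timestamps ambiguous.
    return datetime.datetime.now(tz=datetime.timezone.utc)
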
def update_host_devices_networks(self, host, dls, nets):
    # type: (str, List[inventory.Device], Dict[str, List[str]]) -> None
    if (
            host not in self.devices
            or host not in self.last_device_change
            or self.devices_changed(host, dls)
    ):
        self.last_device_change[host] = datetime_now()
    self.last_device_update[host] = datetime_now()
    self.devices[host] = dls
    self.networks[host] = nets

def agent_config_successfully_delivered(self, daemon_spec: CephadmDaemonDeploySpec) -> None:
    # agent successfully received new config. Update config/deps
    assert daemon_spec.service_name == 'agent'
    self.update_daemon_config_deps(
        daemon_spec.host, daemon_spec.name(), daemon_spec.deps, datetime_now())
    self.agent_timestamp[daemon_spec.host] = datetime_now()
    self.agent_counter[daemon_spec.host] = 1
    self.save_host(daemon_spec.host)

def update_host_devices(
    self,
    host: str,
    dls: List[inventory.Device],
) -> None:
    if (
            host not in self.devices
            or host not in self.last_device_change
            or self.devices_changed(host, dls)
    ):
        self.last_device_change[host] = datetime_now()
    self.last_device_update[host] = datetime_now()
    self.devices[host] = dls

def receive_agent_metadata(m: CephadmOrchestrator, host: str,
                           ops: Optional[List[str]] = None) -> None:
    to_update: Dict[str, Callable[[str, Any], None]] = {
        'ls': m._process_ls_output,
        'gather-facts': m.cache.update_host_facts,
        'list-networks': m.cache.update_host_networks,
    }
    if ops:
        for op in ops:
            out = CephadmServe(m)._run_cephadm_json(host, cephadmNoImage, op, [])
            to_update[op](host, out)
    m.cache.last_daemon_update[host] = datetime_now()
    m.cache.last_facts_update[host] = datetime_now()
    m.cache.last_network_update[host] = datetime_now()
    m.cache.metadata_up_to_date[host] = True

def _agent_down(self, host: str) -> bool:
    # if we don't have a timestamp, it's likely because of a mgr failover.
    # just set the timestamp to now. However, if the host was offline before, we
    # should not allow creating a new timestamp to cause it to be marked online
    if host not in self.mgr.cache.agent_timestamp:
        self.mgr.cache.agent_timestamp[host] = datetime_now()
        if host in self.mgr.offline_hosts:
            return False
    # the agent hasn't reported in 2.5x its refresh rate; something is likely wrong with it
    time_diff = datetime_now() - self.mgr.cache.agent_timestamp[host]
    if time_diff.total_seconds() > 2.5 * float(self.mgr.agent_refresh_rate):
        return True
    return False

def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon',
            json.dumps({
                'spec': {
                    'service_type': 'mon',
                    'placement': {
                        'count': 5,
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps({
                'spec': {
                    'service_type': 'mon',
                    'service_id': 'wrong',
                    'placement': {
                        'hosts': ['host1']
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5))

def test_remove_osds(self, cephadm_module):
    with with_host(cephadm_module, 'test'):
        CephadmServe(cephadm_module)._refresh_host_daemons('test')
        c = cephadm_module.list_daemons()
        wait(cephadm_module, c)

        c = cephadm_module.remove_daemons(['osd.0'])
        out = wait(cephadm_module, c)
        assert out == ["Removed osd.0 from host 'test'"]

        cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                  replace=False,
                                                  force=False,
                                                  hostname='test',
                                                  fullname='osd.0',
                                                  process_started_at=datetime_now(),
                                                  remove_util=cephadm_module.to_remove_osds.rm_util))
        cephadm_module.to_remove_osds.process_removal_queue()
        assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)

        c = cephadm_module.remove_osds_status()
        out = wait(cephadm_module, c)
        assert out == []

def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 3

def create_from_spec_one(host: str, drive_selection: DriveSelection) -> Optional[str]:
    # skip this host if there has been no change in inventory
    if not self.mgr.cache.osdspec_needs_apply(host, drive_group):
        self.mgr.log.debug("skipping apply of %s on %s (no change)" % (
            drive_group, host))
        return None
    # skip this host if we cannot schedule here
    if self.mgr.inventory.has_label(host, '_no_schedule'):
        return None

    cmd = self.driveselection_to_ceph_volume(drive_selection,
                                             osd_id_claims.get(host, []))
    if not cmd:
        logger.debug("No data_devices, skipping DriveGroup: {}".format(
            drive_group.service_id))
        return None

    logger.debug('Applying service osd.%s on host %s...' % (
        drive_group.service_id, host))
    start_ts = datetime_now()
    env_vars: List[str] = [f"CEPH_VOLUME_OSDSPEC_AFFINITY={drive_group.service_id}"]
    ret_msg = self.create_single_host(
        drive_group, host, cmd,
        replace_osd_ids=osd_id_claims.get(host, []), env_vars=env_vars)
    self.mgr.cache.update_osdspec_last_applied(
        host, drive_group.service_name(), start_ts)
    self.mgr.cache.save_host(host)
    return ret_msg

def load(self):
    # type: () -> None
    for k, v in self.mgr.get_store_prefix(AGENT_CACHE_PREFIX).items():
        host = k[len(AGENT_CACHE_PREFIX):]
        if host not in self.mgr.inventory:
            self.mgr.log.warning(
                'removing stray AgentCache record for agent on %s' % host)
            self.mgr.set_store(k, None)
        try:
            j = json.loads(v)
            self.agent_config_deps[host] = {}
            conf_deps = j.get('agent_config_deps', {})
            if conf_deps:
                conf_deps['last_config'] = str_to_datetime(conf_deps['last_config'])
            self.agent_config_deps[host] = conf_deps
            self.agent_counter[host] = int(j.get('agent_counter', 1))
            self.agent_timestamp[host] = str_to_datetime(
                j.get('agent_timestamp', datetime_to_str(datetime_now())))
            self.agent_keys[host] = str(j.get('agent_keys', ''))
            agent_port = int(j.get('agent_ports', 0))
            if agent_port:
                self.agent_ports[host] = agent_port
        except Exception as e:
            self.mgr.log.warning(
                'unable to load cached state for agent on host %s: %s' % (host, e))

def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
            assert wait(cephadm_module,
                        c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                assert wait(cephadm_module,
                            c) == f"Scheduled to {what} rgw.{daemon_id} on host 'test'"

            # Make sure _check_daemons does a redeploy due to monmap change:
            cephadm_module._store['_ceph_get/mon_map'] = {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            }
            cephadm_module.notify('mon_map', None)

            CephadmServe(cephadm_module)._check_daemons()

def update_host_networks(
    self,
    host: str,
    nets: Dict[str, Dict[str, List[str]]]
) -> None:
    self.networks[host] = nets
    self.last_network_update[host] = datetime_now()

@contextmanager
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option), \
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m

def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, ServiceSpec(service_type='grafana'),
                          CephadmOrchestrator.apply_grafana, 'test'):

            # Make sure _check_daemons does a redeploy due to monmap change:
            cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
            cephadm_module.notify('mon_map', None)
            cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {'modules': ['dashboard']})

            with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                CephadmServe(cephadm_module)._check_daemons()
                _mon_cmd.assert_any_call(
                    {'prefix': 'dashboard set-grafana-api-url',
                     'value': 'https://test:3000'},
                    None)

def test_store(self):
    mgr = FakeMgr(['a', 'b', 'c'], ['a', 'b', 'c'], [], {})
    tps = TunedProfileStore(mgr)
    save_str_p1 = 'tuned_profiles: ' + json.dumps({'p1': self.tspec1.to_json()})
    tspec1_updated = self.tspec1.copy()
    tspec1_updated.settings.update({'new-setting': 'new-value'})
    save_str_p1_updated = 'tuned_profiles: ' + json.dumps({'p1': tspec1_updated.to_json()})
    save_str_p1_updated_p2 = 'tuned_profiles: ' + \
        json.dumps({'p1': tspec1_updated.to_json(), 'p2': self.tspec2.to_json()})
    tspec2_updated = self.tspec2.copy()
    tspec2_updated.settings.pop('something')
    save_str_p1_updated_p2_updated = 'tuned_profiles: ' + \
        json.dumps({'p1': tspec1_updated.to_json(), 'p2': tspec2_updated.to_json()})
    save_str_p2_updated = 'tuned_profiles: ' + json.dumps({'p2': tspec2_updated.to_json()})

    with pytest.raises(SaveError) as e:
        tps.add_profile(self.tspec1)
    assert str(e.value) == save_str_p1
    assert 'p1' in tps

    with pytest.raises(SaveError) as e:
        tps.add_setting('p1', 'new-setting', 'new-value')
    assert str(e.value) == save_str_p1_updated
    assert 'new-setting' in tps.list_profiles()[0].settings

    with pytest.raises(SaveError) as e:
        tps.add_profile(self.tspec2)
    assert str(e.value) == save_str_p1_updated_p2
    assert 'p2' in tps
    assert 'something' in tps.list_profiles()[1].settings

    with pytest.raises(SaveError) as e:
        tps.rm_setting('p2', 'something')
    assert 'something' not in tps.list_profiles()[1].settings
    assert str(e.value) == save_str_p1_updated_p2_updated

    with pytest.raises(SaveError) as e:
        tps.rm_profile('p1')
    assert str(e.value) == save_str_p2_updated
    assert 'p1' not in tps
    assert 'p2' in tps
    assert len(tps.list_profiles()) == 1
    assert tps.list_profiles()[0].profile_name == 'p2'

    cur_last_updated = tps.last_updated('p2')
    new_last_updated = datetime_now()
    assert cur_last_updated != new_last_updated
    tps.set_last_updated('p2', new_last_updated)
    assert tps.last_updated('p2') == new_last_updated

    # check FakeMgr get_store func to see what is expected to be found in Key Store here
    tps.load()
    assert 'x' in tps
    assert 'y' in tps
    assert [p for p in tps.list_profiles() if p.profile_name == 'x'][0].settings == {'x': 'x'}
    assert [p for p in tps.list_profiles() if p.profile_name == 'y'][0].settings == {'y': 'y'}

def _refresh_host_daemons(self, host: str) -> Optional[str]:
    try:
        out, err, code = self._run_cephadm(host, 'mon', 'ls', [], no_fsid=True)
        if code:
            return 'host %s cephadm ls returned %d: %s' % (host, code, err)
        ls = json.loads(''.join(out))
    except ValueError:
        msg = 'host %s scrape failed: Cannot decode JSON' % host
        self.log.exception('%s: \'%s\'' % (msg, ''.join(out)))
        return msg
    except Exception as e:
        return 'host %s scrape failed: %s' % (host, e)
    dm = {}
    for d in ls:
        if not d['style'].startswith('cephadm'):
            continue
        if d['fsid'] != self.mgr._cluster_fsid:
            continue
        if '.' not in d['name']:
            continue
        sd = orchestrator.DaemonDescription()
        sd.last_refresh = datetime_now()
        for k in ['created', 'started', 'last_configured', 'last_deployed']:
            v = d.get(k, None)
            if v:
                setattr(sd, k, str_to_datetime(d[k]))
        sd.daemon_type = d['name'].split('.')[0]
        sd.daemon_id = '.'.join(d['name'].split('.')[1:])
        sd.hostname = host
        sd.container_id = d.get('container_id')
        if sd.container_id:
            # shorten the hash
            sd.container_id = sd.container_id[0:12]
        sd.container_image_name = d.get('container_image_name')
        sd.container_image_id = d.get('container_image_id')
        sd.version = d.get('version')
        if sd.daemon_type == 'osd':
            sd.osdspec_affinity = self.mgr.osd_service.get_osdspec_affinity(sd.daemon_id)
        if 'state' in d:
            sd.status_desc = d['state']
            sd.status = {
                'running': 1,
                'stopped': 0,
                'error': -1,
                'unknown': -1,
            }[d['state']]
        else:
            sd.status_desc = 'unknown'
            sd.status = None
        dm[sd.name()] = sd
    self.log.debug('Refreshed host %s daemons (%d)' % (host, len(dm)))
    self.mgr.cache.update_host_daemons(host, dm)
    self.mgr.cache.save_host(host)
    return None

def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:
                _ceph_send_command.side_effect = Exception("myerror")

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                evs = [
                    e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')
                ]
                assert 'myerror' in ''.join(evs)

def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            receive_agent_metadata_all_hosts(cephadm_module)
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()
            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry for this hack, but I need to make sure Migration thinks
            # we have already updated all daemons.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current >= 2

            out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[
                HostPlacementSpec(hostname='host1', network='', name=''),
                HostPlacementSpec(hostname='host2', network='', name='')])]

def from_orch_error(self, e: OrchestratorError) -> None:
    if e.event_subject is not None:
        self.add(OrchestratorEvent(
            datetime_now(),
            e.event_subject[0],
            e.event_subject[1],
            "ERROR",
            str(e)
        ))

def host_needs_autotune_memory(self, host):
    # type: (str) -> bool
    if host in self.mgr.offline_hosts:
        logger.debug(f'Host "{host}" marked as offline. Skipping autotune')
        return False
    cutoff = datetime_now() - datetime.timedelta(seconds=self.mgr.autotune_interval)
    if host not in self.last_autotune or self.last_autotune[host] < cutoff:
        return True
    return False

def host_needs_facts_refresh(self, host):
    # type: (str) -> bool
    if host in self.mgr.offline_hosts:
        logger.debug(f'Host "{host}" marked as offline. Skipping gather facts refresh')
        return False
    cutoff = datetime_now() - datetime.timedelta(seconds=self.mgr.facts_cache_timeout)
    if host not in self.last_facts_update or self.last_facts_update[host] < cutoff:
        return True
    return False

def handle_metadata(self, data: Dict[str, Any]) -> None:
    try:
        host = data['host']
        self.mgr.cache.agent_ports[host] = int(data['port'])
        if host not in self.mgr.cache.agent_counter:
            self.mgr.log.debug(
                f'Got metadata from agent on host {host} with no known counter entry. '
                f'Starting counter at 1 and requesting new metadata')
            self.mgr.cache.agent_counter[host] = 1
            self.mgr.agent_helpers._request_agent_acks({host})
            return

        # update the timestamp of the most recent agent update
        self.mgr.cache.agent_timestamp[host] = datetime_now()

        up_to_date = False

        int_ack = int(data['ack'])
        if int_ack == self.mgr.cache.agent_counter[host]:
            up_to_date = True
        else:
            # we got an old counter value with this message; inform the agent
            # of the new timestamp
            if not self.mgr.cache.messaging_agent(host):
                self.mgr.agent_helpers._request_agent_acks({host})
            self.mgr.log.info(
                f'Received old metadata from agent on host {host}. Requested up-to-date metadata.')

        if 'ls' in data and data['ls']:
            self.mgr._process_ls_output(host, data['ls'])
        if 'networks' in data and data['networks']:
            self.mgr.cache.update_host_networks(host, data['networks'])
        if 'facts' in data and data['facts']:
            self.mgr.cache.update_host_facts(host, json.loads(data['facts']))
        if 'volume' in data and data['volume']:
            ret = Devices.from_json(json.loads(data['volume']))
            self.mgr.cache.update_host_devices(host, ret.devices)

        if up_to_date:
            was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date()
            self.mgr.cache.metadata_up_to_date[host] = True
            if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date():
                self.mgr.log.info(
                    'New metadata from agent has made all hosts up to date. Kicking serve loop')
                self.mgr._kick_serve_loop()
            self.mgr.log.info(
                f'Received up-to-date metadata from agent on host {host}.')
    except Exception as e:
        self.mgr.log.warning(
            f'Failed to update cached metadata from agent on host {host}: {e}')

def _refresh_host_daemons(self, host: str) -> Optional[str]:
    try:
        ls = self._run_cephadm_json(host, 'mon', 'ls', [], no_fsid=True)
    except OrchestratorError as e:
        return str(e)
    dm = {}
    for d in ls:
        if not d['style'].startswith('cephadm'):
            continue
        if d['fsid'] != self.mgr._cluster_fsid:
            continue
        if '.' not in d['name']:
            continue
        sd = orchestrator.DaemonDescription()
        sd.last_refresh = datetime_now()
        for k in ['created', 'started', 'last_configured', 'last_deployed']:
            v = d.get(k, None)
            if v:
                setattr(sd, k, str_to_datetime(d[k]))
        sd.daemon_type = d['name'].split('.')[0]
        sd.daemon_id = '.'.join(d['name'].split('.')[1:])
        sd.hostname = host
        sd.container_id = d.get('container_id')
        if sd.container_id:
            # shorten the hash
            sd.container_id = sd.container_id[0:12]
        sd.container_image_name = d.get('container_image_name')
        sd.container_image_id = d.get('container_image_id')
        sd.container_image_digests = d.get('container_image_digests')
        sd.memory_usage = d.get('memory_usage')
        sd.memory_request = d.get('memory_request')
        sd.memory_limit = d.get('memory_limit')
        sd._service_name = d.get('service_name')
        sd.version = d.get('version')
        sd.ports = d.get('ports')
        sd.ip = d.get('ip')
        if sd.daemon_type == 'osd':
            sd.osdspec_affinity = self.mgr.osd_service.get_osdspec_affinity(sd.daemon_id)
        if 'state' in d:
            sd.status_desc = d['state']
            sd.status = {
                'running': DaemonDescriptionStatus.running,
                'stopped': DaemonDescriptionStatus.stopped,
                'error': DaemonDescriptionStatus.error,
                'unknown': DaemonDescriptionStatus.error,
            }[d['state']]
        else:
            sd.status_desc = 'unknown'
            sd.status = None
        dm[sd.name()] = sd
    self.log.debug('Refreshed host %s daemons (%d)' % (host, len(dm)))
    self.mgr.cache.update_host_daemons(host, dm)
    self.mgr.cache.save_host(host)
    return None

def _agent_down(self, host: str) -> bool:
    # if the host is draining or drained (has the _no_schedule label), there
    # should not be an agent deployed there, so return False
    if host not in [h.hostname for h in self.mgr.cache.get_non_draining_hosts()]:
        return False
    # if we haven't deployed an agent on the host yet, don't say an agent is down
    if not self.mgr.cache.get_daemons_by_type('agent', host=host):
        return False
    # if we don't have a timestamp, it's likely because of a mgr failover.
    # just set the timestamp to now. However, if the host was offline before, we
    # should not allow creating a new timestamp to cause it to be marked online
    if host not in self.mgr.cache.agent_timestamp:
        self.mgr.cache.agent_timestamp[host] = datetime_now()
        if host in self.mgr.offline_hosts:
            return False
    # the agent hasn't reported in 2.5x its refresh rate; something is likely wrong with it
    time_diff = datetime_now() - self.mgr.cache.agent_timestamp[host]
    if time_diff.total_seconds() > 2.5 * float(self.mgr.agent_refresh_rate):
        return True
    return False

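# The staleness rule above can be exercised on its own. A minimal sketch of
# the threshold logic with plain datetimes; the 20s refresh rate below is an
# illustrative value, not the mgr's actual configuration.
import datetime

def agent_is_down(last_report: datetime.datetime, refresh_rate: float = 20.0) -> bool:
    # An agent is considered down once it has been silent for more than
    # 2.5x its refresh rate.
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    return (now - last_report).total_seconds() > 2.5 * refresh_rate

# An agent last heard from 60 seconds ago misses the 50-second budget:
stale = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(seconds=60)
assert agent_is_down(stale)
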
def list(_) -> Optional[Dict]:  # pylint: disable=no-self-argument
    value: str = self.get_option(self.NAME)
    if not value:
        return None
    data: MotdData = MotdData(**json.loads(value))
    # Check if the MOTD has expired.
    if data.expires:
        expires = str_to_datetime(data.expires)
        if expires < datetime_now():
            return None
    return data._asdict()

def rm(self, service_name: str) -> bool:
    if service_name not in self._specs:
        return False

    if self._specs[service_name].preview_only:
        self.finally_rm(service_name)
        return True

    self.spec_deleted[service_name] = datetime_now()
    self.save(self._specs[service_name], update_create=False)
    return True

def host_needs_device_refresh(self, host):
    # type: (str) -> bool
    if host in self.mgr.offline_hosts:
        logger.debug(f'Host "{host}" marked as offline. Skipping device refresh')
        return False
    if host in self.device_refresh_queue:
        self.device_refresh_queue.remove(host)
        return True
    cutoff = datetime_now() - datetime.timedelta(seconds=self.mgr.device_cache_timeout)
    if host not in self.last_device_update or self.last_device_update[host] < cutoff:
        return True
    return False

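# The host_needs_* helpers above share one TTL pattern: compute a cutoff at
# now - timeout and refresh when the host's last update predates it. A
# standalone sketch of that pattern, with hypothetical names:
import datetime
from typing import Dict

def needs_refresh(last_update: Dict[str, datetime.datetime],
                  host: str, timeout_secs: float) -> bool:
    # Refresh if we have never seen this host, or if its last update is
    # older than the cutoff.
    cutoff = datetime.datetime.now(tz=datetime.timezone.utc) \
        - datetime.timedelta(seconds=timeout_secs)
    return host not in last_update or last_update[host] < cutoff
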
def save(
    self,
    spec: ServiceSpec,
    update_create: bool = True,
) -> None:
    name = spec.service_name()
    if spec.preview_only:
        self.spec_preview[name] = spec
        return None
    self._specs[name] = spec

    if update_create:
        self.spec_created[name] = datetime_now()

    self._save(name)

def save(self, spec): # type: (ServiceSpec) -> None if spec.preview_only: self.spec_preview[spec.service_name()] = spec return None self.specs[spec.service_name()] = spec self.spec_created[spec.service_name()] = datetime_now() self.mgr.set_store( SPEC_STORE_PREFIX + spec.service_name(), json.dumps({ 'spec': spec.to_json(), 'created': datetime_to_str(self.spec_created[spec.service_name()]), }, sort_keys=True), ) self.mgr.events.for_service(spec, OrchestratorEvent.INFO, 'service was created')