def on_version(self, reported_by, sync_type, new_version):
    """
    Notify me that a particular version of a particular map exists.

    I may choose to initiate RPC to retrieve the map
    """
    log.debug("SyncObjects.on_version %s/%s/%s" % (reported_by, sync_type.str, new_version))
    old_version = self.get_version(sync_type)
    if sync_type.cmp(new_version, old_version) > 0:
        known_version = self._known_versions[sync_type]
        if sync_type.cmp(new_version, known_version) > 0:
            # We are out of date: request an up to date copy
            log.info("Advanced known version %s/%s %s->%s" % (
                self._cluster_name, sync_type.str, known_version, new_version))
            self._known_versions[sync_type] = new_version
        else:
            log.info("on_version: %s is newer than %s" % (new_version, old_version))

        # If we already have a request out for this type of map, then consider
        # cancelling it if we've already waited for a while.
        if self._fetching_at[sync_type] is not None:
            if now() - self._fetching_at[sync_type] < self.FETCH_TIMEOUT:
                log.info("Fetch already underway for %s" % sync_type.str)
                return
            else:
                log.warn("Abandoning fetch for %s started at %s" % (
                    sync_type.str, self._fetching_at[sync_type]))

        log.info("on_version: fetching %s/%s from %s, currently got %s, know %s" % (
            sync_type, new_version, reported_by, old_version, known_version
        ))
        self.fetch(reported_by, sync_type)

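# Illustrative sketch, not part of the original module: on_version() above only
# assumes that sync_type.cmp(a, b) is a three-way comparator over map versions
# (positive when a is newer than b). For integer epochs such as OSD map epochs,
# a minimal comparator could look like this; the name _cmp_epoch is hypothetical.
def _cmp_epoch(a, b):
    """Return >0 if a is newer than b, 0 if equal, <0 if older."""
    return (a > b) - (a < b)

assert _cmp_epoch(5, 3) > 0   # a newer reported version would trigger a fetch
assert _cmp_epoch(3, 3) == 0  # an already-known version is ignored
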
def on_fetch_complete(self, minion_id, sync_type, version, data):
    """
    :return A SyncObject if this version was new to us, else None
    """
    log.debug("SyncObjects.on_fetch_complete %s/%s/%s" % (minion_id, sync_type.str, version))
    self._fetching_at[sync_type] = None

    # A fetch might give us a newer version than we knew we had asked for
    if sync_type.cmp(version, self._known_versions[sync_type]) > 0:
        self._known_versions[sync_type] = version

    # Don't store this if we already got something newer
    if sync_type.cmp(version, self.get_version(sync_type)) <= 0:
        log.warn("Ignoring outdated update %s/%s from %s" % (sync_type.str, version, minion_id))
        new_object = None
    else:
        log.info("Got new version %s/%s" % (sync_type.str, version))
        new_object = self.set_map(sync_type, version, data)

    # This might not be the latest: if it's not, send out another fetch
    # right away
    if sync_type.cmp(self._known_versions[sync_type], version) > 0:
        self.fetch(minion_id, sync_type)

    return new_object

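# Illustrative sketch (assumption, not from the original file): the bookkeeping
# that on_version() and on_fetch_complete() rely on amounts to two dicts keyed
# by sync type: the newest version any minion has reported, and the timestamp
# of a fetch currently in flight (or None). The helper name below is a
# hypothetical stand-in for how they might be initialised.
def _init_sync_bookkeeping(sync_types):
    known_versions = dict((t, None) for t in sync_types)  # newest version heard of
    fetching_at = dict((t, None) for t in sync_types)     # start time of in-flight fetch
    return known_versions, fetching_at
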
def fetch(self, minion_id, sync_type):
    log.debug("SyncObjects.fetch: %s/%s" % (minion_id, sync_type))
    if minion_id is None:
        # We're probably being replayed to from the database
        log.warn("SyncObjects.fetch called with minion_id=None")
        return

    self._fetching_at[sync_type] = now()
    client = LocalClient(config.get('cthulhu', 'salt_config_path'))
    # TODO clean up unused 'since' argument
    pub_data = client.run_job(
        minion_id, 'ceph.get_cluster_object',
        condition_kwarg([], {
            'cluster_name': self._cluster_name,
            'sync_type': sync_type.str,
            'since': None
        }))
    if not pub_data:
        log.error("Failed to start fetch job %s/%s" % (minion_id, sync_type))
        # Don't throw an exception because if a fetch fails we should end up
        # issuing another on next heartbeat
    else:
        log.debug("SyncObjects.fetch: jid=%s minions=%s" % (pub_data['jid'], pub_data['minions']))

def _get_fqdn(self, fsid, service_type, service_id):
    """
    Resolve a service to a FQDN if possible, else return None
    """
    server = self._manager.servers.get_by_service(ServiceId(fsid, service_type, str(service_id)))
    if server is None:
        log.warn("No server found for service %s %s" % (service_type, service_id))
    return server.fqdn if server else None

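# Illustrative sketch (assumption): ServiceId is used above purely as a value
# object keyed on (fsid, service_type, service_id); something as simple as
#
#   ServiceId = namedtuple('ServiceId', ['fsid', 'service_type', 'service_id'])
#
# is enough to model the lookup key. A hypothetical call site would be:
#
#   fqdn = self._get_fqdn(self.fsid, 'osd', 3)
#   if fqdn is None:
#       ...  # the service is not currently mapped to any known server
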
def on_sync_object(self, minion_id, data):
    if minion_id != self._favorite_mon:
        log.debug("Ignoring map from %s, it is not my favourite (%s)" % (minion_id, self._favorite_mon))

    assert data['fsid'] == self.fsid

    sync_object = data['data']

    sync_type = SYNC_OBJECT_STR_TYPE[data['type']]
    new_object = self.inject_sync_object(minion_id, data['type'], data['version'], sync_object)
    if new_object:
        self._requests.on_map(self.fsid, sync_type, new_object)
        self._persister.update_sync_object(
            self.fsid,
            self.name,
            sync_type.str,
            new_object.version if isinstance(new_object.version, int) else None,
            now(), sync_object)
    else:
        log.warn("ClusterMonitor.on_sync_object: stale object received from %s" % minion_id)

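# Illustrative sketch (assumption, not from the original file): the only
# contract on_sync_object() needs from SYNC_OBJECT_STR_TYPE is a mapping from
# the payload's 'type' string to the corresponding sync type object, e.g.
#
#   SYNC_OBJECT_STR_TYPE = dict((t.str, t) for t in SYNC_OBJECT_TYPES)
#
# so that a payload with data['type'] == 'osd_map' resolves to the OSD map
# sync type before being injected and persisted.
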
def fetch(self, minion_id, sync_type):
    log.debug("SyncObjects.fetch: %s/%s" % (minion_id, sync_type))
    if minion_id is None:
        # We're probably being replayed to from the database
        log.warn("SyncObjects.fetch called with minion_id=None")
        return

    self._fetching_at[sync_type] = now()
    try:
        # TODO clean up unused 'since' argument
        jid = remote.run_job(minion_id, 'ceph.get_cluster_object', {
            'cluster_name': self._cluster_name,
            'sync_type': sync_type.str,
            'since': None
        })
    except Unavailable:
        # Don't throw an exception because if a fetch fails we should end up
        # issuing another on next heartbeat
        log.error("Failed to start fetch job %s/%s" % (minion_id, sync_type))
    else:
        log.debug("SyncObjects.fetch: jid=%s" % jid)

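# Illustrative sketch, not from the original module: when remote.run_job()
# raises Unavailable above, _fetching_at[sync_type] keeps its timestamp, so it
# is the staleness check in on_version() that eventually abandons the attempt
# and issues a new fetch. A minimal, standalone version of that check (names
# and the timeout value are hypothetical):
from datetime import datetime, timedelta

_FETCH_TIMEOUT = timedelta(seconds=10)  # assumed value, not from the source


def _fetch_is_stale(fetching_at, current_time):
    """True if a previously started fetch should be abandoned and retried."""
    return fetching_at is not None and current_time - fetching_at >= _FETCH_TIMEOUT


assert _fetch_is_stale(datetime(2024, 1, 1, 0, 0, 0), datetime(2024, 1, 1, 0, 0, 30))
assert not _fetch_is_stale(None, datetime(2024, 1, 1, 0, 0, 30))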