class ClusterMonitor(gevent.greenlet.Greenlet):
    """
    Remote management of a Ceph cluster.

    Consumes cluster map logs from the mon cluster, maintains
    a record of which user requests are ongoing, and uses this
    combined knowledge to mediate user requests to change the state of the
    system.

    This class spawns two threads, one to listen to salt events and
    another to listen to user requests.
    """

    def __init__(self, fsid, cluster_name, notifier, persister, servers, eventer):
        """
        :param fsid: FSID (unique ID) of the Ceph cluster being monitored
        :param cluster_name: Human-readable cluster name (e.g. 'ceph')
        :param notifier: object used to push updates out to consumers
        :param persister: object used to persist sync objects to storage
        :param servers: ServerMonitor instance tracking server/mon state
        :param eventer: object receiving sync-object change events
        """
        super(ClusterMonitor, self).__init__()

        self.fsid = fsid
        self.name = cluster_name
        self.update_time = datetime.datetime.utcnow().replace(tzinfo=utc)

        self._notifier = notifier
        self._persister = persister
        self._servers = servers
        self._eventer = eventer

        # Which mon we are currently using for running requests,
        # identified by minion ID
        self._favorite_mon = None
        # minion ID -> datetime of last heartbeat seen from that minion
        self._last_heartbeat = {}

        # Set to request shutdown of the _run loop; `done` is set once
        # _run has fully torn down.
        self._complete = gevent.event.Event()
        self.done = gevent.event.Event()

        self._sync_objects = SyncObjects(self.name)
        self._requests = RequestCollection(self._sync_objects, eventer)
        self._derived_objects = DerivedObjects()

        # Maps object types (OSD, POOL) to the factory that builds
        # UserRequests for them.
        self._request_factories = {
            OSD: OsdRequestFactory,
            POOL: PoolRequestFactory
        }

        self._plugin_monitor = PluginMonitor(servers)

        self._ready = gevent.event.Event()

        # Periodically poke RequestCollection so it can time out / poll
        # outstanding requests.
        self._request_ticker = Ticker(request_collection.TICK_PERIOD,
                                      lambda: self._requests.tick())

    def ready(self):
        """
        Block until the ClusterMonitor is ready to receive salt events
        """
        self._ready.wait()

    def stop(self):
        """Request shutdown of the _run loop (asynchronous; wait on `done`)."""
        log.info("%s stopping" % self.__class__.__name__)
        self._complete.set()

    @nosleep
    def list_requests(self):
        """Return all known user requests."""
        return self._requests.get_all()

    @nosleep
    def get_request(self, request_id):
        """Return the user request with ID ``request_id``."""
        return self._requests.get_by_id(request_id)

    @nosleep
    def get_sync_object_data(self, object_type):
        """
        :param object_type: A SyncObject subclass
        :return a json-serializable object
        """
        return self._sync_objects.get_data(object_type)

    @nosleep
    def get_sync_object(self, object_type):
        """
        :param object_type: A SyncObject subclass
        :return a SyncObject instance
        """
        return self._sync_objects.get(object_type)

    @nosleep
    def get_derived_object(self, object_type):
        """Return the derived object (munged map view) named ``object_type``."""
        return self._derived_objects.get(object_type)

    def _run(self):
        """
        Greenlet body: consume salt events until `stop()` is called,
        dispatching heartbeats and job returns to the appropriate handlers,
        then tear down the ticker and plugin monitor and set `done`.
        """
        self._plugin_monitor.start()

        self._ready.set()
        log.debug("ClusterMonitor._run: ready")

        self._request_ticker.start()

        event = SaltEventSource(salt_config)

        while not self._complete.is_set():
            # No salt tag filtering: https://github.com/saltstack/salt/issues/11582
            ev = event.get_event(full=True)

            if ev is not None:
                data = ev['data']
                tag = ev['tag']
                log.debug("_run.ev: %s/tag=%s" % (
                    data['id'] if 'id' in data else None, tag))

                # I am interested in the following tags:
                # - salt/job/<jid>/ret/<minion id> where jid is one that I started
                #   (this includes ceph.rados_command and ceph.get_cluster_object)
                # - ceph/cluster/<fsid> where fsid is my fsid
                try:
                    if tag.startswith("ceph/cluster/{0}".format(self.fsid)):
                        # A ceph.heartbeat beacon
                        self.on_heartbeat(data['id'], data['data'])
                    elif re.match(r"^salt/job/\d+/ret/[^/]+$", tag):
                        if data['fun'] == "saltutil.running":
                            # Update on what jobs are running
                            # It would be nice to filter these down to those
                            # which really are for this cluster, but as long as
                            # N_clusters and N_jobs are reasonably small it's
                            # not an efficiency problem.
                            self._requests.on_tick_response(data['id'], data['return'])

                        # It would be much nicer to put the FSID at the start of
                        # the tag, if salt would only let us add custom tags to
                        # our jobs.  Instead we enforce a convention that all
                        # calamari jobs must include fsid in their return value.
                        # Use .get() so a dict without a 'return' key is treated
                        # as "not a cthulhu job" rather than raising KeyError.
                        if (not isinstance(data, dict)) or not isinstance(data.get('return'), dict):
                            log.info("Ignoring job return, not a cthulhu job")
                            continue

                        if 'fsid' not in data['return'] or data['return']['fsid'] != self.fsid:
                            log.debug("Ignoring job return, not for my FSID")
                            continue

                        if data['fun'] == 'ceph.get_cluster_object':
                            # A ceph.get_cluster_object response
                            if not data['success']:
                                log.error("on_sync_object: failure from %s: %s" % (data['id'], data['return']))
                                continue

                            self.on_sync_object(data['id'], data['return'])
                        elif data['fun'] == 'ceph.rados_commands':
                            # A ceph.rados_commands response
                            self.on_completion(data)
                    else:
                        # This does not concern us, ignore it
                        pass
                except Exception:
                    # Because this is our main event handling loop, swallow
                    # exceptions instead of letting them end the world.
                    # (Deliberately NOT a bare except: a bare except would also
                    # swallow GreenletExit/KeyboardInterrupt and break kill().)
                    log.exception("Exception handling message with tag %s" % tag)
                    log.debug("Message content: %s" % data)

        self._request_ticker.stop()
        self._request_ticker.join()

        log.info("%s complete" % self.__class__.__name__)

        self._plugin_monitor.stop()
        self._plugin_monitor.join()

        self.done.set()

    def _is_favorite(self, minion_id):
        """
        Check if this minion is the one which we are currently treating
        as the primary source of updates, and promote it to be the
        favourite if the favourite has not sent a heartbeat since
        cthulhu->favorite_timeout_s.

        :return True if this minion was the favorite or has just been
        promoted.
        """
        now = datetime.datetime.now(tz=dateutil.tz.tzlocal())
        self._last_heartbeat[minion_id] = now

        if self._favorite_mon is None:
            log.debug("%s is my new favourite" % minion_id)
            self._set_favorite(minion_id)
            return True
        elif minion_id != self._favorite_mon:
            # Consider whether this minion should become my new favourite:
            # has it been too long since my current favourite reported in?
            # (The favourite always has a _last_heartbeat entry because it was
            # recorded when it was promoted.)
            time_since = now - self._last_heartbeat[self._favorite_mon]
            favorite_timeout_s = self._servers.get_contact_period(self._favorite_mon) * FAVORITE_TIMEOUT_FACTOR
            if time_since > datetime.timedelta(seconds=favorite_timeout_s):
                log.debug("My old favourite, %s, has not sent a heartbeat for %s: "
                          "%s is my new favourite" % (
                              self._favorite_mon, time_since, minion_id))
                self._set_favorite(minion_id)

        return minion_id == self._favorite_mon

    @nosleep
    def on_heartbeat(self, minion_id, cluster_data):
        """
        Handle a ceph.heartbeat from a minion.

        Heartbeats come from all servers, but we're mostly interested in
        those which come from a mon (and therefore have the 'clusters'
        attribute populated) as these tells us whether there are any new
        versions of cluster maps for us to fetch.
        """
        if not self._is_favorite(minion_id):
            log.debug('Ignoring cluster data from %s, it is not my favourite (%s)' % (minion_id, self._favorite_mon))
            return

        self.update_time = datetime.datetime.utcnow().replace(tzinfo=utc)

        log.debug('Checking for version increments in heartbeat from %s' % minion_id)
        for sync_type in SYNC_OBJECT_TYPES:
            self._sync_objects.on_version(
                minion_id, sync_type,
                cluster_data['versions'][sync_type.str])

    def inject_sync_object(self, minion_id, sync_type, version, data):
        """
        Feed a freshly fetched cluster map into the SyncObjects store and
        update everything downstream of it (server state, derived objects,
        eventer).

        :param sync_type: string name of the sync object type
        :return the new SyncObject if ``data`` was fresh, else None
        """
        sync_type = SYNC_OBJECT_STR_TYPE[sync_type]
        old_object = self._sync_objects.get(sync_type)
        new_object = self._sync_objects.on_fetch_complete(minion_id, sync_type, version, data)

        if new_object:
            # The ServerMonitor is interested in cluster maps, do this prior
            # to updating any derived objects so that derived generators
            # have access to latest view of server state
            if sync_type == OsdMap:
                self._servers.on_osd_map(data)
            elif sync_type == MonMap:
                self._servers.on_mon_map(data)
            elif sync_type == MdsMap:
                self._servers.on_mds_map(self.fsid, data)

            # The frontend would like us to maintain some derived objects
            # that munge together the PG and OSD maps into an
            # easier-to-consume form.
            for generator in derived.generators:
                if sync_type in generator.depends:
                    dependency_data = {}
                    for t in generator.depends:
                        obj = self._sync_objects.get(t)
                        if obj is not None:
                            dependency_data[t] = obj.data
                        else:
                            dependency_data[t] = None

                    # Only regenerate once every dependency is available.
                    if None not in dependency_data.values():
                        log.debug("Updating %s" % generator.__name__)
                        derived_objects = generator.generate(self, self._servers, dependency_data)
                        self._derived_objects.update(derived_objects)

            self._eventer.on_sync_object(self.fsid, sync_type, new_object, old_object)

        return new_object

    @nosleep
    def on_sync_object(self, minion_id, data):
        """
        Handle a ceph.get_cluster_object response: inject the map, notify
        the RequestCollection, and persist the new version.
        """
        if minion_id != self._favorite_mon:
            # NOTE(review): this logs "Ignoring" but does not return, so the
            # map is still processed below - confirm whether a `return` was
            # intended here.
            log.debug("Ignoring map from %s, it is not my favourite (%s)" % (minion_id, self._favorite_mon))

        assert data['fsid'] == self.fsid

        sync_object = data['data']

        sync_type = SYNC_OBJECT_STR_TYPE[data['type']]
        new_object = self.inject_sync_object(minion_id, data['type'], data['version'], sync_object)
        if new_object:
            self._requests.on_map(sync_type, self._sync_objects)
            self._persister.update_sync_object(
                self.fsid,
                self.name,
                sync_type.str,
                # Only integer versions are persisted; hash-style versions
                # (e.g. for config) are stored as None.
                new_object.version if isinstance(new_object.version, int) else None,
                now(),
                sync_object)
        else:
            log.warning("ClusterMonitor.on_sync_object: stale object received from %s" % minion_id)

    @nosleep
    def on_completion(self, data):
        """Handle a ceph.rados_commands job return for a request we started."""
        self._requests.on_completion(data)

    def _set_favorite(self, minion_id):
        """
        Promote ``minion_id`` to favourite mon, failing any requests that
        were in flight via another minion.
        """
        assert minion_id != self._favorite_mon
        self._requests.fail_all(minion_id)
        self._favorite_mon = minion_id

    def _request(self, method, obj_type, *args, **kwargs):
        """
        Create and submit UserRequest for an apply, create, update or delete.

        :param method: name of the factory method to invoke (e.g. 'create')
        :param obj_type: one of the keys of self._request_factories
        :return {'request_id': ...} if a request was submitted, else None
        :raises ClusterUnavailable: if no mon is currently contactable
        """
        # nosleep during preparation phase (may touch ClusterMonitor/ServerMonitor state)
        with nosleep_mgr():
            request_factory = self.get_request_factory(obj_type)

            if self._favorite_mon is None:
                raise ClusterUnavailable("Ceph cluster is currently unavailable for commands")

            request = getattr(request_factory, method)(*args, **kwargs)

        if request:
            # sleeps permitted during terminal phase of submitting, because we're
            # doing I/O to the salt master to kick off
            self._requests.submit(request, self._favorite_mon)
            return {
                'request_id': request.id
            }
        else:
            return None

    def request_delete(self, obj_type, obj_id):
        """Submit a delete request for the object with ID ``obj_id``."""
        return self._request('delete', obj_type, obj_id)

    def request_create(self, obj_type, attributes):
        """Submit a create request with the given ``attributes``."""
        return self._request('create', obj_type, attributes)

    def request_update(self, command, obj_type, obj_id, attributes):
        """Submit an update ``command`` against the object with ID ``obj_id``."""
        return self._request(command, obj_type, obj_id, attributes)

    def request_apply(self, obj_type, obj_id, command):
        """Submit an apply ``command`` against the object with ID ``obj_id``."""
        return self._request(command, obj_type, obj_id)

    def get_valid_commands(self, object_type, obj_ids):
        """Return the commands currently valid for the given object IDs."""
        return self.get_request_factory(object_type).get_valid_commands(obj_ids)

    def get_request_factory(self, object_type):
        """
        Return a request factory instance for ``object_type``.

        :raises ValueError: if ``object_type`` has no registered factory
        """
        try:
            return self._request_factories[object_type](self)
        except KeyError:
            # list() so the message is readable on py3 (no-op on py2).
            raise ValueError("{0} is not one of {1}".format(
                object_type, list(self._request_factories.keys())))