class Manager(object):
    """
    Manage a collection of ClusterMonitors.

    Subscribe to ceph/cluster events, and create a ClusterMonitor
    for any FSID we haven't seen before.
    """

    def __init__(self):
        self._complete = gevent.event.Event()

        self._rpc_thread = RpcThread(self)
        self._discovery_thread = TopLevelEvents(self)
        self._process_monitor = ProcessMonitorThread()

        self.notifier = NotificationThread()
        if sqlalchemy is not None:
            try:
                # Prepare persistence
                engine = create_engine(config.get('cthulhu', 'db_path'))
                Session.configure(bind=engine)

                self.persister = Persister()
            except sqlalchemy.exc.ArgumentError as e:
                log.error("Database error: %s" % e)
                raise
        else:
            class NullPersister(object):
                def start(self):
                    pass

                def stop(self):
                    pass

                def join(self):
                    pass

                def __getattribute__(self, item):
                    if item.startswith('_'):
                        return object.__getattribute__(self, item)
                    else:
                        try:
                            return object.__getattribute__(self, item)
                        except AttributeError:
                            def blackhole(*args, **kwargs):
                                pass
                            return blackhole

            self.persister = NullPersister()

        # Remote operations
        self.requests = RequestCollection(self)
        self._request_ticker = Ticker(request_collection.TICK_PERIOD,
                                      lambda: self.requests.tick())

        # FSID to ClusterMonitor
        self.clusters = {}

        # Generate events on state changes
        self.eventer = Eventer(self)

        # Handle all ceph/server messages
        self.servers = ServerMonitor(self.persister, self.eventer, self.requests)

    def delete_cluster(self, fs_id):
        """
        Note that the cluster will pop right back again if it's
        still sending heartbeats.
        """
        victim = self.clusters[fs_id]
        victim.stop()
        victim.done.wait()
        del self.clusters[fs_id]

        self._expunge(fs_id)

    def stop(self):
        log.info("%s stopping" % self.__class__.__name__)
        for monitor in self.clusters.values():
            monitor.stop()
        self._rpc_thread.stop()
        self._discovery_thread.stop()
        self._process_monitor.stop()
        self.notifier.stop()
        self.eventer.stop()
        self._request_ticker.stop()

    def _expunge(self, fsid):
        session = Session()
        session.query(SyncObject).filter_by(fsid=fsid).delete()
        session.commit()

    def _recover(self):
        if sqlalchemy is None:
            return

        session = Session()
        for server in session.query(Server).all():
            log.debug("Recovered server %s" % server.fqdn)
            assert server.boot_time is None or server.boot_time.tzinfo is not None  # expect timezone-aware DB backend
            self.servers.inject_server(
                ServerState(fqdn=server.fqdn,
                            hostname=server.hostname,
                            managed=server.managed,
                            last_contact=server.last_contact,
                            boot_time=server.boot_time,
                            ceph_version=server.ceph_version))

        for service in session.query(Service).all():
            if service.server:
                server = session.query(Server).get(service.server)
            else:
                server = None
            log.debug("Recovered service %s/%s/%s on %s" %
                      (service.fsid, service.service_type, service.service_id,
                       server.fqdn if server else None))
            self.servers.inject_service(
                ServiceState(fsid=service.fsid,
                             service_type=service.service_type,
                             service_id=service.service_id),
                server.fqdn if server else None)

        # I want the most recent version of every sync_object
        fsids = [(row[0], row[1]) for row in session.query(
            SyncObject.fsid, SyncObject.cluster_name).distinct(SyncObject.fsid)]
        for fsid, name in fsids:
            cluster_monitor = ClusterMonitor(fsid, name, self.notifier, self.persister,
                                             self.servers, self.eventer, self.requests)
            self.clusters[fsid] = cluster_monitor

            object_types = [row[0] for row in session.query(
                SyncObject.sync_type).filter_by(fsid=fsid).distinct()]
            for sync_type in object_types:
                latest_record = session.query(SyncObject).filter_by(
                    fsid=fsid, sync_type=sync_type).order_by(SyncObject.version.desc(),
                                                             SyncObject.when.desc())[0]

                # FIXME: bit of a hack because records persisted only store their 'version'
                # if it's a real counter version, underlying problem is that we have
                # underlying data (health, pg_brief) without usable version counters.
                def md5(raw):
                    hasher = hashlib.md5()
                    hasher.update(raw)
                    return hasher.hexdigest()

                if latest_record.version:
                    version = latest_record.version
                else:
                    version = md5(latest_record.data)

                when = latest_record.when
                when = when.replace(tzinfo=tzutc())
                if cluster_monitor.update_time is None or when > cluster_monitor.update_time:
                    cluster_monitor.update_time = when

                cluster_monitor.inject_sync_object(
                    None, sync_type, version, msgpack.unpackb(latest_record.data))

        for monitor in self.clusters.values():
            log.info("Recovery: Cluster %s with update time %s" % (monitor.fsid, monitor.update_time))
            monitor.start()

    def start(self):
        log.info("%s starting" % self.__class__.__name__)

        # Before we start listening to the outside world, recover
        # our last known state from persistent storage
        try:
            self._recover()
        except:
            log.exception("Recovery failed")
            os._exit(-1)

        self._rpc_thread.bind()
        self._rpc_thread.start()
        self._discovery_thread.start()
        self._process_monitor.start()
        self.notifier.start()
        self.persister.start()
        self.eventer.start()
        self._request_ticker.start()
        self.servers.start()

    def join(self):
        log.info("%s joining" % self.__class__.__name__)
        self._rpc_thread.join()
        self._discovery_thread.join()
        self._process_monitor.join()
        self.notifier.join()
        self.persister.join()
        self.eventer.join()
        self._request_ticker.join()
        self.servers.join()
        for monitor in self.clusters.values():
            monitor.join()

    def on_discovery(self, minion_id, heartbeat_data):
        log.info("on_discovery: {0}/{1}".format(minion_id, heartbeat_data['fsid']))
        cluster_monitor = ClusterMonitor(heartbeat_data['fsid'], heartbeat_data['name'],
                                         self.notifier, self.persister, self.servers,
                                         self.eventer, self.requests)
        self.clusters[heartbeat_data['fsid']] = cluster_monitor

        # Run before passing on the heartbeat, because otherwise the
        # syncs resulting from the heartbeat might not be received
        # by the monitor.
        cluster_monitor.start()
        # Wait for ClusterMonitor to start accepting events before asking it
        # to do anything
        cluster_monitor.ready()
        cluster_monitor.on_heartbeat(minion_id, heartbeat_data)
class Manager(object):
    """
    Manage a collection of ClusterMonitors.

    Subscribe to ceph/cluster events, and create a ClusterMonitor
    for any FSID we haven't seen before.
    """

    def __init__(self):
        self._complete = gevent.event.Event()

        self._rpc_thread = RpcThread(self)
        self._discovery_thread = TopLevelEvents(self)
        self._process_monitor = ProcessMonitorThread()

        db_path = config.get('cthulhu', 'db_path')
        if sqlalchemy is not None and db_path:
            try:
                # Prepare persistence
                engine = create_engine(config.get('cthulhu', 'db_path'))  # noqa
                Session.configure(bind=engine)

                self.persister = Persister()
            except sqlalchemy.exc.ArgumentError as e:
                log.error("Database error: %s" % e)
                raise
        else:
            class NullPersister(object):
                def start(self):
                    pass

                def stop(self):
                    pass

                def join(self):
                    pass

                def __getattribute__(self, item):
                    if item.startswith('_'):
                        return object.__getattribute__(self, item)
                    else:
                        try:
                            return object.__getattribute__(self, item)
                        except AttributeError:
                            def blackhole(*args, **kwargs):
                                pass
                            return blackhole

            self.persister = NullPersister()

        # Remote operations
        self.requests = RequestCollection(self)
        self._request_ticker = Ticker(request_collection.TICK_PERIOD,
                                      lambda: self.requests.tick())

        # FSID to ClusterMonitor
        self.clusters = {}

        # Generate events on state changes
        self.eventer = Eventer(self)

        # Handle all ceph/server messages
        self.servers = ServerMonitor(self.persister, self.eventer, self.requests)

    def delete_cluster(self, fs_id):
        """
        Note that the cluster will pop right back again if it's
        still sending heartbeats.
        """
        victim = self.clusters[fs_id]
        victim.stop()
        victim.done.wait()
        del self.clusters[fs_id]

        self._expunge(fs_id)

    def stop(self):
        log.info("%s stopping" % self.__class__.__name__)
        for monitor in self.clusters.values():
            monitor.stop()
        self._rpc_thread.stop()
        self._discovery_thread.stop()
        self._process_monitor.stop()
        self.eventer.stop()
        self._request_ticker.stop()

    def _expunge(self, fsid):
        if sqlalchemy is None:
            return
        session = Session()
        session.query(SyncObject).filter_by(fsid=fsid).delete()
        session.commit()

    def _recover(self):
        if sqlalchemy is None:
            return

        session = Session()
        for server in session.query(Server).all():
            log.debug("Recovered server %s" % server.fqdn)
            assert server.boot_time is None or server.boot_time.tzinfo is not None  # expect timezone-aware DB backend
            self.servers.inject_server(ServerState(
                fqdn=server.fqdn,
                hostname=server.hostname,
                managed=server.managed,
                last_contact=server.last_contact,
                boot_time=server.boot_time,
                ceph_version=server.ceph_version
            ))

        for service in session.query(Service).all():
            if service.server:
                server = session.query(Server).get(service.server)
            else:
                server = None
            log.debug("Recovered service %s/%s/%s on %s" % (
                service.fsid, service.service_type, service.service_id,
                server.fqdn if server else None
            ))
            self.servers.inject_service(ServiceState(
                fsid=service.fsid,
                service_type=service.service_type,
                service_id=service.service_id
            ), server.fqdn if server else None)

        # I want the most recent version of every sync_object
        fsids = [(row[0], row[1]) for row in session.query(
            SyncObject.fsid, SyncObject.cluster_name).distinct(SyncObject.fsid)]
        for fsid, name in fsids:
            cluster_monitor = ClusterMonitor(fsid, name, self.persister,
                                             self.servers, self.eventer, self.requests)
            self.clusters[fsid] = cluster_monitor

            object_types = [row[0] for row in session.query(
                SyncObject.sync_type).filter_by(fsid=fsid).distinct()]
            for sync_type in object_types:
                latest_record = session.query(SyncObject).filter_by(
                    fsid=fsid, sync_type=sync_type).order_by(
                    SyncObject.version.desc(),
                    SyncObject.when.desc())[0]

                # FIXME: bit of a hack because records persisted only store their 'version'
                # if it's a real counter version, underlying problem is that we have
                # underlying data (health, pg_brief) without usable version counters.
                def md5(raw):
                    hasher = hashlib.md5()
                    hasher.update(raw)
                    return hasher.hexdigest()

                if latest_record.version:
                    version = latest_record.version
                else:
                    version = md5(latest_record.data)

                when = latest_record.when
                when = when.replace(tzinfo=tzutc())
                if cluster_monitor.update_time is None or when > cluster_monitor.update_time:
                    cluster_monitor.update_time = when

                cluster_monitor.inject_sync_object(None, sync_type, version,
                                                   msgpack.unpackb(latest_record.data))

        for monitor in self.clusters.values():
            log.info("Recovery: Cluster %s with update time %s" % (monitor.fsid, monitor.update_time))
            monitor.start()

    def start(self):
        log.info("%s starting" % self.__class__.__name__)
        self._rpc_thread.bind()
        self._rpc_thread.start()
        self._discovery_thread.start()
        self._process_monitor.start()
        self.persister.start()
        self.eventer.start()
        self._request_ticker.start()
        self.servers.start()
        return True

    def join(self):
        log.info("%s joining" % self.__class__.__name__)
        self._rpc_thread.join()
        self._discovery_thread.join()
        self._process_monitor.join()
        self.persister.join()
        self.eventer.join()
        self._request_ticker.join()
        self.servers.join()
        for monitor in self.clusters.values():
            monitor.join()

    def on_discovery(self, minion_id, heartbeat_data):
        log.info("on_discovery: {0}/{1}".format(minion_id, heartbeat_data['fsid']))
        cluster_monitor = ClusterMonitor(heartbeat_data['fsid'], heartbeat_data['name'],
                                         self.persister, self.servers, self.eventer,
                                         self.requests)
        self.clusters[heartbeat_data['fsid']] = cluster_monitor

        # Run before passing on the heartbeat, because otherwise the
        # syncs resulting from the heartbeat might not be received
        # by the monitor.
        cluster_monitor.start()
        # Wait for ClusterMonitor to start accepting events before asking it
        # to do anything
        cluster_monitor.ready()
        cluster_monitor.on_heartbeat(minion_id, heartbeat_data)
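

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how a cthulhu entry point might drive the Manager above,
# assuming logging and configuration are already set up. The 'example_main' name
# and the KeyboardInterrupt handling are assumptions for illustration only; the
# real daemon wrapper may wire this up differently.
def example_main():
    manager = Manager()
    manager.start()
    try:
        # Block until all of the Manager's threads and greenlets have finished
        manager.join()
    except KeyboardInterrupt:
        # Ask every sub-thread/monitor to shut down, then wait for them
        manager.stop()
        manager.join()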
class ClusterMonitor(gevent.greenlet.Greenlet):
    """
    Remote management of a Ceph cluster.

    Consumes cluster map logs from the mon cluster, maintains
    a record of which user requests are ongoing, and uses this
    combined knowledge to mediate user requests to change the state of
    the system.

    This class spawns two threads, one to listen to salt events and
    another to listen to user requests.
    """

    def __init__(self, fsid, cluster_name, notifier, persister, servers, eventer):
        super(ClusterMonitor, self).__init__()

        self.fsid = fsid
        self.name = cluster_name
        self.update_time = datetime.datetime.utcnow().replace(tzinfo=utc)

        self._notifier = notifier
        self._persister = persister
        self._servers = servers
        self._eventer = eventer

        # Which mon we are currently using for running requests,
        # identified by minion ID
        self._favorite_mon = None
        self._last_heartbeat = {}

        self._complete = gevent.event.Event()
        self.done = gevent.event.Event()

        self._sync_objects = SyncObjects(self.name)

        self._requests = RequestCollection(self._sync_objects, eventer)

        self._derived_objects = DerivedObjects()

        self._request_factories = {
            OSD: OsdRequestFactory,
            POOL: PoolRequestFactory
        }

        self._plugin_monitor = PluginMonitor(servers)

        self._ready = gevent.event.Event()

        self._request_ticker = Ticker(request_collection.TICK_PERIOD,
                                      lambda: self._requests.tick())

    def ready(self):
        """
        Block until the ClusterMonitor is ready to receive salt events
        """
        self._ready.wait()

    def stop(self):
        log.info("%s stopping" % self.__class__.__name__)
        self._complete.set()

    @nosleep
    def list_requests(self):
        return self._requests.get_all()

    @nosleep
    def get_request(self, request_id):
        return self._requests.get_by_id(request_id)

    @nosleep
    def get_sync_object_data(self, object_type):
        """
        :param object_type: A SyncObject subclass
        :return: a json-serializable object
        """
        return self._sync_objects.get_data(object_type)

    @nosleep
    def get_sync_object(self, object_type):
        """
        :param object_type: A SyncObject subclass
        :return: a SyncObject instance
        """
        return self._sync_objects.get(object_type)

    @nosleep
    def get_derived_object(self, object_type):
        return self._derived_objects.get(object_type)

    def _run(self):
        self._plugin_monitor.start()

        self._ready.set()
        log.debug("ClusterMonitor._run: ready")

        self._request_ticker.start()

        event = SaltEventSource(salt_config)

        while not self._complete.is_set():
            # No salt tag filtering: https://github.com/saltstack/salt/issues/11582
            ev = event.get_event(full=True)

            if ev is not None:
                data = ev['data']
                tag = ev['tag']
                log.debug("_run.ev: %s/tag=%s" % (data['id'] if 'id' in data else None, tag))

                # I am interested in the following tags:
                # - salt/job/<jid>/ret/<minion id> where jid is one that I started
                #   (this includes ceph.rados_command and ceph.get_cluster_object)
                # - ceph/cluster/<fsid> where fsid is my fsid

                try:
                    if tag.startswith("ceph/cluster/{0}".format(self.fsid)):
                        # A ceph.heartbeat beacon
                        self.on_heartbeat(data['id'], data['data'])
                    elif re.match(r"^salt/job/\d+/ret/[^/]+$", tag):
                        if data['fun'] == "saltutil.running":
                            # Update on what jobs are running.
                            # It would be nice to filter these down to those which really are for
                            # this cluster, but as long as N_clusters and N_jobs are reasonably small
                            # it's not an efficiency problem.
                            self._requests.on_tick_response(data['id'], data['return'])

                        # It would be much nicer to put the FSID at the start of
                        # the tag, if salt would only let us add custom tags to our jobs.
                        # Instead we enforce a convention that all calamari jobs must include
                        # fsid in their return value.
                        if (not isinstance(data, dict)) or not isinstance(data['return'], dict):
                            log.info("Ignoring job return, not a cthulhu job")
                            continue

                        if 'fsid' not in data['return'] or data['return']['fsid'] != self.fsid:
                            log.debug("Ignoring job return, not for my FSID")
                            continue

                        if data['fun'] == 'ceph.get_cluster_object':
                            # A ceph.get_cluster_object response
                            if not data['success']:
                                log.error("on_sync_object: failure from %s: %s" % (data['id'], data['return']))
                                continue

                            self.on_sync_object(data['id'], data['return'])
                        elif data['fun'] == 'ceph.rados_commands':
                            # A ceph.rados_commands response
                            self.on_completion(data)
                    else:
                        # This does not concern us, ignore it
                        pass
                except:
                    # Because this is our main event handling loop, swallow exceptions
                    # instead of letting them end the world.
                    log.exception("Exception handling message with tag %s" % tag)
                    log.debug("Message content: %s" % data)

        self._request_ticker.stop()
        self._request_ticker.join()

        log.info("%s complete" % self.__class__.__name__)

        self._plugin_monitor.stop()
        self._plugin_monitor.join()

        self.done.set()

    def _is_favorite(self, minion_id):
        """
        Check if this minion is the one which we are currently treating
        as the primary source of updates, and promote it to be the
        favourite if the current favourite has not sent a heartbeat
        within cthulhu->favorite_timeout_s.

        :return: True if this minion was the favorite or has just been
                 promoted.
        """
        now = datetime.datetime.now(tz=dateutil.tz.tzlocal())
        self._last_heartbeat[minion_id] = now

        if self._favorite_mon is None:
            log.debug("%s is my new favourite" % minion_id)
            self._set_favorite(minion_id)
            return True
        elif minion_id != self._favorite_mon:
            # Consider whether this minion should become my new favourite: has it been
            # too long since my current favourite reported in?
            time_since = now - self._last_heartbeat[self._favorite_mon]
            favorite_timeout_s = self._servers.get_contact_period(self._favorite_mon) * FAVORITE_TIMEOUT_FACTOR
            if time_since > datetime.timedelta(seconds=favorite_timeout_s):
                log.debug("My old favourite, %s, has not sent a heartbeat for %s: %s is my new favourite" % (
                    self._favorite_mon, time_since, minion_id
                ))
                self._set_favorite(minion_id)

        return minion_id == self._favorite_mon

    @nosleep
    def on_heartbeat(self, minion_id, cluster_data):
        """
        Handle a ceph.heartbeat from a minion.

        Heartbeats come from all servers, but we're mostly interested in those
        which come from a mon (and therefore have the 'clusters' attribute
        populated), as these tell us whether there are any new versions of
        cluster maps for us to fetch.
        """
        if not self._is_favorite(minion_id):
            log.debug('Ignoring cluster data from %s, it is not my favourite (%s)' % (minion_id, self._favorite_mon))
            return

        self.update_time = datetime.datetime.utcnow().replace(tzinfo=utc)

        log.debug('Checking for version increments in heartbeat from %s' % minion_id)
        for sync_type in SYNC_OBJECT_TYPES:
            self._sync_objects.on_version(
                minion_id, sync_type, cluster_data['versions'][sync_type.str])

    def inject_sync_object(self, minion_id, sync_type, version, data):
        sync_type = SYNC_OBJECT_STR_TYPE[sync_type]
        old_object = self._sync_objects.get(sync_type)
        new_object = self._sync_objects.on_fetch_complete(minion_id, sync_type, version, data)
        if new_object:
            # The ServerMonitor is interested in cluster maps, do this prior
            # to updating any derived objects so that derived generators have
            # access to the latest view of server state
            if sync_type == OsdMap:
                self._servers.on_osd_map(data)
            elif sync_type == MonMap:
                self._servers.on_mon_map(data)
            elif sync_type == MdsMap:
                self._servers.on_mds_map(self.fsid, data)

            # The frontend would like us to maintain some derived objects that
            # munge together the PG and OSD maps into an easier-to-consume form.
            for generator in derived.generators:
                if sync_type in generator.depends:
                    dependency_data = {}
                    for t in generator.depends:
                        obj = self._sync_objects.get(t)
                        if obj is not None:
                            dependency_data[t] = obj.data
                        else:
                            dependency_data[t] = None

                    if None not in dependency_data.values():
                        log.debug("Updating %s" % generator.__name__)
                        derived_objects = generator.generate(self, self._servers, dependency_data)
                        self._derived_objects.update(derived_objects)

            self._eventer.on_sync_object(self.fsid, sync_type, new_object, old_object)

        return new_object

    @nosleep
    def on_sync_object(self, minion_id, data):
        if minion_id != self._favorite_mon:
            log.debug("Ignoring map from %s, it is not my favourite (%s)" % (minion_id, self._favorite_mon))
            return

        assert data['fsid'] == self.fsid

        sync_object = data['data']

        sync_type = SYNC_OBJECT_STR_TYPE[data['type']]
        new_object = self.inject_sync_object(minion_id, data['type'], data['version'], sync_object)
        if new_object:
            self._requests.on_map(sync_type, self._sync_objects)
            self._persister.update_sync_object(
                self.fsid,
                self.name,
                sync_type.str,
                new_object.version if isinstance(new_object.version, int) else None,
                now(),
                sync_object)
        else:
            log.warn("ClusterMonitor.on_sync_object: stale object received from %s" % minion_id)

    @nosleep
    def on_completion(self, data):
        self._requests.on_completion(data)

    def _set_favorite(self, minion_id):
        assert minion_id != self._favorite_mon
        self._requests.fail_all(minion_id)
        self._favorite_mon = minion_id

    def _request(self, method, obj_type, *args, **kwargs):
        """
        Create and submit a UserRequest for an apply, create, update or delete.
        """
        # nosleep during the preparation phase (may touch ClusterMonitor/ServerMonitor state)
        with nosleep_mgr():
            request_factory = self.get_request_factory(obj_type)

            if self._favorite_mon is None:
                raise ClusterUnavailable("Ceph cluster is currently unavailable for commands")

            request = getattr(request_factory, method)(*args, **kwargs)

        if request:
            # Sleeps are permitted during the terminal phase of submitting, because we're
            # doing I/O to the salt master to kick off the job.
            self._requests.submit(request, self._favorite_mon)
            return {
                'request_id': request.id
            }
        else:
            return None

    def request_delete(self, obj_type, obj_id):
        return self._request('delete', obj_type, obj_id)

    def request_create(self, obj_type, attributes):
        return self._request('create', obj_type, attributes)

    def request_update(self, command, obj_type, obj_id, attributes):
        return self._request(command, obj_type, obj_id, attributes)

    def request_apply(self, obj_type, obj_id, command):
        return self._request(command, obj_type, obj_id)

    def get_valid_commands(self, object_type, obj_ids):
        return self.get_request_factory(object_type).get_valid_commands(obj_ids)

    def get_request_factory(self, object_type):
        try:
            return self._request_factories[object_type](self)
        except KeyError:
            raise ValueError("{0} is not one of {1}".format(object_type, self._request_factories.keys()))