def _check(self):
    """Poll each managed daemon, restarting dead ones, then reschedule."""
    probes = [daemon.is_running() for daemon in self.daemons]
    for probe, daemon in zip(probes, self.daemons):
        probe.addCallback(self._restart_if_not_running, daemon)

    def reschedule(ignored):
        # Queue the next round of liveness checks in five seconds,
        # regardless of whether this round succeeded or failed.
        self._checking = self.reactor.callLater(5, self._check)

    gather_results(probes).addBoth(reschedule)
def remove_expired_hash_id_requests(self):
    """Drop delivered hash-id requests older than the timeout threshold."""
    now = time.time()
    cutoff = now - HASH_ID_REQUEST_TIMEOUT

    def update_or_remove(is_pending, request):
        if is_pending:
            # Request is still in the queue. Update the timestamp.
            request.timestamp = now
        elif request.timestamp < cutoff:
            # Request was delivered, and is older than the threshold.
            request.remove()

    pending_checks = []
    for request in self._store.iter_hash_id_requests():
        if request.message_id is None:
            # May happen in some rare cases, when a send_message() is
            # interrupted abruptly. If it just fails normally, the
            # request is removed and so we don't get here.
            request.remove()
            continue
        check = self._broker.is_message_pending(request.message_id)
        check.addCallback(update_or_remove, request)
        pending_checks.append(check)
    return gather_results(pending_checks)
def test_multiple_import_failure(self):
    """
    If multiple keys are specified and the first one fails, the
    error is correctly reported.
    """
    deferred1 = Deferred()
    deferred2 = Deferred()
    deferreds = [deferred1, deferred2]

    # Stub signature mirrors the real _run_process; env defaults to
    # None instead of a mutable {} default (shared-state pitfall).
    def _run_process(command, args, env=None, path=None, uid=None,
                     gid=None):
        # Hand out one prepared deferred per spawned key import.
        return deferreds.pop(0)

    self.sourceslist._run_process = _run_process

    self.manager.dispatch_message(
        {"type": "apt-sources-replace",
         "sources": [],
         "gpg-keys": ["key1", "key2"],
         "operation-id": 1})

    # Both imports fail with a non-zero exit status.
    deferred1.callback(("error", "", 1))
    deferred2.callback(("error", "", 1))

    msg = "ProcessError: error\n"
    service = self.broker_service
    self.assertMessages(
        service.message_store.get_pending_messages(),
        [{"type": "operation-result",
          "result-text": msg,
          "status": FAILED,
          "operation-id": 1}])

    return gather_results(deferreds)
def stop_clients(self):
    """Tell all the clients to exit."""
    # FIXME: check whether the client are still alive
    exits = [client.exit() for client in self.get_clients()]
    stopping = gather_results(exits, consume_errors=True)
    # Callers only care about completion, not the individual results.
    return stopping.addCallback(lambda ignored: None)
def terminate_processes(broker_stopped):
    """Shut down the child daemons; gracefully only if the broker stopped."""
    if broker_stopped:
        # The broker acknowledged the exit request, so give each
        # daemon a chance to finish on its own.
        results = [daemon.wait_or_die() for daemon in self.daemons]
    else:
        # If request_exit fails, we should just kill the daemons
        # immediately.
        error("Couldn't request that broker gracefully shut down; "
              "killing forcefully.")
        results = [daemon.stop() for daemon in self.daemons]
    return gather_results(results)
def broadcast_message(self, message):
    """Call the C{message} method of all the registered plugins.

    @see: L{register_plugin}.
    """
    deliveries = [client.message(message) for client in self.get_clients()]
    combined = gather_results(deliveries)
    return combined.addCallback(self._message_delivered, message)
def got_connection(add_result, connector, reactor, remote):
    """Handle becoming connected to a broker."""
    handlers = {
        "registration-done": partial(success, add_result),
        "registration-failed": partial(failure, add_result),
        "exchange-failed": partial(exchange_failure, add_result),
    }
    registration = remote.register()
    registration.addErrback(
        partial(handle_registration_errors, add_result), connector)
    results = gather_results([remote.call_on_event(handlers), registration])
    results.addCallback(done, connector, reactor)
    return results
def test_register_message(self):
    """
    When L{BrokerClient.register_message} is called, the broker is
    notified that the message type is now accepted.
    """

    def noop_handler(message):
        return None

    result1 = self.client.register_message("foo", noop_handler)
    result2 = self.client.register_message("bar", noop_handler)

    def assert_types_accepted(ignored):
        # Both new types must show up alongside the defaults.
        self.assertEqual(
            self.exchanger.get_client_accepted_message_types(),
            sorted(["bar", "foo"] + DEFAULT_ACCEPTED_TYPES))

    return gather_results([result1, result2]).addCallback(
        assert_types_accepted)
def fire_event(self, event_type, *args, **kwargs):
    """Fire an event of a given type.

    @return: A L{Deferred} resulting in a list of returns values of
        the fired event handlers, in the order they were fired.
    """
    if event_type == "message-type-acceptance-changed":
        # Acceptance changes are re-fired keyed on (event, message-type).
        message_type, acceptance = args[0], args[1]
        results = self.reactor.fire((event_type, message_type), acceptance)
    else:
        results = self.reactor.fire(event_type, *args, **kwargs)
    # Wrap every handler return value so plain values and deferreds
    # can be aggregated uniformly.
    wrapped = [maybeDeferred(lambda value: value, result)
               for result in results]
    return gather_results(wrapped)
def check_running(self):
    """Return a list of any daemons that are already running."""
    probes = []
    for daemon in self.daemons:
        # This method is called on startup, we basically try to connect
        # a few times in fast sequence (with exponential backoff), if we
        # don't get a response we assume the daemon is not running.
        probe = daemon.is_running()
        # Bind the daemon as a default arg to avoid the late-binding
        # closure pitfall inside the loop.
        probe.addCallback(lambda is_running, d=daemon: (is_running, d))
        probes.append(probe)

    def keep_running(pairs):
        return [daemon for is_running, daemon in pairs if is_running]

    return gather_results(probes).addCallback(keep_running)
def got_connection(remote):
    """Register with the broker once the connection is established."""
    handlers = {
        "registration-done": success,
        "registration-failed": failure,
        "exchange-failed": exchange_failure,
    }
    registration = remote.register()
    registration.addErrback(handle_registration_errors)
    # We consume errors here to ignore errors after the first one.
    # catch_all will be called for the very first deferred that fails.
    results = gather_results(
        [remote.call_on_event(handlers), registration],
        consume_errors=True)
    results.addErrback(catch_all)
    results.addCallback(stop)
def run(self):
    """Run all plugins, and return a deferred aggregating their results.

    This will call the run() method on each of the registered plugins,
    and return a deferred which aggregates each resulting deferred.
    """
    deferreds = []
    for plugin in self.get_plugins():
        try:
            deferred = plugin.run()
        except Exception:
            # Synchronous failure: log it and skip aggregation for
            # this plugin.
            self._log_plugin_error(Failure(), plugin)
            continue
        deferred.addErrback(self._log_plugin_error, plugin)
        deferreds.append(deferred)
    aggregated = gather_results(deferreds)
    return aggregated.addCallback(self._report_error_note)
def test_handle_reconnect(self):
    """
    The L{BrokerClient.handle_reconnect} method is triggered by a
    broker-reconnect event, and it causes any message types previously
    registered with the broker to be registered again.
    """

    def noop_handler(message):
        return None

    result1 = self.client.register_message("foo", noop_handler)
    result2 = self.client.register_message("bar", noop_handler)

    def fire_reconnect_and_verify(ignored):
        broker = mock.Mock()
        self.client.broker = broker
        self.client_reactor.fire("broker-reconnect")
        expected = [mock.call("bar"), mock.call("foo")]
        broker.register_client_accepted_message_type.assert_has_calls(
            expected, any_order=True)
        broker.register_client.assert_called_once_with("client")

    return gather_results([result1, result2]).addCallback(
        fire_reconnect_and_verify)
def test_multiple_import_sequential(self):
    """
    If multiple keys are specified, the imports run sequentially, not
    in parallel.
    """
    deferred1 = Deferred()
    deferred2 = Deferred()
    deferreds = [deferred1, deferred2]

    # Stub signature mirrors the real _run_process; env defaults to
    # None instead of a mutable {} default (shared-state pitfall).
    def _run_process(command, args, env=None, path=None, uid=None,
                     gid=None):
        if not deferreds:
            return succeed(None)
        return deferreds.pop(0)

    self.sourceslist._run_process = _run_process

    self.manager.dispatch_message(
        {"type": "apt-sources-replace",
         "sources": [],
         "gpg-keys": ["key1", "key2"],
         "operation-id": 1})

    # Only the first import should have started so far.
    self.assertEqual(1, len(deferreds))
    deferred1.callback(("ok", "", 0))
    # The second import starts only after the first has completed.
    self.assertEqual(0, len(deferreds))
    deferred2.callback(("ok", "", 0))

    service = self.broker_service
    self.assertMessages(
        service.message_store.get_pending_messages(),
        [{"type": "operation-result",
          "status": SUCCEEDED,
          "operation-id": 1}])

    return gather_results(deferreds)
def broadcast_event(self, event_type, *args, **kwargs):
    """Call the C{fire_event} method of all the registered clients.

    Fix: the original signature omitted C{event_type}, so the body's
    reference to it raised C{NameError} on every call; it is now an
    explicit first parameter, matching L{broadcast_message}'s shape.
    """
    fired = []
    for client in self.get_clients():
        fired.append(client.fire_event(event_type, *args, **kwargs))
    return gather_results(fired)