def onJoin(self, details):
    self.log.info('HostMonitor connected (monitors available: {monitors})',
                  monitors=sorted(MONITORS.keys()))

    yield WorkerController.onJoin(self, details, publish_ready=False)

    # register monitor procedures
    dl = []
    for monitor in self._monitors.values():
        d = self.register(monitor.get, u'{}.get_{}'.format(self._prefix, monitor.ID))
        dl.append(d)
    res = yield DeferredList(dl, fireOnOneErrback=True)
    print(res)
    self.log.info('HostMonitor {pcnt} procedures registered', pcnt=len(res))

    # signal this worker is done with setup and ready
    yield self.publish_ready()
def shutdown(x=None):
    """
    This is a top-level method which controls the process of finishing the
    program. Calls method ``shutdown()`` in other modules.
    """
    lg.out(2, "shutdowner.shutdown " + str(x))
    from services import driver
    from main import control
    from main import events
    from logs import weblog
    from logs import webtraffic
    from system import tmpfile
    from system import run_upnpc
    from raid import eccmap
    from lib import net_misc
    from updates import git_proc
    from interface import api_jsonrpc_server
    from interface import api_rest_http_server
    from interface import ftp_server
    from userid import my_id
    from crypt import my_keys
    dl = []
    my_keys.shutdown()
    my_id.shutdown()
    ftp_server.shutdown()
    api_jsonrpc_server.shutdown()
    api_rest_http_server.shutdown()
    driver.shutdown()
    eccmap.shutdown()
    run_upnpc.shutdown()
    net_misc.shutdown()
    git_proc.shutdown()
    events.clear_subscribers()
    tmpfile.shutdown()
    control.shutdown()
    weblog.shutdown()
    webtraffic.shutdown()
    for a in automat.objects().values():
        if a.name != 'shutdowner':
            a.event('shutdown')
    return DeferredList(dl)
def testConnect(self):
    mutable = []

    def gotConnection(conn):
        mutable.append(conn)

    def gotAll(null):
        prevItem = mutable.pop()
        while mutable:
            thisItem = mutable.pop()
            self.failUnlessEqual(thisItem, prevItem)
            prevItem = thisItem

    d1 = self.broker.connect().addCallback(gotConnection)
    d2 = self.broker.connect().addCallback(gotConnection)
    d3 = deferToDelay(None, DELAY)
    d3.addCallback(lambda _: self.broker.connect())
    d3.addCallback(gotConnection)
    return DeferredList([d1, d2, d3]).addCallback(gotAll)
def test_rest_error(crossbar, request, rest_crossbar):
    """
    an RPC call that raises an error
    """
    session = yield functest_session(
        url=u"ws://localhost:8686",
        realm=u'some_realm',
        role="role0",
    )

    def sad_method(*args, **kw):
        raise RuntimeError("sadness")

    reg = yield session.register(sad_method, u'sad.method')
    request.addfinalizer(lambda: reg.unregister())

    body = {
        u"procedure": u"sad.method",
    }
    r = treq.post(
        "http://localhost:8585/",
        json.dumps(body).encode('utf8'),
        headers={'Content-Type': ['application/json']},
    )
    timeout = sleep(5)
    results = yield DeferredList([r, timeout], fireOnOneCallback=True, fireOnOneErrback=True)
    r = results[0]

    # the HTTP "call" succeeds...
    assert r.code >= 200 and r.code < 300

    data = yield r.content()
    data = json.loads(data)

    # ...but there's an error key
    assert 'error' in data
    assert 'args' in data
    assert data['error'] == 'wamp.error.runtime_error'
    assert data['args'] == ['sadness']
def onJoin(self, details):

    def got(res, started, msg):
        duration = 1000. * (time.clock() - started)
        print("{}: {} in {}".format(msg, res, duration))

    t1 = time.clock()
    d1 = self.call('com.math.slowsquare', 3)
    d1.addCallback(got, t1, "Slow Square")

    t2 = time.clock()
    d2 = self.call('com.math.square', 3)
    d2.addCallback(got, t2, "Quick Square")

    def done(_):
        print("All finished.")
        self.leave()

    DeferredList([d1, d2]).addBoth(done)
def testShutdownThreeBrokers(self):
    brokerB = AccessBroker(DB_URL)
    brokerC = AccessBroker(DB_URL)

    def thisOneShutdown(null, broker):
        print "Done shutting down broker '%s'" % broker

    def shutEmDown(null):
        dList = []
        for broker in (self.broker, brokerB, brokerC):
            d = broker.shutdown()
            if VERBOSE:
                d.addCallback(thisOneShutdown, broker)
            dList.append(d)
        return DeferredList(dList)

    d = DeferredList([brokerB.startup(), brokerC.startup()])
    d.addCallback(shutEmDown)
    return d
def testInsertAndDelete(self):
    items = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4}

    def first(null):
        return self.i.delete('c').addCallback(second)

    def second(null):
        return self.i.names().addCallback(third)

    def third(nameList):
        desiredList = [x for x in items.keys() if x != 'c']
        desiredList.sort()
        nameList.sort()
        self.failUnlessEqual(nameList, desiredList)

    dL = []
    for name, value in items.iteritems():
        dL.append(self.i.insert(name, value))
    return DeferredList(dL).addCallback(first)
def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer, request):
    nodes_d = []
    # start all 5 nodes in parallel
    for x in range(5):
        name = 'node{}'.format(x)
        web_port = 9990 + x
        nodes_d.append(
            _create_node(
                reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
                web_port="tcp:{}:interface=localhost".format(web_port),
                storage=True,
            )
        )
    nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))

    nodes = []
    for ok, process in nodes_status:
        assert ok, "Storage node creation failed: {}".format(process)
        nodes.append(process)

    return nodes
def test_lost_connection(self):
    # now, try with server that has valid transport that
    # has lost its connection
    client = YamClient(['one'], connect=False)
    transports = makeTestConnections(client)
    client.factories[0].stopTrying()

    d1 = client.get("foo")
    d2 = client.get("bar")

    transports[0].loseConnection()

    done = DeferredList([d1, d2], consumeErrors=True)

    def checkFailures(results):
        for success, result in results:
            self.assertFalse(success)
            result.trap(ConnectionDone)

    return done.addCallback(checkFailures)
def TestInternetConnection(remote_hosts=None, timeout=10):
    """
    Checks Internet connectivity by fetching a page from several known
    identity servers; the DeferredList fires as soon as any one host responds.
    """
    if remote_hosts is None:
        remote_hosts = []
        from userid import known_servers
        for host, ports in known_servers.by_host().items():
            remote_hosts.append('http://%s:%d' % (host, ports[0], ))
    random.shuffle(remote_hosts)
    dl = []
    for host in remote_hosts[:5]:
        dl.append(getPageTwisted(host, timeout=timeout))
    return DeferredList(dl, fireOnOneCallback=True, fireOnOneErrback=False, consumeErrors=True)
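# A standalone sketch (not part of the project above) of the result shape
# this relies on: with fireOnOneCallback=True a DeferredList fires with a
# (result, index) pair for the first Deferred that succeeds, instead of the
# usual list of (success, value) tuples.
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from twisted.internet.task import deferLater

fast = deferLater(reactor, 0.1, lambda: 'fast host answered')
slow = deferLater(reactor, 5.0, lambda: 'slow host answered')
race = DeferredList([fast, slow], fireOnOneCallback=True,
                    fireOnOneErrback=False, consumeErrors=True)
race.addCallback(print)                     # prints ('fast host answered', 0)
race.addCallback(lambda _: reactor.stop())
reactor.run()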
def testOnlyOneAction(self):
    '''one player does one action. he should win the tourney.'''
    self.createTourney(players_quota=8, players_min=8, seats_per_game=4, inactive_delay=1000)
    tourney, clients = self.tourney, self.clients
    tourney.changeState(TOURNAMENT_STATE_RUNNING)

    serial = 1
    client_online = clients[serial]
    table_online = None

    dl = []
    for client in clients.itervalues():
        client.registerHandler(PACKET_POKER_TOURNEY_RANK, client.handleRank)
        dl.append(client.finished)
    dl = DeferredList(dl)

    for game_id, game in tourney.id2game.items():
        if serial in game.serial2player:
            table_online = self.service.tables[game_id]
            break

    def handleOneCall(packet):
        client_online.handlePosition(packet)
        client_online._handlers.pop()

    client_online._chooseAction = lambda packet: 'call'
    client_online.registerHandler(client_online.filterPosition, handleOneCall)

    def checkForRank(res):
        self.assertEquals(tourney.winners[0], client_online.getSerial())

    dl.addCallback(checkForRank)

    table_online.joinPlayer(client_online)
    table_online.update()

    return dl
def open(self):
    """
    Resolves hostnames, opens socket. Callbacks when done.
    """
    def one_resolved(ip, host):
        for i in xrange(self.addresses_len):
            if self.addresses[i][0] == host:
                self.addresses[i] = (ip, self.addresses[i][1])

    def all_resolved(_):
        self._listener = reactor.listenUDP(0, self)

    self.addresses_len = len(self.addresses)
    deferreds = [
        reactor.resolve(host).addCallback(one_resolved, host)
        for (host, _) in self.addresses
    ]
    return DeferredList(deferreds, fireOnOneErrback=True).addCallback(all_resolved)
def doSetDown(self, *args, **kwargs):
    """
    Action method.
    """
    lg.out(4, "id_server.doSetDown")
    shutlist = []
    if self.web_listener:
        d = self.web_listener.stopListening()
        if d:
            shutlist.append(d)
        lg.out(4, "    stopped web listener")
    if self.tcp_listener:
        d = self.tcp_listener.stopListening()
        if d:
            shutlist.append(d)
        lg.out(4, "    stopped TCP listener")
    self.web_listener = None
    self.tcp_listener = None
    DeferredList(shutlist).addBoth(lambda x: self.automat('server-down'))
def run(self):
    client = txcloudstack.Client(self._url, self._api_key, self._secret_key)

    deferreds = []

    if self._collect_events:
        # Prevent multiple simultaneous calls to the same API.
        lock = open(self._temp_filename('events.lock'), 'w')
        fcntl.flock(lock.fileno(), fcntl.LOCK_EX)

        # Look back a bit to compensate for downtime and timezone
        # variance between poller and cloud.
        startdate = datetime.date.today() - datetime.timedelta(hours=1)

        deferreds.extend((
            client.listAlerts(),
            client.listEvents(startdate=startdate.strftime('%Y-%m-%d')),
        ))
    else:
        # Prevent multiple simultaneous calls to the same API.
        lock = open(self._temp_filename('values.lock'), 'w')
        fcntl.flock(lock.fileno(), fcntl.LOCK_EX)

        saved_values = self._saved_values()
        if saved_values is not None:
            self._values = saved_values
            self._print_output()
            return

        deferreds.extend((
            client.listCapacity(),
            client.listHosts(type="Routing"),
            client.listSystemVms(),
            client.listVirtualMachines(domainid='1', isrecursive=True, state="Running"),
        ))

    DeferredList(deferreds, consumeErrors=True).addCallback(self._callback)
    reactor.run()
def notifyOfUpdate(self, gridKeys: List[str]):
    """ Notify of Grid Updates

    This method is called by the client.GridCacheController when it receives
    updates from the server.
    """
    self._filterOutOfflineVortexes()

    payloadsByVortexUuid = defaultdict(Payload)

    for gridKey in gridKeys:
        gridTuple = self._cacheController.encodedChunk(gridKey)
        if not gridTuple:
            gridTuple = EncodedGridTuple()
            gridTuple.gridKey = gridKey

        vortexUuids = self._observedVortexUuidsByGridKey.get(gridKey, [])

        # Queue up the required client notifications
        for vortexUuid in vortexUuids:
            logger.debug("Sending unsolicited grid %s to vortex %s", gridKey, vortexUuid)
            payloadsByVortexUuid[vortexUuid].tuples.append(gridTuple)

    # Send the updates to the clients
    dl = []
    for vortexUuid, payload in list(payloadsByVortexUuid.items()):
        payload.filt = clientGridWatchUpdateFromDeviceFilt

        # Serialise in a thread, and then send.
        d = payload.makePayloadEnvelopeDefer()
        d.addCallback(lambda payloadEnvelope: payloadEnvelope.toVortexMsgDefer())
        d.addCallback(VortexFactory.sendVortexMsg, destVortexUuid=vortexUuid)
        dl.append(d)

    # Log the errors; otherwise we don't care about them
    dl = DeferredList(dl, fireOnOneErrback=True)
    dl.addErrback(vortexLogFailure, logger, consumeError=True)
class Channels(PythonPlugin):

    """Twitch Channels modeler plugin."""

    relname = 'twitchChannels'
    modname = 'ZenPacks.training.Twitch.TwitchChannel'

    requiredProperties = (
        'zTwitchChannels',
        'zAuthorizationToken',
        'zClientID',
    )

    deviceProperties = PythonPlugin.deviceProperties + requiredProperties

    @inlineCallbacks
    def collect(self, device, log):
        """Asynchronously collect data from device. Return a deferred."""
        self.contextfactory = WebClientContextFactory()
        self.agent = Agent(reactor, self.contextfactory)
        log.info("%s: collecting data", device.id)

        channels = getattr(device, 'zTwitchChannels', None)
        auth_token = getattr(device, 'zAuthorizationToken', None)
        client_id = getattr(device, 'zClientID', None)
        headers = {'Client-ID': client_id,
                   'Authorization': ["Bearer " + auth_token]}

        if not channels:
            log.error("%s: No channels.", device.id)
            returnValue(None)

        responses = []
        for channel in channels:
            try:
                # NOTE: the request body was redacted in the original snippet;
                # this is a plausible reconstruction that appends the channel
                # name to the user_login query parameter.
                response = yield self.agent.request(
                    "GET",
                    "https://api.twitch.tv/helix/streams?user_login=" + channel,
                    Headers(headers))
                responses.append(response)
            except Exception as e:
                log.error("%s: %s", device.id, e)
                returnValue(None)

        result = DeferredList(responses, consumeErrors=True)
        returnValue(result)
def SendServers():
    """
    My identity file can be stored in different locations, see the "sources"
    field. So I can use different identity servers to store it more securely
    and reliably. This method will send my identity file to all my identity
    servers via transport_tcp.
    """
    from transport.tcp import tcp_node
    _, sendfilename = tmpfile.make("propagate", close_fd=True)
    LocalIdentity = my_id.getLocalIdentity()
    bpio.WriteTextFile(sendfilename, LocalIdentity.serialize(as_text=True))
    dlist = []
    for idurl in LocalIdentity.getSources(as_originals=True):
        # sources for our identity are servers we need to send to
        protocol, host, port, filename = nameurl.UrlParse(idurl)
        # TODO: rebuild identity-server logic to be able to send my identity
        # via HTTP POST instead of TCP and get rid of second TCP port at all
        webport, tcpport = known_servers.by_host().get(host, (
            # by default use "expected" port numbers
            settings.IdentityWebPort(),
            settings.IdentityServerPort(),
        ))
        normalized_address = net_misc.normalize_address((host, int(tcpport), ))
        dlist.append(tcp_node.send(
            sendfilename,
            normalized_address,
            'Identity',
            keep_alive=False,
        ))
        if _Debug:
            lg.args(_DebugLevel, normalized_address=normalized_address, filename=filename)
    dl = DeferredList(dlist, consumeErrors=True)
    return dl
def _send_batch(self):
    """
    Send the waiting messages, if there are any, and we can...

    This is called by our LoopingCall every send_every_t interval, and
    from send_messages every time we have enough messages to send.
    This is also called from py:method:`send_messages` via
    py:method:`_check_send_batch` if there are enough messages/bytes
    to require a send.
    Note, the send will be delayed (triggered by completion or failure of
    previous) if we are currently trying to complete the last batch send.
    """
    # We can be triggered by the LoopingCall, and have nothing to send...
    # Or, we've got SendRequest(s) to send, but are still processing the
    # previous batch...
    if (not self._batch_reqs) or self._batch_send_d:
        return

    # Save a local copy, and clear the global list & metrics
    requests, self._batch_reqs = self._batch_reqs, []
    self._waitingByteCount = 0
    self._waitingMsgCount = 0

    # Iterate over them, fetching the partition for each message batch
    d_list = []
    for req in requests:
        # For each request, we get the topic & key and use that to lookup
        # the next partition on which we should produce
        d_list.append(self._next_partition(req.topic, req.key))
    d = self._batch_send_d = Deferred()
    # Since DeferredList doesn't propagate cancel() calls to deferreds it
    # might be waiting on for a result, we need to use this structure,
    # rather than just using the DeferredList directly
    d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True))
    d.addCallback(self._send_requests, requests)
    # Once we finish fully processing the current batch, clear the
    # _batch_send_d and check if any more requests piled up when we
    # were busy.
    d.addBoth(self._complete_batch_send)
    d.addBoth(self._check_send_batch)
    # Fire off the callback to start processing...
    d.callback(None)
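# A standalone sketch (not afkak code) of the wrapper pattern used above.
# Cancelling the outer Deferred before it fires errbacks it with
# CancelledError, so the chained DeferredList is never even constructed;
# this sidesteps the cancel-propagation limitation the comment describes.
from twisted.internet.defer import CancelledError, Deferred, DeferredList

def cancellable_batch(pending):
    """Wrap a DeferredList so the whole batch can be abandoned via cancel()."""
    d = Deferred()
    d.addCallback(lambda _: DeferredList(pending, consumeErrors=True))
    return d

batch = cancellable_batch([Deferred(), Deferred()])
batch.addErrback(lambda f: f.trap(CancelledError))
batch.cancel()  # errbacks the wrapper; the inner DeferredList is never built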
def _do_identity_cache(ret):
    all_stories = []
    for _supplier_idurl in ret['suppliers']:
        if _supplier_idurl:
            _supplier_idurl = id_url.to_bin(_supplier_idurl)
            if not id_url.is_cached(_supplier_idurl) or not identitycache.HasFile(_supplier_idurl):
                one_supplier_story = identitycache.immediatelyCaching(_supplier_idurl)
                if _Debug:
                    one_supplier_story.addErrback(
                        lg.errback,
                        debug=_Debug,
                        debug_level=_DebugLevel,
                        method='read_customer_suppliers._do_identity_cache',
                    )
                all_stories.append(one_supplier_story)
    _customer_idurl = id_url.to_bin(ret['customer_idurl'])
    if _customer_idurl and (not id_url.is_cached(_customer_idurl) or not identitycache.HasFile(_customer_idurl)):
        one_customer_story = identitycache.immediatelyCaching(_customer_idurl)
        if _Debug:
            one_customer_story.addErrback(
                lg.errback,
                debug=_Debug,
                debug_level=_DebugLevel,
                method='read_customer_suppliers._do_identity_cache',
            )
        all_stories.append(one_customer_story)
    if _Debug:
        lg.args(_DebugLevel, all_stories=len(all_stories), ret=ret)
    id_cache_story = DeferredList(all_stories, consumeErrors=True)
    id_cache_story.addCallback(_do_save_customer_suppliers, ret)
    if _Debug:
        id_cache_story.addErrback(
            lg.errback,
            debug=_Debug,
            debug_level=_DebugLevel,
            method='read_customer_suppliers._do_identity_cache',
        )
    id_cache_story.addErrback(result.errback)
    return id_cache_story
def fireSystemEvent(self, eventType):
    """See twisted.internet.interfaces.IReactorCore.fireSystemEvent.
    """
    sysEvtTriggers = self._eventTriggers.get(eventType)
    if not sysEvtTriggers:
        return
    defrList = []
    for callable, args, kw in sysEvtTriggers[0]:
        try:
            d = callable(*args, **kw)
        except:
            log.deferr()
        else:
            if isinstance(d, Deferred):
                defrList.append(d)
    if defrList:
        DeferredList(defrList).addBoth(self._cbContinueSystemEvent, eventType)
    else:
        self.callLater(0, self._continueSystemEvent, eventType)
def cleanup(self):
    """
    Cleans the session by cancelling all deferreds and closing sockets.

    :return: A deferred that fires once the cleanup is done.
    """
    yield super(UdpTrackerSession, self).cleanup()
    UdpTrackerSession.remove_transaction_id(self)
    # Cleanup deferred that fires when everything has been cleaned.
    # Cancel the resolving ip deferred.
    self.ip_resolve_deferred = None
    self.result_deferred = None
    if self.scraper:
        self.clean_defer_list.append(self.scraper.stop())
        del self.scraper
    # Return a deferredlist with all clean deferreds we have to wait on
    res = yield DeferredList(self.clean_defer_list)
    returnValue(res)
def gather(futures, consume_exceptions=True):
    def completed(res):
        rtn = []
        for (ok, value) in res:
            rtn.append(value)
            if not ok and not consume_exceptions:
                value.raiseException()
        return rtn

    # XXX if consume_exceptions is False in asyncio.gather(), it will
    # abort on the first raised exception -- should we set
    # fireOnOneErrback=True (if consume_exceptions=False?) -- but then
    # we'll have to wrap the errback() to extract the "real" failure
    # from the FirstError that gets thrown if you set that ...

    dl = DeferredList(list(futures), consumeErrors=consume_exceptions)
    # we unpack the (ok, value) tuples into just a list of values, so
    # that the callback() gets the same value in asyncio and Twisted.
    add_callbacks(dl, completed, None)
    return dl
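# To make the FirstError concern in the XXX comment concrete (a standalone
# sketch, not txaio code): with fireOnOneErrback=True the DeferredList
# errbacks with a FirstError wrapping the real failure, which has to be
# unwrapped via its .subFailure attribute.
from twisted.internet.defer import DeferredList, FirstError, fail, succeed

dl = DeferredList([succeed(1), fail(RuntimeError('boom'))],
                  fireOnOneErrback=True, consumeErrors=True)

def unwrap_first_error(failure):
    failure.trap(FirstError)
    return failure.value.subFailure  # pass on the "real" failure

dl.addErrback(unwrap_first_error)
dl.addErrback(lambda f: print(f.getErrorMessage()))  # prints: boom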
def test__deferredDHCPRequestErrback_cancels_all_on_FirstError(self):
    mock_cancelAll = self.patch(DHCPRequestMonitor, "cancelAll")

    def raise_ioerror():
        raise IOError()

    a = deferLater(reactor, 0.0, raise_ioerror)
    b = deferLater(reactor, 6, lambda: "b")
    monitor = DHCPRequestMonitor("lo")
    monitor.deferredDHCPRequests = [a, b]
    deferredList = DeferredList(
        monitor.deferredDHCPRequests,
        consumeErrors=True,
        fireOnOneErrback=True,
    )
    deferredList.addErrback(monitor.deferredDHCPRequestErrback)
    yield deferredList
    # Still have one call left in the reactor, since we mocked cancelAll().
    b.cancel()
    self.assertThat(mock_cancelAll, MockCallsMatch(call([a, b])))
def stopServer(self):
    '''
    stop all the running scripts and exit
    '''
    yield None
    try:
        # cancel all scheduled scripts
        for scheduled, name, loop in self.scheduler.get_scheduled():
            self.scheduler.cancel_scheduled_script(scheduled)
        for ident, scan, priority in self.scheduler.get_queue():
            self.scheduler.remove_queued_script(ident)
        # stop all running scripts
        for ident, name in self.scheduler.get_running():
            self.scheduler.stop_running(ident)
        # wait for all deferreds to finish
        running = DeferredList(self.scheduler.running_deferred_list())
        yield running
    except AttributeError:
        # if the dictionary doesn't exist yet (i.e. bad identification error), do nothing
        pass
def test_load(self):
    def gotValue(value, name):
        if name == 'foo':
            self.failUnlessEqual(value, 'OK')
        else:
            self.failUnless(
                isinstance(value, MockThing),
                "Item 'bar' is a '%s', not an instance of 'MockThing'" % value)
            self.failUnless(
                value.beenThereDoneThat,
                "Class instance wasn't properly persisted with its state")
            self.failUnlessEqual(
                value.method(2.5), 5.0,
                "Class instance wasn't properly persisted with its method")

    dList = []
    for name in ('foo', 'bar'):
        dList.append(self.i.t.load(name).addCallback(gotValue, name))
    return DeferredList(dList)
def insertLots(self, callback):
    noviceThing = MockThing()
    experiencedThing = MockThing()
    experiencedThing.method(0)
    self.whatToInsert = {
        'alpha': 5937341,
        'bravo': 'abc',
        'charlie': -3.1415,
        'delta': (1, 2, 3),
        'echo': True,
        'foxtrot': False,
        'golf': noviceThing,
        'hotel': experiencedThing,
        'india': MockThing,
    }
    dList = []
    for name, value in self.whatToInsert.iteritems():
        dList.append(self.i.t.insert(name, value))
    return DeferredList(dList).addCallback(callback, self.whatToInsert.copy())
def cache_correspondents(path=None):
    """
    Make sure identities of all correspondents we know are cached.
    """
    dl = []
    if path is None:
        path = settings.CorrespondentIDsFilename()
    lst = bpio._read_list(path) or []
    for i in range(len(lst)):
        try:
            one_correspondent_idurl = lst[i].strip().split(' ', 1)[0]
        except:
            lg.exc()
            continue
        if one_correspondent_idurl:
            if not id_url.is_cached(one_correspondent_idurl):
                dl.append(identitycache.immediatelyCaching(one_correspondent_idurl))
    if _Debug:
        lg.out(_DebugLevel, 'contactsdb.cache_correspondents prepared %d idurls to be cached' % len(dl))
    return DeferredList(dl, consumeErrors=True)
def stop():
    """
    Stop all started services, in reverse boot-up order.
    """
    global _StopingDeferred
    if _StopingDeferred:
        lg.warn('driver.stop already called')
        return _StopingDeferred
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.stop')
    dl = []
    for name in reversed(boot_up_order()):
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        d = Deferred()
        dl.append(d)
        svc.automat('stop', d)
    _StopingDeferred = DeferredList(dl)
    _StopingDeferred.addCallback(on_stopped_all_services)
    return _StopingDeferred
def _cleanThreads(self):
    """Find threadpools still in use and wait for them to quiesce."""
    noisy = [
        pool for pool in self._getThreadpools()
        if not self._isThreadpoolQuiet(pool)
    ]
    if len(noisy) == 0:
        stacks = None  # Save the effort.
    else:
        stacks = self._captureThreadStacks()
    d = DeferredList(
        map(self._waitForThreadpoolToQuiesce, noisy),
        fireOnOneErrback=True, consumeErrors=True)

    def unwrap(results):
        return [repr(pool) for _, pool in results], stacks

    return d.addCallback(unwrap)
def health_check(services_list=[]):
    if not services_list:
        services_list.extend(reversed(boot_up_order()))
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.health_check with %d services' % len(services_list))
    dl = []
    for name in services_list:
        svc = services().get(name, None)
        if not svc:
            continue
        service_health = svc.health_check()
        if isinstance(service_health, Deferred):
            dl.append(service_health)
        else:
            d = Deferred()
            d.callback(bool(service_health))
            dl.append(d)
    health_result = DeferredList(dl, consumeErrors=True)
    return health_result
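# A hedged usage sketch for the aggregate above (the report() helper is
# hypothetical, not part of the project): with consumeErrors=True each entry
# of the DeferredList result is a (success, value) pair, where value is a
# health-check result on success or a Failure otherwise.
def report(results):
    for ok, value in results:
        print('healthy' if ok and value else 'unhealthy', value)

health_check().addCallback(report)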