def generateGraph(self, ticket, bnglContents, graphtype):
    """Render a graph from BNGL model text and store the result under *ticket*.

    Writes *bnglContents* to a temporary .bngl file, invokes BNG via
    consoleCommands to produce a GML graph of the requested *graphtype*
    ('regulatory', 'contactmap', 'sbgn_er' or 'std'), converts it, and
    publishes the result with self.addToDict. On any failure the ticket
    is marked with -5. The ticket's queue slot is always freed 600s later.
    """
    print ticket
    # mkstemp returns (fd, path); only the path (pointer[1]) is used below.
    pointer = tempfile.mkstemp(suffix='.bngl', text=True)
    with open(pointer[1], 'w') as f:
        f.write(bnglContents)
    try:
        if graphtype in ['regulatory', 'contactmap']:
            consoleCommands.setBngExecutable(bngDistro)
            consoleCommands.generateGraph(pointer[1], graphtype)
            # BNG writes '<basename>_<graphtype>.gml' into the working dir.
            name = pointer[1].split('.')[0].split('/')[-1]
            with open('{0}_{1}.gml'.format(name, graphtype), 'r') as f:
                graphContent = f.read()
            gml = networkx.read_gml('{0}_{1}.gml'.format(name, graphtype))
            result = gml2cyjson(gml, graphtype=graphtype)
            jsonStr = json.dumps(result, indent=1, separators=(',', ': '))
            # Result carries both the Cytoscape JSON and the raw GML text.
            result = {'jsonStr': jsonStr, 'gmlStr': graphContent}
            self.addToDict(ticket, result)
            os.remove('{0}_{1}.gml'.format(name, graphtype))
            print 'success', ticket
        elif graphtype in ['sbgn_er']:
            # SBGN-ER is derived from the contact map graph.
            consoleCommands.setBngExecutable(bngDistro)
            consoleCommands.generateGraph(pointer[1], 'contactmap')
            name = pointer[1].split('.')[0].split('/')[-1]
            # with open('{0}_{1}.gml'.format(name,'contactmap'),'r') as f:
            #     graphContent = f.read()
            graphContent = networkx.read_gml(
                '{0}_{1}.gml'.format(name, 'contactmap'))
            sbgn = libsbgn.createSBNG_ER_gml(graphContent)
            self.addToDict(ticket, sbgn)
            os.remove('{0}_{1}.gml'.format(name, 'contactmap'))
            print 'success', ticket
        elif graphtype in ['std']:
            consoleCommands.setBngExecutable(bngDistro)
            consoleCommands.bngl2xml(pointer[1])
            xmlFileName = pointer[1].split('.')[0] + '.xml'
            xmlFileName = xmlFileName.split(os.sep)[-1]
            graph = stdgraph.generateSTDGML(xmlFileName)
            gmlGraph = networkx.generate_gml(graph)
            #os.remove('{0}.gml'.format(xmlFileName))
            result = gml2cyjson(graph, graphtype=graphtype)
            jsonStr = json.dumps(result, indent=1, separators=(',', ': '))
            result = {'jsonStr': jsonStr, 'gmlStr': ''.join(gmlGraph)}
            #self.addToDict(ticket, ''.join(gmlGraph))
            self.addToDict(ticket, result)
            print 'success', ticket
    except:
        # Broad catch is deliberate: any failure marks the ticket failed (-5)
        # so the web frontend can report it, while the traceback is logged.
        import traceback
        traceback.print_exc()
        self.addToDict(ticket, -5)
        print 'failure', ticket
    finally:
        # Free the ticket's queue slot after 600 seconds regardless of outcome.
        task.deferLater(reactor, 600, freeQueue, ticket)
def startProducing(self, consumer):
    """Start producing entries.

    The producer writes EntryResponse protos to the consumer in batches,
    until all entries have been received, or an error occurs.

    Args:
        consumer: the consumer to write to.

    Returns:
       a deferred that fires when no more entries will be written.
       Upon success, this deferred fires number of produced entries or
       None if production wasn't successful. Upon failure, this deferred
       fires with the appropriate HTTPError.

    Raises:
        RuntimeError: consumer already registered.
    """
    if self._consumer:
        raise RuntimeError("Producer already has a consumer registered")
    self._consumer = consumer
    # Reset production state; _pending holds the batch in flight, _done is
    # the deferred handed back to the caller.
    self._stopped = False
    self._paused = True
    self._pending = None
    self._done = defer.Deferred()
    # An IBodyProducer should start producing immediately, without waiting
    # for an explicit resumeProducing() call.
    task.deferLater(self._reactor, 0, self.resumeProducing)
    return self._done
def call_repeating(timing_helper, work, *args, **kwargs):
    """Call a function repeatedly.

    Args:
        timing_helper: A function which accepts a datetime() for the current
            time, and returns a datetime telling when the work function should
            next be called.
        work: A function to be called at repeating intervals. Passed *args,
            **kwargs.
    """
    def _seconds_until_next_run():
        # Ask the timing helper when the next run should be, relative to now.
        now = datetime.utcnow()
        next_run = timing_helper(now)
        return datetime_to_seconds_delay(now, next_run)

    def _run_and_reschedule():
        # Don't let an error doing the work prevent the job from repeating.
        try:
            work(*args, **kwargs)
        # pylint: disable=W0703
        except Exception:
            log.err()
        task.deferLater(reactor, _seconds_until_next_run(), _run_and_reschedule)

    # Kick off the first run; each run schedules its successor.
    task.deferLater(reactor, _seconds_until_next_run(), _run_and_reschedule)
def atomize(self, ticket, xmlFile, atomize, userConf = None):
    """Translate SBML XML text to BNGL, optionally atomizing the model.

    Stores [bnglString, logText, speciesInfo] under *ticket* on success,
    or -5 on failure; always frees the ticket's queue slot 600s later.

    Args:
        ticket: job identifier used as the addToDict key.
        xmlFile: SBML document contents as a string.
        atomize: truthy to run the atomizer and post-analysis step.
        userConf: optional user JSON configuration text.
    """
    reaction = 'config/reactionDefinitions.json'
    try:
        logStream = StringIO.StringIO()
        if userConf:
            # Persist the user configuration to a temp .json file and keep
            # only its path.
            jsonpointer = tempfile.mkstemp(suffix='.json', text=True)
            with open(jsonpointer[1], 'w') as f:
                f.write(userConf)
            jsonpointer = jsonpointer[1]
        else:
            jsonpointer = None
        result = libsbml2bngl.readFromString(xmlFile,
                                             reaction, False, jsonpointer,
                                             atomize, logStream)
        if result and atomize:
            # Write the translated model out so BNG can post-analyze it.
            pointer = tempfile.mkstemp(suffix='.bngl', text=True)
            with open(pointer[1], 'w') as f:
                f.write(result.finalString)
            print pointer[1]
            bnglresult = libsbml2bngl.postAnalyzeString(pointer[1], bngDistro, result.database)
        else:
            # NOTE(review): if result is None this raises AttributeError and
            # falls into the except below (ticket marked -5) — confirm intended.
            bnglresult = result.finalString
        self.addToDict(ticket, [bnglresult, logStream.getvalue(),
                                {'finalspecies': result.database.species,
                                 'rawreactions': result.database.rawreactions}])
        print 'success', ticket
    except:
        # Any failure marks the ticket as failed (-5).
        self.addToDict(ticket, -5)
        print 'failure', ticket
    finally:
        task.deferLater(reactor, 600, freeQueue, ticket)
def privmsg(self, user, target, msg):
    """Reply with a generated Markov sentence when this bot is mentioned.

    Ignores messages with no user or that don't contain our nickname; the
    reply is delayed proportionally to its length (simulates typing).
    """
    if not user:
        return
    if self.nickname not in msg:
        return
    reply = self.factory.markov.generateString()
    typing_delay = len(reply) / 500.0
    deferLater(self.factory.reactor, typing_delay, self.msg, target, reply)
def fire_if_not_running():
    # Poll once a second until the manage loop is free; then claim it,
    # record that we set the flag (via the outer have_set_manage_running
    # cell), and fire the outer deferred `d`.
    if self.manage_running is False:
        self.manage_running = True
        have_set_manage_running[0] = True
        d.callback(True)
    else:
        # Still busy — try again in 1 second.
        task.deferLater(reactor, 1, fire_if_not_running)
def test_auto_retune(self):
    """Receiver frequency changes should retune the device appropriately."""
    # pylint: disable=no-member
    f1 = 50e6  # avoid 100e6 because that's a default a couple of places
    dev = simulate.SimulatedDevice(freq=f1, allow_tuning=True)
    bandwidth = dev.get_rx_driver().get_output_type().get_sample_rate()
    top = Top(devices={'s1': dev})
    (_key, receiver) = top.add_receiver('AM', key='a')
    # initial state check
    receiver.set_rec_freq(f1)
    self.assertEqual(dev.get_freq(), f1)
    # one "page" up
    f2 = f1 + bandwidth * 3/4
    receiver.set_rec_freq(f2)
    self.assertEqual(dev.get_freq(), f1 + bandwidth)
    # must wait for tune_delay, which is 0 for simulated source, or it will look still-valid
    yield deferLater(the_reactor, 0.1, lambda: None)
    # one "page" down
    receiver.set_rec_freq(f1)
    self.assertEqual(dev.get_freq(), f1)
    yield deferLater(the_reactor, 0.1, lambda: None)
    # long hop: retune directly to the target frequency
    receiver.set_rec_freq(200e6)
    self.assertEqual(dev.get_freq(), 200e6)
def __retry():
    """Retry placing the call on hold.

    Schedules phone.hold_call a quarter-second from now and routes any
    failure to __handle_error.
    """
    retry_deferred = task.deferLater(reactor, .25, phone.hold_call)
    retry_deferred.addErrback(__handle_error)
def parseNewPage(f, external_id, remaining=None, parser=None):
    """Incrementally feed file *f* to an HTML parser in 10KiB chunks.

    Re-schedules itself on the reactor between chunks so parsing does not
    block. On the first call (remaining is None) it creates the parser and
    rewinds the file; on the final chunk it closes everything and returns
    the parsed components for *external_id*.
    """
    spoon = 1024*10  # chunk size per reactor turn
    if remaining is None:
        # First call: total size is the current file position (file was
        # just written), then rewind and start feeding.
        remaining = f.tell()
        # TODO! how do i know, that the received file is more than 1024???
        parser=etree.HTMLParser(target=NewTarget())#encoding='cp1251'
        f.seek(0)
        rd = f.read(spoon)
        parser.feed(rd)
        remaining -= spoon
        d = deferLater(reactor, 0, parseNewPage, f, external_id, remaining, parser)
        return d
    else:
        if remaining < spoon:
            # Last chunk: finish feeding, release resources, return result.
            rd = f.read(remaining)
            parser.feed(rd)
            f.close()
            parser.close()
            return parser.target.prepareNewComponents(external_id)
        else:
            rd = f.read(spoon)
            parser.feed(rd)
            remaining -= spoon
            # NOTE(review): subsequent chunks wait 1s vs 0s for the first —
            # presumably to throttle; confirm intended.
            d = deferLater(reactor, 1, parseNewPage, f, external_id, remaining, parser)
            return d
def test_nack(self):
    """NACKed frames should be redelivered by a STOMP 1.1 broker."""
    config = StompConfig(uri='tcp://%s:%d' % (HOST, PORT), version='1.1')
    client = async.Stomp(config)
    try:
        client = yield client.connect(host=VIRTUALHOST)
        if client.session.version == '1.0':
            yield client.disconnect()
            raise StompProtocolError('Broker chose STOMP protocol 1.0')
    except StompProtocolError as e:
        # NACK only exists in STOMP >= 1.1; skip when unsupported.
        print 'Broker does not support STOMP protocol 1.1. Skipping this test case. [%s]' % e
        defer.returnValue(None)
    # Subscribe with a handler that NACKs every frame.
    client.subscribe(self.queue, self._nackFrame, {StompSpec.ACK_HEADER: 'client-individual', 'id': '4711'}, ack=False)
    client.send(self.queue, self.frame)
    while not self.framesHandled:
        yield task.deferLater(reactor, 0.01, lambda: None)
    yield client.disconnect()
    if BROKER == 'activemq':
        print 'Broker %s by default does not redeliver messages. Will not try and harvest the NACKed message.' % BROKER
        return
    # Reconnect and verify the NACKed frame is redelivered and consumable.
    self.framesHandled = 0
    client = yield client.connect(host=VIRTUALHOST)
    client.subscribe(self.queue, self._eatFrame, {StompSpec.ACK_HEADER: 'client-individual', 'id': '4711'}, ack=True)
    while self.framesHandled != 1:
        yield task.deferLater(reactor, 0.01, lambda: None)
    yield client.disconnect()
def __connect(self):
    """Connect to redis over TCP or a UNIX socket, retrying until
    self.stopTrying is set.

    On connection refusal, retries with an increasing delay (up to 10
    tries), then backs off for an hour before starting over. Fires
    self.ready once connected and the database is selected.
    """
    print "lancement de la connection a redis", self.port, " - ", self.unix
    retry = 10
    # NOTE(review): the loop does not break after a successful connect —
    # presumably stopTrying is set elsewhere once ready fires; confirm,
    # otherwise ready.callback would be invoked twice (AlreadyCalledError).
    while not self.stopTrying:
        try:
            if self.port:
                self.redis = yield ClientCreator(reactor, Redis).connectTCP(__HOST__, self.port)
            elif self.unix:
                self.redis = yield ClientCreator(reactor, Redis).connectUNIX(self.unix)
            else:
                # NOTE(review): NotImplemented is not an exception type;
                # raising it is itself a TypeError — likely meant
                # NotImplementedError.
                raise NotImplemented("PAS de port ou de socket fournit au client")
            r = yield self.redis.select(self.db)
            print r, self, self.port, self.unix
            self.ready.callback(True)
        except ConnectionRefusedError, e:
            print >>sys.stderr, "connection impossible", str(e)
            if not retry:
                # Out of quick retries: wait an hour, then reset the counter.
                print "nombre d'essai fini on reessais dans 1 heure"
                retry = 11
                yield task.deferLater(reactor, 3600, lambda: None)
            retry -= 1
            # Delay grows as the retry counter decreases.
            print "on essais dans %i secondes" % (10 - retry)
            yield task.deferLater(reactor, (10 - retry), lambda: None)
        except Exception, e:
            # Unexpected errors are logged and re-raised.
            print >>sys.stderr, "connection impossible sur ", self.port, " ", self.unix, " ", str(e)
            print "connection impossible sur ", self.port, " ", self.unix, " ", str(e)
            raise e
def _do_pull(self, i, num_pulls, hostname, subscr_info):
    """Pull one batch of events for a subscription, then reschedule itself.

    Stops after *num_pulls* iterations (when num_pulls > 0), unsubscribing
    and firing self._d once the last subscription is gone.
    """
    prefix = "{0} {1}".format(hostname, subscr_info)
    subscription = self._subscriptions_dct[(hostname, subscr_info)]
    if num_pulls > 0 and i == num_pulls:
        # Reached the requested number of pulls: tear down this subscription.
        yield subscription.unsubscribe()
        del self._subscriptions_dct[(hostname, subscr_info)]
        if not self._subscriptions_dct:
            # Last subscription removed — signal overall completion.
            self._d.callback(None)
        return
    i += 1
    sys.stdout.write('{0} pull #{1}'.format(prefix, i))
    if num_pulls > 0:
        sys.stdout.write(' of {0}'.format(num_pulls))
    print

    def print_event(event):
        # Count and echo each event received by this pull.
        self._event_count += 1
        print "{0} {1}".format(prefix, event)

    log.debug("subscription.pull- {0} {1} (start)"
              .format(hostname, subscr_info))
    yield subscription.pull(print_event)
    log.debug("subscription.pull- {0} {1} (finished)"
              .format(hostname, subscr_info))
    # Schedule the next pull on the next reactor turn.
    task.deferLater(reactor, 0, self._do_pull, i, num_pulls, hostname, subscr_info)
def test_replay(self):
    """Subscriptions and frames are replayed after connection loss."""
    config = self.getConfig(StompSpec.VERSION_1_0)
    client = async.Stomp(config)
    client = yield client.connect(host=VIRTUALHOST)
    client.subscribe(self.queue, self._eatFrame, {StompSpec.ACK_HEADER: 'client-individual'})
    client.send(self.queue, self.frame)
    while self.framesHandled != 1:
        yield task.deferLater(reactor, 0.01, lambda: None)
    # Drop the connection abruptly; the disconnect error is expected.
    client._protocol.loseConnection()
    try:
        yield client.disconnected
    except StompConnectionError:
        pass
    # Reconnect: the subscription should be replayed and consume the frame.
    client = yield client.connect(host=VIRTUALHOST)
    client.send(self.queue, self.frame)
    while self.framesHandled != 2:
        yield task.deferLater(reactor, 0.01, lambda: None)
    # Disconnect with an explicit failure, which propagates to the caller.
    try:
        yield client.disconnect(failure=RuntimeError('Hi'))
    except RuntimeError as e:
        self.assertEquals(str(e), 'Hi')
    client = yield client.connect(host=VIRTUALHOST)
    client.send(self.queue, self.frame)
    # NOTE(review): still waits for 2 — presumably the subscription is NOT
    # replayed after a disconnect-with-failure, so this frame stays queued;
    # confirm intended.
    while self.framesHandled != 2:
        yield task.deferLater(reactor, 0.01, lambda: None)
    yield client.disconnect()
def test_no_self_connect():
    """
    Test that when a producer connects, the consumer sends all existing
    subscription to it.
    """
    class WampConsumerServerFactory(ConsumerMixin, wamp.WampServerFactory):
        protocol = wamp.WampServerProtocol

    # NOTE: `id` shadows the builtin; kept for interface compatibility.
    id = generate_id()
    consumer = ConsumerServer('localhost', 19200, id=id)
    consumer.processor = WampConsumerServerFactory('ws://localhost:19202')
    listenWS(consumer.processor)
    producer = ProducerClient(id=id)
    # Give the servers half a second to come up before connecting.
    deferLater(reactor, 0.5, producer.connect, 'localhost', 19200)

    def check_connection():
        """
        Make sure producer has no connections, because it's been controlled.
        """
        print(set(producer.nodes))
        assert set(producer.nodes) == set()

    return deferLater(reactor, 1.0, check_connection)
def delay(self, seconds, function, *args, **kwargs):
    """
    delay(seconds, function, *args, **kwargs):

    Schedule function(*args, **kwargs) to run after ``seconds`` (at most
    120). Error messages are relayed to the caller unless a special
    keyword 'errobj' names another object that should receive them.
    """
    # Failures are reported to the optional 'errobj' kwarg, else the caller.
    report_to = kwargs.pop("errobj", self.caller)

    # callbacks for the delayed execution
    def report_failure(failure, receiver):
        if receiver:
            try:
                failure = failure.getErrorMessage()
            except:
                pass
            receiver.msg("EVLANG delay error: " + str(failure))

    def run_in_thread(func, *fargs, **fkwargs):
        # Run the payload off the reactor thread; route errors to report_to.
        threads.deferToThread(func, *fargs, **fkwargs).addErrback(report_failure, report_to)

    if seconds > 120:
        raise EvlangError("delay() can only delay for a maximum of 120 seconds (got %ss)." % seconds)
    task.deferLater(reactor, seconds, run_in_thread, function, *args, **kwargs).addErrback(report_failure, report_to)
    return True
def test_unsubscribe(self):
    """After unsubscribing, frames are not delivered until re-subscribed."""
    config = self.getConfig(StompSpec.VERSION_1_0)
    client = async.Stomp(config)
    yield client.connect(host=VIRTUALHOST)
    token = yield client.subscribe(
        self.queue,
        {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL},
        listener=SubscriptionListener(self._eatFrame),
    )
    client.send(self.queue, self.frame)
    while self.framesHandled != 1:
        yield task.deferLater(reactor, 0.01, lambda: None)
    # Unsubscribe, send again, and verify nothing new arrives within 200ms.
    client.unsubscribe(token)
    client.send(self.queue, self.frame)
    yield task.deferLater(reactor, 0.2, lambda: None)
    self.assertEquals(self.framesHandled, 1)
    # Re-subscribing should deliver the frame that queued up meanwhile.
    client.subscribe(
        self.queue,
        {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL},
        listener=SubscriptionListener(self._eatFrame),
    )
    while self.framesHandled != 2:
        yield task.deferLater(reactor, 0.01, lambda: None)
    client.disconnect()
    yield client.disconnected
def test_transaction_commit(self):
    """A message sent in a transaction is only visible after commit."""
    config = self.getConfig(StompSpec.VERSION_1_0)
    client = async.Stomp(config)
    client.add(ReceiptListener())
    yield client.connect(host=VIRTUALHOST)
    client.subscribe(
        self.queue,
        {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL, StompSpec.ID_HEADER: "4711"},
        listener=SubscriptionListener(self._eatFrame, ack=True),
    )
    transaction = "4711"
    yield client.begin(transaction, receipt="%s-begin" % transaction)
    client.send(self.queue, b"test message with transaction", {StompSpec.TRANSACTION_HEADER: transaction})
    yield task.deferLater(reactor, 0.1, lambda: None)
    client.send(self.queue, b"test message without transaction")
    # The non-transactional message arrives first even though sent later.
    while self.framesHandled != 1:
        yield task.deferLater(reactor, 0.01, lambda: None)
    self.assertEquals(self.consumedFrame.body, b"test message without transaction")
    # Committing releases the transactional message.
    yield client.commit(transaction, receipt="%s-commit" % transaction)
    while self.framesHandled != 2:
        yield task.deferLater(reactor, 0.01, lambda: None)
    self.assertEquals(self.consumedFrame.body, b"test message with transaction")
    client.disconnect()
    yield client.disconnected
def shutdown():
    """Gracefully shut down: close listeners, drain sessions and locked
    hosts, then wait a few seconds before letting shutdown continue."""
    global main_xmlrpc_handler
    global _xmlrpc_listener
    global _xmlrpc_site
    try:
        site = _xmlrpc_site
        logging.info("shutting down, first closing listening ports...")
        print("Shutting down, hold on a moment...")
        yield stop_listening()
        # This doesn't work, site.session is always empty
        logging.info("Ports closed, waiting for current sessions to close...")
        logging.debug("Clients still connected: {}".format(len(site.sessions)))
        while not len(site.sessions)==0:
            logging.debug("Waiting, {} sessions still active".format(len(site.sessions)))
            # Sleep 1s without blocking the reactor.
            yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.info("No more sessions, waiting for locked hosts...")
        while not utils.none_waiting():
            logging.info("Waiting to shut down, {} hosts still blocked".format(utils.count_waiting()))
            yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.debug("reactor.getDelayedCalls: {}".format([c.func for c in reactor.getDelayedCalls()]))
        # Grace period: three 1-second waits before continuing shutdown.
        logging.info("All hosts unlocked, waiting 3 more seconds...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.debug("Waiting 2 more seconds...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.debug("Waiting 1 more second...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.info("Continuing shutdown")
    except:
        # Never let the shutdown hook raise; log and proceed.
        logging.exception("Error in shutdown callback")
def _test_nack(self, version):
    """NACKed frames should be redelivered (parameterized by protocol version)."""
    if version not in commands.versions(VERSION):
        print 'Skipping test case (version %s is not configured)' % VERSION
        defer.returnValue(None)
    config = self.getConfig(version)
    client = async.Stomp(config)
    try:
        client = yield client.connect(host=VIRTUALHOST, versions=[version])
    except StompProtocolError as e:
        print 'Broker does not support STOMP protocol %s. Skipping this test case. [%s]' % (version, e)
        defer.returnValue(None)
    # Subscribe with a listener that NACKs every frame.
    client.subscribe(self.queue, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL, StompSpec.ID_HEADER: '4711'}, listener=SubscriptionListener(self._nackFrame, ack=False))
    client.send(self.queue, self.frame)
    while not self.framesHandled:
        yield task.deferLater(reactor, 0.01, lambda: None)
    client.disconnect()
    yield client.disconnected
    if BROKER == 'activemq':
        print 'Broker %s by default does not redeliver messages. Will not try and harvest the NACKed message.' % BROKER
        return
    # Reconnect and verify the NACKed frame is redelivered and consumable.
    self.framesHandled = 0
    client = yield client.connect(host=VIRTUALHOST)
    client.subscribe(self.queue, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL, StompSpec.ID_HEADER: '4711'}, listener=SubscriptionListener(self._eatFrame, ack=True))
    while self.framesHandled != 1:
        yield task.deferLater(reactor, 0.01, lambda: None)
    client.disconnect()
    yield client.disconnected
def startInventory(self, *args):
    """Add a ROSpec to the reader and enable it.

    Builds a chain ADD_ROSPEC -> ENABLE_ROSPEC -> START_ROSPEC ->
    ENABLE_EVENTS_AND_REPORTS, where each deferred fires on completion of
    the previous step. If self.duration is set, a polite stop is scheduled
    after that many seconds.
    """
    if self.state == LLRPClient.STATE_INVENTORYING:
        logger.warn('ignoring startInventory() while already inventorying')
        return None
    logger.info('starting inventory')
    if self.duration:
        task.deferLater(reactor, self.duration, self.stopPolitely, True)
    rospec = self.getROSpec()['ROSpec']
    # d2 fires when START_ROSPEC completed; its errback therefore reports a
    # START_ROSPEC failure (each errback names the step it awaits).
    d2 = defer.Deferred()
    d2.addCallback(self.send_ENABLE_EVENTS_AND_REPORTS, onCompletion=None)
    d2.addErrback(self.panic, 'START_ROSPEC failed')
    # d1 fires when ENABLE_ROSPEC completed.
    d1 = defer.Deferred()
    d1.addCallback(self.send_START_ROSPEC, rospec, onCompletion=d2)
    d1.addErrback(self.panic, 'ENABLE_ROSPEC failed')
    # d fires when ADD_ROSPEC completed.
    d = defer.Deferred()
    d.addCallback(self.send_ENABLE_ROSPEC, rospec, onCompletion=d1)
    d.addErrback(self.panic, 'ADD_ROSPEC failed')
    self.send_ADD_ROSPEC(rospec, onCompletion=d)
def storeMissionComplete(self):
    """Record the finished mission's metadata and schedule completion."""
    log.msg("Store Mission Accomplished")
    # send the metadata
    metadata = self.metadata
    self.fileMap[metadata.filename] = metadata
    # self.sendMetadata(self.metadata)
    # self.metadata.save(self.metadir)
    # Hand off to missionComplete one second from now.
    task.deferLater(reactor, 1, self.missionComplete, self.mission)
def onResponse(self, responseContent, url):
    """Called when new content arrives.

    Extracts links from the fetched page, records URLs not seen before,
    sends system notifications for titles/domains matching the configured
    filters, appends the new entries to a monthly archive file, and
    schedules the next crawl after self.interval seconds.
    """
    links = extractLinks(responseContent, url)
    new_ = set(links.keys()).difference(self.history)
    self.history.update(new_)
    # send system notifications for new urls
    today = date.today()
    # Collect archive lines in a list and join once — avoids the original
    # quadratic string concatenation; also don't shadow the `url` parameter.
    logLines = []
    messages = []
    for newUrl in new_:
        title = links[newUrl]
        title_enc = title.encode('utf-8', 'replace')
        logLines.append('{0} {1} : {2}\n'.format(today.strftime('%Y-%m-%d'), title_enc, newUrl))
        if filterTitle(title, self.keywords) or filterUrl(newUrl, self.domains):
            messages.append(MESSAGE_TPL.format(title, newUrl, newUrl))
    notify(messages)
    # update archive (single write per batch)
    archiveName = today.strftime('archive-%Y-%m.txt')
    with open(archiveName, 'a') as f:
        f.write(''.join(logLines))
    # schedule next crawl
    deferLater(reactor, self.interval, self.fetch)
def setup_crawler(spider_class, **kwargs):
    """
    Use scrapy in a script
    see http://doc.scrapy.org/en/latest/topics/practices.html

    :param spider_class: Spider class to test
    :type spider_class: text
    :returns: list of items collected by the spider run.

    NOTE(review): uses the pre-1.0 scrapy API (Crawler.configure,
    signals.item_passed) — confirm against the installed scrapy version.
    """
    def add_item(item):
        # Collect every scraped item into the shared list.
        items.append(item)

    items = []
    # create Crawler
    settings = get_project_settings()
    crawler = Crawler(settings)
    crawler.configure()
    # connect collecting function on item_passed
    crawler.signals.connect(add_item, signals.item_passed)
    # create & connect spider
    spider = spider_class(**kwargs)
    crawler.crawl(spider)
    # start crawler
    log.start()
    crawler.start()
    # run crawler; stop the reactor after one second
    task.deferLater(reactor, 1, reactor.stop)
    reactor.run()
    return items
def test_retry_after_timeout(self):
    """
    If a timeout happens, one can retry to consume message from the
    queue later on.
    """
    yield self.manager.connected((self.client, self.channel))
    yield self.channel.queue_declare(
        queue=self.queue_prefix + "uuid1")
    reply = yield self.client.queue(self.tag_prefix + "uuid1.0")
    reply.clock = self.clock
    event_queue = QueueWrapper(reply).event_queue
    d1 = self.manager.get_message("uuid1", "0")
    yield event_queue.get()
    # Let pending reactor callbacks run before advancing the clock.
    yield deferLater(reactor, 0, lambda: None)
    self.clock.advance(self.manager.message_timeout + 1)
    yield assert_fails_with(d1, Timeout)
    # Let's wrap the queue again
    reply = yield self.client.queue(self.tag_prefix + "uuid1.1")
    reply.clock = self.clock
    event_queue = QueueWrapper(reply).event_queue
    d2 = self.manager.get_message("uuid1", "1")
    yield event_queue.get()
    yield deferLater(reactor, 0, lambda: None)
    self.clock.advance(self.manager.message_timeout + 1)
    yield assert_fails_with(d2, Timeout)
def testManySubscribers(self):
    """Subscribe 3 clients, expects 9 requests 3x subscribe, 3x disconnect,
    3x logged_out
    Check that service records correct data at each step"""
    self.webServer.expectRequests(9)
    client1 = MockFlashClient(1)
    client2 = MockFlashClient(2)
    client3 = MockFlashClient(3)
    # Clients 1 and 2 share channel 1; client 3 takes channel 2.
    client1.connectedEvent.addCallback(lambda _: client1.sendSubscribeMessage([1]))
    client2.connectedEvent.addCallback(lambda _: client2.sendSubscribeMessage([1]))
    client3.connectedEvent.addCallback(lambda _: client3.sendSubscribeMessage([2]))

    def assertsOnService(*a):
        # Service should track 2 channels and all 3 clients while connected.
        self.assertEqual(len(self.service.channels.keys()), 2)
        self.assertEqual(len(self.service.channels[1]), 2)
        self.assertEqual(len(self.service.channels[2]), 1)
        self.assertEqual(len(self.service.clients.keys()), 3)
        self.assertEqual(self.service.clients.keys(), [1, 2, 3])

    # Give the subscriptions 50ms to land, then disconnect everyone.
    task.deferLater(reactor, 0.05, assertsOnService
        ).addCallback(lambda _: client1.connector.disconnect()
        ).addCallback(lambda _: client2.connector.disconnect()
        ).addCallback(lambda _: client3.connector.disconnect())

    def assertClientsDead(*a):
        for channel in self.service.channels.values():
            for client in channel:
                self.assertFalse(client.is_alive)

    self.webServer.getNFirstRequests(6).addCallback(assertsOnService
        #clients are not removed from channels yet, only marked as dead
        ).addCallback(assertClientsDead)

    def assertClientsRemoved(*a):
        # After logout, channels and clients should be fully cleaned up.
        self.assertEqual(len(self.service.channels.keys()), 0)
        self.assertEqual(len(self.service.clients.keys()), 0)

    self.webServer.getAllRequests().addCallback(assertClientsRemoved)
    return defer.DeferredList([client1.disconnectedEvent, client2.disconnectedEvent, client3.disconnectedEvent])
def region_instance(region_instance_seq):
    """De-register old AMIs, tag the instance, and create a fresh AMI.

    Only acts when the region matches app_util.app_region; always stops
    the reactor at the end.

    Args:
        region_instance_seq: sequence of (region, instance_id).
    """
    region, instance_id = region_instance_seq[0], region_instance_seq[1]
    print 'region:', region, 'instance:', instance_id
    if region == app_util.app_region:
        print 'connect to:', region
        for image in all_images(region):
            try:
                print 'de-register images:', image
                image.deregister()
                # Throttle: wait 20s between de-registrations.
                print 'wait 20 seconds'
                yield task.deferLater(reactor, 20, defer.succeed, True)
            except Exception as e:
                # Best-effort: log and continue with the next image.
                print 'de-register error:', e
        print 'create image:', app_util.app_name
        r_conn = boto.ec2.connect_to_region(region)
        try:
            #for service_name in identify.service_names(region):
            #    os.system('sudo rm ' + identify.service_path(service_name) )
            for instance in r_conn.get_only_instances(instance_ids=[instance_id]):
                print 'instance to tag:', instance
                instance.add_tag(fixed.tag_state, fixed.state_replicate)
            ami_response = r_conn.create_image(instance_id, app_util.app_name)
            print 'ami response:', ami_response
        except Exception as e:
            print 'exception:', e
    else:
        print 'region mismatch'
    yield task.deferLater(reactor, 1, defer.succeed, True)
    print 'complete'
    # Stop the reactor on the next turn.
    reactor.callLater(0, reactor.stop)
def onStart(self):
    """Initialize graph storage, load persisted state, and schedule the
    periodic persist task."""
    self.graphs = {}  # in-memory store; persisted under _persistPath
    self._persistPath = PERSIST_PATH
    self.loadAll()
    # First persist after PERSIST_INTERVAL seconds; presumably
    # periodicPersist reschedules itself — confirm.
    deferLater(reactor, PERSIST_INTERVAL, self.periodicPersist)
def __checkPred(res):
    # Truthy result: fire the target deferred with self and pass the
    # value through.
    if res:
        target.callback(self)
        return res
    # Otherwise poll again after `poll` seconds.
    task.deferLater(reactor, poll, self.__wait, pred, poll, target)
    return
def expose(application):
    """Attach DNS (TCP+UDP, port 53) and HTTP (port 80) services for the
    model to the given twisted *application*."""
    def attachDnsController(dns_controller):
        #########
        # Mammatus is the giver of names, on TCP and UDP.
        ##
        verbosity = 0
        tcpFactory = names_server.DNSServerFactory(clients=[dns_controller], verbose=verbosity)
        udpFactory = names_dns.DNSDatagramProtocol(tcpFactory)
        tcpFactory.noisy = udpFactory.noisy = verbosity
        # Serve DNS on both TCP and UDP under one MultiService.
        dns_service = service.MultiService()
        internet.TCPServer(53, tcpFactory).setServiceParent(dns_service)
        internet.UDPServer(53, udpFactory).setServiceParent(dns_service)
        dns_service.setServiceParent(application)

    def attachHttpController(http_controller):
        #########
        # Mammatus feeds you, over HTTP.
        ##
        httpFactory = web_server.Site(http_controller)
        web_service = internet.TCPServer(80, httpFactory)
        web_service.setServiceParent(application)

    #########
    # Expose Mammia
    ##
    # Build the controllers asynchronously, then wire them up.
    deferDnsController = deferLater(reactor, 0, dns.getController, model)
    deferDnsController.addCallback(attachDnsController)
    deferHttpController = deferLater(reactor, 0, http.getController, model)
    deferHttpController.addCallback(attachHttpController)
def deferred_route_twilio_call(session_id, url, defer_time):
    '''
    Currently unused but potentially useful.

    Schedules a Twilio call re-route for *session_id* to *url* after
    *defer_time* seconds; always returns True immediately.
    '''
    rest_client = TwilioRestClient(API_tokens.TWILIO_SID,
                                   API_tokens.TWILIO_AUTH_TOKEN)
    route_call = rest_client.calls.route
    deferLater(reactor, defer_time, route_call, session_id, url)
    return True
def test_waiter(self):
    """Return a deferred that fires the continuation after 32 seconds."""
    print("test_main()")
    #reactor.callLater(1.0, junk_messages, self.mcc)
    waiter = task.deferLater(reactor, 32, self._called_by_deffered)
    return waiter
def cleanUp():
    """Pause the shared service's monitor shortly after teardown starts."""
    def pause_monitor():
        self.sharedService.pauseMonitor()

    return deferLater(reactor, 0.1, pause_monitor)
def setUp(self):
    """
    Work around Twisted #3178 by tricking trial into thinking something
    asynchronous is happening.
    """
    def noop():
        return None

    return deferLater(reactor, 0, noop)
def maybe_change_power_state(system_id, hostname, power_type, power_change,
                             context, clock=reactor):
    """Attempt to change the power state of a node.

    If there is no power action already in progress, register this
    action and then pass change_power_state() to the reactor to call
    later and then return.

    This function exists to guarantee that PowerActionAlreadyInProgress
    errors will be raised promptly, before any work is done to power the
    node on.

    :raises: PowerActionAlreadyInProgress if there's already a power
        action in progress for this node.
    """
    assert power_change in ('on', 'off', 'cycle'), (
        "Unknown power change: %s" % power_change)

    power_driver = PowerDriverRegistry.get_item(power_type)
    if power_driver is None:
        raise PowerActionFail("Unknown power_type '%s'" % power_type)
    missing_packages = power_driver.detect_missing_packages()
    if len(missing_packages):
        raise PowerActionFail("'%s' package(s) are not installed" % " ".join(
            missing_packages))

    # There should be one and only one power change for each system ID.
    if system_id in power_action_registry:
        current_power_change, d = power_action_registry[system_id]
    else:
        current_power_change, d = None, None

    if current_power_change is None:
        # Arrange for the power change to happen later; do not make the caller
        # wait, because it might take a long time. We set a timeout so that if
        # the power action doesn't return in a timely fashion (or fails
        # silently or some such) it doesn't block other actions on the node.
        d = deferLater(clock, 0, deferWithTimeout, CHANGE_POWER_STATE_TIMEOUT,
                       change_power_state, system_id, hostname, power_type,
                       power_change, context, clock)

        power_action_registry[system_id] = power_change, d

        # Whether we succeed or fail, we need to remove the action from the
        # registry of actions, otherwise subsequent actions will fail.
        d.addBoth(callOut, power_action_registry.pop, system_id, None)

        # Log cancellations distinctly from other errors.
        def eb_cancelled(failure):
            failure.trap(CancelledError)
            log.msg("%s: Power could not be set to %s; timed out."
                    % (hostname, power_change))
            return power_change_failure(system_id, hostname, power_change,
                                        "Timed out")
        d.addErrback(eb_cancelled)

        # Catch-all log.
        d.addErrback(log.err, "%s: Power %s failed." % (hostname,
                                                        power_change))
    elif current_power_change == power_change:
        # What we want is already happening; let it continue.
        pass
    else:
        # Right now we reject conflicting power changes. However, we have the
        # Deferred (in `d`) along which the current power change is occurring,
        # so the option to cancel is available if we want it.
        raise PowerActionAlreadyInProgress(
            "Unable to change power state to '%s' for node %s: another "
            "action is already in progress for that node."
            % (power_change, hostname))
def sleep(self, *args, seconds):
    """Non blocking sleep callback.

    Extra positional args (e.g. chained callback results) are ignored;
    ``seconds`` must be passed by keyword.
    """
    def _wake():
        return None

    return deferLater(reactor, seconds, _wake)
def test_waiter(self):
    """Return a deferred that runs the continuation after five seconds."""
    pending = task.deferLater(reactor, 5, self._called_by_deffered)
    return pending
def render_GET(self, request):
    """Render asynchronously after a delay given by the 'n' query arg
    (seconds, default 1); the request is completed by _delayedRender."""
    wait = getarg(request, "n", 1, type=float)
    deferred = deferLater(reactor, wait, lambda: (request, wait))
    deferred.addCallback(self._delayedRender)
    return NOT_DONE_YET
def loop(result):
    # A truthy result ends the polling loop and propagates the value.
    if result:
        return result
    # Otherwise re-check the predicate in 100ms and loop on its outcome.
    retry = deferLater(reactor, 0.1, predicate)
    retry.addCallback(loop)
    return retry
def _do_log(self):
    """Every 5 seconds, snapshot Dispersy statistics and log whatever
    changed since the previous snapshot (via self.print_on_change)."""
    try:
        from Tribler.dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME
    except:
        # Fall back to the standalone dispersy package layout.
        from dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME
    # community hex_cid -> stumble timestamp -> set of member mids
    total_stumbled_candidates = defaultdict(lambda: defaultdict(set))

    prev_statistics = {}
    prev_total_received = {}
    prev_total_dropped = {}
    prev_total_delayed = {}
    prev_total_outgoing = {}
    prev_total_fail = {}
    prev_endpoint_recv = {}
    prev_endpoint_send = {}
    prev_created_messages = {}
    prev_bootstrap_candidates = {}  # NOTE: unused in the visible body

    while True:
        self._dispersy.statistics.update()

        communities_dict = {}
        for c in self._dispersy.statistics.communities:

            if c._community.dispersy_enable_candidate_walker:
                # determine current size of candidates categories
                nr_walked = nr_intro = nr_stumbled = 0

                # we add all candidates which have a last_stumble > now - CANDIDATE_STUMBLE_LIFETIME
                now = time()
                for candidate in c._community.candidates.itervalues():
                    if candidate.last_stumble > now - CANDIDATE_STUMBLE_LIFETIME:
                        nr_stumbled += 1
                        mid = candidate.get_member().mid
                        total_stumbled_candidates[c.hex_cid][
                            candidate.last_stumble].add(mid)
                    if candidate.last_walk > now - CANDIDATE_WALK_LIFETIME:
                        nr_walked += 1
                    if candidate.last_intro > now - CANDIDATE_INTRO_LIFETIME:
                        nr_intro += 1
            else:
                # Walker disabled: counts are unknown.
                nr_walked = nr_intro = nr_stumbled = "?"

            total_nr_stumbled_candidates = sum(
                len(members) for members in total_stumbled_candidates[
                    c.hex_cid].values())

            communities_dict[c.hex_cid] = {
                'classification': c.classification,
                'global_time': c.global_time,
                'sync_bloom_new': c.sync_bloom_new,
                'sync_bloom_reuse': c.sync_bloom_reuse,
                'sync_bloom_send': c.sync_bloom_send,
                'sync_bloom_skip': c.sync_bloom_skip,
                'nr_candidates': len(c.candidates) if c.candidates else 0,
                'nr_walked': nr_walked,
                'nr_stumbled': nr_stumbled,
                'nr_intro': nr_intro,
                'total_stumbled_candidates': total_nr_stumbled_candidates}

        # check for missing communities, reset candidates to 0
        cur_cids = communities_dict.keys()
        for cid, c in prev_statistics.get('communities', {}).iteritems():
            if cid not in cur_cids:
                _c = c.copy()
                _c['nr_candidates'] = "?"
                _c['nr_walked'] = "?"
                _c['nr_stumbled'] = "?"
                _c['nr_intro'] = "?"
                communities_dict[cid] = _c

        statistics_dict = {
            'conn_type': self._dispersy.statistics.connection_type,
            'received_count': self._dispersy.statistics.total_received,
            'success_count': self._dispersy.statistics.msg_statistics.success_count,
            'drop_count': self._dispersy.statistics.msg_statistics.drop_count,
            'delay_count': self._dispersy.statistics.msg_statistics.delay_received_count,
            'delay_success': self._dispersy.statistics.msg_statistics.delay_success_count,
            'delay_timeout': self._dispersy.statistics.msg_statistics.delay_timeout_count,
            'delay_send': self._dispersy.statistics.msg_statistics.delay_send_count,
            'created_count': self._dispersy.statistics.msg_statistics.created_count,
            'total_up': self._dispersy.statistics.total_up,
            'total_down': self._dispersy.statistics.total_down,
            'total_send': self._dispersy.statistics.total_send,
            'cur_sendqueue': self._dispersy.statistics.cur_sendqueue,
            'total_candidates_discovered': self._dispersy.statistics.total_candidates_discovered,
            'walk_attempt': self._dispersy.statistics.walk_attempt_count,
            'walk_success': self._dispersy.statistics.walk_success_count,
            'walk_invalid_response_identifier': self._dispersy.statistics.invalid_response_identifier_count,
            'is_online': self.is_online(),
            'communities': communities_dict}

        # Only deltas are logged; print_on_change returns the new snapshot.
        prev_statistics = self.print_on_change("statistics",
                                               prev_statistics,
                                               statistics_dict)
        prev_total_dropped = self.print_on_change(
            "statistics-dropped-messages", prev_total_dropped,
            self._dispersy.statistics.msg_statistics.drop_dict)
        prev_total_delayed = self.print_on_change(
            "statistics-delayed-messages", prev_total_delayed,
            self._dispersy.statistics.msg_statistics.delay_dict)
        prev_total_received = self.print_on_change(
            "statistics-successful-messages", prev_total_received,
            self._dispersy.statistics.msg_statistics.success_dict)
        prev_total_outgoing = self.print_on_change(
            "statistics-outgoing-messages", prev_total_outgoing,
            self._dispersy.statistics.msg_statistics.outgoing_dict)
        prev_created_messages = self.print_on_change(
            "statistics-created-messages", prev_created_messages,
            self._dispersy.statistics.msg_statistics.created_dict)
        prev_total_fail = self.print_on_change(
            "statistics-walk-fail", prev_total_fail,
            self._dispersy.statistics.walk_failure_dict)
        prev_endpoint_recv = self.print_on_change(
            "statistics-endpoint-recv", prev_endpoint_recv,
            self._dispersy.statistics.endpoint_recv)
        prev_endpoint_send = self.print_on_change(
            "statistics-endpoint-send", prev_endpoint_send,
            self._dispersy.statistics.endpoint_send)

        # Sleep 5 seconds without blocking the reactor.
        yield deferLater(reactor, 5.0, lambda: None)
def schedule_poll(self, imgur_token=None, delay=None):
    """Schedule a single call to request_imgur_count after *delay* seconds.

    Fix: the original assigned the Deferred from deferLater to an unused
    local and discarded it, so callers could neither chain on, cancel, nor
    observe errors from the scheduled poll. Returning it is backward
    compatible (callers that ignored the old None return are unaffected).

    NOTE(review): the delay=None default would fail inside deferLater's
    timer arithmetic; callers appear to always pass an explicit delay —
    confirm before relying on the default.
    """
    return deferLater(reactor, delay, self.request_imgur_count, imgur_token)
workers = [] fakeReactor = CountingReactorWithSuccess(workers) self.runner.run(TestCase(), fakeReactor) def check(): localLock = FilesystemLock(workingDirectory + ".lock") self.assertTrue(localLock.lock()) self.assertEqual(1, fakeReactor.stopCount) self.assertEqual(list(fakeReactor.triggers.keys()), ["before"]) self.assertEqual(list(fakeReactor.triggers["before"]), ["shutdown"]) self.reap(workers) return deferLater(reactor, 0, check) def test_runWaitForProcessesDeferreds(self): """ L{DistTrialRunner} waits for the worker processes to stop when the reactor is stopping, and then unlocks the test directory, not trying to stop the reactor again. """ workers = [] workingDirectory = self.runner._workingDirectory fakeReactor = CountingReactor(workers) self.runner.run(TestCase(), fakeReactor) def check(ign): # Let the AMP deferreds fire
def pushEvents(self):
    """Flush events to ZenHub.

    Generator-style (inlineCallbacks) method. Guards against re-entry via
    self._pushEventsDeferred and implements back-pressure via
    self._eventHighWaterMark. All resolution of those two deferreds is
    funneled through the finally block / the no-service branch below.
    """
    # are we already shutting down?
    if not reactor.running:
        self.log.debug("Skipping event sending - reactor not running.")
        return
    # Queue has grown past the high-water threshold: create a deferred that
    # throttled producers can wait on until the queue drains again.
    if self.eventQueueManager.event_queue_length >= self.options.maxqueuelen * self.options.queueHighWaterMark and not self._eventHighWaterMark:
        self.log.debug(
            "Queue length exceeded high water mark, %s ;creating high water mark deferred",
            self.eventQueueManager.event_queue_length)
        self._eventHighWaterMark = defer.Deferred()
    # are still connected to ZenHub?
    evtSvc = self.services.get('EventService', None)
    if not evtSvc:
        self.log.error("No event service: %r", evtSvc)
        # Yield one reactor turn before giving up, so pending callbacks run.
        yield task.deferLater(reactor, 0, lambda: None)
        if self._eventHighWaterMark:
            d, self._eventHighWaterMark = self._eventHighWaterMark, None
            #not connected, release throttle and let things queue
            d.callback("No Event Service")
        defer.returnValue(None)
    # A previous pushEvents call is still in flight; don't overlap sends.
    if self._pushEventsDeferred:
        self.log.debug("Skipping event sending - previous call active.")
        defer.returnValue("Push Pending")
    sent = 0
    try:
        #only set _pushEventsDeferred after we know we have an evtSvc/connectivity
        self._pushEventsDeferred = defer.Deferred()

        def repush(val):
            # If a full chunk (or more) is still queued, immediately kick
            # off another push once this one's deferred fires.
            if self.eventQueueManager.event_queue_length >= self.options.eventflushchunksize:
                self.pushEvents()
            return val
        # conditionally push more events after this pushEvents call finishes
        self._pushEventsDeferred.addCallback(repush)

        # Report (once) any events that were dropped because the queue
        # overflowed, then reset the counter.
        discarded_events = self.eventQueueManager.discarded_events
        if discarded_events:
            self.log.error(
                'Discarded oldest %d events because maxqueuelen was '
                'exceeded: %d/%d', discarded_events,
                discarded_events + self.options.maxqueuelen,
                self.options.maxqueuelen)
            self.counters['discardedEvents'] += discarded_events
            self.eventQueueManager.discarded_events = 0

        send_events_fn = partial(evtSvc.callRemote, 'sendEvents')
        try:
            sent = yield self.eventQueueManager.sendEvents(send_events_fn)
        except ConnectionLost as ex:
            self.log.error('Error sending event: %s', ex)
            #let the reactor have time to clean up any connection errors and make callbacks
            yield task.deferLater(reactor, 0, lambda: None)
        except Exception as ex:
            self.log.exception(ex)
            #let the reactor have time to clean up any connection errors and make callbacks
            yield task.deferLater(reactor, 0, lambda: None)
    finally:
        # Always release the re-entry guard, firing it with a summary so
        # the repush callback (and any other observers) run.
        if self._pushEventsDeferred:
            d, self._pushEventsDeferred = self._pushEventsDeferred, None
            d.callback('sent %s' % sent)
        # If the queue drained below the high-water threshold, release the
        # throttle deferred so producers resume.
        if self._eventHighWaterMark and self.eventQueueManager.event_queue_length < self.options.maxqueuelen * self.options.queueHighWaterMark:
            self.log.debug("Queue restored to below high water mark: %s",
                           self.eventQueueManager.event_queue_length)
            d, self._eventHighWaterMark = self._eventHighWaterMark, None
            d.callback("Queue length below high water mark")
def incrementBananas(self):
    """Give both players one banana, broadcast the new game state, and
    re-arm this respawn tick for BANANA_RESPAWN_INTERVAL seconds out."""
    for player_data in (self.gameState.p1_data, self.gameState.p2_data):
        player_data.numBananas += 1
    self.sendGameState()
    # Fire-and-forget: each tick schedules the next one.
    task.deferLater(reactor, BANANA_RESPAWN_INTERVAL, self.incrementBananas)
def check(ign):
    """Defer the real assertions by one reactor turn so any pending AMP
    deferreds have a chance to fire first."""
    d = deferLater(reactor, 0, realCheck)
    return d
def onBufferReceived(self, buf):
    """Fan the received buffer out to every registered listener.

    Each delivery is scheduled on the next reactor turn rather than called
    inline.
    """
    for subscriber in self.listenerRegistry.iterListeners():
        _d = deferLater(reactor, 0, subscriber.onPush, buf)
def checkAgedBananas(self):
    """Age out old bananas and broadcast the state only if it changed;
    then re-arm the periodic check."""
    if self.gameState.checkAgedBananas():
        self.sendGameState()
    task.deferLater(reactor, BANANA_PEEL_REFRESH_INTERVAL, self.checkAgedBananas)
def defer_way_later(*args, **kwargs):
    """Return a Deferred that fires (with None) only after a full hour —
    effectively "never" within a test run."""
    # Create a defer that will finish in 1 minute.
    one_hour_secs = 60 * 60
    return deferLater(reactor, one_hour_secs, lambda: None)
def _garbageCollect(self):
    """Run one GC pass, log the collection count, and schedule the next
    pass 1000 seconds from now."""
    # Re-arm first, so a failure in gc/logging doesn't stop the cycle.
    _d = deferLater(reactor, 1000, self._garbageCollect)
    collected = gc.collect()
    log.msg('Garbage collected %d' % collected)
def _wait_for_next_chunk(self):
    """Pause for out_chunk_delay_sec, then write the next chunk."""
    pause = self.out_chunk_delay_sec
    return deferLater(reactor, pause, self._write_single_chunk)
def announce(self, data):
    """Serialize *data* as one newline-terminated JSON line and push it to
    every registered info listener on the next reactor turn."""
    payload = json.dumps(data) + '\n'
    for subscriber in self.infoListenerRegistry.iterListeners():
        _d = deferLater(reactor, 0, subscriber.onPush, payload)
def dataReceived(self, data): print "Server said:", data task.deferLater(reactor, 1, self.sendData)
def testResend(res):
    """Check the session's wait/inactivity settings, then wait just past
    the combined timeout window before firing the follow-up request."""
    self.failUnless(res[0].name == 'body', 'Wrong element')
    session = self.b.service.sessions[self.sid]
    self.failUnless(session.inactivity == 2, 'Wrong inactivity value')
    self.failUnless(session.wait == 2, 'Wrong wait value')
    resend_delay = session.wait + session.inactivity + 1
    return task.deferLater(reactor, resend_delay, sendTest)
def raise_exception_later(service_name):
    """Schedule raise_exception(service_name) for the next reactor turn.

    deferLater keeps the raise asynchronous, which helps ensure that
    ensureServices() has not closed over mutating local state (e.g. a
    loop variable).
    """
    return deferLater(reactor, 0, raise_exception, service_name)
def test_cancelWhileLocksAvailable(self):
    """Cancelling a step that is waiting on locks must release its claim
    so another waiting step (stepd) can acquire them.

    Setup: stepa holds lock1, stepb holds lock2; stepc and stepd each
    want both locks and queue behind them.
    """
    def _owns_lock(step, lock):
        # Find the access mode this step requested for `lock`, then ask
        # the lock whether the step currently owns it.
        access = [
            step_access for step_lock, step_access in step.locks
            if step_lock == lock
        ][0]
        return lock.isOwner(step, access)

    def _lock_available(step, lock):
        # Same lookup, but checks availability rather than ownership.
        access = [
            step_access for step_lock, step_access in step.locks
            if step_lock == lock
        ][0]
        return lock.isAvailable(step, access)

    lock1 = locks.MasterLock("masterlock1")
    real_lock1 = locks.RealMasterLock(lock1)
    lock2 = locks.MasterLock("masterlock2")
    real_lock2 = locks.RealMasterLock(lock2)
    stepa = self.setupStep(
        self.FakeBuildStep(locks=[(real_lock1, locks.LockAccess(lock1, 'exclusive'))]))
    stepb = self.setupStep(
        self.FakeBuildStep(locks=[(real_lock2, locks.LockAccess(lock2, 'exclusive'))]))
    stepc = self.setupStep(
        self.FakeBuildStep(
            locks=[(real_lock1, locks.LockAccess(lock1, 'exclusive')
                    ), (real_lock2, locks.LockAccess(lock2, 'exclusive'))]))
    stepd = self.setupStep(
        self.FakeBuildStep(
            locks=[(real_lock1, locks.LockAccess(lock1, 'exclusive')
                    ), (real_lock2, locks.LockAccess(lock2, 'exclusive'))]))

    # Start all the steps
    yield stepa.acquireLocks()
    yield stepb.acquireLocks()
    c_d = stepc.acquireLocks()
    d_d = stepd.acquireLocks()

    # Check that step a and step b have the locks
    self.assertTrue(_owns_lock(stepa, real_lock1))
    self.assertTrue(_owns_lock(stepb, real_lock2))

    # Check that step c does not have a lock
    self.assertFalse(_owns_lock(stepc, real_lock1))
    self.assertFalse(_owns_lock(stepc, real_lock2))

    # Check that step d does not have a lock
    self.assertFalse(_owns_lock(stepd, real_lock1))
    self.assertFalse(_owns_lock(stepd, real_lock2))

    # Release lock 1
    stepa.releaseLocks()
    # One reactor turn so release notifications propagate to waiters.
    yield deferLater(reactor, 0, lambda: None)

    # lock1 should be available for step c (first waiter), not step d
    self.assertTrue(_lock_available(stepc, real_lock1))
    self.assertFalse(_lock_available(stepc, real_lock2))
    self.assertFalse(_lock_available(stepd, real_lock1))
    self.assertFalse(_lock_available(stepd, real_lock2))

    # Cancel step c
    stepc.interrupt("cancelling")
    yield c_d

    # Check that step c does not have a lock
    self.assertFalse(_owns_lock(stepc, real_lock1))
    self.assertFalse(_owns_lock(stepc, real_lock2))
    # No lock should be available for step c
    self.assertFalse(_lock_available(stepc, real_lock1))
    self.assertFalse(_lock_available(stepc, real_lock2))
    # lock 1 should be available for step d
    self.assertTrue(_lock_available(stepd, real_lock1))
    self.assertFalse(_lock_available(stepd, real_lock2))

    # Release lock 2
    stepb.releaseLocks()

    # Both locks should be available for step d
    self.assertTrue(_lock_available(stepd, real_lock1))
    self.assertTrue(_lock_available(stepd, real_lock2))

    # So it should run
    yield d_d

    # Check that step d owns the locks
    self.assertTrue(_owns_lock(stepd, real_lock1))
    self.assertTrue(_owns_lock(stepd, real_lock2))
def _connect_to_device(reactor, options, port, daemon, connect_func):
    """Spawn a hamlib daemon (rigctld/rotctld style) on a TCP port and
    connect to it, retrying with exponential backoff.

    Generator-style (inlineCallbacks); the connected proxy device is
    delivered via defer.returnValue. If `port` is None, up to 4 random
    ephemeral ports are tried to dodge collisions.
    """
    if options is None:
        options = []
    host = '127.0.0.1'
    # We use rigctld instead of rigctl, because rigctl will only execute one command at a time and does not have the better-structured response formats.
    # If it were possible, we'd rather connect to rigctld over a pipe or unix-domain socket to avoid port allocation issues.
    if port is not None:
        # Make sure that there isn't (as best we can check) something using the port already.
        fake_connected = defer.Deferred()
        reactor.connectTCP(host, port, _HamlibClientFactory('(probe) %s' % (daemon,), fake_connected))
        try:
            # A successful probe connection means the port is taken.
            yield fake_connected
            raise Exception('Something is already using port %i!' % port)
        except ConnectionRefusedError:
            # Refused is the good outcome: nothing is listening there.
            pass
    for _ in xrange(4 if port is None else 1):
        # loop to try available port numbers in case of collision (hamlib will not bind to 0 and report)
        if port is None:
            actual_port = random.randint(49152, 65535)
        else:
            actual_port = port
        process = subprocess.Popen(
            args=['/usr/bin/env', daemon, '-T', host, '-t', str(actual_port)] + options,
            stdin=None,
            stdout=None,
            stderr=None,
            close_fds=True)
        # Retry connecting with exponential backoff, because the daemon process won't tell us when it's started listening.
        proxy_device = None
        refused = Exception('this shouldn\'t be raised')
        for i in xrange(0, 4):
            try:
                proxy_device = yield connect_func(
                    reactor=reactor, host=host, port=actual_port)
                break
            except ConnectionRefusedError as e:
                refused = e
                if process.poll() is not None:
                    # If the process has terminated already, then it is probably due to either a rig communication problem or due to a port number collision.
                    break
                # Backoff: 0.1s, 0.2s, 0.4s, 0.8s between attempts.
                yield deferLater(reactor, 0.1 * (2 ** i), lambda: None)
        else:
            # All retries refused and the daemon is still running: give up.
            raise refused
        if proxy_device is None:
            # If we get here, then we aborted the loop by the process.poll() check.
            continue
        # TODO: Sometimes we fail to kill the process because there was a protocol error during the connection stages. Refactor so that doesn't happen.
        _install_closed_hook(proxy_device, process)
        defer.returnValue(proxy_device)
        break  # defer.returnValue exits by raise; this is just for lint
    else:
        raise Exception('Failed to start {}'.format(daemon))
def start_lbry_reuploader(sd_hash, kill_event, dead_event,
                          ready_event, n, ul_rate_limit=None, is_generous=False):
    """Child-process entry point: download the stream `sd_hash`, then serve
    its blobs back to peers until `kill_event` is set.

    `n` offsets the peer port (5553 + n) so multiple reuploaders can run
    concurrently. Sets `ready_event` when serving, `dead_event` after
    shutdown. NOTE(review): the `is_generous` parameter is not used here —
    the Session reads conf.ADJUSTABLE_SETTINGS instead; confirm intent.
    """
    use_epoll_on_linux()
    init_conf_windows()
    from twisted.internet import reactor

    logging.debug("Starting the uploader")

    # Re-seed PyCrypto after fork; required for forked child processes.
    Random.atfork()

    r = random.Random()
    r.seed("start_lbry_reuploader")

    # Fake/stub network components so the test runs without real peers.
    wallet = FakeWallet()
    peer_port = 5553 + n
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    db_dir, blob_dir = mk_db_and_blob_dir()
    session = Session(
        conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir,
        node_id="abcd" + str(n),
        dht_node_port=4446, peer_finder=peer_finder,
        hash_announcer=hash_announcer, blob_dir=blob_dir,
        peer_port=peer_port, use_upnp=False, rate_limiter=rate_limiter,
        wallet=wallet, blob_tracker_class=DummyBlobAvailabilityTracker,
        is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
        external_ip="127.0.0.1")

    lbry_file_manager = EncryptedFileManager(session, sd_identifier)

    if ul_rate_limit is not None:
        session.rate_limiter.set_ul_limit(ul_rate_limit)

    def make_downloader(metadata, prm, download_directory):
        # Use the first registered factory for this stream type.
        factories = metadata.factories
        return factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate, prm,
                                            download_directory)

    def download_file():
        # Fetch the stream descriptor, build a downloader, and start it.
        prm = session.payment_rate_manager
        d = download_sd_blob(session, sd_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(make_downloader, prm, db_dir)
        d.addCallback(lambda downloader: downloader.start())
        return d

    def start_transfer():
        logging.debug("Starting the transfer")
        d = session.setup()
        d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
        d.addCallback(lambda _: lbry_file_manager.setup())
        d.addCallback(lambda _: download_file())
        return d

    def start_server():
        server_port = None
        # Query handlers for the blob-serving protocol (availability,
        # blob requests, wallet info).
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager, None),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }
        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)
        server_port = reactor.listenTCP(peer_port, server_factory)
        logging.debug("Started listening")

        def kill_server():
            # Tear everything down, signal the parent, then stop the reactor.
            ds = []
            ds.append(session.shut_down())
            ds.append(lbry_file_manager.stop())
            if server_port:
                ds.append(server_port.stopListening())
            ds.append(rm_db_and_blob_dir(db_dir, blob_dir))
            kill_check.stop()
            dead_event.set()
            dl = defer.DeferredList(ds)
            dl.addCallback(lambda _: reactor.stop())
            return dl

        def check_for_kill():
            # Polled once per second by the LoopingCall below.
            if kill_event.is_set():
                kill_server()

        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)
        ready_event.set()
        logging.debug("set the ready event")

    # Delay the transfer 1s to let the reactor start, then begin serving.
    d = task.deferLater(reactor, 1.0, start_transfer)
    d.addCallback(lambda _: start_server())
    if not reactor.running:
        reactor.run()
def render_GET(self, request):
    """Serve a boot file over HTTP by delegating to the TFTP backend.

    Returns NOT_DONE_YET and finishes the request asynchronously via the
    deferred chain; error statuses (400/403/404/500/503) are written
    directly. Requires X-Server-Addr and X-Forwarded-For headers.
    """
    # Be sure that the TFTP endpoint is running.
    try:
        tftp = services.getServiceNamed('tftp')
    except KeyError:
        # TFTP service is not installed cannot handle a boot request.
        request.setResponseCode(503)
        return b'HTTP boot service not ready.'

    # Extract the local servers IP/port of the request.
    localHost = request.getHeader('X-Server-Addr')
    try:
        localPort = int(request.getHeader('X-Server-Port'))
    except (TypeError, ValueError):
        # Missing or non-numeric port header: fall back to 0.
        localPort = 0

    # Extract the original clients IP/port of the request.
    remoteHost = request.getHeader('X-Forwarded-For')
    try:
        remotePort = int(request.getHeader('X-Forwarded-Port'))
    except (TypeError, ValueError):
        remotePort = 0

    # localHost and remoteHost are required headers.
    if not localHost or not remoteHost:
        request.setResponseCode(400)
        return b'Missing X-Server-Addr and X-Forwarded-For HTTP headers.'

    def handleFailure(failure):
        # Map backend failures to HTTP statuses; anything unexpected is
        # logged and reported as a 500 with the error text as the body.
        if failure.check(AccessViolation):
            request.setResponseCode(403)
            request.write(b'')
        elif failure.check(FileNotFound):
            request.setResponseCode(404)
            request.write(b'')
        else:
            log.err(failure, "Failed to handle boot HTTP request.")
            request.setResponseCode(500)
            request.write(str(failure.value).encode('utf-8'))
        request.finish()

    def writeResponse(reader):
        # Some readers from `tftp` do not provide a way to get the size
        # of the generated content. Only set `Content-Length` when size
        # can be determined for the response.
        if hasattr(reader, 'size'):
            request.setHeader(b'Content-Length', reader.size)

        # The readers from `tftp` use `finish` instead of `close`, but
        # `NoRangeStaticProducer` expects `close` instead of `finish`. Map
        # `finish` to `close` so the file handlers are cleaned up.
        reader.close = reader.finish

        # Produce the result without allowing range. This producer will
        # call `close` on the reader and `finish` on the request when done.
        producer = NoRangeStaticProducer(request, reader)
        producer.start()

    path = b'/'.join(request.postpath)
    d = context.call(
        {
            "local": (localHost, localPort),
            "remote": (remoteHost, remotePort),
        },
        tftp.backend.get_reader, path, skip_logging=True)
    d.addCallback(writeResponse)
    d.addErrback(handleFailure)
    d.addErrback(log.err, "Failed to handle boot HTTP request.")

    # Log the HTTP request to rackd.log and push that event to the
    # region controller.
    log_path = path.decode('utf-8')
    log.info("{path} requested by {remoteHost}", path=log_path,
             remoteHost=remoteHost)
    d = deferLater(
        reactor, 0, send_node_event_ip_address,
        event_type=EVENT_TYPES.NODE_HTTP_REQUEST,
        ip_address=remoteHost, description=log_path)
    d.addErrback(log.err, "Logging HTTP request failed.")

    # Response is handled in the defer.
    return NOT_DONE_YET
def sleep(secs):
    """Async sleep: return a Deferred that fires with None after *secs*
    seconds without blocking the reactor."""
    def _wake():
        return None
    return deferLater(reactor, secs, _wake)
def _check_fds(_):
    """Poll the reactor until no validated Client file descriptors remain.

    Re-schedules itself every reactor turn while at least one registered
    Client fd passes validate_client; returns None (loop ends) once they
    are all gone.
    """
    # BUG FIX: the original computed set(getReaders() + getReaders()),
    # unioning the readers with themselves, so writer-only Client
    # connections were never seen. Check readers AND writers.
    fds = set(reactor.getReaders() + reactor.getWriters())
    if not [fd for fd in fds if isinstance(fd, Client) and validate_client(fd)]:
        return
    return deferLater(reactor, 0, _check_fds, None)
def onCS(self, item):
    """Handle a CS request: reset the queue timer, then hand the data for
    item['cs'] to the caller-supplied callback."""
    self.stopQueueTask()
    # Re-arm the queue processor for 3.5 seconds from now.
    self.queueTask = task.deferLater(reactor, 3.5, self.startQueueTask)
    payload = self.getData(item['cs'])
    item['cb'](payload)