def start_processes(_):
    logpool.log(
        self.uuid, "internal",
        "Starting work on job %s, assignment of %s tasks." % (
            self.assignment["job"]["title"],
            len(self.assignment["tasks"])),
    )
    self._before_start()
    logger.debug("%r.start()", self.__class__.__name__)
    try:
        self.start()
        self.start_called = True
        logger.debug("Collecting started deferreds from spawned processes")
        if not self.processes:
            logger.warning("No processes have been started, firing "
                           "deferreds immediately.")
            self.started_deferred.callback(None)
            self.stopped_deferred.callback(None)
        else:
            logger.debug("Making deferred list for %s started processes",
                         len(self.processes))
            processes_deferred = DeferredList(
                [process.started for process in self.processes.values()])
            processes_deferred.addCallback(
                lambda x: self.started_deferred.callback(x))
    except Exception as e:
        self.started_deferred.errback(e)
        self.stopped_deferred.errback(e)
def _copyDomainCallback3(self, data, source_domain, destination_domain,
                         total_box_usage=0):
    xml = ET.fromstring(data["response"])
    box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
    self.box_usage += box_usage
    total_box_usage += box_usage
    next_token_element = xml.find(".//%sNextToken" % SDB_NAMESPACE)
    if next_token_element is not None:
        next_token = next_token_element.text
    else:
        next_token = None
    items = xml.findall(".//%sItem" % SDB_NAMESPACE)
    results = {}
    for item in items:
        key = item.find("./%sName" % SDB_NAMESPACE).text
        attributes = item.findall("%sAttribute" % SDB_NAMESPACE)
        attribute_dict = {}
        for attribute in attributes:
            attr_name = attribute.find("./%sName" % SDB_NAMESPACE).text
            attr_value = attribute.find("./%sValue" % SDB_NAMESPACE).text
            if attr_name in attribute_dict:
                attribute_dict[attr_name].append(attr_value)
            else:
                attribute_dict[attr_name] = [attr_value]
        results[key] = attribute_dict
    deferreds = []
    for key in results:
        d = self.putAttributes(destination_domain, key, results[key])
        d.addErrback(self._copyPutAttributesErrback, destination_domain,
                     key, results[key])
        deferreds.append(d)
    d = DeferredList(deferreds, consumeErrors=True)
    d.addCallback(self._copyDomainCallback4, source_domain,
                  destination_domain, next_token=next_token,
                  total_box_usage=total_box_usage)
    return d
def handle_nodes(result, media_id, owner_username):
    """
    I don't know what the hell this does. looks like nothing.

    @return: Unknown
    @rtype: Unknown

    The above comment was added by Clint. I left it here to illustrate
    something: Clint's full of shit. V
    """
    if result[0] != 0:
        raise errors.APIError(result[1])
    nodes = result[1]
    dl = []
    for n in nodes:
        d2 = self._make_media_path(media_id, n, owner_username)
        d2.addCallback(store)
        d2.addCallback(lambda _: self.clear_renders(media_id,
                                                    owner_username, n))
        dl.append(d2)
    dList = DeferredList(dl)
    dList.addCallback(lambda _: "success")
    return dList
def _checkBodies(self, responses, callback, *params):
    deferreds = [Deferred() for r in responses]
    for i, (s, r) in enumerate(responses):
        r.deliverBody(PrinterClient(deferreds[i]))
    dl = DeferredList(deferreds)
    dl.addCallback(callback, *params)
    return dl
def enqueue(self):
    now = int(time.time())
    # Compare the heap min timestamp with now().
    # If it's time for the item to be queued, pop it, update the
    # timestamp and add it back to the heap for the next go round.
    queue_items = []
    if self.amqp_queue_size < 100000:
        queue_items_a = queue_items.append
        LOGGER.debug("%s:%s" % (self.heap[0][0], now))
        while self.heap[0][0] < now and len(queue_items) < 1000:
            job = heappop(self.heap)
            uuid = UUID(bytes=job[1][0])
            if uuid.hex not in self.unscheduled_items:
                queue_items_a(job[1][0])
                new_job = (now + job[1][1], job[1])
                heappush(self.heap, new_job)
            else:
                self.unscheduled_items.remove(uuid.hex)
    else:
        LOGGER.critical('AMQP queue is at or beyond max limit (%d/100000)'
                        % self.amqp_queue_size)
    # add items to amqp
    if queue_items:
        LOGGER.info('Found %d new uuids, adding them to the queue'
                    % len(queue_items))
        msgs = [Content(uuid) for uuid in queue_items]
        deferreds = [self.chan.basic_publish(
            exchange=self.amqp_exchange, content=msg) for msg in msgs]
        d = DeferredList(deferreds, consumeErrors=True)
        d.addCallbacks(self._addToQueueComplete, self._addToQueueErr)
    else:
        self.enqueueCallLater = reactor.callLater(1, self.enqueue)
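# A small, self-contained illustration (hypothetical data shapes, not part
# of the snippet above) of the heap discipline used there: peek at the
# smallest timestamp, pop every entry that is due, and push each one back
# with its next fire time so the heap always holds the upcoming schedule.
import time
from heapq import heappush, heappop

def pop_due(heap, limit=1000):
    """heap holds (timestamp, (item_id, interval)) tuples."""
    now = int(time.time())
    due = []
    while heap and heap[0][0] < now and len(due) < limit:
        timestamp, (item_id, interval) = heappop(heap)
        due.append(item_id)
        # reschedule for the next go round
        heappush(heap, (now + interval, (item_id, interval)))
    return due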
def test_complex2(self, runtime):
    def check(ls):
        if (2 == runtime.id) or (1 == runtime.id):
            self.assertEquals(ls[0][1], "V1")
            self.assertEquals(ls[1][1], "V1")
            self.assertEquals(ls[2][1], "V1")
            self.assertEquals(ls[3][1], "V2")
        else:
            self.assertEquals(ls[0][1], "V1")
            self.assertEquals(ls[1][1], "V1")
            self.assertEquals(ls[2][1], "V1")
            self.assertEquals(ls[3][1], "V2")
            self.assertEquals(ls[4][1], "V2")

    field = self.Zp
    results = []
    results += runtime.broadcast(runtime.players.keys(),
                                 runtime.players.keys(), "V1")
    if runtime.id in [1, 2]:
        v = runtime.broadcast([1, 2], [3], "V2")
        if isinstance(v, list):
            results += v
        else:
            results.append(v)
    else:
        results += runtime.broadcast([1, 2], [3])
    if 3 == runtime.id:
        results += [runtime.broadcast([3], runtime.players.keys(), str(7))]
    else:
        results += [runtime.broadcast([3], runtime.players.keys())]
    dls = DeferredList(results)
    runtime.schedule_callback(dls, check)
    dls.addErrback(runtime.error_handler)
    return dls
def tearDown(self):
    deferreds = []
    deferreds.append(self.spider.shutdown())
    d = DeferredList(deferreds)
    d.addCallback(self._tearDownCallback)
    return d
def provision(self, request):
    """
    Provision the device with credentials from a cloud controller.
    """
    cors.config_cors(request)
    body = json.loads(request.content.read().decode('utf-8'))
    routerId = body['routerId']
    apitoken = body['apitoken']
    pdserver = body['pdserver']
    wampRouter = body['wampRouter']

    changed = False
    if routerId != nexus.core.info.pdid \
            or pdserver != nexus.core.info.pdserver \
            or wampRouter != nexus.core.info.wampRouter:
        if pdserver and wampRouter:
            nexus.core.provision(routerId, pdserver, wampRouter)
        else:
            nexus.core.provision(routerId)
        changed = True

    if apitoken != nexus.core.getKey('apitoken'):
        nexus.core.saveKey(apitoken, 'apitoken')
        changed = True

    if changed:
        PDServerRequest.resetToken()
        nexus.core.jwt_valid = False

        def set_update_fetcher(session):
            session.set_update_fetcher(self.update_fetcher)

        @inlineCallbacks
        def start_polling(result):
            yield self.update_fetcher.start_polling()

        def send_response(result):
            response = dict()
            response['provisioned'] = True
            response['httpConnected'] = nexus.core.jwt_valid
            response['wampConnected'] = nexus.core.wamp_connected
            request.setHeader('Content-Type', 'application/json')
            return json.dumps(response)

        wampDeferred = nexus.core.connect(WampSession)
        wampDeferred.addCallback(set_update_fetcher)

        httpDeferred = sendStateReport()
        httpDeferred.addCallback(start_polling)

        identDeferred = sendNodeIdentity()

        dl = DeferredList([wampDeferred, httpDeferred, identDeferred],
                          consumeErrors=True)
        dl.addBoth(send_response)
        reactor.callLater(6, dl.cancel)
        return dl
    else:
        return json.dumps({'success': False,
                           'message': 'No change on the provision parameters'})
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    d = DeferredList(
        [sem.run(trigger_convergence, authenticator, region, group,
                 no_error_group)
         for group in groups],
        fireOnOneCallback=False, fireOnOneErrback=False, consumeErrors=True)
    d.addCallback(
        lambda results: [(g["tenantId"], g["groupId"], f.value)
                         for g, (s, f) in zip(groups, results) if not s])
    return d
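# A minimal, standalone sketch (not from the codebase above) of the
# concurrency pattern trigger_convergence_groups() relies on:
# DeferredSemaphore.run() acquires the semaphore, invokes the function,
# and releases it when the returned deferred fires, so at most `limit`
# of the wrapped calls are in flight while DeferredList waits for all of
# them. The `work` function here is a stand-in.
from twisted.internet import reactor
from twisted.internet.defer import DeferredList, DeferredSemaphore
from twisted.internet.task import deferLater

def run_bounded(items, limit=2):
    def work(item):
        # stand-in for a real asynchronous task
        return deferLater(reactor, 0.1, lambda: item * 2)

    sem = DeferredSemaphore(limit)
    return DeferredList([sem.run(work, item) for item in items],
                        consumeErrors=True)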
def report_crash(request):
    def finishRequest(_):
        # TODO: this should return a meaningful error code
        request.write(
            '<?xml version="1.0" encoding="UTF-8"?><result>0</result>')

    xml = request.args.get('xmlstring', '')[0]
    crashes = BeautifulSoup(xml).findAll('crash')
    deferreds = []
    for crashXML in crashes:
        crash = db.Crash(
            applicationname=crashXML.applicationname.text,
            bundleidentifier=crashXML.bundleidentifier.text,
            contact=crashXML.contact.text,
            description=crashXML.description.text,
            log=crashXML.log.text,
            platform=crashXML.platform.text,
            senderversion=crashXML.senderversion.text,
            systemversion=crashXML.systemversion.text,
            user=crashXML.userid.text,
            version=crashXML.version.text
        )
        deferreds.append(crash.save())
    deferredList = DeferredList(deferreds)
    return deferredList.addCallback(finishRequest)
def get(self):
    """Get the results of a search.

    @raise SearchError: Raised if a query could not be resolved (because
        Solr returned an error).
    @return: A C{Deferred} that fires with a C{dict} that maps L{Query}
        instances to search results.
    """
    # Raise errors found when resolving special results.
    for result in self._specialResults.values():
        if isinstance(result, SearchError):
            return fail(result)

    deferreds = []
    for query in self._queries:
        deferreds.append(self._index.search(query))
    deferreds = DeferredList(deferreds, consumeErrors=True)

    def unpackValues(values):
        results = dict(self._specialResults)
        for i, (success, value) in enumerate(values):
            query = self._queries[i]
            if not success:
                # FIXME If there's more than one exception we'll
                # effectively ignore all but the first one with this
                # logic. It would be good if we didn't ignore/hide issues
                # like this.
                value.raiseException()
            results[query] = value
        return results

    return deferreds.addCallback(unpackValues)
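# Note on the unpackValues() pattern above -- a minimal sketch, not part
# of the original module: DeferredList fires with a list of (success,
# result) two-tuples in the same order as the input deferreds, which is
# why results can be zipped back to self._queries by index. With
# consumeErrors=True a failed deferred shows up as (False, Failure)
# instead of logging an unhandled error.
from twisted.internet.defer import DeferredList, succeed, fail

def demo_result_shape():
    dl = DeferredList([succeed("a"), fail(RuntimeError("boom"))],
                      consumeErrors=True)

    def summarize(results):
        # results == [(True, 'a'), (False, <Failure RuntimeError>)]
        return [(ok, value) for ok, value in results]

    return dl.addCallback(summarize)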
def connect(self):
    self.servers = []
    serverDeferreds = []
    for connection_info in self.connection_list:
        try:
            if type(connection_info) == dict:
                def addServer(res):
                    self.servers.append(res)
                    return res
                d = redis.Connection(**connection_info)
                d.addCallback(addServer)
                serverDeferreds.append(d)
            else:
                server = connection_info
                self.servers.append(server)
        except Exception as e:
            raise Warning(str(e))

    def checkQuorum(res):
        self.quorum = (len(self.connection_list) // 2) + 1
        if len(self.servers) < self.quorum:
            raise CannotObtainLock(
                "Failed to connect to the majority of redis servers")
        return res

    dl = DeferredList(serverDeferreds)
    dl.addCallback(checkQuorum)
    return dl
def test_send_two_senders_in_parallel(self, runtime):
    """Test of sending a value."""
    self.Zp = GF(6277101735386680763835789423176059013767194773182842284081)

    def check(ls):
        for s, x in ls:
            self.assertEquals(int(x), 42)
        return ls

    value = 42

    receivers = [2, 3]
    if 1 == runtime.id:
        d1 = runtime.broadcast([1], receivers, str(value))
    else:
        d1 = runtime.broadcast([1], receivers)
    if 2 == runtime.id:
        d2 = runtime.broadcast([2], [3], str(value))
    else:
        d2 = runtime.broadcast([2], [3])

    ds = [d1]
    if [] != d2:
        ds.append(d2)
    dls = DeferredList(ds)
    dls.addCallback(check)
    return dls
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
    """Like send_catch_log but supports returning deferreds on signal
    handlers.

    Returns a deferred that gets fired once all signal handlers
    deferreds were fired.
    """
    def logerror(failure, recv):
        if dont_log is None or not isinstance(failure.value, dont_log):
            logger.error(
                "Error caught on signal handler: %(receiver)s",
                {"receiver": recv},
                exc_info=failure_to_exc_info(failure),
                extra={"spider": spider},
            )
        return failure

    dont_log = named.pop("dont_log", None)
    spider = named.get("spider", None)
    dfds = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        d = maybeDeferred(robustApply, receiver, signal=signal,
                          sender=sender, *arguments, **named)
        d.addErrback(logerror, receiver)
        # bind the current receiver as a default argument so that late
        # binding of the loop variable cannot pair an async result with
        # the wrong receiver
        d.addBoth(lambda result, recv=receiver: (recv, result))
        dfds.append(d)
    d = DeferredList(dfds)
    d.addCallback(lambda out: [x[1] for x in out])
    return d
def view_createContainer(self, user, tag):
    """
    Create a new Container object.

    @param user: User for which the container will be created.
    @type user: rce.core.user.User

    @param tag: Tag which is used to identify the container in
        subsequent requests.
    @type tag: str
    """
    try:
        validateName(tag)
    except IllegalName as e:
        raise InvalidRequest('Container tag is invalid: {0}'.format(e))

    if tag in user.containers or tag in user.robots:
        raise InvalidRequest('Tag is already used for a container '
                             'or robot.')

    namespace, remote_container = user.realm.createContainer(user.userID)
    container = Container(namespace, remote_container)
    user.containers[tag] = container
    container.notifyOnDeath(user.containerDied)

    m = 'Container {0} successfully created.'.format(tag)
    d = DeferredList([namespace(), remote_container()],
                     fireOnOneErrback=True, consumeErrors=True)
    return d.addCallback(lambda _: m)
def subscribe(self):
    def _logFailure(failure):
        log.debug("reported {message}", message=failure.getErrorMessage())
        return failure

    def _logGrantedQoS(value):
        log.debug("response {value!r}", value=value)
        return True

    def _logAll(*args):
        log.debug("all subscriptions complete args={args!r}", args=args)

    d1 = self.protocol.subscribe("foo/bar/baz1", 2)
    d1.addCallbacks(_logGrantedQoS, _logFailure)
    d2 = self.protocol.subscribe("foo/bar/baz2", 2)
    d2.addCallbacks(_logGrantedQoS, _logFailure)
    d3 = self.protocol.subscribe("foo/bar/baz3", 2)
    d3.addCallbacks(_logGrantedQoS, _logFailure)
    dlist = DeferredList([d1, d2, d3], consumeErrors=True)
    dlist.addCallback(_logAll)
    return dlist
def replace_all(self, string):
    urls = self.find_urls.findall(string)
    # Concat the matched tuples
    urls = [''.join(url) for url in urls]
    d = DeferredList([self.lengthen_url(url) for url in urls])
    d.addCallback(self._replace_all_cb, urls, string)
    return d
def stop(self):
    log.info("\n")
    log.info('end-of-execution-stopping-consumers')

    # Ask each of our consumers to stop. When a consumer fully stops, it
    # fires the deferred returned from its start() method. We saved all
    # those deferreds away (above, in start()) in self._consumer_d_list,
    # so now we'll use a DeferredList to wait for all of them...
    for consumer in self._consumer_list:
        consumer.stop()
    dl = DeferredList(self._consumer_d_list)

    # Once the consumers are all stopped, then close our client
    def _stop_client(result):
        if isinstance(result, Failure):
            log.error('error', result=result)
        else:
            log.info('all-consumers-stopped', client=self._client)
        self._client.close()
        return result

    dl.addBoth(_stop_client)

    # And once the client is shutdown, stop the reactor
    def _stop_reactor(result):
        reactor.stop()
        return result

    dl.addBoth(_stop_reactor)
def process_cluster_info(self, info, cluster, callback):
    """
    Process data received from ganeti.
    """
    print '%s:' % cluster.hostname
    infos = json.loads(info)
    self.timer.tick('info fetched from ganeti ')
    updated = Counter()

    base = cluster.nodes.all()
    mtimes = base.values_list('hostname', 'id', 'mtime')
    data = {}
    for hostname, id, mtime in mtimes:
        data[hostname] = (id, float(mtime) if mtime else None)
    self.timer.tick('mtimes fetched from db ')

    deferreds = [self.update_node(cluster, info, data, updated)
                 for info in infos]
    deferred_list = DeferredList(deferreds)

    # Batch update the cached time for all Nodes in this cluster. This
    # will set the last updated time both for Nodes that were modified
    # and for those that weren't; even if a Node wasn't modified we want
    # its last updated time to be current.
    #
    # XXX don't bother checking to see whether this query needs to run.
    # With normal usage it will almost always need to.
    def update_timestamps(result):
        print '    updated: %s out of %s' % (updated, len(infos))
        base.update(cached=datetime.now())
        self.timer.tick('records or timestamps updated')

    deferred_list.addCallback(update_timestamps)
    deferred_list.addCallback(callback)
    return deferred_list
def run(self):
    jobs, self._jobs = self._jobs[:], []
    jobs_done = DeferredList(jobs)
    jobs_done.addBoth(lambda ignore: self._thread_pool.stop())
    jobs_done.addBoth(lambda ignore: reactor.stop())
    reactor.callWhenRunning(self._thread_pool.start)
    reactor.run(self._install_signal_handlers)
def _configure_ssh(self, deployment):
    """
    :return: A ``Deferred`` which fires when all nodes have been
        configured with ssh keys.
    """
    self.ssh_configuration.create_keypair()
    results = []
    for node in deployment.nodes:
        results.append(
            deferToThread(
                self.ssh_configuration.configure_ssh,
                node.hostname, self.ssh_port
            )
        )
    d = DeferredList(results, fireOnOneErrback=True, consumeErrors=True)

    # Exit with ssh's output if it failed for some reason:
    def got_failure(failure):
        if failure.value.subFailure.check(CalledProcessError):
            raise SystemExit(
                b"Error connecting to cluster node: " +
                failure.value.subFailure.value.output)
        else:
            return failure

    d.addErrback(got_failure)
    return d
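# Sketch (illustrative only, not from the module above) of what the
# errback in _configure_ssh() receives: with fireOnOneErrback=True a
# DeferredList fails with a FirstError, and the original failure is
# reachable as failure.value.subFailure -- which is why got_failure()
# checks failure.value.subFailure.check(CalledProcessError).
from twisted.internet.defer import DeferredList, FirstError, fail, succeed

def demo_first_error():
    dl = DeferredList([succeed(1), fail(ValueError("bad"))],
                      fireOnOneErrback=True, consumeErrors=True)

    def on_error(failure):
        failure.trap(FirstError)
        inner = failure.value.subFailure  # the original Failure
        return inner.check(ValueError)    # matches -> returns ValueError

    return dl.addErrback(on_error)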
def tearDown(self):
    LOGGER.removeHandler(self.logging_handler)
    a = self.mini_web_server.shutdown()
    b = self.pg.clearCache()
    d = DeferredList([a, b])
    d.addCallback(self._tearDownCallback)
    return d
def client_connected(protocol):
    proxy = Proxy(Test_Stub(protocol), Math_Stub(protocol))

    request = EchoRequest()
    request.text = "Hello world!"
    echoed = proxy.Test.Echo(request)
    echoed.addCallback(print_response)

    request = PingRequest()
    pinged = proxy.Test.Ping(request)
    pinged.addCallback(print_response)

    request = MathBinaryOperationRequest()
    request.first = 2
    request.second = 2
    mathAddd = proxy.Math.Add(request)
    mathAddd.addCallback(print_response)

    mathMultiplyd = proxy.Math.Multiply(request)
    mathMultiplyd.addCallback(print_response)

    dl = DeferredList([echoed, pinged, mathAddd, mathMultiplyd])
    dl.addCallback(client_finished)
    return dl
def createJob(self, username, jobSpec):
    jobNo = self._getJobNo()
    job = JobPerspective(jobNo, jobSpec)
    self.jobs[jobNo] = job

    user = yield UserManager.get(username)
    log.debug("Creating job %d for user %s... connecting slave servers"
              % (jobNo, user.userSpec.username))

    # allocate a bunch of slaves here
    slaves = yield SlaveAllocator.allocate(jobSpec)
    log.debug("Using slaves: %s" % slaves)

    # divide the client function to spread the load over all slaves
    # in the set
    clientFunctionPerSlave = "(%s)/%s" % (jobSpec.clientFunction,
                                          len(slaves))
    transferLimitPerSlave = jobSpec.transferLimit / len(slaves)
    modifiedJobSpec = JobSpec(jobSpec.toJson())
    modifiedJobSpec.clientFunction = clientFunctionPerSlave
    modifiedJobSpec.transferLimit = transferLimitPerSlave

    deferred = Deferred()
    slaveRequests = []
    for slave in slaves:
        request = slave.createJob(modifiedJobSpec)
        request.addCallback(self._createJobSlaveCallback, slave)
        slaveRequests.append(request)
    deferredList = DeferredList(slaveRequests)
    deferredList.addCallback(self._createJobCallback, jobNo, user, deferred)
    yield deferredList
    returnValue(jobNo)
def send_payment_request(self, readTokens, writeTokens):
    """Called by a Circuit object when it wants to actually make a payment

    @param readTokens: the number of read tokens to pay for at each hop
        in the circuit
    @type readTokens: int
    @param writeTokens: the number of write tokens to pay for at each hop
        in the circuit
    @type writeTokens: int"""
    assert (readTokens + writeTokens) / Globals.CELLS_PER_PAYMENT, \
        "tried to pay for bad number of cells"
    # make sure our setup is done:
    if not self.setupDone:
        # have we even started?
        if not self.setupStarted:
            self.send_setup_message()
        self.queuedReadTokens += readTokens
        self.queuedWriteTokens += writeTokens
        return
    # dont bother trying to send payments for circuits that are
    # already closed
    if self.circ.is_done():
        return
    # send the payments
    deferreds = []
    for paymentStream in self.paymentStreams.values():
        deferreds.append(
            paymentStream.send_payment(readTokens, writeTokens))
    paymentsDoneDeferred = DeferredList(deferreds)
    paymentsDoneDeferred.addErrback(self.generic_error_handler)
    addTokensDeferred = Deferred()
    self.inflightReadTokens += readTokens
    self.inflightWriteTokens += writeTokens
    # timeout in case the payment fails. We will close the circuit
    # in this case.
    event = Scheduler.schedule_once(PaymentStream.PAR_TIMEOUT,
                                    self.all_receipts_received, None,
                                    addTokensDeferred, readTokens,
                                    writeTokens, None)
    paymentsDoneDeferred.addCallback(self.all_receipts_received,
                                     addTokensDeferred, readTokens,
                                     writeTokens, event)
    addTokensDeferred.addCallback(self._add_tokens_callback, readTokens,
                                  writeTokens)
    addTokensDeferred.addErrback(self.generic_error_handler)
def recordVideoProcess(self, resW, resH, totalTimeSec, framerate, serverIP,
                       piName, recordTimesList, file):
    semi = DeferredSemaphore(1)
    jobs = []
    for runs in range(len(recordTimesList)/2):
        print "recordTimes recordVideoProcess:", recordTimesList
        self.writeFile("recordTimes recordVideoProcess:")
        try:
            startAtTime = self.calculateTimeDifference(
                recordTimesList.pop(0), recordTimesList.pop(0))
            jobs.append(semi.run(tv.takeVideo, int(resW), int(resH),
                                 int(totalTimeSec), int(framerate),
                                 startAtTime, serverIP, piName, file))
        except:
            self.writeFile("That time was not valid. Calling next time.")
            self.writeFile("len recordTimesList: "
                           + str(len(recordTimesList)))
            if len(recordTimesList) % 2 > 0:
                self.writeFile("odd number")
                recordTimesList.pop(0)
                self.writeFile("new len: " + str(len(recordTimesList)))
            reactor.callLater(0.5, self.transport.write,
                              "TIMEINPUTERROR {0}\n".format(piName))
            continue
    jobs = DeferredList(jobs)
    print "Results: ", jobs.addCallback(self.getResults, piName)
    # self.writeFile("Results: ", jobs.addCallback(self.getResults, piName))
    jobs.addCallback(lambda _: reactor.callLater(5, reactor.stop))
def _reportstate_on_nodes(self, deployment):
    """
    Connect to all nodes and run ``flocker-reportstate``.

    :param Deployment deployment: The requested already parsed
        configuration.

    :return: ``Deferred`` that fires with a ``bytes`` in YAML format
        describing the current configuration.
    """
    command = [b"flocker-reportstate"]
    results = []
    for target in self._get_destinations(deployment):
        d = deferToThread(target.node.get_output, command)
        d.addCallback(safe_load)
        d.addCallback(lambda val, key=target.hostname: (key, val))
        results.append(d)
    d = DeferredList(results, fireOnOneErrback=False, consumeErrors=True)

    def got_results(node_states):
        # Bail on errors:
        for succeeded, value in node_states:
            if not succeeded:
                return value
        return safe_dump(dict(pair for (_, pair) in node_states))

    d.addCallback(got_results)
    return d
def start():
    """
    """
    global _StartingDeferred
    if _StartingDeferred:
        lg.warn('driver.start already called')
        return _StartingDeferred
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.start')
    dl = []
    for name in boot_up_order():
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        if not svc.enabled():
            continue
        if svc.state == 'ON':
            continue
        d = Deferred()
        dl.append(d)
        svc.automat('start', d)
    if len(dl) == 0:
        return succeed(1)
    _StartingDeferred = DeferredList(dl)
    _StartingDeferred.addCallback(on_started_all_services)
    return _StartingDeferred
def _storeData(self, data, request_hash, confirm_cache_write,
               http_history=None):
    if len(data["response"]) == 0:
        return self._storeDataErrback(
            Failure(exc_value=Exception("Response data is of length 0")),
            data, request_hash)
    #data["content-sha1"] = sha1(data["response"]).hexdigest()
    if http_history is None:
        http_history = {}
    if "content-sha1" not in http_history:
        http_history["content-sha1"] = data["content-sha1"]
    if "content-changes" not in http_history:
        http_history["content-changes"] = []
    if data["content-sha1"] != http_history["content-sha1"]:
        http_history["content-changes"].append(
            str(int(self.time_offset + time.time())))
    http_history["content-changes"] = http_history["content-changes"][-10:]
    headers = {}
    http_history["content-changes"] = filter(
        lambda x: len(x) > 0, http_history["content-changes"])
    headers["content-changes"] = ",".join(http_history["content-changes"])
    headers["content-sha1"] = data["content-sha1"]
    if "cache-control" in data["headers"]:
        if isinstance(data["headers"]["cache-control"], (list, tuple)):
            if "no-cache" in data["headers"]["cache-control"][0]:
                return data
        else:
            if "no-cache" in data["headers"]["cache-control"]:
                return data
    if "expires" in data["headers"]:
        if isinstance(data["headers"]["expires"], (list, tuple)):
            headers["cache-expires"] = data["headers"]["expires"][0]
        else:
            headers["cache-expires"] = data["headers"]["expires"]
    if "etag" in data["headers"]:
        if isinstance(data["headers"]["etag"], (list, tuple)):
            headers["cache-etag"] = data["headers"]["etag"][0]
        else:
            headers["cache-etag"] = data["headers"]["etag"]
    if "last-modified" in data["headers"]:
        if isinstance(data["headers"]["last-modified"], (list, tuple)):
            headers["cache-last-modified"] = \
                data["headers"]["last-modified"][0]
        else:
            headers["cache-last-modified"] = \
                data["headers"]["last-modified"]
    if "content-type" in data["headers"]:
        if isinstance(data["headers"]["content-type"], (list, tuple)):
            headers["content_type"] = data["headers"]["content-type"][0]
        else:
            headers["content_type"] = data["headers"]["content-type"]
    headers_key = 'headers:%s' % request_hash
    http_key = 'http:%s' % request_hash
    logger.debug("Writing data for request %s to redis." % request_hash)
    deferreds = []
    deferreds.append(self.redis_client.set(
        headers_key, compress(json.dumps(headers), 1)))
    deferreds.append(self.redis_client.set(
        http_key, compress(json.dumps(data["response"]), 1)))
    d = DeferredList(deferreds, consumeErrors=True)
    if confirm_cache_write:
        d.addCallback(self._storeDataCallback, data)
        d.addErrback(self._storeDataErrback, data, request_hash)
        return d
    return data
def cleanUp(self, wasClean, code, reason):
    """Thorough clean-up method to cancel all remaining deferreds, and
    send connection metrics in"""
    self.ps.metrics.increment("client.socket.disconnect",
                              tags=self.base_tags)
    elapsed = (ms_time() - self.ps.connected_at) / 1000.0
    self.ps.metrics.timing("client.socket.lifespan", duration=elapsed,
                           tags=self.base_tags)

    # Cleanup our client entry
    if self.ps.uaid and self.ap_settings.clients.get(self.ps.uaid) == self:
        del self.ap_settings.clients[self.ps.uaid]

    # Cancel any outstanding deferreds that weren't already called
    for d in self.ps._callbacks:
        if not d.called:
            d.cancel()

    # Attempt to deliver any notifications not originating from storage
    if self.ps.direct_updates:
        defers = []
        if self.ps.use_webpush:
            for notifs in self.ps.direct_updates.values():
                notifs = filter(lambda x: x.ttl != 0, notifs)
                defers.extend(map(self._save_webpush_notif, notifs))
        else:
            for chid, version in self.ps.direct_updates.items():
                defers.append(self._save_simple_notif(chid, version))

        # Tag on the notifier once everything has been stored
        dl = DeferredList(defers)
        dl.addBoth(self._lookup_node)

    # Delete and remove remaining dicts and lists
    del self.ps.direct_updates
    del self.ps.updates_sent
def refreshDeviceLists(self):
    """Ask all GPIB bus servers for their available GPIB devices."""
    servers = [s for n, s in self.client.servers.items()
               if (('GPIB Bus' in n) or ('gpib_bus' in n)) and
                  (('List Devices' in s.settings) or
                   ('list_devices' in s.settings))]
    names = [s.name for s in servers]
    print names
    print servers
    print 'Pinging servers:', names
    resp = yield DeferredList([s.list_devices() for s in servers])
    for name, (success, addrs) in zip(names, resp):
        if not success:
            print 'Failed to get device list for:', name
        else:
            print 'Server %s has devices: %s' % (name, addrs)
            for addr in addrs:
                self.gpib_device_connect(name, addr)
def fireEvent(self) -> None:
    """
    Call the triggers added to this event.
    """
    self.state = "BEFORE"
    self.finishedBefore = []
    beforeResults: List[Deferred[object]] = []
    while self.before:
        callable, args, kwargs = self.before.pop(0)
        self.finishedBefore.append((callable, args, kwargs))
        try:
            result = callable(*args, **kwargs)
        except BaseException:
            log.err()
        else:
            if isinstance(result, Deferred):
                beforeResults.append(result)
    DeferredList(beforeResults).addCallback(self._continueFiring)
def main(myName):
    # This file defines the network of virtual quantum nodes
    virtualFile = os.environ.get('NETSIM') + "/config/virtualNodes.cfg"

    # This file defines the network of CQC servers interfacing to virtual
    # quantum nodes
    cqcFile = os.environ.get('NETSIM') + "/config/cqcNodes.cfg"

    # Read configuration files for the virtual quantum, as well as the
    # classical network
    virtualNet = networkConfig(virtualFile)
    cqcNet = networkConfig(cqcFile)

    # Check if we should run a local classical server. If so, initialize
    # the code to handle remote connections on the classical communication
    # network
    if myName in cqcNet.hostDict:
        myHost = cqcNet.hostDict[myName]
        cqc_factory = CQCFactory(myHost, myName, cqcNet)
    else:
        logging.error(
            "LOCAL %s: Cannot start classical communication servers.",
            myName)
        return

    # Initialize Twisted callback framework
    dList = []

    try:
        logging.debug(
            "LOCAL %s: Starting local classical communication server.",
            myName)
        myHost.root = cqc_factory
        myHost.factory = cqc_factory
        reactor.listenTCP(myHost.port, myHost.factory)
    except Exception as e:
        logging.error("LOCAL %s: Cannot start CQC server: %s",
                      myName, e.strerror)
        return

    # Connect to the local virtual node simulating the "local" qubits
    try:
        logging.debug("LOCAL %s: Connecting to local virtual node.", myName)
        virtual_node = virtualNet.hostDict[myName]
        factory = pb.PBClientFactory()
        reactor.connectTCP(virtual_node.hostname, virtual_node.port, factory)
        deferVirtual = factory.getRootObject()
        dList.append(deferVirtual)

        deferList = DeferredList(dList, consumeErrors=True)
        deferList.addCallback(init_register, myName, cqc_factory)
        deferList.addErrback(localError)
        reactor.run()
    except Exception as e:
        logging.error("LOCAL %s: Cannot connect to SimulaQron backend.",
                      myName)
        return
def cache_customers(path=None):
    """
    Make sure identities of all customers we know are cached.
    """
    dl = []
    if path is None:
        path = settings.CustomerIDsFilename()
    lst = bpio._read_list(path) or []
    for one_customer_idurl in lst:
        if one_customer_idurl:
            if not id_url.is_cached(one_customer_idurl):
                dl.append(
                    identitycache.immediatelyCaching(one_customer_idurl))
    if _Debug:
        lg.out(_DebugLevel,
               'contactsdb.cache_customers prepared %d idurls to be cached'
               % len(dl))
    return DeferredList(dl, consumeErrors=True)
def _do_identity_cache(ret):
    all_stories = []
    for _supplier_idurl in ret['suppliers']:
        if _supplier_idurl:
            _supplier_idurl = id_url.to_bin(_supplier_idurl)
            if not id_url.is_cached(_supplier_idurl) or \
                    not identitycache.HasFile(_supplier_idurl):
                one_supplier_story = identitycache.immediatelyCaching(
                    _supplier_idurl)
                if _Debug:
                    one_supplier_story.addErrback(
                        lg.errback, debug=_Debug, debug_level=_DebugLevel,
                        method='read_customer_suppliers._do_identity_cache')
                all_stories.append(one_supplier_story)
    _customer_idurl = id_url.to_bin(ret['customer_idurl'])
    if _customer_idurl and (not id_url.is_cached(_customer_idurl) or
                            not identitycache.HasFile(_customer_idurl)):
        one_customer_story = identitycache.immediatelyCaching(
            _customer_idurl)
        if _Debug:
            one_customer_story.addErrback(
                lg.errback, debug=_Debug, debug_level=_DebugLevel,
                method='read_customer_suppliers._do_identity_cache')
        all_stories.append(one_customer_story)
    if _Debug:
        lg.args(_DebugLevel, all_stories=len(all_stories), ret=ret)
    id_cache_story = DeferredList(all_stories, consumeErrors=True)
    id_cache_story.addCallback(_do_save_customer_suppliers, ret)
    if _Debug:
        id_cache_story.addErrback(
            lg.errback, debug=_Debug, debug_level=_DebugLevel,
            method='read_customer_suppliers._do_identity_cache')
    id_cache_story.addErrback(result.errback)
    return id_cache_story
def add_gui_request(self, infohash, timeout=20, scrape_now=False):
    """
    Public API for adding a GUI request.
    :param infohash: Torrent infohash.
    :param timeout: The timeout to use in the performed requests
    :param scrape_now: Flag whether we want to force scraping immediately
    """
    result = self._torrent_db.getTorrent(
        infohash,
        (u'torrent_id', u'last_tracker_check',
         u'num_seeders', u'num_leechers'),
        False)
    if result is None:
        self._logger.warn(u"torrent info not found, skip. infohash: %s",
                          hexlify(infohash))
        return fail(Failure(RuntimeError("Torrent not found")))

    torrent_id = result[u'torrent_id']
    last_check = result[u'last_tracker_check']
    time_diff = time.time() - last_check
    if time_diff < self._torrent_check_interval and not scrape_now:
        self._logger.debug(
            u"time interval too short, skip GUI request. infohash: %s",
            hexlify(infohash))
        return succeed({"db": {"seeders": result[u'num_seeders'],
                               "leechers": result[u'num_leechers'],
                               "infohash": infohash.encode('hex')}})

    # get torrent's tracker list from DB
    tracker_set = self.get_valid_trackers_of_torrent(torrent_id)
    if not tracker_set:
        self._logger.warn(u"no trackers, skip GUI request. infohash: %s",
                          hexlify(infohash))
        # TODO: add code to handle torrents with no tracker
        return fail(Failure(
            RuntimeError("No trackers available for this torrent")))

    deferred_list = []
    for tracker_url in tracker_set:
        if tracker_url == u'DHT':
            # Create a (fake) DHT session for the lookup
            session = FakeDHTSession(self.tribler_session, infohash,
                                     timeout)
            self._session_list['DHT'].append(session)
            deferred_list.append(session.connect_to_tracker().addCallbacks(
                *self.get_callbacks_for_session(session)))
        elif tracker_url != u'no-DHT':
            session = self._create_session_for_request(tracker_url,
                                                       timeout=timeout)
            session.add_infohash(infohash)
            deferred_list.append(session.connect_to_tracker().addCallbacks(
                *self.get_callbacks_for_session(session)))

    return DeferredList(deferred_list, consumeErrors=True).addCallback(
        lambda res: self.on_gui_request_completed(infohash, res))
def test_stopStartReading(self):
    """
    This test checks transport read state! There are three bits of it:
    1) The transport producer is paused -- transport.reading is False
    2) The transport is about to schedule an OS read, on the next
       reactor iteration -- transport._readScheduled
    3) The OS has a pending asynchronous read on our behalf --
       transport._readScheduledInOS
    If 3) is not implemented, it is possible to trick IOCPReactor into
    scheduling an OS read before the previous one finishes.
    """
    sf = ServerFactory()
    sf.protocol = StopStartReadingProtocol
    sf.ready_d = Deferred()
    sf.stop_d = Deferred()
    p = reactor.listenTCP(0, sf)
    port = p.getHost().port
    cc = ClientCreator(reactor, Protocol)

    def proceed(protos, port):
        log.msg('PROCEEDING WITH THE TESTATHRON')
        self.assert_(protos[0])
        self.assert_(protos[1])
        protos = protos[0][1], protos[1][1]
        protos[0].transport.write(
            'x' * (2 * protos[0].transport.readBufferSize) +
            'y' * (2 * protos[0].transport.readBufferSize))
        return sf.stop_d.addCallback(cleanup, protos, port)

    def cleanup(data, protos, port):
        self.assert_(
            data == 'x' * (2 * protos[0].transport.readBufferSize) +
            'y' * (2 * protos[0].transport.readBufferSize),
            'did not get the right data')
        return DeferredList([
            maybeDeferred(protos[0].transport.loseConnection),
            maybeDeferred(protos[1].transport.loseConnection),
            maybeDeferred(port.stopListening)
        ])

    return (DeferredList([cc.connectTCP('127.0.0.1', port), sf.ready_d])
            .addCallback(proceed, p))
def shutdown(x=None):
    """
    This is a top level method which controls the process of finishing
    the program.

    Calls method ``shutdown()`` in other modules.
    """
    lg.out(2, "shutdowner.shutdown " + str(x))
    from services import driver
    from main import control
    from main import events
    from logs import weblog
    from logs import webtraffic
    from system import tmpfile
    from system import run_upnpc
    from raid import eccmap
    from lib import net_misc
    from updates import git_proc
    from interface import api_jsonrpc_server
    from interface import api_rest_http_server
    from interface import ftp_server
    from userid import my_id
    from crypt import my_keys
    dl = []
    my_keys.shutdown()
    my_id.shutdown()
    ftp_server.shutdown()
    api_jsonrpc_server.shutdown()
    api_rest_http_server.shutdown()
    driver.shutdown()
    eccmap.shutdown()
    run_upnpc.shutdown()
    net_misc.shutdown()
    git_proc.shutdown()
    events.clear_subscribers()
    tmpfile.shutdown()
    control.shutdown()
    weblog.shutdown()
    webtraffic.shutdown()
    survived_automats = list(automat.objects().values())
    for a in survived_automats:
        if a.name != 'shutdowner':
            a.event('shutdown')
    return DeferredList(dl)
def onJoin(self, details, publish_ready=True):
    """
    Called when worker process has joined the node's management realm.
    """
    yield NativeProcessSession.onJoin(self, details)

    procs = [
        # orderly shutdown worker "from inside"
        'shutdown',

        # CPU affinity for this worker process
        'get_cpu_count',
        'get_cpu_affinity',
        'set_cpu_affinity',

        # PYTHONPATH used for this worker
        'get_pythonpath',
        'add_pythonpath',

        # profiling control
        'get_profilers',
        'start_profiler',
        'get_profile',
    ]

    dl = []
    for proc in procs:
        uri = '{}.{}'.format(self._uri_prefix, proc)
        self.log.debug("Registering management API procedure {proc}",
                       proc=uri)
        dl.append(self.register(
            getattr(self, proc), uri,
            options=RegisterOptions(details_arg='details')))

    regs = yield DeferredList(dl)

    self.log.debug("Registered {cnt} management API procedures",
                   cnt=len(regs))

    # setup SIGTERM handler to orderly shutdown the worker
    def shutdown(sig, frame):
        self.log.warn("Native worker received SIGTERM - shutting down ..")
        self.shutdown()
    signal.signal(signal.SIGTERM, shutdown)

    # the worker is ready for work!
    if publish_ready:
        yield self.publish_ready()
def upload(path, agent=None):
    """
    Splits and uploads the file given by path.

    Returns a Deferred that fires with the manifest object.
    """
    total_size = os.path.getsize(path)
    chunk_size = CHUNK_SIZE_IN_MB * 1024 * 1024
    parts = int(math.ceil(float(total_size) / float(chunk_size)))
    logger.info("%d parts" % parts)
    uid = 0
    offset = 0
    final_data = []
    with open(path, 'rb') as f:
        while offset < total_size:
            tasks = []
            queued_tasks = 0
            while queued_tasks < CONNECTION_COUNT and offset < total_size:
                if offset + chunk_size <= total_size:
                    length = chunk_size
                else:
                    length = total_size - offset
                deferred_data = read_chunk(f, length)
                data, shasum = yield deferred_data
                d = upload_part(data, shasum, uid, offset, length,
                                agent=agent)
                queued_tasks += 1
                tasks.append(d)
                offset += length
                uid += 1
            final_data += yield DeferredList(tasks)
    for (r, res) in final_data:
        if r is not True:
            returnValue(res)
    returnValue([b for (a, b) in final_data])
def onJoin(self, details):
    self.log.info('HostMonitor connected (monitors available: {monitors})',
                  monitors=sorted(MONITORS.keys()))

    yield WorkerController.onJoin(self, details, publish_ready=False)

    # register monitor procedures
    dl = []
    for monitor in self._monitors.values():
        d = self.register(monitor.get,
                          u'{}.get_{}'.format(self._prefix, monitor.ID))
        dl.append(d)
    res = yield DeferredList(dl, fireOnOneErrback=True)
    print(res)
    self.log.info('HostMonitor {pcnt} procedures registered',
                  pcnt=len(res))

    # signal this worker is done with setup and ready
    yield self.publish_ready()
def health_check(services_list=None):
    if not services_list:
        services_list = list(reversed(boot_up_order()))
    if _Debug:
        lg.out(_DebugLevel,
               'driver.health_check with %d services' % len(services_list))
    dl = []
    for name in services_list:
        svc = services().get(name, None)
        if not svc:
            continue
        service_health = svc.health_check()
        if isinstance(service_health, Deferred):
            dl.append(service_health)
        else:
            d = Deferred()
            d.callback(bool(service_health))
            dl.append(d)
    health_result = DeferredList(dl, consumeErrors=True)
    return health_result
def testSend(self):
    deferreds = []

    # Just test functionality.
    d = self.vortexClient.send(Payload())
    deferreds.append(d)

    d = self.vortexClient.send([Payload(), Payload()])
    deferreds.append(d)

    d = self.vortexClient.sendVortexMsg(Payload().toVortexMsg())
    deferreds.append(d)

    d = self.vortexClient.sendVortexMsg(
        [Payload().toVortexMsg(), Payload().toVortexMsg()])
    deferreds.append(d)

    return DeferredList(deferreds)
def open(self):
    """
    Resolves hostnames, opens socket. Callbacks when done.
    """
    def one_resolved(ip, host):
        for i in xrange(self.addresses_len):
            if self.addresses[i][0] == host:
                self.addresses[i] = (ip, self.addresses[i][1])

    def all_resolved(_):
        self._listener = reactor.listenUDP(0, self)

    self.addresses_len = len(self.addresses)
    deferreds = [
        reactor.resolve(host).addCallback(one_resolved, host)
        for (host, _) in self.addresses
    ]
    return DeferredList(
        deferreds, fireOnOneErrback=True).addCallback(all_resolved)
def fireSystemEvent(self, eventType):
    """See twisted.internet.interfaces.IReactorCore.fireSystemEvent.
    """
    sysEvtTriggers = self._eventTriggers.get(eventType)
    if not sysEvtTriggers:
        return
    defrList = []
    for callable, args, kw in sysEvtTriggers[0]:
        try:
            d = callable(*args, **kw)
        except:
            log.deferr()
        else:
            if isinstance(d, Deferred):
                defrList.append(d)
    if defrList:
        DeferredList(defrList).addBoth(self._cbContinueSystemEvent,
                                       eventType)
    else:
        self.callLater(0, self._continueSystemEvent, eventType)
def test_robotstxt(self):
    middleware = RobotsTxtMiddleware(self._get_successful_crawler())
    return DeferredList(
        [
            self.assertNotIgnored(Request("http://site.local/allowed"),
                                  middleware),
            self.assertIgnored(Request("http://site.local/admin/main"),
                               middleware),
            self.assertIgnored(Request("http://site.local/static/"),
                               middleware),
            self.assertIgnored(
                Request("http://site.local/wiki/K%C3%A4ytt%C3%A4j%C3%A4:"),
                middleware,
            ),
            self.assertIgnored(Request("http://site.local/wiki/Käyttäjä:"),
                               middleware),
        ],
        fireOnOneErrback=True,
    )
def TestInternetConnection(remote_hosts=None, timeout=10):
    """
    """
    if remote_hosts is None:
        remote_hosts = []
        from userid import known_servers
        for host, ports in known_servers.by_host().items():
            remote_hosts.append('http://%s:%d' % (host, ports[0], ))
    random.shuffle(remote_hosts)
    dl = []
    for host in remote_hosts[:5]:
        dl.append(getPageTwisted(host, timeout=timeout))
    return DeferredList(dl, fireOnOneCallback=True, fireOnOneErrback=False,
                        consumeErrors=True)
def _receive(self):
    """
    Processes all of the messages currently in the receive queue.
    """
    log.msg('recv %s' % ''.join(map(str, self._queue)))
    ds = []
    try:
        for x in self._queue:
            ds.append(self.messageReceived(x))
        # Only return deferred if necessary. If we return deferred
        # from, for example, a Startup message, the client will disconnect
        # as it expects us to read its entire message.
        ds = [d for d in ds if d]
        if ds:
            return DeferredList(ds)
    finally:
        self._queue = []
def computeFileHashes(files):
    # Request the hash for all of the files
    # - The thread pool size will limit how many files are run in parallel
    # - The result of hashfile will be a tuple of (filepath, hash)
    hash_deferreds = [deferToThread(hashfile, fpath) for fpath in files]
    progress_printer = progressPrinter(hash_deferreds,
                                       'Generating Hashes: %d/%d %.2f%%')
    for deferred in hash_deferreds:
        deferred.addCallback(progress_printer)
    results = yield DeferredList(hash_deferreds, fireOnOneErrback=True)
    results = [val for status, val in results if status]

    # Convert to map (hash -> [fpath ...])
    hash_to_matching_files = defaultdict(list)
    for fpath, file_hash in results:
        hash_to_matching_files[file_hash].append(fpath)
    returnValue(hash_to_matching_files)
def storage_nodes(reactor, temp_dir, introducer, introducer_furl,
                  flog_gatherer, request):
    nodes_d = []
    # start all 5 nodes in parallel
    for x in range(5):
        name = 'node{}'.format(x)
        web_port = 9990 + x
        nodes_d.append(
            _create_node(
                reactor, request, temp_dir, introducer_furl, flog_gatherer,
                name,
                web_port="tcp:{}:interface=localhost".format(web_port),
                storage=True,
            )
        )
    nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))

    nodes = []
    for ok, process in nodes_status:
        assert ok, "Storage node creation failed: {}".format(process)
        nodes.append(process)
    return nodes
def doSetDown(self, arg):
    """
    Action method.
    """
    lg.out(4, "id_server.doSetDown")
    shutlist = []
    if self.web_listener:
        d = self.web_listener.stopListening()
        if d:
            shutlist.append(d)
        lg.out(4, "    stopped web listener")
    if self.tcp_listener:
        d = self.tcp_listener.stopListening()
        if d:
            shutlist.append(d)
        lg.out(4, "    stopped TCP listener")
    self.web_listener = None
    self.tcp_listener = None
    DeferredList(shutlist).addBoth(lambda x: self.automat('server-down'))
def onJoin(self, details):
    def got(res, started, msg):
        duration = 1000. * (time.clock() - started)
        print("{}: {} in {}".format(msg, res, duration))

    t1 = time.clock()
    d1 = self.call('com.math.slowsquare', 3)
    d1.addCallback(got, t1, "Slow Square")

    t2 = time.clock()
    d2 = self.call('com.math.square', 3)
    d2.addCallback(got, t2, "Quick Square")

    def done(_):
        print("All finished.")
        self.leave()

    DeferredList([d1, d2]).addBoth(done)
def testConnect(self):
    mutable = []

    def gotConnection(conn):
        mutable.append(conn)

    def gotAll(null):
        prevItem = mutable.pop()
        while mutable:
            thisItem = mutable.pop()
            self.failUnlessEqual(thisItem, prevItem)
            prevItem = thisItem

    d1 = self.broker.connect().addCallback(gotConnection)
    d2 = self.broker.connect().addCallback(gotConnection)
    d3 = deferToDelay(None, DELAY)
    d3.addCallback(lambda _: self.broker.connect())
    d3.addCallback(gotConnection)
    return DeferredList([d1, d2, d3]).addCallback(gotAll)
def test_rest_error(crossbar, request, rest_crossbar):
    """
    an RPC call that raises an error
    """
    session = yield functest_session(
        url=u"ws://localhost:8686",
        realm=u'some_realm',
        role="role0",
    )

    def sad_method(*args, **kw):
        raise RuntimeError("sadness")
    reg = yield session.register(sad_method, u'sad.method')
    request.addfinalizer(lambda: reg.unregister())

    body = {
        u"procedure": u"sad.method",
    }
    r = treq.post(
        "http://localhost:8585/",
        json.dumps(body).encode('utf8'),
        headers={'Content-Type': ['application/json']},
    )
    timeout = sleep(5)
    results = yield DeferredList([r, timeout],
                                 fireOnOneCallback=True,
                                 fireOnOneErrback=True)
    r = results[0]

    # the HTTP "call" succeeds...
    assert r.code >= 200 and r.code < 300
    data = yield r.content()
    data = json.loads(data)

    # ...but there's an error key
    assert 'error' in data
    assert 'args' in data
    assert data['error'] == 'wamp.error.runtime_error'
    assert data['args'] == ['sadness']
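# Sketch (not part of the test above) of why `r = results[0]` yields the
# HTTP response: with fireOnOneCallback=True a DeferredList fires with a
# (result, index) two-tuple for the first deferred that succeeds, so
# index 0 holds that deferred's result and index 1 tells you which
# deferred won the race -- here, the POST against the 5-second timeout.
from twisted.internet.defer import Deferred, DeferredList, succeed

def demo_race():
    slow = Deferred()  # stands in for the timeout; never fires here
    dl = DeferredList([succeed("winner"), slow], fireOnOneCallback=True)

    def report(outcome):
        result, index = outcome
        return result  # "winner", from the deferred at index 0

    return dl.addCallback(report)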
def test_insertAndDelete(self):
    items = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4}

    def first(null):
        return self.i.delete('c').addCallback(second)

    def second(null):
        return self.i.names().addCallback(third)

    def third(nameList):
        desiredList = [x for x in items.keys() if x != 'c']
        desiredList.sort()
        nameList.sort()
        self.failUnlessEqual(nameList, desiredList)

    dL = []
    for name, value in items.iteritems():
        dL.append(self.i.insert(name, value))
    return DeferredList(dL).addCallback(first)
def _do_populate_tags(clients):
    """Send RPC calls to each rack controller, requesting evaluation of
    tags.

    :param clients: List of connected rack controllers on which
        EvaluateTag will be called.
    """

    def call_client(client_info):
        client = client_info["client"]
        return client(
            EvaluateTag,
            system_id=client_info["system_id"],
            tag_name=client_info["tag_name"],
            tag_definition=client_info["tag_definition"],
            tag_nsmap=client_info["tag_nsmap"],
            credentials=client_info["credentials"],
            nodes=client_info["nodes"],
        )

    def check_results(results):
        for client_info, (success, result) in zip(clients, results):
            if success:
                maaslog.info(
                    "Tag %s (%s) evaluated on rack controller %s (%s)",
                    client_info["tag_name"],
                    client_info["tag_definition"],
                    client_info["hostname"],
                    client_info["system_id"],
                )
            else:
                maaslog.error(
                    "Tag %s (%s) could not be evaluated on rack controller "
                    "%s (%s): %s",
                    client_info["tag_name"],
                    client_info["tag_definition"],
                    client_info["hostname"],
                    client_info["system_id"],
                    result.getErrorMessage(),
                )

    d = DeferredList(
        (call_client(client_info) for client_info in clients),
        consumeErrors=True,
    )
    d.addCallback(check_results)
    d.addErrback(log.err)

    # Do *not* return a Deferred; the caller is not meant to wait around
    # for this to finish. However, for the sake of testing, we return the
    # Deferred wrapped up in a list so that crochet does not block.
    return [d]
def run(self):
    client = txcloudstack.Client(
        self._url, self._api_key, self._secret_key)

    deferreds = []
    if self._collect_events:
        # Prevent multiple simultaneous calls to the same API.
        lock = open(self._temp_filename('events.lock'), 'w')
        fcntl.flock(lock.fileno(), fcntl.LOCK_EX)

        # Go back in time to compensate for downtime and timezone
        # variance between poller and cloud.
        startdate = datetime.date.today() - datetime.timedelta(hours=1)

        deferreds.extend((
            client.listAlerts(),
            client.listEvents(startdate=startdate.strftime('%Y-%m-%d')),
        ))
    else:
        # Prevent multiple simultaneous calls to the same API.
        lock = open(self._temp_filename('values.lock'), 'w')
        fcntl.flock(lock.fileno(), fcntl.LOCK_EX)

        saved_values = self._saved_values()
        if saved_values is not None:
            self._values = saved_values
            self._print_output()
            return

        deferreds.extend((
            client.listCapacity(),
            client.listHosts(type="Routing"),
            client.listSystemVms(),
            client.listVirtualMachines(domainid='1', isrecursive=True,
                                       state="Running"),
        ))

    DeferredList(deferreds, consumeErrors=True).addCallback(self._callback)
    reactor.run()
def _send_batch(self):
    """
    Send the waiting messages, if there are any, and we can...

    This is called by our LoopingCall every send_every_t interval, and
    from send_messages everytime we have enough messages to send.
    This is also called from py:method:`send_messages` via
    py:method:`_check_send_batch` if there are enough messages/bytes
    to require a send.
    Note, the send will be delayed (triggered by completion or failure of
    previous) if we are currently trying to complete the last batch send.
    """
    # We can be triggered by the LoopingCall, and have nothing to send...
    # Or, we've got SendRequest(s) to send, but are still processing the
    # previous batch...
    if (not self._batch_reqs) or self._batch_send_d:
        return

    # Save a local copy, and clear the global list & metrics
    requests, self._batch_reqs = self._batch_reqs, []
    self._waitingByteCount = 0
    self._waitingMsgCount = 0

    # Iterate over them, fetching the partition for each message batch
    d_list = []
    for req in requests:
        # For each request, we get the topic & key and use that to lookup
        # the next partition on which we should produce
        d_list.append(self._next_partition(req.topic, req.key))
    d = self._batch_send_d = Deferred()
    # Since DeferredList doesn't propagate cancel() calls to deferreds it
    # might be waiting on for a result, we need to use this structure,
    # rather than just using the DeferredList directly
    d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True))
    d.addCallback(self._send_requests, requests)
    # Once we finish fully processing the current batch, clear the
    # _batch_send_d and check if any more requests piled up when we
    # were busy.
    d.addBoth(self._complete_batch_send)
    d.addBoth(self._check_send_batch)
    # Fire off the callback to start processing...
    d.callback(None)
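# Sketch (hypothetical, heavily simplified) of the structure used in
# _send_batch() above: an outer Deferred is fired manually and its
# callback chain builds the DeferredList, so the rest of the chain runs
# once every per-request deferred has a result, while callers hold a
# single handle to the whole batch rather than the DeferredList itself.
from twisted.internet.defer import Deferred, DeferredList

def batch_then(pending, on_results):
    # `pending`: list of in-flight deferreds; `on_results` receives the
    # usual list of (success, result) tuples.
    d = Deferred()
    d.addCallback(lambda _: DeferredList(pending, consumeErrors=True))
    d.addCallback(on_results)
    d.callback(None)  # kick off the chain
    return d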
class Channels(PythonPlugin):
    """Twitch Channels modeler plugin."""

    relname = 'twitchChannels'
    modname = 'ZenPacks.training.Twitch.TwitchChannel'

    requiredProperties = ('zTwitchChannels', 'zAuthorizationToken',
                          'zClientID',)

    deviceProperties = PythonPlugin.deviceProperties + requiredProperties

    @inlineCallbacks
    def collect(self, device, log):
        """Asynchronously collect data from device. Return a deferred."""
        self.contextfactory = WebClientContextFactory()
        self.agent = Agent(reactor, self.contextfactory)

        log.info("%s: collecting data", device.id)

        channels = getattr(device, 'zTwitchChannels', None)
        auth_token = getattr(device, 'zAuthorizationToken', None)
        client_id = getattr(device, 'zClientID', None)
        headers = {'Client-ID': [client_id],
                   'Authorization': ["Bearer " + auth_token]}

        if not channels:
            log.error("%s: No channels.", device.id)
            returnValue(None)

        responses = []
        for channel in channels:
            try:
                response = yield self.agent.request(
                    "GET",
                    "https://api.twitch.tv/helix/streams?user_login="
                    + channel,
                    Headers(headers))
                responses.append(readBody(response))
            except Exception as e:
                log.error("%s: %s", device.id, e)
                returnValue(None)

        result = DeferredList(responses, consumeErrors=True)
        returnValue(result)
def SendServers():
    """
    My identity file can be stored in different locations, see the
    "sources" field. So I can use different identity servers to store it,
    which is more secure and reliable.

    This method will send my identity file to all my identity servers
    via transport_tcp.
    """
    from transport.tcp import tcp_node
    _, sendfilename = tmpfile.make("propagate", close_fd=True)
    LocalIdentity = my_id.getLocalIdentity()
    bpio.WriteTextFile(sendfilename, LocalIdentity.serialize(as_text=True))
    dlist = []
    for idurl in LocalIdentity.getSources(as_originals=True):
        # sources for our identity are servers we need to send to
        protocol, host, port, filename = nameurl.UrlParse(idurl)
        # TODO: rebuild identity-server logic to be able to send my
        # identity via HTTP POST instead of TCP and get rid of second
        # TCP port at all
        webport, tcpport = known_servers.by_host().get(
            host,
            (
                # by default use "expected" port numbers
                settings.IdentityWebPort(),
                settings.IdentityServerPort(),
            ))
        normalized_address = net_misc.normalize_address((host, int(tcpport)))
        dlist.append(tcp_node.send(
            sendfilename,
            normalized_address,
            'Identity',
            keep_alive=False,
        ))
        if _Debug:
            lg.args(_DebugLevel, normalized_address=normalized_address,
                    filename=filename)
    dl = DeferredList(dlist, consumeErrors=True)
    return dl