def stop(services_list=None):
    """
    Stop services and return a Deferred that fires when all of them stopped.

    :param services_list: optional list of service names to stop; when
        omitted (or empty) all services are stopped in reversed boot-up order.
    :return: Deferred firing once every requested service reported 'stop'.
    :raises ServiceNotFound: when a requested service name is unknown.
    """
    global _StopingDeferred
    global _StartingDeferred
    # A stop is already in flight: return the same deferred to all callers.
    if _StopingDeferred:
        lg.warn('driver.stop already called')
        return _StopingDeferred
    # Refuse to stop while a start sequence is still running.
    if _StartingDeferred:
        d = Deferred()
        d.errback(Exception('currently another service is starting'))
        return d
    # FIX: the original used a mutable default argument (services_list=[])
    # and mutated it with extend(), so the "default" list persisted between
    # calls (and callers' lists were mutated). Rebind a fresh list instead.
    if not services_list:
        services_list = list(reversed(boot_up_order()))
    if _Debug:
        lg.out(_DebugLevel, 'driver.stop with %d services' % len(services_list))
    dl = []
    for name in services_list:
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        # Each service gets its own deferred, fired by its state machine.
        d = Deferred()
        dl.append(d)
        svc.automat('stop', d)
    _StopingDeferred = DeferredList(dl)
    _StopingDeferred.addCallback(on_stopped_all_services)
    return _StopingDeferred
def ensureServices(self):
    """Ensures that services are in their desired state.

    :return: A mapping of service names to their current known state.
    """
    def eb_ensureService(failure, service_name):
        # Only log if it's not the ServiceActionError;
        # ServiceActionError is already logged.
        if failure.check(ServiceActionError) is None:
            maaslog.error(
                "While monitoring service '%s' an error was "
                "encountered: %s", service_name, failure.value)
        # Return the current service state.
        return self._serviceStates[service_name]

    def cb_ensureService(state, service_name):
        # Pair each state with its service name for the final dict build.
        return service_name, state

    def ensureService(service_name):
        # Wraps self.ensureService in error handling. Returns a Deferred.
        # Errors are logged and consumed; the Deferred always fires with a
        # (service-name, state) tuple.
        d = self.ensureService(service_name)
        d.addErrback(eb_ensureService, service_name)
        d.addCallback(cb_ensureService, service_name)
        return d

    def cb_buildResult(results):
        # DeferredList yields (flag, (name, state)) pairs; every entry
        # succeeds because ensureService consumed all errors above.
        return dict(result for _, result in results)

    d = DeferredList(map(ensureService, self._services))
    d.addCallback(cb_buildResult)
    return d
def _checkBodies(self, responses, callback, *params):
    """
    Stream every response body through a PrinterClient and invoke
    *callback* (with *params*) once all bodies have been delivered.
    """
    pending = []
    for status, response in responses:
        body_done = Deferred()
        response.deliverBody(PrinterClient(body_done))
        pending.append(body_done)
    all_done = DeferredList(pending)
    all_done.addCallback(callback, *params)
    return all_done
def start_processes(_):
    # Announce the assignment in the job log before spawning anything.
    logpool.log(
        self.uuid, "internal",
        "Starting work on job %s, assignment of %s tasks." % (
            self.assignment["job"]["title"], len(self.assignment["tasks"])),
    )
    self._before_start()
    logger.debug("%r.start()", self.__class__.__name__)
    try:
        self.start()
        self.start_called = True
        logger.debug("Collecting started deferreds from spawned "
                     "processes")
        if not self.processes:
            # Nothing was spawned: resolve both lifecycle deferreds now.
            logger.warning("No processes have been started, firing deferreds "
                           "immediately.")
            self.started_deferred.callback(None)
            self.stopped_deferred.callback(None)
        else:
            # started_deferred fires once every child process has started.
            logger.debug("Making deferred list for %s started "
                         "processes", len(self.processes))
            processes_deferred = DeferredList(
                [process.started for process in self.processes.values()])
            processes_deferred.addCallback(
                lambda x: self.started_deferred.callback(x))
    except Exception as e:
        # NOTE(review): if the empty-processes branch already fired a
        # deferred before the exception, errback() would raise
        # AlreadyCalledError here — confirm this path cannot occur.
        self.started_deferred.errback(e)
        self.stopped_deferred.errback(e)
def testInactiveDelay(self):
    '''all but one player is inactive. this player should win'''
    # 5 players, 2 seats per table; inactive players are removed after
    # the inactive_delay, so the only acting player must win.
    self.createTourney(players_quota=5, players_min=5, seats_per_game=2,
                       inactive_delay=1000)
    tourney, clients = self.tourney, self.clients
    tourney.changeState(TOURNAMENT_STATE_RUNNING)
    serial = 1
    client_online = None
    table_online = None
    dl = []
    # Each client's `finished` deferred fires when its rank packet arrives.
    for client in clients.itervalues():
        client.registerHandler(PACKET_POKER_TOURNEY_RANK, client.handleRank)
        dl.append(client.finished)
    dl = DeferredList(dl)
    # Find the table seating player `serial`; only this client keeps
    # responding to position packets (stays "active").
    for game_id, game in tourney.id2game.items():
        if serial in game.serial2player:
            table_online = self.service.tables[game_id]
            client_online = clients[serial]
            client_online.registerHandler(client_online.filterPosition,
                                          client_online.handlePosition)
            break
    def checkForRank(res):
        # The only active player must end up as the tournament winner.
        self.assertEquals(tourney.winners[0], client_online.getSerial())
    dl.addCallback(checkForRank)
    table_online.joinPlayer(client_online)
    table_online.update()
    return dl
def _start(self):
    """
    Kick off the startup lookups: the network address, plus the time
    offset when it is not yet known, then hand the combined results to
    ``_startCallback``.

    :return: the DeferredList, so callers may chain on startup completion.
        (FIX: the deferred was previously created and silently discarded;
        callers that ignore the return value are unaffected.)
    """
    deferreds = [self.getNetworkAddress()]
    if self.time_offset is None:
        deferreds.append(self.getTimeOffset())
    # consumeErrors=True: individual lookup failures are reported inside
    # the (success, result) pairs rather than as unhandled errors.
    d = DeferredList(deferreds, consumeErrors=True)
    d.addCallback(self._startCallback)
    return d
def subscribe(self):
    """
    Subscribe to the three demo topics at QoS 2, logging each grant or
    failure, and return a DeferredList that fires when all subscriptions
    have completed.
    """
    def _logFailure(failure):
        log.debug("reported {message}", message=failure.getErrorMessage())
        return failure

    def _logGrantedQoS(value):
        log.debug("response {value!r}", value=value)
        return True

    def _logAll(*args):
        log.debug("all subscriptions complete args={args!r}", args=args)

    subscriptions = []
    for topic in ("foo/bar/baz1", "foo/bar/baz2", "foo/bar/baz3"):
        subscription = self.protocol.subscribe(topic, 2)
        subscription.addCallbacks(_logGrantedQoS, _logFailure)
        subscriptions.append(subscription)

    dlist = DeferredList(subscriptions, consumeErrors=True)
    dlist.addCallback(_logAll)
    return dlist
def add_folders(self, paths):
    """
    Validate each dropped/selected path and create a magic-folder for
    every directory that is not already joined; restart the gateway once
    all create tasks settle.
    """
    paths_to_add = []
    for path in paths:
        basename = os.path.basename(os.path.normpath(path))
        if not os.path.isdir(path):
            # Only directories can be synced; reject individual files.
            QMessageBox.critical(
                self,
                "Cannot add {}.".format(basename),
                "Cannot add '{}'.\n\n{} currently only supports uploading "
                "and syncing folders, and not individual files. Please "
                "try again.".format(basename, APP_NAME))
        elif self.gateway.magic_folder_exists(basename):
            # Folder names must be unique per gateway.
            QMessageBox.critical(
                self,
                "Folder already exists",
                'You already belong to a folder named "{}" on {}. Please '
                'rename it and try again.'.format(basename,
                                                  self.gateway.name))
        else:
            paths_to_add.append(path)
    if paths_to_add:
        self.hide_drop_label()
        tasks = []
        for path in paths_to_add:
            self.model().add_folder(path)
            tasks.append(self.gateway.create_magic_folder(path))
        d = DeferredList(tasks)
        d.addCallback(self.maybe_restart_gateway)
def recordVideoProcess(self, resW, resH, totalTimeSec, framerate, serverIP, piName, recordTimesList, file):
    """
    Schedule one takeVideo job per start/stop time pair in
    recordTimesList, serialized through a one-slot DeferredSemaphore,
    then collect the results and stop the reactor.
    """
    # Size-1 semaphore: only one takeVideo job runs at a time.
    semi = DeferredSemaphore(1)
    jobs = []
    # recordTimesList holds pairs of times, hence len/2 iterations;
    # each iteration consumes two entries via pop(0).
    for runs in range(len(recordTimesList)/2):
        print "recordTimes recordVideoProcess:", recordTimesList
        self.writeFile("recordTimes recordVideoProcess:")
        try:
            startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
            jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),\
                int(framerate), startAtTime, serverIP, piName, file))
        except:
            # Invalid time pair: drop a leftover odd entry (if any),
            # notify the client, then move on to the next pair.
            self.writeFile("That time was not valid. Calling next time.")
            self.writeFile("len recordTimesList: " + str(len(recordTimesList)))
            if len(recordTimesList)%2>0:
                self.writeFile("odd number")
                recordTimesList.pop(0)
                self.writeFile("new len: " + str(len(recordTimesList)))
            reactor.callLater(0.5, self.transport.write, "TIMEINPUTERROR {0}\n".format(piName))
            continue
    jobs = DeferredList(jobs)
    print "Results: ", jobs.addCallback(self.getResults, piName)
    # self.writeFile("Results: ", jobs.addCallback(self.getResults, piName))
    # Give getResults a moment to finish before the reactor shuts down.
    jobs.addCallback(lambda _: reactor.callLater(5, reactor.stop))
def test_across_frontends_and_topics(self):
    """This tests across topics simultaniously and hits all the frontends.

    Note that this test can take a bit of time if the servers are logging
    to stdout.
    """
    servers = {'current': 0}
    def NextServer():
        # Round-robin over the configured frontend servers.
        servers['current'] = (servers['current'] + 1) % len(self._servers)
        return self._servers[servers['current']]
    deferreds = []
    # One concurrent example run per distinct topic.
    for i in xrange(50):
        topic = str(i)
        deferreds.append(self._RunExampleTest(NextServer, topic=topic))
    dl = DeferredList(deferreds)
    # There is probably a better way to use DeferredLists to propagate errors,
    # but I did not come across it.
    def ProcessDeferredList(args):
        # args is a list of (success, result) pairs; re-raise the first
        # failure so the test framework reports it.
        for success, error in args:
            if not success:
                raise error
    dl.addCallback(ProcessDeferredList)
    return dl
def _storeData(self, data, request_hash, confirm_cache_write, http_history=None):
    """
    Persist a fetched HTTP response (and derived cache headers) to redis.

    Tracks up to the 10 most recent content-change timestamps using the
    response's content-sha1, honors the origin's no-cache directive, and
    copies expires/etag/last-modified/content-type into the cache record.

    :return: the DeferredList for the two redis writes when
        confirm_cache_write is set, otherwise the original data dict.
    """
    if len(data["response"]) == 0:
        return self._storeDataErrback(Failure(exc_value=Exception("Response data is of length 0")), data, request_hash)
    #data["content-sha1"] = sha1(data["response"]).hexdigest()
    if http_history is None:
        http_history = {}
    if "content-sha1" not in http_history:
        http_history["content-sha1"] = data["content-sha1"]
    if "content-changes" not in http_history:
        http_history["content-changes"] = []
    # A changed body hash marks a content change; record when it happened.
    if data["content-sha1"] != http_history["content-sha1"]:
        http_history["content-changes"].append(str(int(self.time_offset + time.time())))
    # Keep only the 10 most recent change timestamps.
    http_history["content-changes"] = http_history["content-changes"][-10:]
    headers = {}
    # NOTE(review): filter() is used where a list is expected afterwards,
    # which implies Python 2 — on Python 3 this stores an iterator in
    # http_history; confirm the target interpreter.
    http_history["content-changes"] = filter(lambda x:len(x) > 0, http_history["content-changes"])
    headers["content-changes"] = ",".join(http_history["content-changes"])
    headers["content-sha1"] = data["content-sha1"]
    # Respect the origin's no-cache directive: return without caching.
    if "cache-control" in data["headers"]:
        if isinstance(data["headers"]["cache-control"], (list, tuple)):
            if "no-cache" in data["headers"]["cache-control"][0]:
                return data
        else:
            if "no-cache" in data["headers"]["cache-control"]:
                return data
    # Header values may arrive as scalars or one-element sequences.
    if "expires" in data["headers"]:
        if isinstance(data["headers"]["expires"], (list, tuple)):
            headers["cache-expires"] = data["headers"]["expires"][0]
        else:
            headers["cache-expires"] = data["headers"]["expires"]
    if "etag" in data["headers"]:
        if isinstance(data["headers"]["etag"], (list, tuple)):
            headers["cache-etag"] = data["headers"]["etag"][0]
        else:
            headers["cache-etag"] = data["headers"]["etag"]
    if "last-modified" in data["headers"]:
        if isinstance(data["headers"]["last-modified"], (list, tuple)):
            headers["cache-last-modified"] = data["headers"]["last-modified"][0]
        else:
            headers["cache-last-modified"] = data["headers"]["last-modified"]
    if "content-type" in data["headers"]:
        if isinstance(data["headers"]["content-type"], (list, tuple)):
            headers["content_type"] = data["headers"]["content-type"][0]
        else:
            headers["content_type"] = data["headers"]["content-type"]
    headers_key = 'headers:%s' % request_hash
    http_key = 'http:%s' % request_hash
    logger.debug("Writing data for request %s to redis." % request_hash)
    deferreds = []
    deferreds.append(self.redis_client.set(headers_key, compress(json.dumps(headers), 1)))
    deferreds.append(self.redis_client.set(http_key, compress(json.dumps(data["response"]), 1)))
    d = DeferredList(deferreds, consumeErrors=True)
    if confirm_cache_write:
        d.addCallback(self._storeDataCallback, data)
        d.addErrback(self._storeDataErrback, data, request_hash)
        return d
    return data
def doBackupKeys(self, *args, **kwargs):
    """
    Action method.

    Uploads any locally-known keys that are missing from the backup,
    handles pending renames (upload under the new ID, schedule the old ID
    for erasure once both copies exist), then fires 'backup-ok' when all
    uploads have completed.
    """
    for old_key_id in list(self.keys_to_rename.keys()):
        new_key_id, is_private = self.keys_to_rename[old_key_id]
        # Renamed key not yet stored under its new ID: upload it.
        if old_key_id in self.stored_keys and new_key_id not in self.stored_keys:
            self.keys_to_upload.add(new_key_id)
        # Both IDs stored: the old copy is redundant and can be erased.
        if new_key_id in self.stored_keys and old_key_id in self.stored_keys:
            self.keys_to_erase[old_key_id] = is_private
    for key_id in my_keys.known_keys().keys():
        if key_id not in self.stored_keys or key_id in self.not_stored_keys:
            self.keys_to_upload.add(key_id)
    keys_saved = []
    for key_id in self.keys_to_upload:
        res = key_ring.do_backup_key(key_id)
        keys_saved.append(res)
        self.saved_count += 1
    if _Debug:
        lg.args(_DebugLevel, keys_saved=len(keys_saved))
    # Wait for every upload (no early abort); individual failures are
    # carried inside the result list passed to 'backup-ok'.
    wait_all_saved = DeferredList(keys_saved, fireOnOneErrback=False, consumeErrors=True)
    wait_all_saved.addCallback(lambda ok: self.automat('backup-ok', ok))
    wait_all_saved.addErrback(lambda err: self.automat('error', err))
def confirm_unlink(self, folders):
    """
    Ask the user to confirm permanent removal of *folders*; on Yes,
    unlink each folder from the rootcap, drop its row from the model,
    and rescan the rootcap once all unlink tasks settle.
    """
    humanized_folders = humanized_list(folders, "folders")
    title = "Permanently remove {}?".format(humanized_folders)
    if len(folders) == 1:
        text = ("Are you sure you wish to <b>permanently</b> remove the "
                "'{}' folder? If you do, it will be unlinked from your "
                "rootcap and cannot be restored with your Recovery Key.".
                format(folders[0]))
    else:
        text = ("Are you sure you wish to <b>permanently</b> remove {}? "
                "If you do, they will be unlinked from your rootcap and "
                "cannot be restored with your Recovery Key.".format(
                    humanized_folders))
    reply = QMessageBox.question(self, title, text,
                                 QMessageBox.Yes | QMessageBox.No,
                                 QMessageBox.No)
    if reply == QMessageBox.Yes:
        tasks = []
        for folder in folders:
            d = self.gateway.unlink_magic_folder_from_rootcap(folder)
            d.addErrback(self.show_failure)
            tasks.append(d)
            # The row is removed immediately; unlink failures surface
            # via show_failure above.
            self.model().removeRow(self.model().findItems(folder)[0].row())
        d = DeferredList(tasks)
        d.addCallback(lambda _: self.model().monitor.scan_rootcap())
        d.addCallback(self.show_drop_label)
def _observe_nodes(self, nodes):
    """
    Observe every discovered node via self.observe_method and fire
    _on_all_nodes_observed once all observations have settled.
    """
    if self.stopped:
        lg.warn('DiscoveryTask[%r] : discovery process already stopped' % self.id)
        return
    if _Debug:
        lg.out(
            _DebugLevel, 'lookup.DiscoveryTask[%r]._observe_nodes started for %d items layer_id=%d' % (
                self.id, len(nodes), self.layer_id,
            ))
    observe_list = []
    for node in nodes:
        d = self.observe_method(node, layer_id=self.layer_id)
        # Per-node success/failure is handled individually before the
        # results reach the aggregate DeferredList below.
        d.addCallback(self._on_node_observed, node)
        d.addErrback(self._on_node_observe_failed, node)
        observe_list.append(d)
    self.observed_count = len(nodes)
    dl = DeferredList(observe_list, consumeErrors=False)
    dl.addCallback(self._on_all_nodes_observed)
    if _Debug:
        dl.addErrback(lg.errback, debug=_Debug, debug_level=_DebugLevel, method='DiscoveryTask._observe_nodes')
def client_connected(protocol):
    """
    Run one Echo, Ping, Add and Multiply RPC against the connected
    protocol, printing every response, and return a DeferredList that
    triggers client_finished once all four calls complete.
    """
    proxy = Proxy(Test_Stub(protocol), Math_Stub(protocol))

    echo_request = EchoRequest()
    echo_request.text = "Hello world!"
    echoed = proxy.Test.Echo(echo_request)
    echoed.addCallback(print_response)

    ping_request = PingRequest()
    pinged = proxy.Test.Ping(ping_request)
    pinged.addCallback(print_response)

    math_request = MathBinaryOperationRequest()
    math_request.first = 2
    math_request.second = 2
    mathAddd = proxy.Math.Add(math_request)
    mathAddd.addCallback(print_response)
    mathMultiplyd = proxy.Math.Multiply(math_request)
    mathMultiplyd.addCallback(print_response)

    dl = DeferredList([echoed, pinged, mathAddd, mathMultiplyd])
    dl.addCallback(client_finished)
    return dl
def handle_nodes(result, media_id, owner_username):
    """
    Fan out per-node storage work for *media_id*.

    ``result`` is a (status, payload) pair from the previous callback:
    a non-zero status is an API error; otherwise the payload is the list
    of nodes. For each node the media path is created, the result stored,
    and stale renders cleared.

    NOTE(review): `self` and `store` are resolved from an enclosing
    scope — this function appears to be defined inside a method.

    :return: a DeferredList firing with "success" once all nodes are done.
    """
    if result[0] != 0:
        raise errors.APIError(result[1])
    nodes = result[1]
    dl = []
    for n in nodes:
        d2 = self._make_media_path(media_id, n, owner_username)
        d2.addCallback(store)
        # FIX: bind `n` as a default argument. The original closure
        # captured the loop variable, so callbacks firing after the loop
        # finished would all clear renders for the LAST node only.
        d2.addCallback(lambda _, n=n: self.clear_renders(media_id, owner_username, n))
        dl.append(d2)
    dList = DeferredList(dl)
    dList.addCallback(lambda _: "success")
    return dList
def tearDown(self):
    """Detach the test log handler, then shut down the web server and
    clear the cache before running the shared teardown callback."""
    LOGGER.removeHandler(self.logging_handler)
    shutdown_done = self.mini_web_server.shutdown()
    cache_cleared = self.pg.clearCache()
    cleanup = DeferredList([shutdown_done, cache_cleared])
    cleanup.addCallback(self._tearDownCallback)
    return cleanup
def test_send_two_senders_in_parallel(self, runtime):
    """Test of send a value."""
    self.Zp = GF(
        6277101735386680763835789423176059013767194773182842284081)

    def verify(outcomes):
        # Every received broadcast must decode back to 42.
        for _, received in outcomes:
            self.assertEquals(int(received), 42)
        return outcomes

    value = 42
    receivers = [2, 3]
    # Player 1 broadcasts to [2, 3]; everyone else only listens.
    if runtime.id == 1:
        d1 = runtime.broadcast([1], receivers, str(value))
    else:
        d1 = runtime.broadcast([1], receivers)
    # Player 2 broadcasts to [3] only.
    if runtime.id == 2:
        d2 = runtime.broadcast([2], [3], str(value))
    else:
        d2 = runtime.broadcast([2], [3])
    pending = [d1]
    if d2 != []:
        pending.append(d2)
    dls = DeferredList(pending)
    dls.addCallback(verify)
    return dls
def send_payment_request(self, readTokens, writeTokens):
    """Called by a Circuit object when it wants to actually make a payment

    @param readTokens: the number of read tokens to pay for at each hop in the circuit
    @type readTokens: int
    @param writeTokens: the number of write tokens to pay for at each hop in the circuit
    @type writeTokens: int"""
    # NOTE(review): this asserts that the *quotient* is truthy, i.e. the
    # token sum is at least CELLS_PER_PAYMENT; if the intent was "a whole
    # number of payments" it should test `% ... == 0` — confirm.
    assert (readTokens + writeTokens) / Globals.CELLS_PER_PAYMENT, "tried to pay for bad number of cells"
    #make sure our setup is done:
    if not self.setupDone:
        #have we even started?
        if not self.setupStarted:
            self.send_setup_message()
        # Queue the tokens; they will be paid once setup completes.
        self.queuedReadTokens += readTokens
        self.queuedWriteTokens += writeTokens
        return
    #dont bother trying to send payments for circuits that are already closed
    if self.circ.is_done():
        return
    #send the payments
    deferreds = []
    for paymentStream in self.paymentStreams.values():
        deferreds.append(paymentStream.send_payment(readTokens, writeTokens))
    paymentsDoneDeferred = DeferredList(deferreds)
    paymentsDoneDeferred.addErrback(self.generic_error_handler)
    addTokensDeferred = Deferred()
    self.inflightReadTokens += readTokens
    self.inflightWriteTokens += writeTokens
    #timeout in case the payment fails. We will close the circuit in this case.
    event = Scheduler.schedule_once(PaymentStream.PAR_TIMEOUT, self.all_receipts_received, None, addTokensDeferred, readTokens, writeTokens, None)
    paymentsDoneDeferred.addCallback(self.all_receipts_received, addTokensDeferred, readTokens, writeTokens, event)
    addTokensDeferred.addCallback(self._add_tokens_callback, readTokens, writeTokens)
    addTokensDeferred.addErrback(self.generic_error_handler)
def test_send_two_senders_in_parallel(self, runtime):
    """Test of send a value."""
    self.Zp = GF(6277101735386680763835789423176059013767194773182842284081)

    expected = 42

    def assert_all_expected(results):
        for success, payload in results:
            self.assertEquals(int(payload), expected)
        return results

    receivers = [2, 3]
    # Player 1 sends to [2, 3]; other players only receive.
    d1 = (runtime.broadcast([1], receivers, str(expected))
          if runtime.id == 1
          else runtime.broadcast([1], receivers))
    # Player 2 sends to [3]; other players only receive.
    d2 = (runtime.broadcast([2], [3], str(expected))
          if runtime.id == 2
          else runtime.broadcast([2], [3]))
    pending = [d1]
    if d2 != []:
        pending.append(d2)
    combined = DeferredList(pending)
    combined.addCallback(assert_all_expected)
    return combined
def start(services_list=None):
    """
    Start services and return a Deferred that fires when all of them started.

    :param services_list: optional list of service names to start; when
        omitted (or empty) all services are started in boot-up order.
    :return: Deferred firing once every enabled, not-yet-running service
        reported 'start'; an already-fired Deferred when nothing to start.
    :raises ServiceNotFound: when a requested service name is unknown.
    """
    global _StartingDeferred
    global _StopingDeferred
    # A start is already in flight: return the same deferred to all callers.
    if _StartingDeferred:
        lg.warn('driver.start already called')
        return _StartingDeferred
    # Refuse to start while a stop sequence is still running.
    if _StopingDeferred:
        d = Deferred()
        d.errback(Exception('currently another service is stopping'))
        return d
    # FIX: the original used a mutable default argument (services_list=[])
    # and mutated it with extend(), so the "default" list persisted between
    # calls (and callers' lists were mutated). Rebind a fresh list instead.
    if not services_list:
        services_list = list(boot_up_order())
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.start with %d services' % len(services_list))
    dl = []
    for name in services_list:
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        # Disabled or already-running services are skipped.
        if not svc.enabled():
            continue
        if svc.state == 'ON':
            continue
        d = Deferred()
        dl.append(d)
        svc.automat('start', d)
    if len(dl) == 0:
        return succeed(1)
    _StartingDeferred = DeferredList(dl)
    _StartingDeferred.addCallback(on_started_all_services)
    return _StartingDeferred
def connect(self):
    """
    Connect to every configured redis server (dicts are connection
    kwargs; anything else is treated as an existing connection) and fail
    with CannotObtainLock unless a majority quorum is reachable.
    """
    self.servers = []
    pending = []
    for connection_info in self.connection_list:
        try:
            if type(connection_info) == dict:
                def remember(server):
                    self.servers.append(server)
                    return server
                connection = redis.Connection(**connection_info)
                connection.addCallback(remember)
                pending.append(connection)
            else:
                # Already-constructed connection object: use it directly.
                self.servers.append(connection_info)
        except Exception as e:
            raise Warning(str(e))

    def check_quorum(res):
        # A majority of the configured servers must be reachable.
        self.quorum = (len(self.connection_list) // 2) + 1
        if len(self.servers) < self.quorum:
            raise CannotObtainLock(
                "Failed to connect to the majority of redis servers")
        return res

    dl = DeferredList(pending)
    dl.addCallback(check_quorum)
    return dl
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with the list of (tenantId, groupId, error)
        triples for the groups whose convergence trigger failed
        (empty list when everything succeeded)
    """
    # Cap the number of in-flight convergence calls via a semaphore.
    sem = DeferredSemaphore(concurrency_limit)
    d = DeferredList(
        [sem.run(trigger_convergence, authenticator, region, group,
                 no_error_group)
         for group in groups],
        fireOnOneCallback=False, fireOnOneErrback=False, consumeErrors=True)
    # DeferredList results align 1:1 with `groups`, so zip pairs each
    # group with its (success, failure) outcome; keep only the failures.
    d.addCallback(
        lambda results: [(g["tenantId"], g["groupId"], f.value)
                         for g, (s, f) in zip(groups, results) if not s])
    return d
def tearDown(self):
    """Shut the spider down, then run the shared teardown callback."""
    cleanup = DeferredList([self.spider.shutdown()])
    cleanup.addCallback(self._tearDownCallback)
    return cleanup
def _copyDomainCallback3(self, data, source_domain, destination_domain, total_box_usage=0):
    """
    Parse one page of a SimpleDB Select response, re-put every item into
    the destination domain, then chain to _copyDomainCallback4 with the
    pagination token (if any) and the accumulated box usage.
    """
    xml = ET.fromstring(data["response"])
    # Track AWS "box usage" billing both on the instance and per copy run.
    box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
    self.box_usage += box_usage
    total_box_usage += box_usage
    # NextToken present means there are more pages to fetch.
    next_token_element = xml.find(".//%sNextToken" % SDB_NAMESPACE)
    if next_token_element is not None:
        next_token = next_token_element.text
    else:
        next_token = None
    items = xml.findall(".//%sItem" % SDB_NAMESPACE)
    results = {}
    for item in items:
        key = item.find("./%sName" % SDB_NAMESPACE).text
        attributes = item.findall("%sAttribute" % SDB_NAMESPACE)
        attribute_dict = {}
        # Repeated attribute names accumulate into multi-valued lists.
        for attribute in attributes:
            attr_name = attribute.find("./%sName" % SDB_NAMESPACE).text
            attr_value = attribute.find("./%sValue" % SDB_NAMESPACE).text
            if attr_name in attribute_dict:
                attribute_dict[attr_name].append(attr_value)
            else:
                attribute_dict[attr_name] = [attr_value]
        results[key] = attribute_dict
    deferreds = []
    for key in results:
        d = self.putAttributes(destination_domain, key, results[key])
        d.addErrback(self._copyPutAttributesErrback, destination_domain, key, results[key])
        deferreds.append(d)
    d = DeferredList(deferreds, consumeErrors=True)
    d.addCallback(self._copyDomainCallback4, source_domain, destination_domain, next_token=next_token, total_box_usage=total_box_usage)
    return d
def createJob(self, username, jobSpec):
    """
    Create a job for *username*, dividing the workload evenly across the
    allocated slaves (inlineCallbacks coroutine; resolves to the new job
    number).
    """
    jobNo = self._getJobNo()
    job = JobPerspective(jobNo, jobSpec)
    self.jobs[jobNo] = job
    user = yield UserManager.get(username)
    log.debug("Creating job %d for user %s... connecting slave servers" % (jobNo, user.userSpec.username))
    # allocate a bunch of slaves here
    slaves = yield SlaveAllocator.allocate(jobSpec)
    log.debug("Using slaves: %s" % slaves)
    # divide the client function to spread the load over all slaves in the set
    clientFunctionPerSlave = "(%s)/%s" % (jobSpec.clientFunction, len(slaves))
    transferLimitPerSlave = jobSpec.transferLimit / len(slaves)
    modifiedJobSpec = JobSpec(jobSpec.toJson())
    modifiedJobSpec.clientFunction = clientFunctionPerSlave
    modifiedJobSpec.transferLimit = transferLimitPerSlave
    # NOTE(review): `deferred` is handed to _createJobCallback but its
    # own result is never awaited here — confirm that is intentional.
    deferred = Deferred()
    slaveRequests = []
    for slave in slaves:
        request = slave.createJob(modifiedJobSpec)
        request.addCallback(self._createJobSlaveCallback, slave)
        slaveRequests.append(request)
    deferredList = DeferredList(slaveRequests)
    deferredList.addCallback(self._createJobCallback, jobNo, user, deferred)
    yield deferredList
    returnValue(jobNo)
def run(nodes):
    """
    Read back the JSON values for the configured key range from the DHT
    and verify each key<i>/value<i> pair matches; stops the reactor once
    all lookups settle.

    NOTE(review): `args`, `dht_service`, `reactor` and `time` come from
    module scope — confirm this script defines them before run() fires.
    """
    def callback(*args, **kwargs):
        print('callback', args)
        d = args[0]
        assert len(d) == 1
        k, v = d.popitem()
        # 'keyN' must map to 'valueN' for the same N.
        assert k.replace('key', '') == v.replace('value', '')
    def errback(*args, **kwargs):
        import traceback
        traceback.print_exc()
    def callback_dfl(*args):
        print('callback_dfl', args)
        reactor.stop()
    errback_dfl = errback
    # Give the DHT a moment to settle before reading.
    time.sleep(3)
    try:
        list_of_deffered_set_value = []
        for i in range(args.start, args.end):
            d = dht_service.get_json_value(str(i))
            d.addBoth(callback)
            d.addErrback(errback)
            list_of_deffered_set_value.append(d)
        dfl = DeferredList(list_of_deffered_set_value)
        dfl.addCallback(callback_dfl)
        dfl.addErrback(errback_dfl)
    except Exception as exc:
        print('ERRRORO!!', exc)
        reactor.stop()
def process_cluster_info(self, info, cluster, callback):
    """
    process data received from ganeti.

    Parses the cluster info JSON, updates each known node, then stamps
    all of the cluster's node rows with a fresh cache timestamp.
    """
    print '%s:' % cluster.hostname
    infos = json.loads(info)
    self.timer.tick('info fetched from ganeti ')
    updated = Counter()
    base = cluster.nodes.all()
    # Build hostname -> (db id, mtime as float) for change detection.
    mtimes = base.values_list('hostname', 'id', 'mtime')
    data = {}
    for hostname, id, mtime in mtimes:
        data[hostname] = (id, float(mtime) if mtime else None)
    self.timer.tick('mtimes fetched from db ')
    deferreds = [self.update_node(cluster, info, data, updated) for info in infos]
    deferred_list = DeferredList(deferreds)
    # batch update the cache updated time for all Nodes in this cluster. This
    # will set the last updated time for both Nodes that were modified and for
    # those that weren't. even if it wasn't modified we want the last
    # updated time to be up to date.
    #
    # XXX don't bother checking to see whether this query needs to run. With
    # normal usage it will almost always need to
    def update_timestamps(result):
        print ' updated: %s out of %s' % (updated, len(infos))
        base.update(cached=datetime.now())
        self.timer.tick('records or timestamps updated')
    deferred_list.addCallback(update_timestamps)
    deferred_list.addCallback(callback)
    return deferred_list
def _coordinateCallback2(self, discovered):
    """
    Reconcile the known peer set against freshly *discovered* peers:
    drop vanished peers, register ourselves directly, verify every other
    new peer, then continue coordination via _coordinateCallback3.
    """
    existing_peers = set(self.peers.keys())
    discovered_peers = set(discovered.keys())
    new_peers = discovered_peers - existing_peers
    old_peers = existing_peers - discovered_peers
    for uuid in old_peers:
        LOGGER.debug("Removing peer %s" % uuid)
        if uuid in self.peers:
            del self.peers[uuid]
    deferreds = []
    for uuid in new_peers:
        if uuid == self.uuid:
            # Our own entry: register it locally without a network
            # round-trip.
            self.peers[uuid] = {
                "uri":"http://127.0.0.1:%s" % self.port,
                "local_ip":"127.0.0.1",
                "port":self.port,
                "active":True
            }
        else:
            deferreds.append(self.verifyPeer(uuid, discovered[uuid]))
    if len(new_peers) > 0:
        if len(deferreds) > 0:
            d = DeferredList(deferreds, consumeErrors=True)
            d.addCallback(self._coordinateCallback3)
            return d
        else:
            self._coordinateCallback3(None) #Just found ourself.
    elif len(old_peers) > 0:
        self._coordinateCallback3(None)
    else:
        pass # No old, no new.
def _reportstate_on_nodes(self, deployment):
    """
    Connect to all nodes and run ``flocker-reportstate``.

    :param Deployment deployment: The requested already parsed
        configuration.

    :return: ``Deferred`` that fires with a ``bytes`` in YAML format
        describing the current configuration.
    """
    command = [b"flocker-reportstate"]
    results = []
    for target in self._get_destinations(deployment):
        d = deferToThread(target.node.get_output, command)
        d.addCallback(safe_load)
        # `key=target.hostname` binds the hostname at definition time so
        # each callback keeps its own node instead of the last loop value.
        d.addCallback(lambda val, key=target.hostname: (key, val))
        results.append(d)
    d = DeferredList(results, fireOnOneErrback=False, consumeErrors=True)
    def got_results(node_states):
        # Bail on errors: returning the Failure propagates it down the
        # callback chain as an error.
        for succeeded, value in node_states:
            if not succeeded:
                return value
        return safe_dump(dict(pair for (_, pair) in node_states))
    d.addCallback(got_results)
    return d
def confirm_remove(self, folders):
    """
    Ask the user to confirm removing *folders* from synchronization (the
    files stay on disk); on Yes, remove each folder, drop its row from
    the model, and rescan the rootcap once all removals settle.
    """
    humanized_folders = humanized_list(folders, "folders")
    title = "Remove {}?".format(humanized_folders)
    if len(folders) == 1:
        text = ("Are you sure you wish to remove the '{}' folder? If "
                "you do, it will remain on your computer, however, {} "
                "will no longer synchronize its contents with {}".format(
                    folders[0], APP_NAME, self.gateway.name))
    else:
        text = ("Are you sure you wish to remove {}? If you do, they "
                "will remain on your computer, however, {} will no "
                "longer synchronize their contents with {}.".format(
                    humanized_folders, APP_NAME, self.gateway.name))
    reply = QMessageBox.question(self, title, text,
                                 QMessageBox.Yes | QMessageBox.No,
                                 QMessageBox.No)
    if reply == QMessageBox.Yes:
        tasks = []
        for folder in folders:
            d = self.gateway.remove_magic_folder(folder)
            d.addErrback(self.show_failure)
            tasks.append(d)
            # The row is removed immediately; removal failures surface
            # via show_failure above.
            self.model().removeRow(self.model().findItems(folder)[0].row())
        d = DeferredList(tasks)
        d.addCallback(lambda _: self.model().monitor.scan_rootcap())
        d.addCallback(self.show_drop_label)
def query_status(host, loadbalancer_ips):
    """
    Query *host*'s status through every load balancer IP in parallel and
    feed the combined (success, result) pairs to check_status_responses.
    """
    queries = []
    for lb_ip in loadbalancer_ips:
        queries.append(query_status_from_single_lb(host, lb_ip))
    combined = DeferredList(queries, consumeErrors=True)
    combined.addCallback(check_status_responses)
    return combined
def start():
    """
    Start every enabled, not-yet-running service in boot-up order.

    :return: Deferred firing once all started services reported back; an
        already-fired Deferred when there is nothing to start.
    :raises ServiceNotFound: when a boot-order name is unknown.
    """
    global _StartingDeferred
    # A start sequence is already in flight: share its deferred.
    if _StartingDeferred:
        lg.warn('driver.start already called')
        return _StartingDeferred
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.start')
    pending = []
    for name in boot_up_order():
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        # Skip disabled services and those already running.
        if not svc.enabled() or svc.state == 'ON':
            continue
        d = Deferred()
        pending.append(d)
        svc.automat('start', d)
    if not pending:
        return succeed(1)
    _StartingDeferred = DeferredList(pending)
    _StartingDeferred.addCallback(on_started_all_services)
    return _StartingDeferred
def _stop_(self, **kwargs):
    """
    Stop every queue and fire self.unload_deferred once all of them have
    drained their in-flight jobs.

    NOTE(review): on an unexpected error the traceback is logged but the
    method returns None and unload_deferred never fires — confirm
    callers tolerate that.
    """
    self.unload_deferred = Deferred()
    to_stop = []
    logger.info("Stopping queues. Waiting for in-flight jobs to finish.")
    try:
        for name, queue in self.queues.items():
            to_stop.append(queue.stop())
            # Already-stopped queues need no further handling.
            if queue.stopped is True:
                continue
            # print "stopping queue: %s " % name
            # print queue.size()
            # pending = queue.pending()
            # if len(pending) > 0:
            #     for job in pending:
            #         print "job in queue jobarg: %s" % job.__repr__()
        dl = DeferredList(to_stop)
        dl.addCallback(self.unload_deferred.callback)
        # self.unload_deferred.callback(1)
        return self.unload_deferred
    except Exception as e:
        logger.error(
            "---------------==(Traceback)==--------------------------")
        logger.error("{trace}", trace=traceback.format_exc())
        logger.error(
            "--------------------------------------------------------")
def _stop_(self, **kwargs):
    """
    Stop every queue that is running and has work (queued or in-flight);
    fire self.unload_deferred once they have all drained. Returns None
    when there is nothing to stop.

    NOTE(review): on an unexpected error the traceback is logged but
    unload_deferred never fires — confirm callers tolerate that.
    """
    self.unload_deferred = Deferred()
    to_stop = []
    try:
        for name, queue in self.queues.items():
            queue_size = queue.size()
            if queue.stopped is True:
                print("queue stopped")
                continue
            # Skip queues with neither queued nor in-flight work.
            if queue_size[0] == 0 and queue_size[1] == 0:
                continue
            to_stop.append(queue.stop())
        if len(to_stop) > 0:
            logger.info(
                "Stopping queues. Waiting for in-flight jobs to finish.")
            dl = DeferredList(to_stop)
            dl.addCallback(self.unload_deferred.callback)
            # self.unload_deferred.callback(1)
            return self.unload_deferred
    except Exception as e:
        logger.error(
            "---------------==(Traceback)==--------------------------")
        logger.error("{trace}", trace=traceback.format_exc())
        logger.error(
            "--------------------------------------------------------")
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
    """Like send_catch_log but supports returning deferreds on signal handlers.
    Returns a deferred that gets fired once all signal handlers deferreds were
    fired.
    """
    def logerror(failure, recv):
        # dont_log lets callers suppress logging for expected error types.
        if dont_log is None or not isinstance(failure.value, dont_log):
            log.err(failure, "Error caught on signal handler: %s" % recv, \
                spider=spider)
        return failure
    dont_log = named.pop('dont_log', None)
    spider = named.get('spider', None)
    dfds = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        d = maybeDeferred(robustApply, receiver, signal=signal, sender=sender,
            *arguments, **named)
        d.addErrback(logerror, receiver)
        # FIX: bind `receiver` via a default argument. The original closure
        # captured the loop variable, so any deferred firing after the loop
        # completed would attribute its result to the LAST receiver.
        d.addBoth(lambda result, recv=receiver: (recv, result))
        dfds.append(d)
    d = DeferredList(dfds)
    # Strip the DeferredList success flags, keeping (receiver, result) pairs.
    d.addCallback(lambda out: [x[1] for x in out])
    return d
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
    """Like send_catch_log but supports returning deferreds on signal handlers.
    Returns a deferred that gets fired once all signal handlers deferreds were
    fired.
    """
    def logerror(failure, recv):
        # dont_log lets callers suppress logging for expected error types.
        if dont_log is None or not isinstance(failure.value, dont_log):
            logger.error(
                "Error caught on signal handler: %(receiver)s",
                {"receiver": recv},
                exc_info=failure_to_exc_info(failure),
                extra={"spider": spider},
            )
        return failure
    dont_log = named.pop("dont_log", None)
    spider = named.get("spider", None)
    dfds = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        d = maybeDeferred(robustApply, receiver, signal=signal, sender=sender,
                          *arguments, **named)
        d.addErrback(logerror, receiver)
        # FIX: bind `receiver` via a default argument. The original closure
        # captured the loop variable, so any deferred firing after the loop
        # completed would attribute its result to the LAST receiver.
        d.addBoth(lambda result, recv=receiver: (recv, result))
        dfds.append(d)
    d = DeferredList(dfds)
    # Strip the DeferredList success flags, keeping (receiver, result) pairs.
    d.addCallback(lambda out: [x[1] for x in out])
    return d
def replace_all(self, string):
    """
    Find every URL in *string*, lengthen each one asynchronously, then
    substitute the results back into the text via _replace_all_cb.
    """
    # findall returns group tuples; join each tuple back into a full match.
    matches = [''.join(groups) for groups in self.find_urls.findall(string)]
    lengthened = DeferredList([self.lengthen_url(m) for m in matches])
    lengthened.addCallback(self._replace_all_cb, matches, string)
    return lengthened
def prepare_eth_account_list(account_num):
    """
    Create *account_num* Ethereum accounts concurrently in worker threads.

    :param account_num: number of accounts to create.
    :return: a DeferredList firing with the list of successfully created
        accounts, or None when account_num <= 0 (nothing to do) — matching
        the original behavior.
    """
    eth_account_list = []
    # FIX(cleanup): the original decremented account_num inside the loop and
    # broke when it reached zero, which is exactly `range(account_num)`
    # iterations — the extra bookkeeping was dead logic.
    ds = [deferToThread(prepare_eth_account) for _ in range(account_num)]

    def handle_result(result):
        # result is a list of (success, value) pairs from the DeferredList;
        # failures were consumed (consumeErrors=True) and are logged here.
        for (success, value) in result:
            if success:
                account = value
                print('Creating account success: ', account.address)
                eth_account_list.append(account)
            else:
                print('Creating account failure: ', value.getErrorMessage())
        return eth_account_list

    if ds:
        dl = DeferredList(ds, consumeErrors=True)
        dl.addCallback(handle_result)
        return dl
def confirm_unlink(self, folders):
    """
    Show a Yes/No dialog confirming permanent removal of *folders*; on
    Yes, unlink each folder and rescan the rootcap once all unlink tasks
    settle.
    """
    msgbox = QMessageBox(self)
    msgbox.setIcon(QMessageBox.Question)
    humanized_folders = humanized_list(folders, "folders")
    msgbox.setWindowTitle(
        "Permanently remove {}?".format(humanized_folders))
    if len(folders) == 1:
        msgbox.setText(
            'Are you sure you wish to <b>permanently</b> remove the "{}" '
            "folder?".format(folders[0]))
    else:
        msgbox.setText(
            "Are you sure you wish to <b>permanently</b> remove {}?".
            format(humanized_folders))
    msgbox.setInformativeText(
        "Permanently removed folders cannot be restored with your "
        "Recovery Key.")
    msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
    # Default to No so an accidental Enter does not delete anything.
    msgbox.setDefaultButton(QMessageBox.No)
    if msgbox.exec_() == QMessageBox.Yes:
        tasks = []
        for folder in folders:
            tasks.append(self.unlink_folder(folder))
        d = DeferredList(tasks)
        d.addCallback(self.maybe_rescan_rootcap)
def add_folders(self, paths):
    """Validate *paths* and begin syncing each acceptable directory.

    Paths that are not directories, or whose basename collides with an
    existing magic-folder, are rejected with an error dialog.  The
    remaining paths are added as folders; once every add task finishes,
    the gateway is restarted if needed.
    """
    accepted = []
    for path in paths:
        basename = os.path.basename(os.path.normpath(path))
        if not os.path.isdir(path):
            # Only whole directories can be synced.
            error(
                self,
                'Cannot add "{}".'.format(basename),
                "{} only supports uploading and syncing folders,"
                " and not individual files.".format(APP_NAME),
            )
            continue
        if self.gateway.magic_folder_exists(basename):
            # Folder names must be unique per gateway.
            error(
                self,
                "Folder already exists",
                'You already belong to a folder named "{}" on {}. Please '
                "rename it and try again.".format(
                    basename, self.gateway.name),
            )
            continue
        accepted.append(path)
    if not accepted:
        return
    self.hide_drop_label()
    add_tasks = [self.add_folder(p) for p in accepted]
    done = DeferredList(add_tasks)
    done.addCallback(self.maybe_restart_gateway)
def run(nodes):
    """Write a range of key/value pairs into the DHT, then stop the reactor.

    For every integer i in ``[args.start, args.end)`` a one-entry JSON
    mapping ``{'key<i>': 'value<i>'}`` is stored via
    ``dht_service.set_json_value`` with a one-hour TTL.  Once every write
    completes (or errors), the reactor is stopped.

    Note: ``nodes`` is accepted for interface compatibility but not used
    in this body.
    """
    def callback(*args, **kwargs):
        print('callback', args)
        l = args[0]
        assert len(l) > 0

    def errback(*args, **kwargs):
        import traceback
        traceback.print_exc()

    def callback_dfl(*args):
        print('callback_dfl', args)
        reactor.stop()

    errback_dfl = errback
    # Give the DHT layer a moment to settle before issuing writes.
    time.sleep(3)
    try:
        list_of_deffered_set_value = []
        for i in range(args.start, args.end):
            j = {'key' + str(i): 'value' + str(i), }
            d = dht_service.set_json_value(str(i), j, 60 * 60)
            d.addBoth(callback, j)
            d.addErrback(errback)
            list_of_deffered_set_value.append(d)
        dfl = DeferredList(list_of_deffered_set_value)
        dfl.addCallback(callback_dfl)
        dfl.addErrback(errback_dfl)
    except Exception:
        # BUG FIX: the original bare ``except:`` swallowed even
        # SystemExit/KeyboardInterrupt and printed only a typo'd marker
        # ('ERRRORO!!') with no detail; show the actual traceback before
        # shutting the reactor down.
        import traceback
        traceback.print_exc()
        reactor.stop()
def test_guard_multiple_execution(self):
    """Two overlapping slow_increment calls must share one execution.

    The guard should let the first call run (producing 1) and make the
    second call short-circuit to False, so a third call afterwards only
    sees the counter at 2 rather than 3.
    """
    foo = Bar()
    first = foo.slow_increment()
    second = foo.slow_increment()

    def validate_results(results):
        # First call actually executed and produced 1.
        ok, value = results[0]
        self.assertTrue(ok)
        self.assertEqual(value, 1)
        # Second call was guarded out: it reports False.
        ok, value = results[1]
        self.assertTrue(ok)
        self.assertEqual(value, False)
        # Kick off a third increment now that the guard is released.
        return foo.slow_increment()

    def validate_value(results):
        # if the guard had not prevented execution the value
        # would be 3.
        self.assertEqual(results, 2)

    dlist = DeferredList([first, second])
    dlist.addCallback(validate_results)
    dlist.addCallback(validate_value)
    return dlist
def client_connected(protocol):
    """Exercise the Test and Math RPC services over *protocol*.

    Issues Echo("Hello world!"), Ping, Add(2, 2) and Multiply(2, 2)
    requests, printing each response, and returns a DeferredList that
    fires ``client_finished`` once all four calls complete.
    """
    proxy = Proxy(Test_Stub(protocol), Math_Stub(protocol))

    echo_req = EchoRequest()
    echo_req.text = "Hello world!"
    echo_d = proxy.Test.Echo(echo_req)
    echo_d.addCallback(print_response)

    ping_d = proxy.Test.Ping(PingRequest())
    ping_d.addCallback(print_response)

    # The same binary-operation request is reused for both math calls.
    math_req = MathBinaryOperationRequest()
    math_req.first = 2
    math_req.second = 2
    add_d = proxy.Math.Add(math_req)
    add_d.addCallback(print_response)
    mul_d = proxy.Math.Multiply(math_req)
    mul_d.addCallback(print_response)

    dl = DeferredList([echo_d, ping_d, add_d, mul_d])
    dl.addCallback(client_finished)
    return dl
def setup_local(myName, virtualNet, classicalNet, lNode, func):
    """
    Sets up
    - local classical communication server (if desired according to the configuration file)
    - client connection to the local virtual node quantum backend
    - client connections to all other classical communication servers

    Arguments
    myName        name of this node (string)
    virtualNet    servers of the virtual nodes (dictionary of host objects)
    classicalNet  servers on the classical communication network (dictionary of host objects)
    lNode         Twisted PB root to use as local server (if applicable)
    func          function to run if all connections are set up
    """
    # Initialize Twisted callback framework
    dList = []

    # If we are listed as a server node for the classical network, start this server
    if myName in classicalNet.hostDict:
        try:
            logging.debug(
                "LOCAL %s: Starting local classical communication server.", myName)
            nb = classicalNet.hostDict[myName]
            nb.root = lNode
            nb.factory = pb.PBServerFactory(nb.root)
            reactor.listenTCP(nb.port, nb.factory)
        except Exception as e:
            # BUG FIX: the original passed two arguments to a one-placeholder
            # format string AND read ``e.strerror`` (absent on generic
            # exceptions), so the handler itself failed and masked the real
            # error.  Log the exception object directly instead.
            logging.error(
                "LOCAL %s: Cannot start classical communication servers: %s",
                myName, e)
            return

    # Give the server some time to start up
    # NOTE(review): blocking sleep — presumably runs before reactor.run()
    # takes over the thread; confirm this startup delay is intentional.
    time.sleep(1)

    # Connect to the local virtual node simulating the "local" qubits
    logging.debug("LOCAL %s: Connecting to local virtual node.", myName)
    node = virtualNet.hostDict[myName]
    factory = pb.PBClientFactory()
    reactor.connectTCP(node.hostname, node.port, factory)
    deferVirtual = factory.getRootObject()
    dList.append(deferVirtual)

    # Set up a connection to all the other nodes in the classical network
    for node in classicalNet.hostDict:
        nb = classicalNet.hostDict[node]
        if nb.name != myName:
            logging.debug("LOCAL %s: Making classical connection to %s.",
                          myName, nb.name)
            nb.factory = pb.PBClientFactory()
            reactor.connectTCP(nb.hostname, nb.port, nb.factory)
            dList.append(nb.factory.getRootObject())

    # Once every root object is available, initialize the local register;
    # errors anywhere in the fan-out are routed to localError.
    deferList = DeferredList(dList, consumeErrors=True)
    deferList.addCallback(init_register, myName, virtualNet, classicalNet,
                          lNode, func)
    deferList.addErrback(localError)
    reactor.run()
def gather_results(deferreds, consume_errors=False):
    """Collect the results of several deferreds into a single list.

    Fails fast: the first errback aborts the whole gather, and the inner
    failure (rather than the wrapping FirstError) is what propagates to
    the caller's errback chain.
    """
    dl = DeferredList(deferreds, fireOnOneErrback=True,
                      consumeErrors=consume_errors)
    # DeferredList yields (success, result) pairs; keep only the results.
    dl.addCallback(lambda pairs: [value for _, value in pairs])
    # Unwrap FirstError so callers see the original failure.
    dl.addErrback(lambda f: f.value.subFailure)
    return dl
def load_eth_account_list(account_num):
    """Load up to ``account_num`` Ethereum accounts from stored keystores.

    Each keystore is decoded on a worker thread via ``load_eth_account``.
    Returns a Deferred that fires with the list of successfully loaded
    accounts; failures are printed and skipped.

    NOTE(review): if ``account_num`` is 0 or negative the countdown never
    hits zero, so *every* keystore gets loaded — confirm that "0 means
    all" is the intended contract.
    """
    eth_account_list = []
    ds = []
    remaining = account_num
    for keystore in get_keystore_list():
        ds.append(deferToThread(load_eth_account, keystore))
        remaining -= 1
        if remaining == 0:
            break

    def handle_result(result):
        # DeferredList with consumeErrors=True yields (success, value) pairs.
        for (success, value) in result:
            if success:
                account = value
                print('Loading account success: ', account.address)
                eth_account_list.append(account)
            else:
                print('Loading account failure: ', value.getErrorMessage())
        return eth_account_list

    if ds:
        dl = DeferredList(ds, consumeErrors=True)
        dl.addCallback(handle_result)
    else:
        # No keystores found: fire immediately with an empty list.
        dl = Deferred()
        dl.callback([])
    return dl
def _do_read_brokers():
    # Fan out one DHT read per broker position and aggregate the results.
    # Closure: relies on outer-scope names (positions, customer_idurl,
    # return_details, use_cache, _do_verify, _on_error, _do_collect_results,
    # result) defined in the enclosing function, which is outside this view.
    all_brokers_results = []
    for position in positions:
        # Per-position deferred that _do_verify/_on_error will fire once the
        # DHT record for that broker slot has been checked.
        one_broker_result = Deferred()
        all_brokers_results.append(one_broker_result)
        d = dht_records.get_message_broker(
            customer_idurl=customer_idurl,
            position=position,
            return_details=return_details,
            use_cache=use_cache,
        )
        d.addCallback(_do_verify, position, one_broker_result)
        if _Debug:
            # Extra debug-only errback for tracing before the real handler.
            d.addErrback(
                lg.errback,
                debug=_Debug,
                debug_level=_DebugLevel,
                method='read_customer_message_brokers._do_read_brokers')
        d.addErrback(_on_error, position, one_broker_result)
    # Wait for every per-position deferred, then collate the answers.
    # consumeErrors=False: individual failures propagate through the list.
    join_all_brokers = DeferredList(all_brokers_results, consumeErrors=False)
    join_all_brokers.addCallback(_do_collect_results)
    if _Debug:
        join_all_brokers.addErrback(
            lg.errback,
            debug=_Debug,
            debug_level=_DebugLevel,
            method='read_customer_message_brokers._do_read_brokers')
    # Forward any aggregate failure to the caller-facing ``result`` deferred.
    join_all_brokers.addErrback(result.errback)
    return None
def render_GET(self, request):
    """
    .. http:get:: /wallets

    A GET request to this endpoint will return information about all
    available wallets in Tribler.  This includes information about the
    address, a human-readable wallet name and the balance.

        **Example request**:

        .. sourcecode:: none

            curl -X GET http://localhost:8085/wallets

        **Example response**:

        .. sourcecode:: javascript

            {
                "wallets": [{
                    "created": True,
                    "name": "Bitcoin",
                    "unlocked": True,
                    "precision": 8,
                    "min_unit": 100000,
                    "address": "17AVS7n3zgBjPq1JT4uVmEXdcX3vgB2wAh",
                    "balance": {
                        "available": 0.000126,
                        "pending": 0.0,
                        "currency": "BTC"
                    }
                }, ...]
            }
    """
    wallets = {}
    balance_deferreds = []
    for wallet_id in self.session.lm.wallets.iterkeys():
        wallet = self.session.lm.wallets[wallet_id]
        wallets[wallet_id] = {
            'created': wallet.created,
            'unlocked': wallet.unlocked,
            'address': wallet.get_address(),
            'name': wallet.get_name(),
            'precision': wallet.precision(),
            'min_unit': wallet.min_unit()
        }
        # Tag each balance with its wallet id; bind wallet_id as a default
        # argument so each lambda keeps its own id.
        d = wallet.get_balance()
        d.addCallback(lambda balance, wid=wallet_id: (wid, balance))
        balance_deferreds.append(d)

    def on_received_balances(balances):
        # DeferredList yields (success, (wallet_id, balance)) pairs.
        for _, (wid, balance) in balances:
            wallets[wid]['balance'] = balance
        request.write(json.dumps({"wallets": wallets}))
        request.finish()

    balance_deferred_list = DeferredList(balance_deferreds)
    balance_deferred_list.addCallback(on_received_balances)
    return NOT_DONE_YET
def process_cluster_info(self, info, cluster, callback):
    """
    Process the JSON job listing received from ganeti for *cluster*.

    Reconciles ganeti's reported job ids against the database: updates
    still-running jobs, imports jobs not yet in the database, and archives
    unfinished jobs that ganeti no longer reports.  *callback* fires once
    all per-job deferreds have completed.
    """
    print '%s:' % cluster.hostname
    # parse json and repackage ids as a set of ints
    ids = set((int(d['id']) for d in json.loads(info)))
    print ids
    self.timer.tick('info fetched from ganeti ')
    # tallies how many jobs each helper touched
    updated = Counter()

    # fetch list of jobs in the cluster that are not yet finished. if the
    # job is already finished then we don't need to update it
    db_ids = set(cluster.jobs \
        .exclude(status__in=COMPLETE_STATUS) \
        .values_list('job_id', flat=True))
    print 'running: ', db_ids

    # update all running jobs and archive any that aren't found
    # XXX this could be a choke point if there are many running jobs. each
    # job will be a separate ganeti query
    current = db_ids & ids
    archived = db_ids - ids
    deferreds = [self.update_job(cluster, id, updated) for id in current]
    # remove already-handled ids before classifying the remainder
    ids -= current
    print ids, current, archived

    # get list of jobs that are finished. use this to filter the list of
    # ids further
    # XXX this could be a choke point if there are a lot of IDs that have
    # completed but have not yet been archived by ganeti.
    db_ids = cluster.jobs \
        .filter(job_id__in=ids, status__in=COMPLETE_STATUS) \
        .values_list('job_id', flat=True)
    print 'completed: ', db_ids
    ids -= set(db_ids)
    print 'new: ', ids

    # any job id still left in the list is a new job. Create the job and
    # associate it with the object it relates to
    for id in ids:
        deferreds.append(self.import_job(cluster, id, updated))

    # archive any jobs that we do not yet have a complete status for but
    # were not found in list of jobs returned by ganeti
    if archived:
        self.archive_jobs(cluster, archived, updated)

    # XXX it would be nice if the deferred list could be returned and this
    # callback hooked up outside of the method, but that doesn't seem
    # possible
    deferred_list = DeferredList(deferreds)
    deferred_list.addCallback(callback)
def _deleteFunctionReservationsCallback(self, data, function_name):
    """Delete every SimpleDB reservation listed in *data*.

    *data* holds the reservation UUIDs found for *function_name*; each
    one is removed from the reservation domain, and the follow-up
    callback runs with the function name once all deletes have settled.
    """
    logger.debug("Found reservations for function %s: %s" % (function_name, data))
    deletions = [
        self.sdb.delete(self.aws_sdb_reservation_domain, uuid)
        for uuid in data
    ]
    dl = DeferredList(deletions, consumeErrors=True)
    dl.addCallback(self._deleteFunctionReservationsCallback2, function_name)
    return dl
def _copyDomainCallback3(self, data, destination_domain):
    """Read back the per-letter items 'a'..'z' from *destination_domain*.

    Issues one getAttributes call per letter and chains the aggregated
    results (with the domain name) into ``_copyDomainCallback4``.
    """
    fetches = [
        self.sdb.getAttributes(destination_domain, letter)
        for letter in "abcdefghijklmnopqrstuvwxyz"
    ]
    dl = DeferredList(fetches, consumeErrors=True)
    dl.addCallback(self._copyDomainCallback4, destination_domain)
    return dl
def cb_list_delete_container(result, container):
    """Delete every object listed in *container*, then the container itself.

    *result* is a (response, listing) pair from a container listing.
    Once all object deletions succeed, ``cb_delete_container`` removes
    the now-empty container; any single failure aborts the chain.
    """
    _, listing = result
    deletions = [
        swift.delete_object(container, entry["name"].encode("utf-8"))
        for entry in listing
    ]
    dl = DeferredList(deletions, fireOnOneErrback=True)
    dl.addCallback(cb_delete_container, container)
    return dl
def _copyDomainCallback(self, data):
    """Seed the domain with one single-attribute item per letter 'a'..'z'.

    Each letter becomes an item named after itself whose sole attribute
    maps that letter to itself; ``_copyDomainCallback2`` runs once every
    put has settled.
    """
    writes = [
        self.sdb.putAttributes(self.uuid, letter, {letter: [letter]})
        for letter in "abcdefghijklmnopqrstuvwxyz"
    ]
    dl = DeferredList(writes, consumeErrors=True)
    dl.addCallback(self._copyDomainCallback2)
    return dl
def _batchPutAttributesCallback2(self):
    """Fetch items test_a, test_b and test_c from the test domain.

    Collects all three getAttributes results into one DeferredList and
    hands them to ``_batchPutAttributesCallback3``.
    """
    item_names = ("test_a", "test_b", "test_c")
    reads = [self.sdb.getAttributes(self.uuid, name) for name in item_names]
    dl = DeferredList(reads, consumeErrors=True)
    dl.addCallback(self._batchPutAttributesCallback3)
    return dl