def get_homepage_listings(self, message_id):
    """Stream up to 30 random vendor listings to the websocket client.

    Queries 10 random vendors at a time (3 listings max per vendor) and
    re-queries more vendors until roughly 30 listings have been sent.
    Results are streamed via sendMessage; nothing is returned.
    """
    if message_id not in self.factory.outstanding:
        self.factory.outstanding[message_id] = []
    vendors = self.factory.db.VendorStore().get_vendors()
    shuffle(vendors)

    def count_results(results):
        # Each entry in `results` is the listing count one vendor produced.
        to_query = 30
        for result in results:
            to_query -= result
        shuffle(vendors)
        # Python 2 integer division: query one more vendor per 3 missing listings.
        if to_query/3 > 0 and len(vendors) > 0:
            for node in vendors[:to_query/3]:
                dl.append(self.factory.mserver.get_listings(node).addCallback(handle_response, node))
            # NOTE(review): this re-gathers the shared `dl` list (including
            # already-fired deferreds) — fire-and-forget, result is discarded.
            defer.gatherResults(dl).addCallback(count_results)

    def handle_response(listings, node):
        # Push at most 3 new (unseen) listings from this vendor to the client.
        count = 0
        if listings is not None:
            for l in listings.listing:
                if l.contract_hash not in self.factory.outstanding[message_id]:
                    listing_json = {
                        "id": message_id,
                        "listing": {
                            "guid": node.id.encode("hex"),
                            "handle": listings.handle,
                            "avatar_hash": listings.avatar_hash.encode("hex"),
                            "title": l.title,
                            "contract_hash": l.contract_hash.encode("hex"),
                            "thumbnail_hash": l.thumbnail_hash.encode("hex"),
                            "category": l.category,
                            "price": l.price,
                            "currency_code": l.currency_code,
                            "nsfw": l.nsfw,
                            "origin": str(CountryCode.Name(l.origin)),
                            "ships_to": []
                        }
                    }
                    for country in l.ships_to:
                        listing_json["listing"]["ships_to"].append(str(CountryCode.Name(country)))
                    # Fetch images we do not have cached yet (fire-and-forget).
                    if not os.path.isfile(DATA_FOLDER + 'cache/' + l.thumbnail_hash.encode("hex")):
                        self.factory.mserver.get_image(node, l.thumbnail_hash)
                    if not os.path.isfile(DATA_FOLDER + 'cache/' + listings.avatar_hash.encode("hex")):
                        self.factory.mserver.get_image(node, listings.avatar_hash)
                    self.sendMessage(json.dumps(listing_json, indent=4), False)
                    count += 1
                    self.factory.outstanding[message_id].append(l.contract_hash)
                    if count == 3:
                        return count
            vendors.remove(node)
        else:
            # Vendor did not respond: forget it and drop it from the pool.
            self.factory.db.VendorStore().delete_vendor(node.id)
            vendors.remove(node)
        return count

    dl = []
    for vendor in vendors[:10]:
        dl.append(self.factory.mserver.get_listings(vendor).addCallback(handle_response, vendor))
    defer.gatherResults(dl).addCallback(count_results)
def _buildsetComplete(self, key, msg):
    """Collect every build of a finished buildset and emit a summary mail.

    Generator body — presumably decorated with inlineCallbacks outside this
    view (TODO confirm).
    """
    bsid = msg['bsid']
    # Fetch the buildset row and its build requests in parallel.
    dl = [self.master.db.buildsets.getBuildset(bsid=bsid),
          self.master.db.buildrequests.getBuildRequests(bsid=bsid)]
    (buildset, breqs) = yield defer.gatherResults(dl)
    # One builds-query per build request, again in parallel.
    dl = []
    for breq in breqs:
        d = self.master.db.builds.getBuilds(
            buildrequestid=breq['buildrequestid'])
        dl.append(d)
    buildinfo = yield defer.gatherResults(dl)
    # Resolve each build dict to the live status Build object via its builder.
    builds = []
    for (breq, bdictlist) in zip(breqs, buildinfo):
        builder = self.master_status.getBuilder(breq['buildername'])
        for bdict in bdictlist:
            build = builder.getBuild(bdict['number'])
            if build is not None:
                builds.append(build)
    if builds:
        # All build information gathered; send the summary.
        self.sendBuildSetSummary(buildset, builds)
def setUp(self):
    """Start the mongod processes and initialize a replica set, polling
    until a primary is elected and all members are in sync (or timing out)."""
    self.__mongod = [Mongod(port=p, replset=self.rsname) for p in self.ports]
    yield defer.gatherResults([mongo.start() for mongo in self.__mongod])
    master_uri = "mongodb://localhost:{0}/?readPreference=secondaryPreferred".format(self.ports[0])
    master = ConnectionPool(master_uri)
    yield master.admin.command("replSetInitiate", self.rsconfig)
    ready = False
    n_tries = int(self.__init_timeout / self.__ping_interval)
    for i in xrange(n_tries):
        yield self.__sleep(self.__ping_interval)
        # Both ismaster and replSetGetStatus are needed to be sure the
        # replica set is up: primary elected and every secondary in sync
        # and ready to become the new primary.
        ismaster_req = master.admin.command("ismaster", check=False)
        replstatus_req = master.admin.command("replSetGetStatus", check=False)
        ismaster, replstatus = yield defer.gatherResults([ismaster_req, replstatus_req])
        initialized = replstatus["ok"]
        ok_states = set(["PRIMARY", "SECONDARY"])
        states_ready = all(m["stateStr"] in ok_states
                           for m in replstatus.get("members", []))
        ready = initialized and ismaster["ismaster"] and states_ready
        if ready:
            break
    if not ready:
        yield self.tearDown()
        raise Exception("ReplicaSet initialization took more than {0}s".format(self.__init_timeout))
    yield master.disconnect()
def _do_power_level_conflict_res(self, new_branch, current_branch, common_ancestor):
    """Return (max power on new branch, max power on current branch).

    If a common ancestor exists the last event of each branch is excluded.
    NOTE(review): a branch with no user_id-bearing events yields an empty
    power list and max() would raise ValueError — presumably callers
    guarantee at least one such event; confirm upstream.
    """
    # Look up the power level of every event sender on the new branch.
    new_powers_deferreds = []
    for e in new_branch[:-1] if common_ancestor else new_branch:
        if hasattr(e, "user_id"):
            new_powers_deferreds.append(
                self.store.get_power_level(e.context, e.user_id)
            )
    # Same for the currently-accepted branch.
    current_powers_deferreds = []
    for e in current_branch[:-1] if common_ancestor else current_branch:
        if hasattr(e, "user_id"):
            current_powers_deferreds.append(
                self.store.get_power_level(e.context, e.user_id)
            )
    new_powers = yield defer.gatherResults(
        new_powers_deferreds, consumeErrors=True
    )
    current_powers = yield defer.gatherResults(
        current_powers_deferreds, consumeErrors=True
    )
    max_power_new = max(new_powers)
    max_power_current = max(current_powers)
    defer.returnValue((max_power_new, max_power_current))
def read_config(results):
    """Compare hardware PON config against our desired state and push
    corrections; returns the ONU subtree of the decoded config."""
    self.log.debug('read-config', results=results)
    config = OltConfig.Pon.decode([results])
    assert self.pon_id in config, 'sync-pon-not-found-{}'.format(self.pon_id)
    config = config[self.pon_id]
    self._in_sync = True
    dl = []
    if self.enabled != config.enabled:
        # Admin state differs: fix it first and re-sync soon.
        self._in_sync = False
        self._expedite_sync = True
        dl.append(self._set_pon_config("enabled", self.enabled))
    elif self._state == PonPort.State.RUNNING:
        # Only reconcile the finer settings while the port is running.
        if self.deployment_range != config.deployment_range:
            self._in_sync = False
            self._expedite_sync = True
            dl.append(self._set_pon_config("deployment-range", self.deployment_range))
        if self.downstream_fec_enable != config.downstream_fec_enable:
            self._in_sync = False
            self._expedite_sync = True
            dl.append(self._set_pon_config("downstream-fec-enable", self.downstream_fec_enable))
        if self.upstream_fec_enable != config.upstream_fec_enable:
            self._in_sync = False
            self._expedite_sync = True
            dl.append(self._set_pon_config("upstream-fec-enable", self.upstream_fec_enable))
    # NOTE(review): the gathered deferred is intentionally not awaited
    # (best-effort corrections); errors are consumed silently.
    defer.gatherResults(dl, consumeErrors=True)
    return config.onus
def main(reactor, *args): c = Configurator(configFile=sys.argv[1]) # Run flocker-diagnostics deferreds = [] log("Running Flocker-diagnostics on agent nodes.") for node in c.config["agent_nodes"]: d = c.runSSHAsync(node["public"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") d.addCallback(report_completion, public_ip=node["public"], message=" * Ran diagnostics on agent node.") deferreds.append(d) d = c.runSSHAsync(c.config["control_node"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Ran diagnostics on control node.") deferreds.append(d) yield gatherResults(deferreds) # Let flocker diagnostics run time.sleep(5) # Gather flocker-diagnostics deferreds = [] log("Gathering Flocker-diagnostics on agent nodes.") for node in c.config["agent_nodes"]: d = c.scp("./", node["public"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) d.addCallback(report_completion, public_ip=node["public"], message=" * Gathering diagnostics on agent node.") deferreds.append(d) d = c.scp("./", c.config["control_node"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Gathering diagnostics on control node.") deferreds.append(d) yield gatherResults(deferreds)
def get_public_room_list(self):
    """Build the public room directory: each room dict is annotated with
    its joined-member count and (when present) an avatar URL."""
    chunk = yield self.store.get_rooms(is_public=True)
    # Fetch member lists and avatar URLs for every room concurrently.
    room_members = yield defer.gatherResults(
        [
            self.store.get_users_in_room(room["room_id"])
            for room in chunk
        ],
        consumeErrors=True,
    ).addErrback(unwrapFirstError)
    avatar_urls = yield defer.gatherResults(
        [
            self.get_room_avatar_url(room["room_id"])
            for room in chunk
        ],
        consumeErrors=True,
    ).addErrback(unwrapFirstError)
    # gatherResults preserves order, so index i matches chunk[i].
    for i, room in enumerate(chunk):
        room["num_joined_members"] = len(room_members[i])
        if avatar_urls[i]:
            room["avatar_url"] = avatar_urls[i]
    # FIXME (erikj): START is no longer a valid value
    defer.returnValue({"start": "START", "end": "END", "chunk": chunk})
def _exchange_double(self, shares, rvec1, rvec2, T, field, d1, d2):
    """Exchange and (if possible) verify shares.

    Players with id > T act as verifiers: they collect one share of each
    vector from every player and check consistency; the rest just send.
    """
    svec1, svec2 = shares
    pc = tuple(self.program_counter)
    inputters = range(1, self.num_players + 1)
    # Send our shares to the verifying players (ids T+1 .. n), skipping self.
    for offset, (s1, s2) in enumerate(zip(svec1, svec2)):
        if T+1+offset != self.id:
            self.protocols[T+1+offset].sendShare(pc, s1)
            self.protocols[T+1+offset].sendShare(pc, s2)
    if self.id > T:
        # We are a verifier: gather everyone's shares of si_1 and si_2.
        si_1 = []
        si_2 = []
        for peer_id in inputters:
            if self.id == peer_id:
                # Our own contribution needs no network round-trip.
                si_1.append(Share(self, field, svec1[peer_id - T - 1]))
                si_2.append(Share(self, field, svec2[peer_id - T - 1]))
            else:
                si_1.append(self._expect_share(peer_id, field))
                si_2.append(self._expect_share(peer_id, field))
        result = gatherResults([gatherResults(si_1), gatherResults(si_2)])
        result.addCallback(self._verify_double, rvec1, rvec2, T, field, d1, d2)
        return result
    else:
        # We cannot verify anything.
        return (rvec1[:T], rvec2[:T])
    # do actual communication
    # NOTE(review): unreachable — both branches above return; presumably
    # this was meant to run before the if/else. Confirm against upstream.
    self.activate_reactor()
def testLIST(self):
    """LIST/NLST over the data connection: empty dir, populated dir,
    listing a subdirectory by name, and listing after CWD."""
    self._anonymousLogin()
    # Empty directory produces an empty listing.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
    # Two entries after creating two directories.
    os.mkdir(os.path.join(self.directory, 'foo'))
    os.mkdir(os.path.join(self.directory, 'bar'))
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual(2, len(downloader.buffer[:-2].split('\r\n')))
    # NLST returns just the names.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('NLST ')
    wait(defer.gatherResults([d, downloader.d]))
    filenames = downloader.buffer[:-2].split('\r\n')
    filenames.sort()
    self.assertEqual(['bar', 'foo'], filenames)
    # Listing an empty subdirectory by name.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST foo')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
    # Listing after changing into the empty subdirectory.
    wait(self.client.queueStringCommand('CWD foo'))
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
def check(certs):
    """Scan all certificates, reissue the ones close to expiry and panic
    on the ones past the panic interval (or missing entirely).

    Returns a deferred firing when both the panic-reissue and the
    normal-reissue batches have completed.
    """
    panicing = set()
    expiring = set()
    for server_name, objects in certs.items():
        if len(objects) == 0:
            # No stored objects at all for this name: treat as critical.
            panicing.add(server_name)
        for o in filter(lambda o: isinstance(o, Certificate), objects):
            cert = x509.load_pem_x509_certificate(
                o.as_bytes(), default_backend())
            until_expiry = cert.not_valid_after - self._now()
            if until_expiry <= self.panic_interval:
                panicing.add(server_name)
            elif until_expiry <= self.reissue_interval:
                expiring.add(server_name)
    # Critical reissues: failures escalate to _panic.
    d1 = (
        gatherResults(
            [self._issue_cert(server_name)
             .addErrback(self._panic, server_name)
             for server_name in panicing],
            consumeErrors=True)
        .addCallback(done_panicing))
    # Routine reissues: failures are only logged.
    # FIX: bind server_name as a default argument — the previous bare
    # closure captured the loop variable late, so every failure logged
    # the *last* server name instead of the failing one.
    d2 = gatherResults(
        [self._issue_cert(server_name)
         .addErrback(
             lambda f, server_name=server_name: log.failure(
                 u'Error issuing certificate for: {server_name!r}',
                 f, server_name=server_name))
         for server_name in expiring],
        consumeErrors=True)
    return gatherResults([d1, d2], consumeErrors=True)
def _runDefer(self, thing, creature, end, returnVal, **kwargs):
    """Invoke every script registered for `thing` (by object, thing id,
    and each action id) as deferreds, then wait on the whole batch."""
    deferList = []
    # Scripts registered directly against the thing object.
    if thing in self.thingScripts:
        for func in self.thingScripts[thing]:
            deferList.append(defer.maybeDeferred(func, creature=creature, thing=thing, **kwargs))
    # Scripts registered against the thing's id.
    thingId = thing.thingId()
    if thingId in self.scripts:
        for func in self.scripts[thingId]:
            deferList.append(defer.maybeDeferred(func, creature=creature, thing=thing, **kwargs))
    # Scripts registered against each of the thing's action ids.
    for aid in thing.actionIds():
        if aid in self.scripts:
            for func in self.scripts[aid]:
                deferList.append(defer.maybeDeferred(func, creature=creature, thing=thing, **kwargs))
    if returnVal:
        # This is actually blocking code, but is rarely used.
        d = defer.gatherResults(deferList)
    elif end:
        d = defer.gatherResults(deferList)
        d.addCallback(self.handleCallback(end))
    else:
        d = defer.DeferredList(deferList)
        d.addErrback(log.err)
    yield d
def count_results(results):
    """Count how many queries came back empty/falsy and query that many
    additional vendors for metadata, looping until all succeed."""
    to_query = 0
    for result in results:
        if not result:
            to_query += 1
    for node in vendors[:to_query]:
        dl.append(self.factory.mserver.get_user_metadata(node).addCallback(handle_response, node))
    # Re-gather (fire-and-forget) until no result is falsy.
    defer.gatherResults(dl).addCallback(count_results)
def render_pageTitle(self, context):
    """Resolve the main title and site name (either may be deferred),
    then hand both strings to _render_pageTitle for final assembly."""
    # Wait for the title and site name to resolve into strings so we can
    # mess with them a bit more.
    result = defer.Deferred()
    parts = [
        defer.maybeDeferred(self.render_mainTitle, context),
        defer.maybeDeferred(self.render_siteName, context),
    ]
    d = defer.gatherResults(parts)
    d.addCallback(self._render_pageTitle, context, result)
    d.addErrback(result.errback)
    return result
def run_experiment():
    """Drive one experiment: wait for machines, reset them, attach log /
    event listeners, run the step sequence, then tear everything down."""
    # Wait for machines to be ready.
    # todo: with some timeout
    self._log("Waiting for machines")
    try:
        result = yield defer.gatherResults(
            [m.ready for m in self._machines]
        )
    except:
        self._log("Error")
        raise
        # deal with Busy / errback.
    # Reset machines.
    # todo: with some timeout
    self._log("Resetting machines")
    try:
        result = yield defer.gatherResults(
            [defer.maybeDeferred(m.reset) for m in self._machines]
        )
    except:
        self._log("Error")
        raise
        # deal with errback.
    # Start logging and add event listeners to the step.
    self._log("Starting logging")
    self.set_log_output(self.default_log_output)
    self.interface.event += self._interface_event  # (log, passthrough to marshal)
    self.step.event += self._step_event            # (log, passthrough to marshal)
    self.step.log += self._step_log                # (log, passthrough to marshal)
    # Run the step sequence.
    self._log("Running experiment sequence")
    try:
        self.started()
        yield self.step.run()
        self.state = State.COMPLETE
    except Exception as error:
        self._log(error)
        self.error(error)
        self.state = State.ERROR
        raise
    finally:
        # Remove event listeners regardless of outcome.
        self.interface.event -= self._interface_event
        self.step.event -= self._step_event
        self.step.log -= self._step_log
        # Pop experiment from marshal.
        self._log("Waiting for marshal")
        yield self._marshal.popExperiment()
        # Stop logging.
        self.stop_logging()
    self.finished()
def count_results(results):
    """Tally how many listings have been delivered so far and, if fewer
    than 30, query more random vendors (one per 3 missing listings)."""
    to_query = 30
    for result in results:
        to_query -= result
    shuffle(vendors)
    # Python 2 integer division intended here.
    if to_query/3 > 0 and len(vendors) > 0:
        for node in vendors[:to_query/3]:
            dl.append(self.factory.mserver.get_listings(node).addCallback(handle_response, node))
        # Fire-and-forget: keep looping until enough listings were sent.
        defer.gatherResults(dl).addCallback(count_results)
def startReceivingBoxes(self, sender):
    """On AMP startup, fire a random number (1-4) of Count calls and
    chain their combined completion into self.finished."""
    AMP.startReceivingBoxes(self, sender)
    counts = []
    for i in range(random.randrange(1, 5)):
        d = self.callRemote(Count)
        d.addCallback(display, self.identifier)
        counts.append(d)
    gatherResults(counts).chainDeferred(self.finished)
def testGatherResults(self):
    """gatherResults collects successes into a list and converts the
    first failure into an errback."""
    # Success case: both results arrive as a single list.
    collected = []
    defer.gatherResults([defer.succeed(1), defer.succeed(2)]).addCallback(collected.append)
    self.assertEquals(collected, [[1, 2]])
    # Failure case: one failing deferred errbacks the gather.
    collected = []
    dl = [defer.succeed(1), defer.fail(ValueError)]
    defer.gatherResults(dl).addErrback(collected.append)
    self.assertEquals(len(collected), 1)
    self.assert_(isinstance(collected[0], failure.Failure))
    # Silence the unhandled error on the original deferred.
    dl[1].addErrback(lambda e: 1)
def getCache():
    """Return a deferred list of component collections, loading each from
    its pickled cache file when fresh, or regenerating it when stale."""
    results = []
    for moduleObj in getSearchDirectories():
        componentPath = moduleObj.filePath
        dropinPath = componentPath.parent().child('components.cache')
        try:
            lastModified = componentPath.getModificationTime()
        except:
            log.err("Could not stat {:s}".format(str(componentPath)))
            continue
        # Look for an existing cache file.
        try:
            lastCached = dropinPath.getModificationTime()
            collection = pickle.load(dropinPath.open('r'))
        except:
            lastCached = 0
        if lastCached < lastModified:
            stale = True
        else:
            stale = False
            # Even a fresh cache is stale if any cached component file
            # changed after the cache was written.
            for component in collection.components:
                if FilePath(component.fileName).getModificationTime() > lastCached:
                    stale = True
        if stale:
            try:
                module = moduleObj.load()
                if type(module.__components__) is dict:
                    def loaded(collection):
                        # Persist the regenerated cache; failure to write
                        # is non-fatal.
                        try:
                            dropinPath.setContent(pickle.dumps(collection))
                        except OSError as e:
                            log.err("Unable to write cache file {:s}".format(dropinPath))
                        return collection
                    results.append(_generateCacheEntry(module).addCallback(loaded))
            except KeyError as e:
                log.err("Component module {:s} failed to load".format(componentPath))
            except:
                log.err()
        else:
            results.append(defer.succeed(collection))
    d = defer.Deferred()
    defer.gatherResults(results).addCallbacks(d.callback, d.errback)
    return d
def getDetailsForBuilds(master, buildset, builds, wantProperties=False, wantSteps=False,
                        wantPreviousBuild=False, wantLogs=False):
    """Annotate each build dict in `builds` in place with its builder,
    buildset, URL and optionally properties, steps (+logs) and the
    previous build."""
    # Resolve each distinct builder once.
    builderids = set([build['builderid'] for build in builds])
    builders = yield defer.gatherResults(
        [master.data.get(("builders", _id)) for _id in builderids])
    buildersbyid = dict([(builder['builderid'], builder)
                         for builder in builders])
    if wantProperties:
        buildproperties = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'properties'))
             for build in builds])
    else:
        # we still need a list for the big zip
        buildproperties = lrange(len(builds))
    if wantPreviousBuild:
        prev_builds = yield defer.gatherResults(
            [getPreviousBuild(master, build) for build in builds])
    else:
        # we still need a list for the big zip
        prev_builds = lrange(len(builds))
    if wantSteps:
        buildsteps = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'steps'))
             for build in builds])
        if wantLogs:
            # Fetch log metadata and full contents for every step.
            for s in flatten(buildsteps, types=(list, UserList)):
                logs = yield master.data.get(("steps", s['stepid'], 'logs'))
                s['logs'] = list(logs)
                for l in s['logs']:
                    l['content'] = yield master.data.get(("logs", l['logid'], 'contents'))
    else:
        # we still need a list for the big zip
        buildsteps = lrange(len(builds))
    # One big zip to connect everything together.
    for build, properties, steps, prev in zip(builds, buildproperties,
                                              buildsteps, prev_builds):
        build['builder'] = buildersbyid[build['builderid']]
        build['buildset'] = buildset
        build['url'] = getURLForBuild(
            master, build['builderid'], build['number'])
        if wantProperties:
            build['properties'] = properties
        if wantSteps:
            build['steps'] = list(steps)
        if wantPreviousBuild:
            build['prev_build'] = prev
def getConnectedClientAndServer(self, reactor, interface, addressFamily):
    """
    Return a L{Deferred} firing with a L{MyClientFactory} and
    L{MyServerFactory} connected pair, and the listening C{Port}. The
    particularity is that the server protocol has been obtained after
    doing a C{adoptStreamConnection} against the original server
    connection.
    """
    firstServer = MyServerFactory()
    firstServer.protocolConnectionMade = Deferred()
    server = MyServerFactory()
    server.protocolConnectionMade = Deferred()
    server.protocolConnectionLost = Deferred()
    client = MyClientFactory()
    client.protocolConnectionMade = Deferred()
    client.protocolConnectionLost = Deferred()
    # self.mktemp() often returns a path which is too long to be used.
    path = mktemp(suffix='.sock', dir='.')
    port = reactor.listenUNIX(path, firstServer)

    def firstServerConnected(proto):
        # Detach the accepted connection from the reactor and re-adopt it
        # under the second server factory.
        reactor.removeReader(proto.transport)
        reactor.removeWriter(proto.transport)
        reactor.adoptStreamConnection(
            proto.transport.fileno(), AF_UNIX, server)

    firstServer.protocolConnectionMade.addCallback(firstServerConnected)
    lostDeferred = gatherResults([client.protocolConnectionLost,
                                  server.protocolConnectionLost])

    def stop(result):
        # Shut the reactor down exactly once, passing the result through.
        if reactor.running:
            reactor.stop()
        return result

    lostDeferred.addBoth(stop)
    deferred = Deferred()
    deferred.addErrback(stop)
    startDeferred = gatherResults([client.protocolConnectionMade,
                                   server.protocolConnectionMade])

    def start(protocols):
        client, server = protocols
        deferred.callback((client, server, port))

    startDeferred.addCallback(start)
    reactor.connectUNIX(port.getHost().name, client)
    return deferred
def testGatherResults(self):
    """gatherResults: a list of successes fires the callback with all
    results; any failure fires the errback with a Failure."""
    # Successful list of deferreds.
    seen = []
    defer.gatherResults([defer.succeed(1), defer.succeed(2)]).addCallback(seen.append)
    self.assertEquals(seen, [[1, 2]])
    # Failing list of deferreds.
    seen = []
    dl = [defer.succeed(1), defer.fail(ValueError)]
    defer.gatherResults(dl).addErrback(seen.append)
    self.assertEquals(len(seen), 1)
    self.assert_(isinstance(seen[0], failure.Failure))
    # Consume the error so trial does not report it as unhandled.
    dl[1].addErrback(lambda e: 1)
def _buildsetData(self, buildset):
    """
    @returns: L{tuple} of sourcestamp dicts and buildrequest dicts.
    """
    # Fetch the buildset's source stamps and, in parallel, every build
    # request rendered via its async dict form.
    stamps_d = self.master.db.sourcestamps.getSourceStamps(
        buildset.bsdict['sourcestampsetid'])
    requests_d = buildset.getBuilderNamesAndBuildRequests().addCallback(
        lambda res: gatherResults([br.asDict_async() for br in res.values()]))
    return gatherResults([stamps_d, requests_d])
def getCache():
    """Return a deferred list of component collections; a cache is stale
    when unreadable or when any .py file in the module's directory is
    newer than the cache file."""
    results = []
    for moduleObj in getSearchDirectories():
        componentPath = moduleObj.filePath
        dropinPath = componentPath.parent().child('components.cache')
        # Look for an existing cache file.
        try:
            lastCached = dropinPath.getModificationTime()
            collection = pickle.load(dropinPath.open('r'))
        # FIXME: what kind of error do we expect?
        except:
            stale = True
        else:
            stale = False
            # Any newer .py file in the directory invalidates the cache.
            for path in componentPath.parent().walk():
                if path.isfile() and path.splitext()[-1] == '.py':
                    try:
                        lastModified = path.getModificationTime()
                    except:
                        log.err("Could not stat {:s}".format(str(componentPath)))
                    else:
                        if lastModified > lastCached:
                            stale = True
                            break
        if stale:
            try:
                module = moduleObj.load()
                if type(module.__components__) is dict:
                    def loaded(collection):
                        # Persist the regenerated cache; write failure is
                        # non-fatal.
                        try:
                            dropinPath.setContent(pickle.dumps(collection))
                        except OSError as e:
                            log.err("Unable to write cache file {:s}".format(dropinPath))
                        return collection
                    results.append(_generateCacheEntry(module).addCallback(loaded))
            except (KeyError, AttributeError) as e:
                log.err("Component module {:s} failed to load".format(componentPath))
            except:
                log.err()
        else:
            results.append(defer.succeed(collection))
    d = defer.Deferred()
    defer.gatherResults(results).addCallbacks(d.callback, d.errback)
    return d
def main():
    """Download every title image created in the last week, one batch per
    title-image class, stopping the reactor when a batch completes."""
    for title_image_cls in title_image_clsses:
        title_images = session.query(EVJPTitleImage).\
            join(title_image_cls).\
            filter(EVJPTitleImage.created > datetime.today() - timedelta(weeks=1)).\
            all()
        if title_images:
            #waiting = [downloadPage(str(title_image.original_url), create_filename(title_image)) for title_image in title_images]
            waiting = [downloadPage(str(title_image.original_url),
                                    create_filename(title_image)).addCallback(debug, url=title_image.original_url)
                       for title_image in title_images
                       if title_image.original_url]
            #waiting = [w.setTimeout(60) for w in waiting]
            defer.gatherResults(waiting).addBoth(lambda _: reactor.stop())
            reactor.run()
def async_map(async_func, iterable, connections=0):
    """parallel map for deferred callables using cooperative multitasking

    http://stackoverflow.com/a/20376166/408556
    """
    if connections and not reactor.fake:
        # Bounded concurrency: `connections` cooperators pull from one
        # shared generator of work.
        results = []
        work = (async_func(x).addCallback(results.append) for x in iterable)
        deferreds = [get_task().coiterate(work) for _ in range(connections)]
        yield gatherResults(deferreds, consumeErrors=True)
    else:
        # Unbounded: fire everything at once and collect in order.
        deferreds = map(async_func, iterable)
        results = yield gatherResults(deferreds, consumeErrors=True)
    return_value(results)
def setUp(self):
    """
    TODO : Creating a fake environment
    """
    logging.basicConfig(level=logging.DEBUG)
    (self.serie, self.episode) = tests.common_test.get_serie_and_ep()
    defe = component.start()
    # NOTE(review): the gathered deferred is discarded, so setUp does not
    # actually wait for component.start() — presumably intentional
    # best-effort startup; confirm.
    defer.gatherResults([defe])
    try:
        os.makedirs(os.path.join(self.serie.get_path_to_serie(), "saison6"))
    except Exception:
        # Directory already exists: fine for the fixture.
        pass
    return
def testLIST(self):
    """Exercise LIST and NLST: empty root, two directories, a named
    subdirectory, and a listing after CWD into it."""
    # Log in anonymously.
    self._anonymousLogin()
    # Download a listing: no files yet, so it should be empty.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
    # Make some directories.
    os.mkdir(os.path.join(self.directory, 'foo'))
    os.mkdir(os.path.join(self.directory, 'bar'))
    # Download a listing again: two lines for two entries.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual(2, len(downloader.buffer[:-2].split('\r\n')))
    # Download a names-only listing.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('NLST ')
    wait(defer.gatherResults([d, downloader.d]))
    filenames = downloader.buffer[:-2].split('\r\n')
    filenames.sort()
    self.assertEqual(['bar', 'foo'], filenames)
    # Download a listing of the 'foo' subdirectory: it is empty.
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST foo')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
    # Change the current working directory to 'foo' and list again.
    wait(self.client.queueStringCommand('CWD foo'))
    downloader = self._makeDataConnection()
    d = self.client.queueStringCommand('LIST')
    wait(defer.gatherResults([d, downloader.d]))
    self.assertEqual('', downloader.buffer)
def test_shutdown(self):
    """The remote "shutdown" command must both return and stop the reactor."""
    reactor_stopped = defer.Deferred()
    # Replace reactor.stop so we can observe it being called.
    self.patch(reactor, "stop", lambda: reactor_stopped.callback(None))
    remote_call = self.bot.callRemote("shutdown")
    # Don't return until both the shutdown method has returned, and
    # reactor.stop has been called.
    return defer.gatherResults([reactor_stopped, remote_call])
def _mergeRequests(self, breq, unclaimed_requests, mergeRequests_fn):
    """Use C{mergeRequests_fn} to merge C{breq} against
    C{unclaimed_requests}, where both are build request dictionaries"""
    # Short circuit if there is no merging to do.
    if not mergeRequests_fn or len(unclaimed_requests) == 1:
        defer.returnValue([breq])
        return
    # We'll need BuildRequest objects, so get those first.
    unclaimed_request_objects = yield defer.gatherResults(
        [self._brdictToBuildRequest(brdict)
         for brdict in unclaimed_requests])
    breq_object = unclaimed_request_objects[unclaimed_requests.index(breq)]
    # Gather the mergeable requests.
    merged_request_objects = []
    for other_breq_object in unclaimed_request_objects:
        if (yield defer.maybeDeferred(
                lambda: mergeRequests_fn(self, breq_object, other_breq_object))):
            merged_request_objects.append(other_breq_object)
    # Convert them back to brdicts and return.
    merged_requests = [br.brdict for br in merged_request_objects]
    defer.returnValue(merged_requests)
def rebuildBuild(self, bs, reason="<rebuild, no reason given>", extraProperties=None):
    """Resubmit a finished build as a new buildset.

    Returns (via returnValue) a (bsid, brids) tuple, or None when the
    build has no sourcestamps; returns early (None) when the build has
    not yet finished.
    """
    if not bs.isFinished():
        return
    # Make a copy of the properties so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request.
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    # FIX: the condition was inverted (`is None`), which ignored any
    # caller-supplied extraProperties and called updateFromProperties(None)
    # when none were given.
    if extraProperties is not None:
        properties.updateFromProperties(extraProperties)
    properties_dict = dict((k, (v, s)) for (k, v, s) in properties.asList())
    ssList = bs.getSourceStamps(absolute=True)
    if ssList:
        # Reuse the first stamp's set id and attach the remaining stamps to it.
        sourcestampsetid = yield ssList[0].getSourceStampSetId(self.control.master)
        dl = []
        for ss in ssList[1:]:
            # add deferred to the list
            dl.append(ss.addSourceStampToDatabase(self.control.master, sourcestampsetid))
        yield defer.gatherResults(dl)
        bsid, brids = yield self.control.master.addBuildset(
            builderNames=[self.original.name],
            sourcestampsetid=sourcestampsetid,
            reason=reason,
            properties=properties_dict)
        defer.returnValue((bsid, brids))
    else:
        log.msg('Cannot start rebuild, rebuild has no sourcestamps for a new build')
        defer.returnValue(None)
def create_indexes(_):
    """Kick off creation of every configured u1db index and return a
    deferred that fires when all of them are done."""
    deferreds = [
        dbpool.runU1DBQuery("create_index", name, *fields)
        for name, fields in db_indexes.items()
    ]
    return defer.gatherResults(deferreds)
def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
                                      perspective_name, perspective_keys):
    """Fetch server signing keys via a perspective (notary) server,
    verify the notary's signature on every response, process and store
    the keys, and return {server_name: {key_id: key}}."""
    # TODO(mark): Set the minimum_valid_until_ts to that needed by
    # the events being validated or the current time if validating
    # an incoming request.
    query_response = yield self.client.post_json(
        destination=perspective_name,
        path=b"/_matrix/key/v2/query",
        data={
            u"server_keys": {
                server_name: {
                    key_id: {
                        u"minimum_valid_until_ts": 0
                    } for key_id in key_ids
                }
                for server_name, key_ids in server_names_and_key_ids
            }
        },
        long_retries=True,
    )
    keys = {}
    responses = query_response["server_keys"]
    for response in responses:
        # Every response must carry a signature from the perspective server.
        if (u"signatures" not in response
                or perspective_name not in response[u"signatures"]):
            raise KeyLookupError(
                "Key response not signed by perspective server"
                " %r" % (perspective_name,)
            )
        verified = False
        for key_id in response[u"signatures"][perspective_name]:
            if key_id in perspective_keys:
                verify_signed_json(
                    response,
                    perspective_name,
                    perspective_keys[key_id]
                )
                verified = True
        if not verified:
            logging.info(
                "Response from perspective server %r not signed with a"
                " known key, signed with: %r, known keys: %r",
                perspective_name,
                list(response[u"signatures"][perspective_name]),
                list(perspective_keys)
            )
            raise KeyLookupError(
                "Response not signed with a known key for perspective"
                " server %r" % (perspective_name,)
            )
        processed_response = yield self.process_v2_response(
            perspective_name, response, only_from_server=False
        )
        for server_name, response_keys in processed_response.items():
            keys.setdefault(server_name, {}).update(response_keys)
    # Persist everything we learned, in parallel.
    yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [
            preserve_fn(self.store_keys)(
                server_name=server_name,
                from_server=perspective_name,
                verify_keys=response_keys,
            )
            for server_name, response_keys in keys.items()
        ],
        consumeErrors=True
    ).addErrback(unwrapFirstError))
    defer.returnValue(keys)
def wrapped(*args, **kwargs):
    """cachedList wrapper: serve each list element from the cache when
    possible, batch-fetch the misses via the wrapped function, and return
    (a deferred of) a dict mapping each element to its result."""
    # If we're passed a cache_context then we'll want to call its
    # invalidate() whenever we are invalidated.
    invalidate_callback = kwargs.pop("on_invalidate", None)
    arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
    keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
    list_args = arg_dict[self.list_name]
    results = {}

    def update_results_dict(res, arg):
        results[arg] = res

    # List of deferreds to wait for.
    cached_defers = []
    missing = set()
    # If the cache takes a single arg then that is used as the key,
    # otherwise a tuple is used.
    if num_args == 1:
        def arg_to_cache_key(arg):
            return arg
    else:
        keylist = list(keyargs)

        def arg_to_cache_key(arg):
            keylist[self.list_pos] = arg
            return tuple(keylist)

    for arg in list_args:
        try:
            res = cache.get(arg_to_cache_key(arg),
                            callback=invalidate_callback)
            if not isinstance(res, ObservableDeferred):
                results[arg] = res
            elif not res.has_succeeded():
                res = res.observe()
                res.addCallback(update_results_dict, arg)
                cached_defers.append(res)
            else:
                results[arg] = res.get_result()
        except KeyError:
            missing.add(arg)

    if missing:
        # We need a deferred for each entry in the list, which we put in
        # the cache. Each deferred resolves with the relevant result for
        # that key.
        deferreds_map = {}
        for arg in missing:
            deferred = defer.Deferred()
            deferreds_map[arg] = deferred
            key = arg_to_cache_key(arg)
            cache.set(key, deferred, callback=invalidate_callback)

        def complete_all(res):
            # The wrapped function has completed. It returns a dict. We
            # can now resolve the observable deferreds in the cache and
            # update our own result map.
            for e in missing:
                val = res.get(e, None)
                deferreds_map[e].callback(val)
                results[e] = val

        def errback(f):
            # The wrapped function has failed. Invalidate any cache
            # entries we're supposed to be populating, and fail their
            # deferreds.
            for e in missing:
                key = arg_to_cache_key(e)
                cache.invalidate(key)
                deferreds_map[e].errback(f)
            # Return the failure, to propagate to our caller.
            return f

        args_to_call = dict(arg_dict)
        args_to_call[self.list_name] = list(missing)
        cached_defers.append(
            defer.maybeDeferred(
                preserve_fn(self.function_to_call),
                **args_to_call
            ).addCallbacks(complete_all, errback))

    if cached_defers:
        d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks(
            lambda _: results, unwrapFirstError)
        return make_deferred_yieldable(d)
    else:
        return defer.succeed(results)
def test_get_order_matches_use_order(self, get_config, api_auth_token, voucher, extra_tokens): """ The first unblinded token returned in a response to a **GET** request is the first token to be used to authorize a storage request. """ def after(d, f): new_d = Deferred() def f_and_continue(result): maybeDeferred(f).chainDeferred(new_d) return result d.addCallback(f_and_continue) return new_d def get_tokens(): d = authorized_request( api_auth_token, agent, b"GET", b"http://127.0.0.1/unblinded-token", ) d.addCallback(readBody) d.addCallback(lambda body: loads(body)[u"unblinded-tokens"], ) return d def use_a_token(): root.store.discard_unblinded_tokens( root.store.get_unblinded_tokens(1), ) config = get_config_with_api_token( self.useFixture(TempDir()), get_config, api_auth_token, ) root = root_from_config(config, datetime.now) num_tokens = root.controller.num_redemption_groups + extra_tokens # Put in a number of tokens with which to test. redeeming = root.controller.redeem(voucher, num_tokens) # Make sure the operation completed before proceeding. self.assertThat( redeeming, succeeded(Always()), ) agent = RequestTraversalAgent(root) getting_initial_tokens = get_tokens() using_a_token = after(getting_initial_tokens, use_a_token) getting_tokens_after = after(using_a_token, get_tokens) self.assertThat( gatherResults([getting_initial_tokens, getting_tokens_after]), succeeded( MatchesPredicate( lambda (initial_tokens, tokens_after): initial_tokens[1:] == tokens_after, u"initial, after (%s): initial[1:] != after", ), ), )
def process_v2_response(self, from_server, response_json, requested_ids=(),
                        only_from_server=True):
    """Parse, verify and persist a v2 'Server Keys' response.

    Extracts the current and expired verify keys from ``response_json``,
    checks any signatures made with keys present in the response,
    counter-signs the response with our own signing key, and stores the
    signed JSON for every key id we either requested or received.

    Args:
        from_server (str): the server we received this response from.
        response_json (dict): the json-decoded Server Keys response.
        requested_ids (iterable): key ids originally asked for; a row is
            stored for each even if the response omitted it.  The default
            is an immutable empty tuple (previously a mutable ``[]``,
            which is a Python anti-pattern even when never mutated).
        only_from_server (bool): if True, require the response's
            ``server_name`` to match ``from_server``.

    Returns:
        Deferred[dict]: maps server_name -> {key_id: verify key object}.

    Raises:
        KeyLookupError: if the server name does not match, or if a
            signature references a key id absent from ``verify_keys``.
    """
    time_now_ms = self.clock.time_msec()
    response_keys = {}

    # Current keys: tag each with the time we first saw it.
    verify_keys = {}
    for key_id, key_data in response_json["verify_keys"].items():
        if is_signing_algorithm_supported(key_id):
            key_base64 = key_data["key"]
            key_bytes = decode_base64(key_base64)
            verify_key = decode_verify_key_bytes(key_id, key_bytes)
            verify_key.time_added = time_now_ms
            verify_keys[key_id] = verify_key

    # Expired keys: also record when each stopped being valid.
    old_verify_keys = {}
    for key_id, key_data in response_json["old_verify_keys"].items():
        if is_signing_algorithm_supported(key_id):
            key_base64 = key_data["key"]
            key_bytes = decode_base64(key_base64)
            verify_key = decode_verify_key_bytes(key_id, key_bytes)
            verify_key.expired = key_data["expired_ts"]
            verify_key.time_added = time_now_ms
            old_verify_keys[key_id] = verify_key

    results = {}
    server_name = response_json["server_name"]
    if only_from_server:
        if server_name != from_server:
            raise KeyLookupError(
                "Expected a response for server %r not %r" % (
                    from_server, server_name
                )
            )

    # Verify each signature the origin server claims to have made, where
    # we hold the matching (supported) key.
    for key_id in response_json["signatures"].get(server_name, {}):
        if key_id not in response_json["verify_keys"]:
            raise KeyLookupError(
                "Key response must include verification keys for all"
                " signatures"
            )
        if key_id in verify_keys:
            verify_signed_json(
                response_json,
                server_name,
                verify_keys[key_id]
            )

    # Counter-sign with our own key so we can later serve this response
    # from our own notary endpoint.
    signed_key_json = sign_json(
        response_json,
        self.config.server_name,
        self.config.signing_key[0],
    )

    signed_key_json_bytes = encode_canonical_json(signed_key_json)
    ts_valid_until_ms = signed_key_json[u"valid_until_ts"]

    # Store a row for every key id we asked for or received, so that
    # "key not found" answers are cached too.
    updated_key_ids = set(requested_ids)
    updated_key_ids.update(verify_keys)
    updated_key_ids.update(old_verify_keys)

    response_keys.update(verify_keys)
    response_keys.update(old_verify_keys)

    yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [
            # NOTE(review): stores from_server=server_name (the origin),
            # not the notary we actually fetched from — confirm intended.
            preserve_fn(self.store.store_server_keys_json)(
                server_name=server_name,
                key_id=key_id,
                from_server=server_name,
                ts_now_ms=time_now_ms,
                ts_expires_ms=ts_valid_until_ms,
                key_json_bytes=signed_key_json_bytes,
            )
            for key_id in updated_key_ids
        ],
        consumeErrors=True,
    ).addErrback(unwrapFirstError))

    results[server_name] = response_keys
    defer.returnValue(results)
def get_changes(changeids):
    """Fetch the change object for every id in *changeids*.

    All lookups are issued immediately; the returned Deferred fires
    with the changes in the same order as the requested ids.
    """
    return defer.gatherResults(list(map(self.getChange, changeids)))
# NOTE(review): fragment of a larger inlineCallbacks-style test — the
# enclosing generator's definition and the body of the final dangling
# ``try:`` are outside this excerpt.  Python 2 except-syntax throughout.

# The pool is expected to have spawned one worker per CPU.
assert pool.size == cpu_count()

try:
    result = yield pool.on_ready(timeout=5)
except Exception, e:
    result = e

# on_ready() is expected to resolve with the pool object itself.
assert result is pool
assert pool.get_number_of_workers() == pool.size

# Fan the same directory argument out to four os.path predicates and
# run them concurrently through the pool.
directory = (os.path.dirname(__file__), )
dirs = (directory, ) * 4
calls = ('os.path.exists', 'os.path.isdir', 'os.path.isfile', 'os.path.islink')
results = yield gatherResults(map(pool.apply_async, calls, dirs))
exists, isdir, isfile, islink = results
assert exists is True
assert isdir is True
assert isfile is False
assert islink is False

# A failure inside the worker (sqrt of a negative number) should be
# surfaced to the caller as a PoolError.
try:
    result = yield pool.apply_async('math.sqrt', (-1, ), timeout=5)
except Exception, e:
    result = e

assert isinstance(result, PoolError)

try:
def save_router(self, config):
    """Persist a router's configuration and register its id.

    Writes the JSON-serialised config under the router's key and adds
    the router id to the set of known routers.  Both writes are started
    immediately; the returned Deferred fires once both have completed.

    :param config: router configuration dict; must contain an ``'id'``.
    """
    router_id = config['id']
    store_config = self.store_value(
        self.get_router_key(router_id), json.dumps(config))
    register_id = self.add_set_item(self.get_router_set_key(), router_id)
    return gatherResults([store_config, register_id])
def lookup_message_ids(self, protocol, seq_nums):
    """Resolve each sequence number to its stashed message id.

    Issues one lookup against the protocol's message stash per sequence
    number; all lookups run concurrently.

    :returns: Deferred firing with the message ids, in the same order
        as ``seq_nums``.
    """
    stash = protocol.service.message_stash
    return gatherResults(
        [stash.get_sequence_number_message_id(n) for n in seq_nums])
def delete_router(self, router_id):
    """Delete the stored configuration for router ``router_id``.

    Removes the config value and drops the id from the router set.
    Both deletions are started immediately; the returned Deferred
    fires once both have completed.
    """
    deletions = [
        self.remove_value(self.get_router_key(router_id)),
        self.remove_set_item(self.get_router_set_key(), router_id),
    ]
    return gatherResults(deletions)
def delete_router_destination(self, router_id, destination_id):
    """Delete one destination from a router.

    Removes the destination's stored value and drops its id from the
    router's destination set.  Both deletions are started immediately;
    the returned Deferred fires once both have completed.
    """
    value_key = self.get_router_destination_key(router_id, destination_id)
    set_key = self.get_router_destination_set_key(router_id)
    deletions = [
        self.remove_value(value_key),
        self.remove_set_item(set_key, destination_id),
    ]
    return gatherResults(deletions)
# NOTE(review): fragment of a larger Python 2 script — ``group``,
# ``macroSize``, ``ft03`` etc. and ``merge`` are defined outside this
# excerpt.

# Register the remaining child parameters on the group.
group.addChild("macro_size", macroSize)
group.addChild("ft03", ft03)
group.addChild("inputs", inputs)
group.addChild("iv08", iv08)
group.addChild("pc01", pc01)

# Fetch two children; getChild() presumably returns Deferreds, since the
# results are gathered below — TODO confirm.
d1 = group.getChild("cpu_usage")
d2 = group.getChild("iv08")
#d = group.get()

def onResult(data):
    # Debug dump of a whole-group result (not wired up in this excerpt).
    print "*** group:", data
    print "*** json(group):", json.dumps(data)

from twisted.internet import defer
d = defer.gatherResults([d1, d2])

def onResults(data):
    # Both child results are in: merge the second into the first and
    # dump before/after.  merge() appears to mutate d1 in place — the
    # "new d1" print suggests so; confirm against merge's definition.
    d1, d2 = data
    print "d1:", d1
    print "d2:", d2
    merge(d1, d2)
    print "new d1:", d1
    print "json(d1):", json.dumps(d1)

d.addCallback(onResults)
async def process_v2_response(
    self, from_server: str, response_json: JsonDict, time_added_ms: int
) -> Dict[str, FetchKeyResult]:
    """Parse a 'Server Keys' structure from the result of a /key request

    This is used to parse either the entirety of the response from
    GET /_matrix/key/v2/server, or a single entry from the list returned by
    POST /_matrix/key/v2/query.

    Checks that each signature in the response that claims to come from the origin
    server is valid, and that there is at least one such signature.

    Stores the json in server_keys_json so that it can be used for future responses
    to /_matrix/key/v2/query.

    Args:
        from_server: the name of the server producing this result: either
            the origin server for a /_matrix/key/v2/server request, or the notary
            for a /_matrix/key/v2/query.

        response_json: the json-decoded Server Keys response object

        time_added_ms: the timestamp to record in server_keys_json

    Returns:
        Map from key_id to result object
    """
    ts_valid_until_ms = response_json["valid_until_ts"]

    # start by extracting the keys from the response, since they may be required
    # to validate the signature on the response.
    verify_keys = {}
    for key_id, key_data in response_json["verify_keys"].items():
        if is_signing_algorithm_supported(key_id):
            key_base64 = key_data["key"]
            key_bytes = decode_base64(key_base64)
            verify_key = decode_verify_key_bytes(key_id, key_bytes)
            # current keys are valid until the response's valid_until_ts
            verify_keys[key_id] = FetchKeyResult(
                verify_key=verify_key, valid_until_ts=ts_valid_until_ms
            )

    server_name = response_json["server_name"]
    verified = False
    for key_id in response_json["signatures"].get(server_name, {}):
        key = verify_keys.get(key_id)
        if not key:
            # the key may not be present in verify_keys if:
            #  * we got the key from the notary server, and:
            #  * the key belongs to the notary server, and:
            #  * the notary server is using a different key to sign notary
            #    responses.
            continue

        verify_signed_json(response_json, server_name, key.verify_key)
        verified = True
        # one valid origin-server signature is sufficient
        break

    if not verified:
        raise KeyLookupError(
            "Key response for %s is not signed by the origin server"
            % (server_name,)
        )

    # expired keys are reported too, with their expiry time as the
    # valid_until timestamp
    for key_id, key_data in response_json["old_verify_keys"].items():
        if is_signing_algorithm_supported(key_id):
            key_base64 = key_data["key"]
            key_bytes = decode_base64(key_base64)
            verify_key = decode_verify_key_bytes(key_id, key_bytes)
            verify_keys[key_id] = FetchKeyResult(
                verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
            )

    key_json_bytes = encode_canonical_json(response_json)

    # persist the canonical JSON for every (current) key id, so it can be
    # served from our own /key/v2/query endpoint later
    await make_deferred_yieldable(
        defer.gatherResults(
            [
                run_in_background(
                    self.store.store_server_keys_json,
                    server_name=server_name,
                    key_id=key_id,
                    from_server=from_server,
                    ts_now_ms=time_added_ms,
                    ts_expires_ms=ts_valid_until_ms,
                    key_json_bytes=key_json_bytes,
                )
                for key_id in verify_keys
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
    )

    return verify_keys
def _process_event_queue_loop(self):
    """Drain the event stream and send new events over federation.

    Repeatedly reads batches of up to 100 events from the stream,
    starting at the persisted "events" federation position, sends each
    event to the hosts in its room, then advances the persisted
    position.  Loops until the stream is caught up with
    ``self._last_poked_id``.  ``self._is_processing`` is set for the
    duration of the loop (presumably checked by the caller to avoid
    concurrent runs — confirm at the call site) and cleared on exit.
    """
    try:
        self._is_processing = True
        while True:
            last_token = yield self.store.get_federation_out_pos("events")
            next_token, events = yield self.store.get_all_new_events_stream(
                last_token, self._last_poked_id, limit=100,
            )

            logger.debug("Handling %s -> %s", last_token, next_token)

            # Stop once we've caught up with the latest poked position.
            if not events and next_token >= self._last_poked_id:
                break

            @defer.inlineCallbacks
            def handle_event(event):
                # Only send events for this server.
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    return

                try:
                    # Get the state from before the event.
                    # We need to make sure that this is the state from before
                    # the event and not from after it.
                    # Otherwise if the last member on a server in a room is
                    # banned then it won't receive the event because it won't
                    # be in the room after the ban.
                    destinations = yield self.state.get_current_hosts_in_room(
                        event.room_id, latest_event_ids=event.prev_event_ids(),
                    )
                except Exception:
                    # Best-effort: log and skip this event rather than
                    # aborting the whole batch.
                    logger.exception(
                        "Failed to calculate hosts in room for event: %s",
                        event.event_id,
                    )
                    return

                destinations = set(destinations)

                if send_on_behalf_of is not None:
                    # If we are sending the event on behalf of another server
                    # then it already has the event and there is no reason to
                    # send the event to it.
                    destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, destinations)

                self._send_pdu(event, destinations)

            @defer.inlineCallbacks
            def handle_room_events(events):
                # Events within a room are sent strictly in order; rooms
                # are processed in parallel (see gatherResults below).
                for event in events:
                    yield handle_event(event)

            events_by_room = {}
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            yield logcontext.make_deferred_yieldable(defer.gatherResults(
                [
                    logcontext.run_in_background(handle_room_events, evs)
                    for evs in itervalues(events_by_room)
                ],
                consumeErrors=True
            ))

            # Only advance the persisted position once the batch is done,
            # so a crash re-processes rather than skips events.
            yield self.store.update_federation_out_pos(
                "events", next_token
            )

            if events:
                now = self.clock.time_msec()
                ts = yield self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_lag.labels(
                    "federation_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "federation_sender").set(ts)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "federation_sender"
                ).inc(len(events_by_room))

            event_processing_loop_counter.labels("federation_sender").inc()

            synapse.metrics.event_processing_positions.labels(
                "federation_sender").set(next_token)

    finally:
        self._is_processing = False