def requestAvatarId(self, c):
    """Authenticate credentials *c* against the Swift auth service.

    Returns a Deferred that fires via ``_after_auth`` on success, or a
    Deferred failing with ``UnauthorizedLogin`` when *c* cannot provide
    IUsernamePassword.
    """
    # Adapt the credentials; yields None (not an exception) when the
    # object cannot provide username/password.
    creds = credentials.IUsernamePassword(c, None)
    if creds is not None:
        # Throttling locks handed to the connection below.
        locks = []
        pool = HTTPConnectionPool(reactor, persistent=False)
        pool.cachedConnectionTimeout = self.timeout
        if self.max_concurrency:
            # Reuse connections, capped per host at the concurrency limit.
            pool.persistent = True
            pool.maxPersistentPerHost = self.max_concurrency
            locks.append(defer.DeferredSemaphore(self.max_concurrency))
        if self.global_max_concurrency:
            # NOTE(review): this semaphore is created fresh per call, so it
            # limits only this connection, not all connections globally —
            # TODO confirm intent.
            locks.append(
                defer.DeferredSemaphore(self.global_max_concurrency))
        conn = ThrottledSwiftConnection(locks, self.auth_url, creds.username,
                                        creds.password, pool=pool,
                                        extra_headers=self.extra_headers,
                                        verbose=self.verbose)
        conn.user_agent = USER_AGENT
        d = conn.authenticate()
        d.addCallback(self._after_auth, conn)
        d.addErrback(eb_failed_auth)
        return d
    return defer.fail(error.UnauthorizedLogin())
def __init__(self, node):
    """Queue processor bound to *node*; work items are processed one at a
    time while pings run with up to ``constants.alpha`` concurrency."""
    self._node = node
    # Timestamps come from the node's (possibly fake/test) clock.
    self._get_time = self._node.clock.seconds
    # FIFO of pending contacts.
    self._queue = deque()
    self._enqueued_contacts = {}
    # Serialize _process runs so only one iteration is active at a time.
    self._semaphore = defer.DeferredSemaphore(1)
    # Bound concurrent pings at the DHT-wide alpha parameter.
    self._ping_semaphore = defer.DeferredSemaphore(constants.alpha)
    self._process_lc = node.get_looping_call(self._semaphore.run, self._process)
def start_sending(self, interval=5.0, stopAfter=None):
    """Start every sensor, one at a time.

    Returns ``(will_start, will_finish)``: the first fires when all
    sensors have started, the second when all sensors have finished and
    this board has been deregistered.

    NOTE(review): `interval` is unused in this body, and the resolved
    `stopAfter` is never passed on to the sensors — TODO confirm.
    """
    self.will_finish = defer.Deferred()
    sensors_will_finish = []
    if stopAfter is None:
        stopAfter = self.stopAfter

    def start_sending(sensor):
        logger.debug("sensor start_sending: %r", sensor)
        sensor_will_start, sensor_will_finish = sensor.start_sending()
        # Not every sensor reports a finish Deferred.
        if sensor_will_finish:
            sensors_will_finish.append(sensor_will_finish)
        return sensor_will_start

    # Serialize sensor start-up: each sensor starts only after the
    # previous one's start Deferred fires.
    one_at_a_time = defer.DeferredSemaphore(1)
    sensors_will_start = []
    for sensor in self.sensors:
        d = one_at_a_time.run(start_sending, sensor)
        sensors_will_start.append(d)
    will_start = defer.DeferredList(sensors_will_start)

    # When all sensors finish, deregister this board
    all_sensors_finish = defer.DeferredList(sensors_will_finish)
    all_sensors_finish.addCallback(self.deregister) \
        .addCallback(self.will_finish.callback)
    return will_start, self.will_finish
def __init__(self, redis_host='10.91.0.1', ssl_cert=None, ssl_key=None,
             ssl_ca=None, concurrency_level=None):
    """Set up the Redis connection and the TLS HTTP agent.

    :param redis_host: host running Redis (port 6379, db 0).
    :param ssl_cert: client certificate path; defaults to
        ``<client_certs_dir>/client.crt``.
    :param ssl_key: client key path; defaults to
        ``<client_certs_dir>/client.key``.
    :param ssl_ca: CA certificate path; defaults to
        ``<client_certs_dir>/ca.crt``.
    :param concurrency_level: if given, attach a DeferredSemaphore with
        this many tokens to the HTTP agent to bound concurrent requests;
        None means unbounded.
    """
    self.redis_host = redis_host
    if ssl_cert is not None:
        self.cacheclient_cert = ssl_cert
    else:
        self.cacheclient_cert = os.path.join(self.client_certs_dir, 'client.crt')
    if ssl_key is not None:
        self.cacheclient_key = ssl_key
    else:
        self.cacheclient_key = os.path.join(self.client_certs_dir, 'client.key')
    if ssl_ca is not None:
        self.cacheclient_ca = ssl_ca
    else:
        self.cacheclient_ca = os.path.join(self.client_certs_dir, 'ca.crt')
    # Setup connections to redis
    self.redis = redis.StrictRedis(host=self.redis_host, port=6379, db=0)
    self.get_certs()
    self.http_agent = create_agent(self.cacheclient_ca, self.cacheclient_cert,
                                   self.cacheclient_key)
    # Optional client-side throttle attached to the agent.
    if concurrency_level is not None:
        deferred_semaphore = defer.DeferredSemaphore(concurrency_level)
    else:
        deferred_semaphore = None
    self.http_agent.deferred_semaphore = deferred_semaphore
    self.log = logging.getLogger(__name__)
def testSemaphore(self):
    """Exercise DeferredSemaphore: result propagation through run() and
    token accounting across manual acquire/release.

    Fix: ``assertEquals`` is a deprecated alias (removed in Python 3.12);
    use the canonical ``assertEqual``.
    """
    N = 13
    sem = defer.DeferredSemaphore(N)

    controlDeferred = defer.Deferred()

    def helper(self, arg):
        self.arg = arg
        return controlDeferred

    results = []
    uniqueObject = object()
    # self/arg are passed as keywords so they reach helper unchanged.
    resultDeferred = sem.run(helper, self=self, arg=uniqueObject)
    resultDeferred.addCallback(results.append)
    resultDeferred.addCallback(self._incr)
    # helper has run, but its Deferred has not fired yet.
    self.assertEqual(results, [])
    self.assertEqual(self.arg, uniqueObject)
    controlDeferred.callback(None)
    self.assertEqual(results.pop(), None)
    self.assertEqual(self.counter, 1)

    self.counter = 0
    # The first N acquires succeed immediately.
    for i in range(1, 1 + N):
        sem.acquire().addCallback(self._incr)
        self.assertEqual(self.counter, i)

    # The (N+1)th acquire waits until a token is released.
    sem.acquire().addCallback(self._incr)
    self.assertEqual(self.counter, N)
    sem.release()
    self.assertEqual(self.counter, N + 1)

    # Releasing the remaining tokens fires no further callbacks.
    for i in range(1, 1 + N):
        sem.release()
        self.assertEqual(self.counter, N + 1)
def check_schema_versions(couch_url, agent=None):
    """
    Check that all user databases use the correct couch schema.

    Runs ``_check_db_schema_version`` for every database whose name starts
    with ``user-``, with at most 20 checks in flight at once.

    :param couch_url: The URL for the couch database.
    :type couch_url: str
    :param agent: an optional agent for doing requests, used in tests.
    :type agent: twisted.web.client.Agent
    """
    url = urlsplit(couch_url)
    # Basic-auth credentials only when the URL embeds a username.
    auth = (url.username, url.password) if url.username else None
    # Rebuild the URL without embedded credentials.
    # NOTE(review): url.port is None when couch_url has no explicit port,
    # which would make this %d format raise — TODO confirm callers always
    # include a port.
    url = "%s://%s:%d" % (url.scheme, url.hostname, url.port)
    deferreds = []
    semaphore = defer.DeferredSemaphore(20)
    logger.info('Starting CouchDB schema versions check...')
    dbs = yield list_dbs(url, auth, agent)
    for db in dbs:
        # Only per-user databases are checked.
        if not db.startswith('user-'):
            continue
        d = semaphore.run(_check_db_schema_version, url, db, auth, agent=agent)
        deferreds.append(d)
    d = defer.gatherResults(deferreds, consumeErrors=True)
    try:
        yield d
        logger.info('Finished CouchDB schema versions check.')
    except Exception as e:
        msg = 'Error checking CouchDB schema versions: %r' % e
        logger.error(msg)
        raise Exception(msg)
def main():
    """Stream a '|'-delimited CSV through Twisted, processing up to 500
    rows concurrently.  (Python 2 script: print statements, binary open.)"""
    # Expect exactly one argument: an input file ending in ".csv".
    if (len(sys.argv) == 2 and sys.argv[1][-4:] == ".csv"):  # Check for input file
        global ifile
        ifile = sys.argv[1]
        # Output file names are derived from the input name.
        global outfile_errors
        outfile_errors = sys.argv[1][:-4] + "_errors.txt"
        global outfile_success
        outfile_success = sys.argv[1][:-4] + "_success.txt"
        with open(ifile, 'rb') as f:
            reader = csv.reader(f, skipinitialspace=True, delimiter='|',
                                quoting=csv.QUOTE_ALL)
            try:
                # At most 500 rows processed concurrently.
                sem = defer.DeferredSemaphore(500)
                deferreds = []
                counter = 0
                for row in reader:
                    print "Counter: ", counter
                    row_list = []
                    row_list = list(row)
                    print "Row List: ", row_list
                    deferreds.append(sem.run(_deferred, row_list))
                    counter += 1
                #print "hello"
                # Stop the reactor once every row has been handled.
                d = defer.gatherResults(deferreds)
                d.addCallback(stop_reactor)
            except csv.Error as e:
                sys.exit('file %s, line %d: %s' % (ifile, reader.line_num, e))
    else:
        print "Usage: python StreamCSV_with_Twisted.py infile.csv"
        sys.exit()
def __init__(self, _ignored, *connargs, **connkw):
    """
    Create a new connection pool.

    Any positional or keyword arguments other than the first one and a
    'min' keyword argument are passed to the L{Connection} when
    connecting. Use these arguments to pass database names, usernames,
    passwords, etc.

    @type _ignored: Any object.
    @param _ignored: Ignored, for L{adbapi.ConnectionPool} compatibility.
    """
    # Fall back to the global reactor when no class-level reactor is set.
    if not self.reactor:
        from twisted.internet import reactor
        self.reactor = reactor
    # for adbapi compatibility, min can be passed in kwargs
    if 'min' in connkw:
        self.min = connkw.pop('min')
    self.connargs = connargs
    self.connkw = connkw
    # Open `min` connections up front.
    self.connections = set(
        [self.connectionFactory(self.reactor) for _ in range(self.min)])
    # to avoid checking out more connections than there are pooled in total
    self._semaphore = defer.DeferredSemaphore(self.min)
def resolve_list(self, hostname_list, qtype='A', tokens=100):
    '''
    Resolves a list of hostnames asynchronously using Twisted.

    Blocks in reactor.run() until every lookup has completed.

    @param: hostname_list, list of hostnames (str)
    @param: qtype, DNS record type to query (default 'A')
    @param: tokens, maximum number of lookups in flight at once
    @return: a dictionary where the keys are hostnames and the values are
    lists of IP addresses (dotted quad format)
    '''
    jobs = []
    # a semaphore to limit the number of concurrent jobs
    # if token=100 then job 101 will wait until a previous job completes
    sem = defer.DeferredSemaphore(tokens)
    # create a deferred for each hostname
    for host in hostname_list:
        # Default every host to TIMEOUT; callbacks overwrite on reply.
        self.results[host] = {}
        self.results[host]['status'] = 'TIMEOUT'
        d = sem.run(self.do_lookup, host, qtype.upper())
        d.addCallbacks(self.got_result, self.got_failure)
        jobs.append(d)
    # gather the results
    d = defer.gatherResults(jobs, consumeErrors=False)
    # stop the reactor when we're done
    d.addCallback(lambda fin: reactor.stop())
    reactor.run()
    return self.results
def __init__(self, blob_manager, blob_hashes=None, servers=None, client=None,
             sd_hashes=None, retry=True, clock=None):
    """HTTP blob downloader.

    :param blob_manager: destination manager for downloaded blobs.
    :param blob_hashes: hashes of blobs to fetch (default: none yet).
    :param servers: mirror servers to download from.
    :param client: HTTP client; defaults to ``treq``.
    :param sd_hashes: stream-descriptor hashes to fetch.
    :param retry: keep retrying via the looping call when True.
    :param clock: reactor-like clock, injectable for tests; defaults to
        the global reactor.
    """
    if not clock:
        from twisted.internet import reactor
        self.clock = reactor
    else:
        self.clock = clock
    self.blob_manager = blob_manager
    self.servers = servers or []
    self.client = client or treq
    self.blob_hashes = blob_hashes or []
    self.missing_blob_hashes = []
    self.downloaded_blob_hashes = []
    self.sd_hashes = sd_hashes or []
    self.head_blob_hashes = []
    # Give up on a blob after this many failures.
    self.max_failures = 3
    # At most two blob downloads in flight at once.
    self.semaphore = defer.DeferredSemaphore(2)
    self.deferreds = []
    self.writers = []
    self.retry = retry
    self.looping_call = task.LoopingCall(self._download_lc)
    self.looping_call.clock = self.clock
    self.finished_deferred = defer.Deferred()
    # Swallow cancellation; any other failure propagates.
    self.finished_deferred.addErrback(lambda err: err.trap(defer.CancelledError))
    # Poll quickly at first (seconds); back off to the long delay later.
    self.short_delay = 30
    self.long_delay = 600
    self.delay = self.short_delay
    # Sentinel: "effectively infinite missing" before the first check.
    self.last_missing = 10000000
    self.lc_deferred = None
def __init__(self, wwu, description, series=[], raw=False, thread=False,
             N_concurrent=1):
    """
    Constructs me with a reference I{wwu} to a L{WireWorkerUniverse} and
    a client connection I{description} and immediately connects to a
    L{WireServer} running on another Python interpreter via the AMP
    protocol.

    @keyword N_concurrent: The number of tasks I can have outstanding.

    NOTE(review): ``series=[]`` is a mutable default; it is shared across
    calls if any caller mutates it — TODO confirm it is read-only.
    """
    def connected(ap):
        # Connection established: keep the protocol and unblock waiters.
        self.ap = ap
        self.dLock.release()

    WireWorkerUniverse.check(wwu)
    self.tasks = []
    self.raw = raw
    self.thread = thread
    self.iQualified = series
    # Lock that is acquired until AMP connection made
    self.dLock = util.DeferredLock(allowZombies=True)
    self.dLock.addStopper(self.stopper)
    self.dLock.acquire()
    # Limit tasks outstanding
    self.ds = defer.DeferredSemaphore(N_concurrent)
    # Make the connection
    dest = endpoints.clientFromString(reactor, description)
    endpoints.connectProtocol(
        dest, self.AMP(locator=wwu)).addCallback(connected)
def immediate_announce(self, blob_hashes):
    """Announce the given blobs to the DHT right away.

    Runs at most ``self.concurrent_announcers`` do_store calls in
    parallel and logs progress once a minute until all finish.
    Returns (via returnValue) the per-hash store results.
    """
    # Queue only hashes not already pending.
    self.hash_queue.extend(b for b in blob_hashes if b not in self.hash_queue)
    log.info("Announcing %i blobs", len(self.hash_queue))
    start = self.clock.seconds()
    progress_lc = task.LoopingCall(
        self._show_announce_progress, len(self.hash_queue), start)
    progress_lc.start(60, now=False)
    # Throttle concurrent store RPCs.
    s = defer.DeferredSemaphore(self.concurrent_announcers)
    results = yield utils.DeferredDict({
        blob_hash: s.run(self.do_store, blob_hash)
        for blob_hash in blob_hashes
    })
    now = self.clock.seconds()
    progress_lc.stop()
    # results[blob_hash][0] is truthy when the announce succeeded.
    announced_to = [
        blob_hash for blob_hash in results if results[blob_hash][0]
    ]
    if len(announced_to) != len(results):
        log.debug("Failed to announce %i blobs",
                  len(results) - len(announced_to))
    if announced_to:
        log.info(
            'Took %s seconds to announce %i of %i attempted hashes (%f hashes per second)',
            now - start, len(announced_to), len(blob_hashes),
            int(float(len(blob_hashes)) / float(now - start)))
    defer.returnValue(results)
def _run_command_on_target_tree(self, command_name):
    """Run *command_name* over the whole target tree.

    Concurrency is bounded by the ``--parallel`` argument; when it is
    not given, commands run strictly one at a time.
    """
    root = self._load_manofest()
    parallel = self._args.parallel
    if parallel is None:
        parallel = 1
    throttle = defer.DeferredSemaphore(parallel)
    yield self._run_command_on_target_children(root, command_name, throttle)
def test_multi_lock(self):
    """A connection throttled by both a DeferredLock and a
    DeferredSemaphore must hold both while a request is outstanding and
    release both when the response arrives."""
    lock = defer.DeferredLock()
    sem = defer.DeferredSemaphore(2)
    conn = ThrottledSwiftConnection([lock, sem],
                                    'http://127.0.0.1:8080/auth/v1.0',
                                    'username', 'api_key', verbose=True)
    conn.agent = self.agent
    conn.storage_url = 'http://127.0.0.1:8080/v1/AUTH_user'
    conn.auth_token = 'TOKEN_123'
    conn.make_request('method', 'path')
    # First request in flight: lock held, one semaphore token consumed.
    self.assertEqual(len(self.agent.requests), 1)
    self.assertEqual(lock.locked, 1)
    self.assertEqual(sem.tokens, 1)
    # Second request queues behind the DeferredLock.
    conn.make_request('method', 'path2')
    self.assertEqual(len(self.agent.requests), 1)
    d, args, kwargs = self.agent.requests[0]
    d.callback(StubResponse(200))
    # Completing the first request lets the second one through.
    self.assertEqual(len(self.agent.requests), 2)
    d, args, kwargs = self.agent.requests[1]
    d.callback(StubResponse(200))
    # All throttles fully released.
    self.assertEqual(lock.locked, 0)
    self.assertEqual(sem.tokens, 2)
def __init__(self, node, shortlist, key, rpc, exclude=None):
    """Iterative DHT find operation state.

    :param node: the local node performing the search.
    :param shortlist: the k closest known contacts to *key*.
    :param key: target key being searched for.
    :param rpc: the rpc method name (findValue or findNode).
    :param exclude: optional iterable of contacts to skip.
    """
    self.exclude = set(exclude or [])
    self.node = node
    self.finished_deferred = defer.Deferred()
    # all distance operations in this class only care about the distance
    # to self.key, so this makes it easier to calculate those
    self.distance = Distance(key)
    # The closest known and active node yet found
    self.closest_node = None if not shortlist else shortlist[0]
    self.prev_closest_node = None
    # Shortlist of contact objects (the k closest known contacts to the key from the routing table)
    self.shortlist = shortlist
    # The search key
    self.key = str(key)
    # The rpc method name (findValue or findNode)
    self.rpc = rpc
    # List of active queries; len() indicates number of active probes
    self.active_probes = []
    # List of contact (address, port) tuples that have already been queried, includes contacts that didn't reply
    self.already_contacted = []
    # A list of found and known-to-be-active remote nodes (Contact objects)
    self.active_contacts = []
    # Ensure only one searchIteration call is running at a time
    self._search_iteration_semaphore = defer.DeferredSemaphore(1)
    self._iteration_count = 0
    self.find_value_result = {}
    self.pending_iteration_calls = []
def downloadPicons(self):
    """Download a picon for every channel in the current channel list,
    at most 10 downloads in parallel.  (Python 2 code.)"""
    no_drive = False
    if self['list'].getCurrent():
        # Abort with a message when the picon target device is missing.
        if not os.path.isdir(self.picondir):
            txt = "%s\n" % self.picondir + _("is not installed.")
            self.session.open(MessageBox, txt, MessageBox.TYPE_INFO, timeout = 3)
            no_drive = True
    if not no_drive:
        if not os.path.isdir(self.piconfolder):
            print "[PiconManager] create folder %s" % self.piconfolder
            os.makedirs(self.piconfolder)
        urls = []
        if int(self.countchlist) > 0 and not self.keyLocked and self['list'].getCurrent():
            if len(self['list'].getCurrent()[0]) >= 2:
                # Base URL of the selected picon package.
                self.auswahl = self['list'].getCurrent()[0][1]+"/"
                for channel in self.chlist:
                    # Service reference -> file name: ':' becomes '_',
                    # trailing character dropped, '.png' appended.
                    downloadPiconUrl = channel.replace(':', '_')
                    downloadPiconUrl = downloadPiconUrl[:-1] + ".png"
                    downloadPiconPath = self.piconfolder + downloadPiconUrl
                    downloadPiconUrl = self.auswahl + downloadPiconUrl
                    urls.append((downloadPiconUrl, downloadPiconPath))
                if len(urls) > 0:
                    self.countload = 0
                    self.counterrors = 0
                    # At most ten concurrent picon downloads.
                    ds = defer.DeferredSemaphore(tokens = 10)
                    downloads = [ds.run(self.download, downloadPiconUrl, downloadPiconPath).addCallback(self.downloadDone).addErrback(self.downloadError) for downloadPiconUrl, downloadPiconPath in urls]
                    finished = defer.DeferredList(downloads).addErrback(self.dataError)
def __init__(self, state, clock, log_dir, stopped, partitions, this_partition,
             build_duration, circuit_timeout, circuit_generator,
             log_chunk_size, max_concurrency):
    """
    state: the txtorcon state object
    clock: this argument is normally the twisted global reactor object but
    unit tests might set this to a clock object which can time travel for
    faster testing.
    log_dir: the directory to write log files
    stopped: callable to call when done
    partitions: the number of partitions to use for processing the set of
    circuits
    this_partition: which partition of circuit we will process
    build_duration: build a new circuit every specified duration
    circuit_timeout: circuit build timeout duration
    circuit_generator: source of the circuits to process (stored as
    self.circuits)
    log_chunk_size: number of results written per log-file chunk
    max_concurrency: maximum number of circuit builds in flight at once
    """
    self.state = state
    self.clock = clock
    self.log_dir = log_dir
    self.stopped = stopped
    self.partitions = partitions
    self.this_partition = this_partition
    self.circuit_life_duration = circuit_timeout
    self.circuit_build_duration = build_duration
    self.circuits = circuit_generator
    self.log_chunk_size = log_chunk_size
    # Bounds concurrent circuit builds.
    self.semaphore = defer.DeferredSemaphore(max_concurrency)
    self.lazy_tail = defer.succeed(None)
    self.tasks = {}
    self.call_id = None  # XXX adjust me
    self.result_sink = ResultSink(log_dir, chunk_size=log_chunk_size)
def __init__(self, cert_path=None, timeout=DEFAULT_HTTP_TIMEOUT, pool=None):
    """
    Init the HTTP client

    :param cert_path: The path to the ca certificate file to verify
                      certificates, if None given the system's CAs will be
                      used.
    :type cert_path: str

    :param timeout: The amount of time that this Agent will wait for the
                    peer to accept a connection and for each request to be
                    finished. If a pool is passed, then this argument is
                    ignored.
    :type timeout: float

    :param pool: Optional connection pool; when None the class-level pool
                 is kept.
    """
    self._timeout = timeout
    # Keep the existing (class-level) pool when none is passed in.
    self._pool = pool if pool is not None else self._pool

    if cert_path is None:
        trustRoot = getCertifiTrustRoot()
    else:
        if not os.path.isfile(cert_path):
            raise RuntimeError('Certificate file %s cannot be found' % cert_path)
        trustRoot = cert_path

    self._agent = Agent(reactor,
                        contextFactory=getPolicyForHTTPS(trustRoot),
                        pool=self._pool,
                        connectTimeout=self._timeout)
    # Bound concurrent requests by the pool's per-host connection cap.
    self._semaphore = defer.DeferredSemaphore(
        self._pool.maxPersistentPerHost)
def send(self, vals):
    """Send the inverter readings in *vals* to the /input/post endpoint
    as a GET request with a JSON payload in the query string."""
    inverterDetails = vals.copy()
    # Strip fields that are not part of the payload.
    inverterDetails.pop('Serial', None)
    inverterDetails.pop('#SolaxClient', None)
    url = self.host + "/input/post"
    vars = {}
    vars['apikey'] = self.apikey
    # 'name' becomes the node identifier rather than payload data.
    vars['node'] = inverterDetails.pop('name', None)
    vars['fulljson'] = json.dumps(inverterDetails)
    url += "?" + urllib.parse.urlencode(vars)
    httpContext = {
        'method': b'GET',
        'url': url,
        'callback': nullResponse,
        'errback': httpError,
        'connect_timeout': self.timeout
    }
    # NOTE(review): a fresh one-token semaphore per call throttles nothing
    # across send() calls — presumably meant to be an instance attribute;
    # TODO confirm.
    semaphore = defer.DeferredSemaphore(1)
    semaphore.run(requestHTTP, context=httpContext)
def test(txbenchmark, payload, tmpdir):
    """
    Read many blobs of the same size from the filesystem backend.

    First writes `amount` identical blobs (outside the measured section),
    then benchmarks reading them all back, with up to 100 operations in
    flight at once.
    """
    backend = FilesystemBlobsBackend(blobs_path=tmpdir.strpath)
    data = payload(size)
    # first write blobs to the backend...
    semaphore = defer.DeferredSemaphore(100)
    deferreds = []
    for i in xrange(amount):
        producer = FileBodyProducer(BytesIO(data))
        d = semaphore.run(backend.write_blob, 'user', str(i), producer)
        deferreds.append(d)
    yield defer.gatherResults(deferreds)

    # ... then measure the read operation
    @pytest.inlineCallbacks
    def read():
        deferreds = []
        for i in xrange(amount):
            # Discard blob contents; only the read timing matters.
            consumer = DevNull()
            d = semaphore.run(backend.read_blob, 'user', str(i), consumer)
            deferreds.append(d)
        yield defer.gatherResults(deferreds)

    yield txbenchmark(read)
def charts_list(self, data):
    """Parse the charts HTML in *data* and start downloading cover art.

    Each regex match yields (title, token, image, views, chart place).
    Covers are fetched strictly one at a time and each one feeds
    ``create_own_playlist`` on success.

    Fix: removed a leftover "old V1.0" ``re.findall`` whose result was
    immediately overwritten by the second call — dead code.
    """
    songs = re.findall(
        "<a href='/watch/.*?' title='(.*?)'><img id='i(\d+)'.*?longdesc='(.*?.jpg)'.*?<span class='vViews'>(.*?)</span>.*?<span class='chartTop.*?'>(.*?)</span>",
        data, re.S)
    self.count = len(songs)
    self.loading = 0
    url_list = []
    for each in songs:
        (title, token, image, runtime, place) = each
        # Cover art is cached per chart position.
        jpg_store = "/tmp/mtv/%s.jpg" % str(place)
        title = self.umlaute(title)
        url_list.append((token, title, place, jpg_store, runtime, image))
    if len(url_list) != 0:
        # tokens=1: download covers sequentially.
        ds = defer.DeferredSemaphore(tokens=1)
        downloads = [
            ds.run(self.download, item[5], item[3]).addCallback(
                self.create_own_playlist, item[0], item[1], item[2],
                item[3], item[4]).addErrback(self.errorload)
            for item in url_list
        ]
        finished = defer.DeferredList(downloads).addErrback(self.errorload)
def __init__(self, settings):
    """Selenium download-handler setup: bound the number of concurrent
    browser instances and register cleanup on spider close."""
    # self.options = settings.get('SELENIUM_OPTIONS', {})
    limit = settings.get('SELENIUM_MAXRUN', 10)
    self.sem = defer.DeferredSemaphore(limit)
    # Idle browsers are reused most-recent-first.
    self.queue = queue.LifoQueue(limit)
    manager = SignalManager(dispatcher.Any)
    manager.connect(self._close, signal=signals.spider_closed)
def reflect_lbry_files(self):
    """Re-reflect every lbry file whose sd hash is flagged for
    re-reflection, with at most CONCURRENT_REFLECTS uploads at once."""
    throttle = defer.DeferredSemaphore(self.CONCURRENT_REFLECTS)
    wanted = yield self.storage.get_streams_to_re_reflect()
    pending = [
        throttle.run(reflect_file, lbry_file)
        for lbry_file in self.lbry_files
        if lbry_file.sd_hash in wanted
    ]
    yield defer.DeferredList(pending)
def getOverallDeferredSemaphore():
    """Return the process-wide semaphore bounding concurrent HTTP
    requests, creating it lazily on first use.

    The token count comes from zenpython's ``twistedconcurrenthttp``
    option when those collector preferences are registered, otherwise
    from DEFAULT_TWISTEDCONCURRENTHTTP.
    """
    global OVERALL_SEMAPHORE
    if OVERALL_SEMAPHORE is None:
        preferences = zope.component.queryUtility(ICollectorPreferences,
                                                  'zenpython')
        if preferences:
            OVERALL_SEMAPHORE = defer.DeferredSemaphore(
                preferences.options.twistedconcurrenthttp)
        else:
            # When we are running in a daemon other than zenpython, the
            # preferences value will not be available
            OVERALL_SEMAPHORE = defer.DeferredSemaphore(
                DEFAULT_TWISTEDCONCURRENTHTTP)
    return OVERALL_SEMAPHORE
def __init__(self, settings): self.options = settings.get('PHANTOMJS_OPTIONS', {}) # 默认空 max_run = settings.get('PHANTOMJS_MAXRUN', 10) # PhantomJS 可以同时运行最大的个数, 默认10 self.sem = defer.DeferredSemaphore(max_run) self.queue = Queue.LifoQueue(maxsize=max_run) # LifoQueue 后进先出队列 SignalManager(dispatcher.Any).connect(receiver=self._close, signal=signals.spider_closed)
def xmlCallback(self, xmlstring):
    """Populate the weather screen from a Google Weather XML response and
    fetch any missing forecast icons (up to one download slot per icon).
    (Python 2 code: str.encode on attribute values, print-era idioms.)"""
    self["statustext"].text = ""
    metric = 0
    index = 0
    UnitSystemText = "F"
    IconDownloadList = []
    root = cet_fromstring(xmlstring)
    for childs in root.findall("weather"):
        for items in childs:
            if items.tag == "problem_cause":
                # Server-side error message, shown verbatim.
                self["statustext"].text = items.attrib.get("data").encode("utf-8", 'ignore')
            elif items.tag == "forecast_information":
                for items2 in items:
                    if items2.tag == "city":
                        self["caption"].text = items2.attrib.get("data").encode("utf-8", 'ignore')
                    elif items2.tag == "unit_system":
                        # "SI" switches the display to Celsius.
                        if items2.attrib.get("data").encode("utf-8", 'ignore') == "SI":
                            metric = 1
                            UnitSystemText = "C"
            elif items.tag == "current_conditions":
                for items2 in items:
                    if items2.tag == "condition":
                        self["condition"].text = _("Current: %s") % items2.attrib.get("data").encode("utf-8", 'ignore')
                    elif items2.tag == "temp_f" and metric == 0:
                        self["currentTemp"].text = ("%s °F" % items2.attrib.get("data").encode("utf-8", 'ignore'))
                    elif items2.tag == "temp_c" and metric == 1:
                        self["currentTemp"].text = ("%s °C" % items2.attrib.get("data").encode("utf-8", 'ignore'))
                    elif items2.tag == "humidity":
                        self["humidity"].text = items2.attrib.get("data").encode("utf-8", 'ignore')
                    elif items2.tag == "wind_condition":
                        self["wind_condition"].text = items2.attrib.get("data").encode("utf-8", 'ignore')
            elif items.tag == "forecast_conditions":
                # One widget group per forecast day, 1-based.
                index = index + 1
                lowTemp = ""
                highTemp = ""
                icon = ""
                for items2 in items:
                    if items2.tag == "day_of_week":
                        self["weekday%s" % index].text = items2.attrib.get("data").encode("utf-8", 'ignore')
                    elif items2.tag == "low":
                        lowTemp = items2.attrib.get("data").encode("utf-8", 'ignore')
                    elif items2.tag == "high":
                        # "high" arrives after "low", so render both here.
                        highTemp = items2.attrib.get("data").encode("utf-8", 'ignore')
                        self["weekday%s_temp" % index].text = "%s °%s | %s °%s" % (highTemp, UnitSystemText, lowTemp, UnitSystemText)
                    elif items2.tag == "icon":
                        url = items2.attrib.get("data").encode("utf-8", 'ignore')
                        # Relative icon paths are served by google.com.
                        if not url.startswith("http://"):
                            url = "http://www.google.com%s" % items2.attrib.get("data").encode("utf-8", 'ignore')
                        parts = url.split("/")
                        filename = self.appdir + parts[-1]
                        # Only download icons not already cached locally.
                        if not os_path.exists(filename):
                            IconDownloadList.append(WeatherIconItem(url = url,filename = filename, index = index))
                        else:
                            self.showIcon(index,filename)
    if len(IconDownloadList) != 0:
        # One token per icon: all downloads may run concurrently.
        ds = defer.DeferredSemaphore(tokens=len(IconDownloadList))
        downloads = [ds.run(download,item ).addErrback(self.errorIconDownload, item).addCallback(self.finishedIconDownload,item) for item in IconDownloadList]
        finished = defer.DeferredList(downloads).addErrback(self.error)
def run(self):
    """Run a worker per host, at most `scale` at a time (defaults to one
    worker per host, i.e. fully parallel), emitting runner_start/finish
    signals around the whole run."""
    self.installThreadIO()
    signal('runner_start').send(self)
    # Zero/None scale means no throttling beyond one worker per host.
    scale = self.scale or len(self.hosts)
    semaphore = defer.DeferredSemaphore(scale)
    # Each host's worker runs under the semaphore.
    consumer = partial(semaphore.run, self.runWorker)
    workers = map(consumer, self.hosts)
    yield defer.DeferredList(workers, consumeErrors=False)
    signal('runner_finish').send(self)
def __init__(self, reactor, resolver, pools, scansets, env):
    """Initialize.

    pools is a mapping pool name -> size
    scansets is a mapping scanset name -> ScanSet object
    """
    self.reactor = reactor
    self.resolver = resolver
    self.env = ScanEnvironment(reactor, resolver)
    # XXX quick hack, refactor later.
    for k, v in env.items():
        setattr(self.env, k, v)
    self.scans = {}
    # One semaphore per named pool, sized from the config mapping.
    self.pools = dict(
        (name, defer.DeferredSemaphore(n))
        for name, n in pools.items())
    # DNS gets a dedicated pool with a default size when not configured.
    self.dnspool = self.pools.get(
        'dns', defer.DeferredSemaphore(default_dns_pool_size))
    self.scansets = scansets
def __init__(self, settings):
    """PhantomJS download-handler setup with a fallback handler for
    requests PhantomJS should not serve.  (Python 2 code: Queue module.)"""
    self.options = settings.get('PHANTOMJS_OPTIONS', {})
    max_run = settings.get('PHANTOMJS_MAXRUN', 10)
    # Limit concurrent PhantomJS instances to max_run.
    self.sem = defer.DeferredSemaphore(max_run)
    # Idle browser instances, reused most-recent-first.
    self.queue = Queue.LifoQueue(max_run)
    self.create_phantomjs_count = 0
    self._fallback_handler = load_object(FALLBACK_HANDLER)(settings)
    # Dispose of browsers when the spider closes.
    SignalManager(dispatcher.Any).connect(self._close,
                                          signal=signals.spider_closed)
def test():
    """Fetch every URL in `urls`, at most `maxRun` at a time, then hand
    the collected results to listCallback and finish."""
    throttle = defer.DeferredSemaphore(maxRun)
    fetches = [throttle.run(getPage, target) for target in urls]
    done = defer.DeferredList(fetches)
    done.addCallback(listCallback)
    done.addCallback(finish)