Code Example #1
File: gcmpushkin.py Project: yousefalatari/sygnal
    def __init__(self, name, sygnal, config, canonical_reg_id_store):
        super(GcmPushkin, self).__init__(name, sygnal, config)

        nonunderstood = set(self.cfg.keys()).difference(
            self.UNDERSTOOD_CONFIG_FIELDS)
        if len(nonunderstood) > 0:
            logger.warning(
                "The following configuration fields are not understood: %s",
                nonunderstood,
            )

        self.http_pool = HTTPConnectionPool(reactor=sygnal.reactor)
        self.max_connections = self.get_config("max_connections",
                                               DEFAULT_MAX_CONNECTIONS)
        self.connection_semaphore = DeferredSemaphore(self.max_connections)
        self.http_pool.maxPersistentPerHost = self.max_connections

        tls_client_options_factory = ClientTLSOptionsFactory()

        self.http_agent = Agent(
            reactor=sygnal.reactor,
            pool=self.http_pool,
            contextFactory=tls_client_options_factory,
        )

        self.db = sygnal.database
        self.canonical_reg_id_store = canonical_reg_id_store

        self.api_key = self.get_config("api_key")
        if not self.api_key:
            raise PushkinSetupException("No API key set in config")
Code Example #2
class ThreadedUrllib2TestMixin(object):

    def setUp(self):
        self._semaphore = DeferredSemaphore(2)

    def tearDown(self):
        pass


    def getPages(self, count, url):
        return gatherResults([self.getPage(url) for i in xrange(0, count)])

    @inlineCallbacks
    def getPage(self, url):
        yield self._semaphore.acquire()
        page = yield deferToThread(self._openPage, url)
        self._semaphore.release()
        returnValue(page)

    def _openPage(self, url):
        log.msg("Opening url: %r" % url)
        return urlopen(url).read()

    @inlineCallbacks
    def getPageLength(self, url):
        response = yield self.getPage(url)
        returnValue(len(response))
Code Example #3
    def collect(self, config):
        log.debug('Starting Delivery health collect')
        # Runs once at Application level and once more at components level

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        # Gather the info about applications
        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for datasource in config.datasources:
            applicationComponentID = datasource.params['applicationComponentID']
            if applicationComponentID in applicationList:
                continue
            applicationList.append(applicationComponentID)
            applicationNameID = datasource.params['applicationNameID']
            serviceURL = datasource.params['serviceURL']
            url = self.urls[datasource.datasource].format(serviceURL)
            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": datasource.zIVGroups,
                            "iv-user": datasource.zIVUser,
                        },
                        )
            tag = '{}_{}'.format(datasource.datasource, applicationNameID)
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
Code Example #4
File: connectionpool.py Project: indx/indx-core
    def __init__(self, conn_str):
        self.waiting = []
        self.inuse = []
        self.free = []

        self.semaphore = DeferredSemaphore(1)
        self.updateTime()
Code Example #5
    def collect(self, config):
        log.debug('Starting Delivery orders collect')
        # TODO : cleanup job collect

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for datasource in config.datasources:
            applicationNameID = datasource.params['applicationNameID']
            if applicationNameID in applicationList:
                continue
            applicationList.append(applicationNameID)
            serviceURL = datasource.params['serviceURL']
            url = self.urls[datasource.datasource].format(serviceURL)
            # TODO : move headers to Config properties
            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": datasource.zIVGroups,
                            "iv-user": datasource.zIVUser,
                        },
                        )
            tag = '{}_{}'.format(datasource.datasource, applicationNameID)      # order_app_delivery_service_3db30547
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
Code Example #6
File: testUploadSpeed4.py Project: msit18/UrbanFlows
	def recordVideoProcess(self, resW, resH, totalTimeSec, framerate, serverIP, piName, recordTimesList, file):
		semi = DeferredSemaphore(1)

		jobs = []
		for runs in range(len(recordTimesList)/2):
			print  "recordTimes recordVideoProcess:", recordTimesList
			self.writeFile("recordTimes recordVideoProcess:")
			try:
				startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
				jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),\
						int(framerate), startAtTime, serverIP, piName, file))
			except:
				self.writeFile("That time was not valid. Calling next time.")
				self.writeFile("len recordTimesList: " + str(len(recordTimesList)))
				if len(recordTimesList)%2>0:
					self.writeFile("odd number")
					recordTimesList.pop(0)
					self.writeFile("new len: " + str(len(recordTimesList)))
					reactor.callLater(0.5, self.transport.write, "TIMEINPUTERROR {0}\n".format(piName))
				continue
			
		jobs = DeferredList(jobs)

		print  "Results: ", jobs.addCallback(self.getResults, piName)
		# self.writeFile("Results: ", jobs.addCallback(self.getResults, piName))
		jobs.addCallback(lambda _: reactor.callLater(5, reactor.stop))
Code Example #7
File: trigger_convergence.py Project: meker12/otter
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    d = DeferredList(
        [sem.run(trigger_convergence, authenticator, region, group,
                 no_error_group)
         for group in groups],
        fireOnOneCallback=False,
        fireOnOneErrback=False,
        consumeErrors=True)
    d.addCallback(
        lambda results: [(g["tenantId"], g["groupId"], f.value)
                         for g, (s, f) in zip(groups, results) if not s])
    return d
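
The docstring above describes the idiom that recurs throughout these examples: wrap every call in sem.run() so that at most concurrency_limit calls are in flight, and collect the per-call Deferreds with DeferredList. A minimal self-contained sketch of that idiom, with a made-up slow_task standing in for trigger_convergence and an arbitrary limit of two, might look like this:

from twisted.internet import reactor, task
from twisted.internet.defer import DeferredSemaphore, DeferredList


def slow_task(n):
    # Stand-in for trigger_convergence: a Deferred that fires after a delay.
    return task.deferLater(reactor, 1.0, lambda: n * n)


def main():
    sem = DeferredSemaphore(2)  # at most two tasks in flight at a time
    ds = [sem.run(slow_task, n) for n in range(6)]
    d = DeferredList(ds, consumeErrors=True)
    d.addCallback(lambda results: print([r for ok, r in results if ok]))
    d.addBoth(lambda _: reactor.stop())


reactor.callWhenRunning(main)
reactor.run()
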
Code Example #8
 def __init__(self, buffer, start_callback=None):
     self.buffer = buffer
     self._done = False
     self._flush = DeferredSemaphore(1)
     self._waiter = DeferredSemaphore(1)
     self._flush.acquire()
     self._started = start_callback
     self._keepalive = LoopingCall(self._send_keepalive)
Code Example #9
File: utils.py Project: dellis23/rollingpin
def parallel_map(iterable, fn, *args, **kwargs):
    deferreds = []
    parallelism_limiter = DeferredSemaphore(MAX_PARALLELISM)
    for item in iterable:
        d = parallelism_limiter.run(fn, item, *args, **kwargs)
        deferreds.append(d)
    results = yield gatherResults(deferreds)
    returnValue(results)
Code Example #10
 def __init__(self, queue):
     self.managedPackageVersions = set()
     self.certifiDirectory = tempfile.mkdtemp()
     self.testSemaphore = DeferredSemaphore(tokens=32)
     self.binPath = os.path.join(
         os.path.split(__file__)[0], 'certifi_test.py')
     self.queue = queue
     self._log = logger.new(object="supervisor")
Code Example #11
File: utils.py Project: huzichunjohn/rollingpin
def parallel_map(iterable, fn, *args, **kwargs):
    deferreds = []
    parallelism_limiter = DeferredSemaphore(MAX_PARALLELISM)
    for item in iterable:
        d = parallelism_limiter.run(fn, item, *args, **kwargs)
        deferreds.append(d)
    results = yield gatherResults(deferreds)
    returnValue(results)
Code Example #12
File: util.py Project: e000/prickle
 def __init__(self, tokens = 5):
     if tokens < 1:
         raise ValueError("tokens must be > 0")
     
     if tokens == 1:
         self.lock = DeferredLock()
     else:
         self.lock = DeferredSemaphore(tokens)
Code Example #13
    def __init__(self, name, sygnal, config):
        super(WebpushPushkin, self).__init__(name, sygnal, config)

        nonunderstood = self.cfg.keys() - self.UNDERSTOOD_CONFIG_FIELDS
        if nonunderstood:
            logger.warning(
                "The following configuration fields are not understood: %s",
                nonunderstood,
            )

        self.http_pool = HTTPConnectionPool(reactor=sygnal.reactor)
        self.max_connections = self.get_config("max_connections",
                                               DEFAULT_MAX_CONNECTIONS)
        self.connection_semaphore = DeferredSemaphore(self.max_connections)
        self.http_pool.maxPersistentPerHost = self.max_connections

        tls_client_options_factory = ClientTLSOptionsFactory()

        # use the Sygnal global proxy configuration
        proxy_url = sygnal.config.get("proxy")

        self.http_agent = ProxyAgent(
            reactor=sygnal.reactor,
            pool=self.http_pool,
            contextFactory=tls_client_options_factory,
            proxy_url_str=proxy_url,
        )
        self.http_agent_wrapper = HttpAgentWrapper(self.http_agent)

        self.allowed_endpoints = None  # type: Optional[List[Pattern]]
        allowed_endpoints = self.get_config("allowed_endpoints")
        if allowed_endpoints:
            if not isinstance(allowed_endpoints, list):
                raise PushkinSetupException(
                    "'allowed_endpoints' should be a list or not set")
            self.allowed_endpoints = list(map(glob_to_regex,
                                              allowed_endpoints))
        privkey_filename = self.get_config("vapid_private_key")
        if not privkey_filename:
            raise PushkinSetupException(
                "'vapid_private_key' not set in config")
        if not os.path.exists(privkey_filename):
            raise PushkinSetupException(
                "path in 'vapid_private_key' does not exist")
        try:
            self.vapid_private_key = Vapid.from_file(
                private_key_file=privkey_filename)
        except VapidException as e:
            raise PushkinSetupException(
                "invalid 'vapid_private_key' file") from e
        self.vapid_contact_email = self.get_config("vapid_contact_email")
        if not self.vapid_contact_email:
            raise PushkinSetupException(
                "'vapid_contact_email' not set in config")
        self.ttl = self.get_config("ttl", DEFAULT_TTL)
        if not isinstance(self.ttl, int):
            raise PushkinSetupException("'ttl' must be an int if set")
Code Example #14
File: trigger_convergence.py Project: glyph/otter
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit):
    """
    Trigger convergence on given groups
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults(
        [sem.run(trigger_convergence, authenticator, region, group)
         for group in groups],
        consumeErrors=True)
Code Example #15
File: updater.py Project: jsza/tempus-map-updater
 def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
     assert isinstance(mapsPath, str) and len(mapsPath)
     assert isinstance(fetchURL, str) and len(fetchURL)
     self.mapsPath = FilePath(mapsPath)
     self.downloadTempPath = self.mapsPath.child('mapupdater')
     self.fetchURL = URLPath.fromString(fetchURL)
     self.deleteIfNotPresent = deleteIfNotPresent
     self.tfLevelSounds = tfLevelSounds
     self.semaphore = DeferredSemaphore(1)
     self.downloadSemaphore = DeferredSemaphore(4)
     for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
         fp.remove()
Code Example #16
File: updater.py Project: jsza/tempus-map-updater
 def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
     assert isinstance(mapsPath, str) and len(mapsPath)
     assert isinstance(fetchURL, str) and len(fetchURL)
     self.mapsPath = FilePath(mapsPath)
     self.downloadTempPath = self.mapsPath.child('mapupdater')
     self.fetchURL = URLPath.fromString(fetchURL)
     self.deleteIfNotPresent = deleteIfNotPresent
     self.tfLevelSounds = tfLevelSounds
     self.semaphore = DeferredSemaphore(1)
     self.downloadSemaphore = DeferredSemaphore(4)
     for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
         fp.remove()
Code Example #17
def query_all_nodes(nodes, max_concurrency=5, clock=reactor):
    """Queries the given nodes for their power state.

    Nodes' states are reported back to the region.

    :return: A deferred, which fires once all nodes have been queried,
        successfully or not.
    """
    semaphore = DeferredSemaphore(tokens=max_concurrency)
    queries = (semaphore.run(query_node, node, clock) for node in nodes
               if node["power_type"] in PowerDriverRegistry)
    return DeferredList(queries, consumeErrors=True)
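
The docstring's promise that the Deferred fires "successfully or not" comes from consumeErrors=True: the DeferredList still fires when individual queries fail, and each failure is reported as a (False, Failure) pair instead of an unhandled error. A small illustrative sketch, with already-fired Deferreds standing in for real node queries:

from twisted.internet.defer import DeferredList, fail, succeed

queries = [
    succeed("node-1: on"),
    fail(RuntimeError("node-2 unreachable")),
    succeed("node-3: off"),
]
d = DeferredList(queries, consumeErrors=True)
# Each entry is (success, result); failed queries carry a Failure object.
d.addCallback(lambda results: [r if ok else r.value for ok, r in results])
d.addCallback(print)
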
Code Example #18
File: wholeProcess.py Project: msit18/UrbanFlows
	def runFiles():
	    semi = DeferredSemaphore(1)

	    jobs = []
	    for runs in range(5):
	        jobs.append(semi.run(collectFiles))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    jobs.addCallback(cbFinished)
	    return jobs
Code Example #19
File: server.py Project: brandonivey/mixtapes
class Processor():
    """
    Whenever mixtapeReceived is called, deferToThread is scheduled to run as
    soon as a "slot" is available. There is currently 1 slot.
    deferToThread runs process_mixtape in another thread, and releases the
    slot when that process is done.
    """
    def __init__(self):
        self.sem = DeferredSemaphore(1) #do one thing at a time

    def mixtapeReceived(self, mixtape):
        debug("Adding %s to be processed" % mixtape)
        self.sem.run(deferToThread, process.process_mixtape, *mixtape)
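
A minimal, self-contained sketch of the "one slot at a time" pattern the docstring describes, with a placeholder blocking_work function standing in for process.process_mixtape:

import time

from twisted.internet import reactor
from twisted.internet.defer import DeferredSemaphore
from twisted.internet.threads import deferToThread


def blocking_work(name):
    time.sleep(1)  # simulate slow, blocking processing
    return "processed %s" % name


sem = DeferredSemaphore(1)  # do one thing at a time
for tape in ["mix-a", "mix-b", "mix-c"]:
    # The semaphore is released when the Deferred from deferToThread fires.
    sem.run(deferToThread, blocking_work, tape).addCallback(print)

reactor.callLater(5, reactor.stop)
reactor.run()
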
Code Example #20
    def __init__(self, expected_subscribers, experiment_start_delay):
        self.expected_subscribers = expected_subscribers
        self.experiment_start_delay = experiment_start_delay
        self.parsing_semaphore = DeferredSemaphore(500)
        self.connection_counter = -1
        self.connections_made = []
        self.connections_ready = []
        self.vars_received = []

        self._made_looping_call = None
        self._subscriber_looping_call = None
        self._subscriber_received_looping_call = None
        self._timeout_delayed_call = None
Code Example #21
    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a new request.
        @param method: The request method to send.
        @type method: C{str}
        @param uri: The request URI to send.
        @type uri: C{str}
        @param scheme: A string like C{'http'} or C{'https'} (the only two
            supported values) to use to determine how to establish the
            connection.
 
        @param host: A C{str} giving the hostname which will be connected to in
            order to issue a request.

        @param port: An C{int} giving the port number the connection will be on.

        @param path: A C{str} giving the path portion of the request URL.
        @param headers: The request headers to send.  If no I{Host} header is
            included, one will be added based on the request URI.
        @type headers: L{Headers}
        @param bodyProducer: An object which will produce the request body or,
            if the request body is to be empty, L{None}.
        @type bodyProducer: L{IBodyProducer} provider
        @return: A L{Deferred} which fires with the result of the request (a
            L{Response} instance), or fails if there is a problem setting up a
            connection over which to issue the request.  It may also fail with
            L{SchemeNotSupported} if the scheme of the given URI is not
            supported.
        @rtype: L{Deferred}
        """
        scheme, host, port, path = _parse(uri)
        if headers is None:
            headers = Headers()
        if not headers.hasHeader('host'):
            # This is a lot of copying.  It might be nice if there were a bit
            # less.
            headers = Headers(dict(headers.getAllRawHeaders()))
            headers.addRawHeader(
                'host', self._computeHostValue(scheme, host, port))
        if self.persistent:
            sem = self._semaphores.get((scheme, host, port))
            if sem is None:
                sem = DeferredSemaphore(self.maxConnectionsPerHostName)
                self._semaphores[scheme, host, port] = sem
            return sem.run(self._request, method, scheme, host, port, path,
                           headers, bodyProducer)
        else:
            return self._request(
                method, scheme, host, port, path, headers, bodyProducer)
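
The interesting part of this method is the lazily created per-(scheme, host, port) semaphore, which caps concurrent requests to each host while leaving other hosts unaffected. A stripped-down sketch of that bookkeeping (PerHostLimiter and max_per_host are illustrative names, not part of the quoted project):

from twisted.internet.defer import DeferredSemaphore


class PerHostLimiter(object):
    """Illustrative sketch: one semaphore per key, created on first use."""

    def __init__(self, max_per_host=2):
        self._max_per_host = max_per_host
        self._semaphores = {}

    def run(self, key, func, *args, **kwargs):
        sem = self._semaphores.get(key)
        if sem is None:
            sem = DeferredSemaphore(self._max_per_host)
            self._semaphores[key] = sem
        return sem.run(func, *args, **kwargs)
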
Code Example #22
File: beerme_crawler.py Project: buckhx/py-beer
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(5)
    print "Loading IDs"
    ids = getBeermeIds()
    ids = ids[:100]
    print "Done Loading %s IDs" % str(len(ids))
    jobs = []
    for id in ids:
        jobs.append(sem.run(beerme_request,id,agent))
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)

    print "Starting reactor..."
    reactor.run()
Code Example #23
File: vbms.py Project: parastoo-62/efolder-express
    def __init__(self, reactor, connect_vbms_path, bundle_path, endpoint_url,
                 keyfile, samlfile, key, keypass, ca_cert, client_cert):
        self._reactor = reactor

        self._connect_vbms_path = connect_vbms_path
        self._bundle_path = bundle_path
        self._endpoint_url = endpoint_url
        self._keyfile = keyfile
        self._samlfile = samlfile
        self._key = key
        self._keypass = keypass
        self._ca_cert = ca_cert
        self._client_cert = client_cert

        self._connect_vbms_semaphore = DeferredSemaphore(tokens=8)
Code Example #24
File: beerme_crawler.py Project: buckhx/py-beer
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(5)
    print "Loading IDs"
    ids = getBeermeIds()
    ids = ids[:100]
    print "Done Loading %s IDs" % str(len(ids))
    jobs = []
    for id in ids:
        jobs.append(sem.run(beerme_request, id, agent))
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)

    print "Starting reactor..."
    reactor.run()
Code Example #25
File: wholeProcess.py Project: msit18/UrbanFlows
	def collectFiles():
	    semaphore = DeferredSemaphore(1)
	    files = glob.glob('*.py')
	    dl = list()

	    for item in range(len(files)):
	        #Queues list of things to be sent and runs it
	        dl.append(semaphore.run(sendFiles, files[item]))

	    # convert to a DefferedList. Allows for callback call
	    dl = DeferredList(dl)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    dl.addCallback(cbFinished)
	    return dl
Code Example #26
    def runU1DBQuery(self, meth, *args, **kw):
        """
        Execute a U1DB query in a thread, using a pooled connection.

        Concurrent threads trying to update the same database may timeout
        because of other threads holding the database lock. Because of this,
        we will retry SQLCIPHER_MAX_RETRIES times and fail after that.

        :param meth: The U1DB wrapper method name.
        :type meth: str

        :return: a Deferred which will fire the return value of
            'self._runU1DBQuery(Transaction(...), *args, **kw)', or a Failure.
        :rtype: twisted.internet.defer.Deferred
        """
        meth = "u1db_%s" % meth
        semaphore = DeferredSemaphore(SQLCIPHER_MAX_RETRIES)

        def _run_interaction():
            return self.runInteraction(self._runU1DBQuery, meth, *args, **kw)

        def _errback(failure):
            failure.trap(dbapi2.OperationalError)
            if failure.getErrorMessage() == "database is locked":
                logger.warn("database operation timed out")
                should_retry = semaphore.acquire()
                if should_retry:
                    logger.warn("trying again...")
                    return _run_interaction()
                logger.warn("giving up!")
            return failure

        d = _run_interaction()
        d.addErrback(_errback)
        return d
Code Example #27
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(10)
    print "Loading breweries..."
    mongo = MongoClient().entities.breweries
    breweries = loadBreweries(mongo)
    print "Done loading breweries."
    jobs = []
    for brewery in breweries:
        jobs.append(sem.run(socialRequest, brewery, agent, mongo))
    #    if len(jobs) % 50 == 0:
    #        print "Brewery Jobs started: %d" % len(jobs)
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)
    print "Let the Reactor BEGIN!"
    reactor.run()
Code Example #28
File: find_social.py Project: buckhx/py-beer
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(10)
    print "Loading breweries..."
    mongo = MongoClient().entities.breweries
    breweries = loadBreweries(mongo)
    print "Done loading breweries."
    jobs = []
    for brewery in breweries:
        jobs.append(sem.run(socialRequest,brewery,agent,mongo))
    #    if len(jobs) % 50 == 0:
    #        print "Brewery Jobs started: %d" % len(jobs) 
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)
    print "Let the Reactor BEGIN!"
    reactor.run()
Code Example #29
File: crawlah.py Project: rmcdonnell/greenCall
    def start(self):
        """ get each page """
        deferreds = []
        sem = DeferredSemaphore(self.MAX_RUN)
        
        for key in self.book.keys():

            sleep(self.RATE_LIMIT)
            d =  sem.run(getPage, self.book[key])
            d.addCallback(self.pageCallback, key)
            d.addErrback(self.errorHandler, key)
            deferreds.append(d)

        dl = DeferredList(deferreds)
        dl.addCallback(self.listCallback)
        dl.addCallback(self.finish)
Code Example #30
File: util.py Project: e000/prickle
class DeferredConcurrencyLimiter:
    """Initialize me, and then use me as a decorator, to limit the number of Deferreds that can execute concurrently."""
    
    def __init__(self, tokens = 5):
        if tokens < 1:
            raise ValueError("tokens must be > 0")
        
        if tokens == 1:
            self.lock = DeferredLock()
        else:
            self.lock = DeferredSemaphore(tokens)
    
    def _releaseLock(self, response, lock):
        lock.release()
        return response
    
    def _lockAcquired(self, lock, f, *a, **kw):
        d = maybeDeferred(f, *a, **kw)
        d.addBoth(self._releaseLock, lock)
        return d
    
    def __call__(self, f):
        @wraps(f)
        def wrapped(*a, **kw):
            d = self.lock.acquire()
            d.addCallback(self._lockAcquired, f, *a, **kw)
            return d
        
        return wrapped
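
A hypothetical usage of the decorator above (it assumes the class and its imports from the snippet, plus a running reactor): at most two decorated calls run at once, and later calls queue on the internal lock until a slot frees up.

from twisted.internet import reactor, task

limiter = DeferredConcurrencyLimiter(tokens=2)

@limiter
def fetch(n):
    # maybeDeferred lets the wrapped function return a plain value or a
    # Deferred; here it returns a Deferred that fires after a short delay.
    return task.deferLater(reactor, 1.0, lambda: "result %d" % n)

# Only two of these run concurrently; the rest wait for the lock.
ds = [fetch(i) for i in range(5)]
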
Code Example #31
File: boot_images.py Project: zeronewb/maas
    def run(self, concurrency=1):
        """Ask the rack controllers to download the region's boot resources.

        Report the results via the log.

        :param concurrency: Limit the number of rack controllers importing at
            one time to no more than `concurrency`.
        """
        lock = DeferredSemaphore(concurrency)

        def report(results):
            message_success = (
                "Rack controller (%s) has imported boot resources.")
            message_failure = (
                "Rack controller (%s) failed to import boot resources.")
            message_disconn = (
                "Rack controller (%s) did not import boot resources; it is "
                "not connected to the region at this time.")
            for system_id, (success, result) in zip(self.system_ids, results):
                if success:
                    log.msg(message_success % system_id)
                elif result.check(NoConnectionsAvailable):
                    log.msg(message_disconn % system_id)
                else:
                    log.err(result, message_failure % system_id)

        return self(lock).addCallback(report).addErrback(
            log.err, "General failure syncing boot resources.")
Code Example #32
File: connectionpool.py Project: sociam/indx
    def __init__(self, conn_str):
        self.waiting = []
        self.inuse = []
        self.free = []

        self.semaphore = DeferredSemaphore(1)
        self.updateTime()
Code Example #33
class PlotlyStreamProducer(object):
    """Implements a producer that copies from a buffer to a plot.ly
    connection.
    """
    implements(IBodyProducer)
    length = UNKNOWN_LENGTH

    def __init__(self, buffer, start_callback=None):
        self.buffer = buffer
        self._done = False
        self._flush = DeferredSemaphore(1)
        self._waiter = DeferredSemaphore(1)
        self._flush.acquire()
        self._started = start_callback
        self._keepalive = LoopingCall(self._send_keepalive)

    @inlineCallbacks
    def startProducing(self, consumer):
        self._keepalive.start(60)
        self._started.callback(None)
        while True:
            # if paused, this will block
            yield self._waiter.acquire()
            while len(self.buffer):
                v = self.buffer.pop(0)
                if v is not None:
                    consumer.write(json.dumps(v))
                consumer.write("\n")
            yield self._waiter.release()

            if self._done: 
                return
            yield self._flush.acquire()

    def pauseProducing(self):
        return self._waiter.acquire()

    def resumeProducing(self):
        return self._waiter.release()

    def stopProducing(self):
        self._done = True
        if self._keepalive.running:
            self._keepalive.stop()

    def _send_keepalive(self):
        self.buffer.append(None)
        self.flush()

    def flush(self):
        if self._flush.tokens == 0:
            self._flush.release()
Code Example #34
 def __init__(self, crawler, show_window=False, qt_platform="minimal",
              enable_webkit_dev_tools=False, page_limit=4,
              cookies_middleware=None):
     super(BaseQtWebKitMiddleware, self).__init__()
     self._crawler = crawler
     self.show_window = show_window
     self.qt_platform = qt_platform
     self.enable_webkit_dev_tools = enable_webkit_dev_tools
     if page_limit != 1:
         if QWebSettings is not None:
             QWebSettings.setObjectCacheCapacities(0, 0, 0)
     if page_limit is None:
         self.semaphore = DummySemaphore()
     else:
         self.semaphore = DeferredSemaphore(page_limit)
     self.cookies_middleware = cookies_middleware
     self._references = set()
Code Example #35
def trigger_convergence_groups(authenticator, region, groups, concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults(
        [sem.run(trigger_convergence, authenticator, region, group, no_error_group) for group in groups],
        consumeErrors=True,
    ).addCallback(lambda _: None)
Code Example #36
    def _cbReqPhotoPage(self, photo_list):
        def photoConnectLost(message, url):
            log.err(message)
            raise Exception("can't access {0!s}".format(url))

        dl = []
        sem = DeferredSemaphore(20)
        for i in photo_list:
            d = sem.run(self._agent.request, b'GET', bytes(i.get('url'), 'ascii'))
            d.addCallback(self._cbGetPhotoDlLink, *[i.get('url')])
            d.addErrback(photoConnectLost, *[i.get('url')])
            d.addCallback(self._cbDownloadPhoto)
            d.addErrback(log.err)
            dl.append(d)
        deferreds = DeferredList(dl, consumeErrors=True)

        return deferreds
Code Example #37
File: supervisor.py Project: Lukasa/testifi
 def __init__(self, queue):
     self.managedPackageVersions = set()
     self.certifiDirectory = tempfile.mkdtemp()
     self.testSemaphore = DeferredSemaphore(tokens=32)
     self.binPath = os.path.join(
         os.path.split(__file__)[0], 'certifi_test.py'
     )
     self.queue = queue
     self._log = logger.new(object="supervisor")
Code Example #38
    def __init__(self, name, sygnal, config, canonical_reg_id_store):
        super(GcmPushkin, self).__init__(name, sygnal, config)

        nonunderstood = set(self.cfg.keys()).difference(
            self.UNDERSTOOD_CONFIG_FIELDS)
        if len(nonunderstood) > 0:
            logger.warning(
                "The following configuration fields are not understood: %s",
                nonunderstood,
            )

        self.http_pool = HTTPConnectionPool(reactor=sygnal.reactor)
        self.max_connections = self.get_config("max_connections",
                                               DEFAULT_MAX_CONNECTIONS)
        self.connection_semaphore = DeferredSemaphore(self.max_connections)
        self.http_pool.maxPersistentPerHost = self.max_connections

        tls_client_options_factory = ClientTLSOptionsFactory()

        # use the Sygnal global proxy configuration
        proxy_url = sygnal.config.get("proxy")

        self.http_agent = ProxyAgent(
            reactor=sygnal.reactor,
            pool=self.http_pool,
            contextFactory=tls_client_options_factory,
            proxy_url_str=proxy_url,
        )

        self.db = sygnal.database
        self.canonical_reg_id_store = canonical_reg_id_store

        self.api_key = self.get_config("api_key")
        if not self.api_key:
            raise PushkinSetupException("No API key set in config")

        # Use the fcm_options config dictionary as a foundation for the body;
        # this lets the Sygnal admin choose custom FCM options
        # (e.g. content_available).
        self.base_request_body: dict = self.get_config("fcm_options", {})
        if not isinstance(self.base_request_body, dict):
            raise PushkinSetupException(
                "Config field fcm_options, if set, must be a dictionary of options"
            )
Code Example #39
File: cafe-process.py Project: rackerlabs/otter
def run(packages, modules, other_args, reactor, limit, excludes):
    sem = DeferredSemaphore(limit)
    proc_argss = get_cafe_args(packages, modules, excludes)
    deferreds = [
        sem.run(getProcessOutputAndValue, 'cafe-runner',
                other_args + proc_args, env=os.environ, reactor=reactor)
        for proc_args in proc_argss]
    results = yield gatherResults(deferreds, consumeErrors=True)

    failed = False
    for proc_args, (stdout, stderr, code) in zip(proc_argss, results):
        if code == 0:
            continue
        failed = True
        print('Error when running ', ' '.join(proc_args))
        print('Stdout\n', stdout, 'Stderr\n', stderr)

    if failed:
        raise SystemExit('Some tests failed')
Code Example #40
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults([
        sem.run(trigger_convergence, authenticator, region, group,
                no_error_group) for group in groups
    ],
                         consumeErrors=True).addCallback(lambda _: None)
Code Example #41
 def run(self, host, probe_func):
     # Use a MultiLock with one semaphore limiting the overall
     # connections and another limiting the per-host connections.
     if host in self.host_locks:
         multi_lock = self.host_locks[host]
     else:
         multi_lock = MultiLock(self.overall_semaphore,
                                DeferredSemaphore(PER_HOST_REQUESTS))
         self.host_locks[host] = multi_lock
     return multi_lock.run(probe_func)
Code Example #42
    def runDeferredCommand(self):
        self.startTime = time.time()
        self.log.info('%s using BBCApplicationSNMP' % self.device.id)
        self.stats['totalRequests'] = len(self.device.modelledOids)
        semaf = DeferredSemaphore(self.workers)
        jobs = []

        for oids in self.device.modelledOids:
            self.stats['collectedAppOidCounter'] += oids.count('.75025.')
            df = semaf.run(utils.getProcessOutputAndValue, '/usr/bin/snmpget',
                           self.prepareSnmpCmdArgList() + oids.split())
            df.addErrback(self.handleError)
            jobs.append(df)

        df = gatherResults(jobs)
        df.addErrback(self.handleError)
        df.addCallback(self.parseOutput)

        return df
Code Example #43
File: base.py Project: osiloke/Flumotion-Transcoder
    def moveFiles(self, virtSrcBase, virtDestBase, relFiles):
        self.debug("MOVING: %r, %r, %r", virtSrcBase, virtDestBase, relFiles)
        if not self._local:
            raise TranscoderError("Component not properly setup yet")

        def move_failed(failure, src, dest):
            msg = ("Fail to move file '%s' to '%s': %s"
                   % (src, dest, log.getFailureMessage(failure)))
            self.warning("%s", msg)
            raise TranscoderError(msg, cause=failure)

        def move_file(src, dest, attr=None):
            self.debug("Moving file '%s' to '%s'", src, dest)
            dest_dir = os.path.dirname(dest)
            safe_mkdirs(dest_dir, "input file destination", attr)
            d = deferToThread(shutil.move, src, dest)
            d.addErrback(move_failed, src, dest)
            return d

        def move_files_failed(results):
            first_failure = None
            for ok, result in results:
                if not ok:
                    if not first_failure:
                        first_failure = result
            return first_failure

        sem = DeferredSemaphore(1)
        move_tasks = []

        for file in relFiles:
            source_path = virtSrcBase.append(file).localize(self._local)
            dest_path = virtDestBase.append(file).localize(self._local)
            source_path = os.path.realpath(source_path)
            dest_path = os.path.realpath(dest_path)
            d = sem.run(move_file, source_path, dest_path, self._pathAttr)
            move_tasks.append(d)

        dl = DeferredList(move_tasks, consumeErrors=True)
        dl.addCallback(move_files_failed)
        return dl
Code Example #44
File: engine.py Project: pbkracker/minion-core
 def __init__(self, plan, configuration, database, plugin_service_api, artifacts_path):
     self.plan = plan
     self.configuration = configuration
     self.database = database
     self.plugin_service_api = plugin_service_api
     self.artifacts_path = artifacts_path
     self.id = str(uuid.uuid4())
     self.state = 'CREATED'
     self.plugin_configurations = []
     self.semaphore = DeferredSemaphore(1)
     self.plugin_sessions = []
     self.delete_when_stopped = False
Code Example #45
File: threads.py Project: zhangrb/maas
def make_database_unpool(maxthreads=max_threads_for_database_pool):
    """Create a general non-thread-pool for database activity.

    Its consumers are the old-school web application, i.e. the plain HTTP and
    HTTP API services, and the WebSocket service, for the responsive web UI.
    Each thread is fully connected to the database.

    However, this is a :class:`ThreadUnpool`, which means that threads are not
    actually pooled: a new thread is created for each task. This is ideal for
    testing, to improve isolation between tests.
    """
    return ThreadUnpool(DeferredSemaphore(maxthreads), ExclusivelyConnected)
Code Example #46
File: cafe-process.py Project: stephamon/otter
def run(packages, modules, other_args, reactor, limit, excludes):
    sem = DeferredSemaphore(limit)
    proc_argss = get_cafe_args(packages, modules, excludes)
    deferreds = [
        sem.run(getProcessOutputAndValue,
                'cafe-runner',
                other_args + proc_args,
                env=os.environ,
                reactor=reactor) for proc_args in proc_argss
    ]
    results = yield gatherResults(deferreds, consumeErrors=True)

    failed = False
    for proc_args, (stdout, stderr, code) in zip(proc_argss, results):
        if code == 0:
            continue
        failed = True
        print('Error when running ', ' '.join(proc_args))
        print('Stdout\n', stdout, 'Stderr\n', stderr)

    if failed:
        raise SystemExit('Some tests failed')
Code Example #47
File: wholeProcess.py Project: msit18/UrbanFlows
	def runFiles(self):
	    semi = DeferredSemaphore(1)

	    jobs = []
	    recordTimes = "01/24/17 12:00:00 01/24/17 12:15:00 01/24/17 12:30:00"
	    recordTimesList = [data for data in recordTimes.split()]
	    for runs in range(len(recordTimesList)/2):
	    	print "recordTimes:", recordTimesList
	    	# recordTimeStartTime = recordTimesList.pop(0) + " " + recordTimesList.pop(0)
	    	# print "start time: ", recordTimeStartTime

	    	startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
	    #     jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),\
					# int(framerate), startAtTime, serverIP, piName))
	        jobs.append(semi.run(self.sendFiles, startAtTime))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	        # reactor.callLater(0.5, self.transport.write, 'finished')
	    jobs.addCallback(cbFinished)
	    return jobs
Code Example #48
    def collect(self, device, log):
        log.debug('{}: Modeling collect'.format(device.id))

        port = getattr(device, 'zSpringBootPort', None)
        uri = getattr(device, 'zSpringBootURI', None)
        ivGroups = getattr(device, 'zIVGroups', None)
        ivUser = getattr(device, 'zIVUser', None)

        ip_address = device.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", device.id)
            returnValue(None)

        deferreds = []
        sem = DeferredSemaphore(1)

        # TODO: remove loop
        for query in self.queries:
            url = query[1].format(ip_address, port, uri)
            log.debug('SBA collect url: {}'.format(url))
            d = sem.run(
                getPage,
                url,
                headers={
                    "Accept": "application/json",
                    "User-Agent": "Mozilla/3.0Gold",
                    "iv-groups": ivGroups,
                    "iv-user": ivUser,
                },
            )
            d.addCallback(self.add_tag, '{}'.format(query[0]))
            deferreds.append(d)

        results = yield DeferredList(deferreds, consumeErrors=True)
        for success, result in results:
            if not success:
                log.error('{}: {}'.format(device.id, result.getErrorMessage()))

        returnValue(results)
Code Example #49
 def __init__(self, numSecondsToWait, numMessagesToWaitFor, chordNode):
     '''
     Constructor
     '''
     self.numSecondsToWait = numSecondsToWait
     self.numMessagesToWaitFor = numMessagesToWaitFor
     self.numSecondsToWait = numSecondsToWait
     self.chordNode = chordNode
     self.semaphore = DeferredSemaphore(1)
     self.messageList = [] # Holds tuples of (message, envelope)
     
     # Construct a timer to wait
     self.timerID = None
Code Example #50
File: sync.py Project: corpaul/gumby
    def __init__(self, expected_subscribers, experiment_start_delay):
        self.expected_subscribers = expected_subscribers
        self.experiment_start_delay = experiment_start_delay
        self.parsing_semaphore = DeferredSemaphore(500)
        self.connection_counter = -1
        self.connections_made = []
        self.connections_ready = []
        self.vars_received = []

        self._made_looping_call = None
        self._subscriber_looping_call = None
        self._subscriber_received_looping_call = None
        self._timeout_delayed_call = None
Code Example #51
class TwistedRequestDriver(HTTPRequestDriver):
    # Using a connection pool enables persistent connections, so we can avoid
    # the connection setup overhead when sending multiple messages to the
    # server.
    pool = HTTPConnectionPool(reactor, persistent=True)

    # Used to control the number of concurrent requests because
    # HTTPConnectionPool does not do that on its own.
    # Discussed here:
    # http://stackoverflow.com/questions/25552432/how-to-make-pooling-http-connection-with-twisted
    sem = DeferredSemaphore(settings.PDSERVER_MAX_CONCURRENT_REQUESTS)

    def receive(self, response):
        """
        Receive response from twisted web client and convert it to a
        PDServerResponse object.
        """
        deferred = Deferred()
        response.deliverBody(JSONReceiver(response, deferred))
        return deferred

    def request(self, method, url, body=None):
        def makeRequest(ignored):
            bodyProducer = None
            if body is not None:
                bodyProducer = FileBodyProducer(six.StringIO(body))

            headers = {}
            for key, value in six.iteritems(self.headers):
                headers[key] = [value]

            agent = Agent(reactor, pool=TwistedRequestDriver.pool)
            d = agent.request(method, url, Headers(headers), bodyProducer)
            d.addCallback(self.receive)
            return d

        def releaseSemaphore(result):
            TwistedRequestDriver.sem.release()

            # Forward the result to the next handler.
            return result

        d = TwistedRequestDriver.sem.acquire()

        # Make the request once we acquire the semaphore.
        d.addCallback(makeRequest)

        # Release the semaphore regardless of how the request goes.
        d.addBoth(releaseSemaphore)
        return d
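
The manual acquire/makeRequest/addBoth(releaseSemaphore) sequence above is exactly what DeferredSemaphore.run encapsulates: acquire, call the function, and release when the returned Deferred fires. A hypothetical equivalent of the request method written that way (not the project's actual code) might be:

    def request(self, method, url, body=None):
        def makeRequest():
            bodyProducer = None
            if body is not None:
                bodyProducer = FileBodyProducer(six.StringIO(body))

            headers = {}
            for key, value in six.iteritems(self.headers):
                headers[key] = [value]

            agent = Agent(reactor, pool=TwistedRequestDriver.pool)
            d = agent.request(method, url, Headers(headers), bodyProducer)
            d.addCallback(self.receive)
            return d

        # run() acquires the semaphore, calls makeRequest, and releases the
        # semaphore when the Deferred returned by makeRequest fires.
        return TwistedRequestDriver.sem.run(makeRequest)
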
Code Example #52
File: qtView.py Project: raubkopierer/rattlekekz-qt
 def __init__(self,controller):
     self.name,self.version="rattlekekz-qt",20100806  # These variables are queried by the view
     self.controller=controller
     self.revision=rev
     self.alert=app.alert
     TabManager.__init__(self)
     self.spaces=re.compile(r"  {1,}")
     self.urls=re.compile(r"(?=\b)((?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~/|/)(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|edu|pro|asia|cat|coop|int|tel|post|xxx|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|/)+|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?)(?=\b)",re.I)
     self.blubb=lambda x:chr(ord(x)-43)
     self.plugins={}
     self._setup()
     self.addTab("$login",rattlekekzLoginTab)
     self.changeTab("$login")
     self.main.show()
     self.smilie_data=self.readSmilies()
     self.loading_data=open(sys.prefix+os.sep+'share'+os.sep+'emoticons'+os.sep+'rattlekekz'+os.sep+'loading.png').read()
     self.loading_image=QtGui.QImage()
     self.loading_image.loadFromData(self.loading_data,"PNG")
     self.images={}
     self.pendingImages=[]
     self.smilies={"s6":":-)",
              "s4":":-(",
              "s1":":-/",
              "s8":"X-O",
              "s7":"(-:",
              "s9":"?-|",
              "s10":"X-|",
              "s11":"8-)",
              "s2":":-D",
              "s3":":-P",
              "s5":";-)",
              "sxmas":"o:)",
              "s12":":-E",
              "s13":":-G"}
     self.colors={"red":"FF0000",
                  "blue":"0000FF",
                  "green":"008000",
                  "gray":"696969",
                  "cyan":"00FFFF",
                  "magenta":"FF00FF",
                  "orange":"FFA500",
                  "pink":"FFC0CB",
                  "yellow":"FFFF00",
                  "normal":"000000",
                  "normalaway":"696969",
                  "greenaway":"4D804D",
                  "blueaway":"5050E6",
                  "orangeaway":"E5B151",
                  "redaway":"E65151"}
     self.imageLock = DeferredSemaphore(1)
Code Example #53
    def collect(self, config):
        log.debug('Starting SBA JVM collect')
        # TODO : cleanup job collect

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        ds0 = config.datasources[0]
        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for metric, props in self.metrics.iteritems():
            applicationNameID = ds0.params['applicationNameID']        # app_delivery_service_8df95ae5
            # if applicationNameID in applicationList:
            #     continue
            # applicationList.append(applicationNameID)
            serviceURL = ds0.params['serviceURL']

            # url = self.urls[datasource.datasource].format(serviceURL)
            endpoint = props['endpoint']
            url = '{}/management/metrics/{}'.format(serviceURL, endpoint)
            log.debug('AAA url: {}'.format(url))

            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": ds0.zIVGroups,
                            "iv-user": ds0.zIVUser,
                            },
                        )
            tag = 'jvm_{}_{}'.format(metric, applicationNameID)      # order_delivery_service_3db30547
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
Code Example #54
    def __init__(self, reactor, connect_vbms_path, bundle_path, endpoint_url,
                 keyfile, samlfile, key, keypass, ca_cert, client_cert):
        self._reactor = reactor

        self._connect_vbms_path = connect_vbms_path
        self._bundle_path = bundle_path
        self._endpoint_url = endpoint_url
        self._keyfile = keyfile
        self._samlfile = samlfile
        self._key = key
        self._keypass = keypass
        self._ca_cert = ca_cert
        self._client_cert = client_cert

        self._connect_vbms_semaphore = DeferredSemaphore(tokens=8)
Code Example #55
class TwistedWebTestMixin(object):

    def setUp(self):
        self._semaphore = DeferredSemaphore(2)

    def tearDown(self):
        pass


    def getPages(self, count, url):
        return gatherResults([self.getPage(url) for i in xrange(0, count)])

    @inlineCallbacks
    def getPage(self, url):
        yield self._semaphore.acquire()
        page = yield tx_getPage(url)
        self._semaphore.release()
        returnValue(page)

    @inlineCallbacks
    def getPageLength(self, url):
        response = yield self.getPage(url)
        returnValue(len(response))
Code Example #56
class RequestManager:

    overall_semaphore = DeferredSemaphore(OVERALL_REQUESTS)

    # Yes, I want a mutable class attribute because I want changes done in an
    # instance to be visible in other instances as well.
    host_locks = {}

    def run(self, host, probe_func):
        # Use a MultiLock with one semaphore limiting the overall
        # connections and another limiting the per-host connections.
        if host in self.host_locks:
            multi_lock = self.host_locks[host]
        else:
            multi_lock = MultiLock(self.overall_semaphore,
                                   DeferredSemaphore(PER_HOST_REQUESTS))
            self.host_locks[host] = multi_lock
        return multi_lock.run(probe_func)
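
MultiLock here comes from the surrounding project rather than from Twisted itself. A rough equivalent using only DeferredSemaphore nests two run() calls, so both the overall limit and the per-host limit are held while probe_func runs; the class name and token counts below are illustrative, not taken from the original:

from twisted.internet.defer import DeferredSemaphore

OVERALL_REQUESTS = 100   # made-up values for the sketch
PER_HOST_REQUESTS = 2


class SimpleRequestManager(object):

    overall_semaphore = DeferredSemaphore(OVERALL_REQUESTS)
    host_semaphores = {}

    def run(self, host, probe_func):
        host_sem = self.host_semaphores.setdefault(
            host, DeferredSemaphore(PER_HOST_REQUESTS))
        # The outer run() holds an overall token while the inner run()
        # waits for (and holds) the per-host token around probe_func.
        return self.overall_semaphore.run(host_sem.run, probe_func)
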
Code Example #57
from config import MAX_CONNECT

def send_message(endpoint, dispatcher):
    outgoing_message = VOEventMessage(LOCAL_IVO)
    if dispatcher.ctr == N_OF_EVENTS:
        dispatcher.loop.stop()
    else:
        dispatcher.ctr += 1

    def do_send():
        # Set up a factory connected to the relevant endpoint
        d = endpoint.connect(VOEventSenderFactory())

        # And when the connection is ready, use it to send a message
        d.addCallback(lambda p: p.sendString(outgoing_message.to_string()))

        # The semaphore releases when the returned Deferred fires
        return d

    dispatcher.run(do_send)

if __name__ == "__main__":
    log.startLogging(sys.stdout)
    endpoint = clientFromString(reactor, CONNECT_TO)
    dispatcher = DeferredSemaphore(MAX_CONNECT)
    dispatcher.loop = task.LoopingCall(send_message, endpoint, dispatcher)
    dispatcher.ctr = 1
    dispatcher.loop.start(float(PERIOD)/N_OF_EVENTS)

    reactor.run()