Example #1
    def testUnexpectedError(self):
        c = task.Cooperator()

        def myiter():
            if 0:
                yield None
            else:
                raise RuntimeError()

        d = c.coiterate(myiter())
        return self.assertFailure(d, RuntimeError)
Example #2
    def twisted_developer_day(customers):
        print("Goodmorning from Twisted developer")
        work = (inline_install(customer) for customer in customers)

        # We use the Cooperator mechanism so that the secretary serves no
        # more than 5 customers simultaneously.
        coop = task.Cooperator()
        join = defer.DeferredList([coop.coiterate(work) for i in range(5)])

        join.addCallback(lambda _: reactor.stop())
        print("Bye from Twisted developer!")
Example #3
    def setUp(self):
        """Setup the test."""
        yield super(UploadTestCase, self).setUp()

        self.tmpdir = os.getcwd() + "/tmp/diskstorage_tests"
        os.makedirs(self.tmpdir)
        self.addCleanup(shutil.rmtree, self.tmpdir)

        def slowScheduler(x):
            """A slower scheduler for our cooperator."""
            return reactor.callLater(0.1, x)
        self._cooperator = task.Cooperator(scheduler=slowScheduler)
Example #4
 def find_dead_links(self, url_list, jobs):
     coop = task.Cooperator()
     wait_task_deferreds = []
     for job_number in range(jobs):
         self.log.info('scheduling #%s task', job_number)
         gen = self.find_dead_url_generator(url_list)
         wait_task_deferreds.append(coop.coiterate(gen))
     self.log.info('waiting for %s tasks to complete...',
                   len(wait_task_deferreds))
     yield defer.DeferredList(wait_task_deferreds)
     self.log.info('completed %s tasks : dead links finder',
                   len(wait_task_deferreds))
Example #5
def taskRun():
    deferreds = []
    coop = task.Cooperator()
    work = doWork()
    maxRun = 50
    for i in xrange(maxRun):
        d = coop.coiterate(work)
        # d.addCallback(getResult)
        # d.addErrback(getError)
        deferreds.append(d)
    dl = defer.DeferredList(deferreds)
    dl.addCallback(finish)
Example #6
 def doPing(self, port):
     # use the task cooperator to limit the number of connections
     # established to the same port to 1
     limiter = 1
     coop = task.Cooperator()
     def _doPing():
         for check in xrange(self.count):
             yield self.doFactory(port)
     ping = _doPing()
     pings = defer.DeferredList(
         [coop.coiterate(ping) for i in xrange(limiter)])
     return pings
Example #7
 def _crawl_ip(self):
     page_size = self.page_size
     if not page_size:
         html = requests.get(self._url + '1', headers=self.headers)
         page_size = int(Selector(text=html.text).css("#listnav li:nth-child(9) a::text").get(""))
     works = (self._download_page(url) for url in range(1, page_size + 1))
     coop = task.Cooperator()
     join = defer.DeferredList([coop.coiterate(works) for i in range(self.concurrent)])
     if self.scrapy:
         join.addCallback(self._crawl_over)
     else:
         join.addCallback(lambda _: reactor.stop())
     yield None
Example #8
def parallel(iterable, count, callable, *args, **named):
    """
    Process multiple Deferreds concurrently, with a limit on how many run at once.

    Cooperator implements cooperative tasks: work is an iterable that yields the
    basic units of work. At each yield the Cooperator decides which task runs
    next; if the yielded value is a Deferred, that work waits until the
    Deferred's callback chain has completed. When the Cooperator has several
    tasks, it switches the work between them, which effectively gives parallel
    operation.

    cooperate() returns a CooperativeTask: it starts the given iterator as a
    long-running cooperative task that can be paused, resumed, and waited on.
    coiterate() adds an iterator to the running Cooperator's iterator list; it is
    equivalent to cooperate(), but returns a Deferred.
    """
    coop = task.Cooperator()
    work = (callable(elem, *args, **named) for elem in iterable)
    return defer.DeferredList([coop.coiterate(work) for _ in range(count)])
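A short usage sketch for the parallel() helper above; the fetch() function and the URLs are made up for illustration, with task.deferLater standing in for real I/O:

from twisted.internet import reactor, task

def fetch(url):
    # Stand-in for an HTTP request or similar; returns a Deferred.
    return task.deferLater(reactor, 0.2, print, "fetched", url)

urls = ["http://example.com/page/%d" % i for i in range(30)]
# At most 10 fetches run concurrently; the DeferredList fires once all
# 30 URLs have been processed.
parallel(urls, 10, fetch).addBoth(lambda _: reactor.stop())
reactor.run()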
Example #9
 def handle_spider_output(self, res, request, response):
     if res is None:
         d = defer.Deferred()
         reactor.callLater(0, d.callback, None)
         return d
     elif not isinstance(res, (dict, bytes)) and hasattr(res, '__iter__'):
         pass
     else:
         res = [res]
     it = self.iter(res, self.handle_spider_error, request, response)
     coop = task.Cooperator()
     work = (self.from_spider(output, request) for output in it)
     return defer.DeferredList(
         [coop.coiterate(work) for _ in range(self.max_concurency)])
Example #10
 def iter_page():
     work = (getpage(FormRequest(url=url,
                                 headers=self.headers,
                                 formdata={
                                     'Type': 'elite',
                                     'PageIdx': str(page),
                                     'Uptime': '0'
                                 },
                                 meta={'download_timeout': 60}),
                     page=page)
             for page in range(1, self.maxpage + 1))
     coop = task.Cooperator()
     join = defer.DeferredList(
         coop.coiterate(work) for i in range(self.concurrent))
     join.addBoth(lambda _: reactor.stop())
Example #11
    def testCooperation(self):
        L = []
        def myiter(things):
            for th in things:
                L.append(th)
                yield None

        groupsOfThings = ['abc', (1, 2, 3), 'def', (4, 5, 6)]

        c = task.Cooperator()
        tasks = []
        for stuff in groupsOfThings:
            tasks.append(c.coiterate(myiter(stuff)))

        return defer.DeferredList(tasks).addCallback(
            lambda ign: self.assertEqual(tuple(L), sum(zip(*groupsOfThings), ())))
Example #12
 def iter_proxy():
     # work needs to be a generator; I tried a list, but that did not give the intended concurrency control
     work = (getResponse(
         proxy,
         Request(url='http://myip.dnsdynamic.org',
                 headers=self.headers,
                 meta={
                     'proxy': "http://" + proxy,
                     'download_timeout': self.timeout
                 })) for proxy in self.proxy_list
             for times in range(self.checknum))
     coop = task.Cooperator()
     join = defer.DeferredList(
         coop.coiterate(work) for i in range(self.concurrent))
     join.addCallback(output_better_proxy)
     join.addCallback(lambda _: reactor.stop())
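The comment at the top is the important detail: the throttling only works because every coiterate() consumer pulls from one shared generator, so requests are created lazily as slots free up. A rough sketch of the contrast, using a hypothetical job() helper:

from twisted.internet import defer, reactor, task

def job(n):
    # Stand-in for a proxy check; fires after a short delay.
    return task.deferLater(reactor, 0.3, print, "finished job", n)

coop = task.Cooperator()

# Shared generator: job(n) is only called when a consumer asks for the next
# item, so at most three jobs are ever in flight.
work = (job(n) for n in range(12))
throttled = defer.DeferredList([coop.coiterate(work) for _ in range(3)])

# A list such as [job(n) for n in range(12)] would call job() for every n
# up front, starting all twelve Deferreds before the Cooperator ever saw
# them, so no concurrency limit would apply.
throttled.addBoth(lambda _: reactor.stop())
reactor.run()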
Example #13
 def setUp(self):
     """
     Create a cooperator with a fake scheduler and a termination predicate
     that ensures only one unit of work will take place per tick.
     """
     self._doDeferNext = False
     self._doStopNext = False
     self._doDieNext = False
     self.work = []
     self.scheduler = FakeScheduler()
     self.cooperator = task.Cooperator(
         scheduler=self.scheduler,
         # Always stop after one iteration of work (return a function which
         # returns a function which always returns True)
         terminationPredicateFactory=lambda: lambda: True)
     self.task = self.cooperator.cooperate(self.worker())
     self.cooperator.start()
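FakeScheduler here is one of Twisted's own test helpers and is not shown in the snippet. As a rough, hypothetical sketch of the contract a Cooperator scheduler has to satisfy (a one-argument callable that is given a no-argument callable and returns an object with a cancel() method), a hand-driven scheduler could look like the following; it is an illustration, not Twisted's implementation:

class ManualCall:
    """A fake delayed call; the Cooperator only needs it to support cancel()."""
    def __init__(self, func, queue):
        self.func = func
        self._queue = queue

    def cancel(self):
        if self in self._queue:
            self._queue.remove(self)


class ManualScheduler:
    """Collects scheduled work so a test can run it one tick at a time."""
    def __init__(self):
        self.calls = []

    def __call__(self, func):
        call = ManualCall(func, self.calls)
        self.calls.append(call)
        return call

    def pump(self):
        # Run everything scheduled so far; work scheduled while running
        # waits for the next pump().
        pending, self.calls = self.calls, []
        for call in pending:
            call.func()

The terminationPredicateFactory argument works as the snippet's own comment says: the factory is called for a fresh predicate each scheduling round, and a predicate that immediately returns True stops each tick after a single unit of work.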
Example #14
    def doScans(self):
        '''
        We use the Twisted task cooperator here to control the number of
        deferreds (and therefore connections) created at once, thus providing a
        way for systems to use the script efficiently.
        '''
        coop = task.Cooperator()

        def scanHosts():
            for host in self.hosts:
                for port in xrange(*self.portRange):
                    yield self.doFactory(host, port)

        scans = scanHosts()
        self.scans = defer.DeferredList(
            [coop.coiterate(scans) for i in xrange(self.batchSize)])
        return self.scans
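For comparison only, and not what the script above does: the same cap on simultaneous connections can also be expressed with twisted.internet.defer.DeferredSemaphore, which enumerates the work eagerly but delays each call until one of its tokens is free. A hedged sketch, with the snippet's doFactory, hosts, portRange, and batchSize attributes turned into parameters of a hypothetical helper:

from twisted.internet import defer

def scan_with_semaphore(do_factory, hosts, port_range, batch_size):
    # DeferredSemaphore.run() only invokes do_factory once one of the
    # batch_size tokens is available, so at most batch_size connection
    # attempts are outstanding at any time.
    sem = defer.DeferredSemaphore(batch_size)
    scans = [sem.run(do_factory, host, port)
             for host in hosts
             for port in range(*port_range)]
    return defer.DeferredList(scans)

The sketch uses range rather than the Python 2 xrange of the original snippet.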
Example #15
    def process(self):
        startmsg = '%s Starting...' % str(self)
        print startmsg
        self.report('RUN')
        try:
            # Launch the servers' processing in parallel, limited to self.maxparallel
            if self.maxparallel > 0:
                coop = task.Cooperator()
                work = (server.process() for server in self.servers)
                d = finished_phase = yield defer.DeferredList(
                    [coop.coiterate(work) for i in xrange(self.maxparallel)])
            else:
                d = finished_phase = yield defer.DeferredList(
                    [s.process() for s in self.servers])
        
            self.report(' OK')
            defer.returnValue(d)

        except Exception, e:
            self.report('NOK')            
            defer.returnValue( defer.fail() )
Example #16
 def testStopRunning(self):
     """
     Test that a running iterator will not run to completion when the
     cooperator is stopped.
     """
     c = task.Cooperator()
     def myiter():
         for myiter.value in range(3):
             yield myiter.value
     myiter.value = -1
     d = c.coiterate(myiter())
     d.addCallback(self.cbIter)
     d.addErrback(self.ebIter)
     c.stop()
     def doasserts(result):
         self.assertEqual(result, self.RESULT)
         self.assertEqual(myiter.value, -1)
     d.addCallback(doasserts)
     return d
Example #17
 def __init__(self,
              func,
              width=0,
              size=None,
              backlog=None,
              name=None,
              save_on_exit=None):
     self._func = func
     self.stopped = self.paused = False
     self._queue = DeferredPriorityQueue(size, backlog)
     self._pool = DeferredPool()
     self._coop = task.Cooperator()
     self._currentWidth = 0
     self.pendingStops = 0
     self._underway = set()
     self.width = width
     self.name = name
     if save_on_exit is None:
         self.save_on_exit = True
     else:
         self.save_on_exit = save_on_exit
Example #18
def main_task(reactor):
    def parse(data):
        global response_sum
        response_sum += len(data)

    def task_finished(*args, **kwargs):
        print(args, kwargs)
        print("done?")

    def fetch_urls():
        delay = 100
        num_iter = 500
        for url in generate_urls("http://127.0.0.1:8080/add?name=twisted&delay={}&".format(delay), num_iter):
            yield getPage(url).addCallback(parse)

    coop = task.Cooperator()
    urls = fetch_urls()

    return (defer.DeferredList([coop.coiterate(urls)
                                for _ in range(BATCH_SIZE)])
            .addCallback(task_finished))
Example #19
    def testCallbackReCoiterate(self):
        """
        If a callback to a deferred returned by coiterate calls coiterate on
        the same Cooperator, we should make sure to only do the minimal amount
        of scheduling work.  (This test was added to demonstrate a specific bug
        that was found while writing the scheduler.)
        """
        calls = []

        class FakeCall:
            def __init__(self, func):
                self.func = func

            def __repr__(self) -> str:
                return '<FakeCall %r>' % (self.func, )

        def sched(f):
            self.assertFalse(calls, repr(calls))
            calls.append(FakeCall(f))
            return calls[-1]

        c = task.Cooperator(scheduler=sched,
                            terminationPredicateFactory=lambda: lambda: True)
        d = c.coiterate(iter(()))

        done = []

        def anotherTask(ign):
            c.coiterate(iter(())).addBoth(done.append)

        d.addCallback(anotherTask)

        work = 0
        while not done:
            work += 1
            while calls:
                calls.pop(0).func()
                work += 1
            if work > 50:
                self.fail("Cooperator took too long")
Example #20
    def process(self):
        startmsg = '%s Starting...' % str(self)
        print startmsg
        self.mel.reportToListener(startmsg)
        try:
            # Launch the servers' processing in parallel, limited to self.maxparallel
            if self.maxparallel > 0:
                coop = task.Cooperator()
                work = (server.process() for server in self.servers)
                d = finished_phase = yield defer.DeferredList(
                    [coop.coiterate(work) for i in xrange(self.maxparallel)])
            else:
                d = finished_phase = yield defer.DeferredList(
                    [s.process() for s in self.servers])

            endmsg = '%s Finished !' % str(self)
            print endmsg
            self.mel.reportToListener(endmsg)
            defer.returnValue(d)
        except Exception, e:
            endmsg = '%s Finished with errors !' % str(self)
            self.mel.reportToListener(endmsg)
Example #21
    def testResourceExhaustion(self):
        output = []
        def myiter():
            for i in range(100):
                output.append(i)
                if i == 9:
                    _TPF.stopped = True
                yield i

        class _TPF:
            stopped = False
            def __call__(self):
                return self.stopped

        c = task.Cooperator(terminationPredicateFactory=_TPF)
        c.coiterate(myiter()).addErrback(self.ebIter)
        c._delayedCall.cancel()
        # testing a private method because only the test case will ever care
        # about this, so we have to carefully clean up after ourselves.
        c._tick()
        c.stop()
        self.failUnless(_TPF.stopped)
        self.assertEqual(output, list(range(10)))
Example #22
def _main(reactor, args, config):
    summary_hooks = stethoscope.plugins.utils.instantiate_plugins(
        config, namespace='stethoscope.batch.plugins.summary')

    if args.input is None:
        emails = config['BATCH_GET_EMAILS']()
    else:
        emails = [email.strip().strip('"') for email in args.input.readlines()]
    logger.info("retrieving devices for {:d} users", len(emails))

    results = dict()
    deferreds = list()
    cooperator = task.Cooperator()
    work = work_generator(args, config, emails, results)
    for idx in six.moves.range(args.limit):
        deferreds.append(cooperator.coiterate(work))

    deferred = defer.gatherResults(deferreds)

    def log_results(_):
        num_devices = sum(len(values) for values in six.itervalues(results))
        logger.info("retrieved {:d} unique devices for {:d} users",
                    num_devices, len(emails))
        return _

    deferred.addCallback(log_results)

    if not args.collect_only:
        for summary_hook in summary_hooks:

            # Bind summary_hook at definition time so each callback posts to
            # its own hook rather than to the loop's final value.
            def _hook(_, summary_hook=summary_hook):
                summary_hook.obj.post(results)
                return _

            deferred.addCallback(_hook)

    return deferred
Example #23
    def testStopOutstanding(self):
        """
        An iterator run with L{Cooperator.coiterate} paused on a L{Deferred}
        yielded by that iterator will fire its own L{Deferred} (the one
        returned by C{coiterate}) when L{Cooperator.stop} is called.
        """
        testControlD = defer.Deferred()
        outstandingD = defer.Deferred()
        def myiter():
            reactor.callLater(0, testControlD.callback, None)
            yield outstandingD
            self.fail()
        c = task.Cooperator()
        d = c.coiterate(myiter())
        def stopAndGo(ign):
            c.stop()
            outstandingD.callback('arglebargle')

        testControlD.addCallback(stopAndGo)
        d.addCallback(self.cbIter)
        d.addErrback(self.ebIter)

        return d.addCallback(
            lambda result: self.assertEqual(result, self.RESULT))
Example #24
	def test():
		coop = task.Cooperator()
		from os import getpid
		import psutil
		me = psutil.Process(getpid())
		print "memoire %i"%(me.get_memory_info().rss/1024)
		cr = clientRedis()
		d = cr("ping")
		d.addCallbacks(printit,printit)
		def iterate():
			i = 0
			d = cr("set","test",0)
			d.addCallbacks(printit,printit)
			while True:
				print "on envoi %i"%i
				d = cr("incr","test")
				d.addCallbacks(printit,printit)
				d = cr("get","test")
				d.addCallbacks(printit,printit)
				i += 1
				print "memoire %i"%(me.get_memory_info().rss/1024)
				sleep(0.1)
				yield
		coop.coiterate(iterate())
Example #25
 def __init__(self):
     self.coop = task.Cooperator(started=False)
Example #26
 def __init__(self, wait):
     self.wait = wait
     self.coop = task.Cooperator(started=False, scheduler=self.sched)
Example #27
 def _do_parallel_dns(self):
     coop = task.Cooperator()
     work = (self._send_dns_query(name) for name in self.lookup_list)
     return defer.DeferredList(
         [coop.coiterate(work) for i in xrange(self.num_workers)])
Example #28
def connectBeanstalk(xmpp, host, port=11300):
    coop = task.Cooperator()
    factory = BSFactory(xmpp).connectedCallback(__worker(
        xmpp, coop)).disconnectedCallback(__shutdown(coop))
    reactor.connectTCP(host, port, factory)
Example #29
def parallel(iterable, count, f, *args, **named):
    coop = task.Cooperator()
    work = (f(elem, *args, **named) for elem in iterable)
    return defer.DeferredList([coop.coiterate(work) for i in xrange(count)])
Example #30
from twisted.internet import epollreactor  
epollreactor.install()

from twisted.internet import reactor, task  
from twisted.web.client import HTTPConnectionPool  
import treq  
import random  
from datetime import datetime

req_generated = 0  
req_made = 0  
req_done = 0

cooperator = task.Cooperator()

pool = HTTPConnectionPool(reactor)

def counter():
    # counter() both reads and resets these module-level totals, so it needs
    # an explicit global declaration.
    global req_generated, req_made, req_done
    print("Requests: {} generated; {} made; {} done".format(
            req_generated, req_made, req_done))
    req_generated = req_made = req_done = 0
    reactor.callLater(1, counter)

def body_received(body):  
    global req_done
    req_done += 1

def request_done(response):  
    global req_made
    deferred = treq.json_content(response)
    req_made += 1