def test_multiple_listeners_error(self):
    # if there was an error while calling a callback
    # it should not prevent the other listeners from being called
    # also, all of the errors should be logged, check the output
    # manually that they are
    p = proc.spawn(lambda: 5)
    results = []
    def listener1(*args):
        results.append(10)
        raise ExpectedError('listener1')
    def listener2(*args):
        results.append(20)
        raise ExpectedError('listener2')
    def listener3(*args):
        # appends nothing: the only trace is the logged error
        raise ExpectedError('listener3')
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY * 10)
    # both appending listeners ran despite every callback raising;
    # their relative order is not guaranteed
    assert results in [[10, 20], [20, 10]], results
    # repeat with a proc that dies of an exception instead of returning
    p = proc.spawn(lambda: getcurrent().throw(
        ExpectedError('test_multiple_listeners_error')))
    results = []
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY * 10)
    assert results in [[10, 20], [20, 10]], results
def test_multiple_listeners_error(self):
    # if there was an error while calling a callback
    # it should not prevent the other listeners from being called
    # also, all of the errors should be logged, check the output
    # manually that they are
    p = proc.spawn(lambda : 5)
    results = []
    def listener1(*args):
        results.append(10)
        raise ExpectedError('listener1')
    def listener2(*args):
        results.append(20)
        raise ExpectedError('listener2')
    def listener3(*args):
        # raises without appending; only the log shows it ran
        raise ExpectedError('listener3')
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY*10)
    # both appending listeners ran; their order is unspecified
    assert results in [[10, 20], [20, 10]], results
    # same scenario, but the source proc fails instead of returning
    p = proc.spawn(lambda : getcurrent().throw(ExpectedError('test_multiple_listeners_error')))
    results = []
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY*10)
    assert results in [[10, 20], [20, 10]], results
def test_multiple_listeners_error(self):
    # if there was an error while calling a callback
    # it should not prevent the other listeners from being called
    # also, all of the errors should be logged, check the output
    # manually that they are
    p = proc.spawn(lambda : 5)
    results = []
    def listener1(*args):
        results.append(10)
        1/0  # deliberate ZeroDivisionError inside the callback
    def listener2(*args):
        results.append(20)
        2/0  # deliberate error, distinct operand for log identification
    def listener3(*args):
        3/0  # deliberate error; appends nothing to results
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY*10)
    # both appending listeners ran despite the errors; order unspecified
    assert results in [[10, 20], [20, 10]], results
    # repeat with a proc that itself fails (int('hello') -> ValueError)
    p = proc.spawn(int, 'hello')
    results = []
    p.link(listener1)
    p.link(listener2)
    p.link(listener3)
    sleep(DELAY*10)
    assert results in [[10, 20], [20, 10]], results
def test_2_servers_same_port(self):
    # preparing the same server twice must reuse a single listening port,
    # and each prepared URI must be able to complete its own connection
    server = self.get_server()
    server_uri_1 = server.prepare(self.get_server_uri())
    server_uri_2 = server.prepare(self.get_server_uri())
    # both URIs are backed by one port with a single listener entry
    assert len(server.ports)==1, server.ports
    assert len(server.ports.values()[0])==1, server.ports
    connector = self.get_connector()
    # first connection, against the first prepared URI
    client1_full_local_path = connector.prepare()
    server_transport_event = TimeoutEvent()
    proc.spawn(server.complete, client1_full_local_path).link(server_transport_event)
    client1_transport = connector.complete(server_uri_1)
    server_transport = server_transport_event.wait()
    # exercise the transport in both directions, then tear it down
    self._test_write_chunk(client1_transport, server_transport)
    self._test_write_chunk(server_transport, client1_transport)
    client1_transport.loseConnection()
    server_transport.loseConnection()
    # second connection, against the second URI on the same port
    client2_full_local_path = connector.prepare()
    server_transport_event = TimeoutEvent()
    proc.spawn(server.complete, client2_full_local_path).link(server_transport_event)
    client2_transport = connector.complete(server_uri_2)
    server_transport = server_transport_event.wait()
    self._test_write_chunk(client2_transport, server_transport)
    self._test_write_chunk(server_transport, client2_transport)
    client2_transport.loseConnection()
    server_transport.loseConnection()
def test_proc(self):
    """A finished proc notifies linked procs via LinkedCompleted."""
    source = proc.spawn(lambda: 100)
    sleeper = proc.spawn(sleep, 1)
    source.link(sleeper)
    self.assertRaises(proc.LinkedCompleted, sleeper.wait)
    # linking after the source is already dead must notify just the same
    late_sleeper = proc.spawn(sleep, 1)
    source.link(late_sleeper)
    self.assertRaises(proc.LinkedCompleted, late_sleeper.wait)
def test_proc(self):
    """Linked procs receive LinkedCompleted when the source proc finishes."""
    def check_notified(target):
        # linking and waiting must raise LinkedCompleted in the target
        p.link(target)
        self.assertRaises(proc.LinkedCompleted, target.wait)
    p = proc.spawn(lambda: 100)
    check_notified(proc.spawn(sleep, 1))
    # second target is linked after p has already died
    check_notified(proc.spawn(sleep, 1))
def handler(local):
    # bridge one accepted connection to the remote endpoint: copy bytes
    # in both directions until either side closes
    client = str(local.getHost())
    print 'accepted connection from %s' % client
    remote = GreenClientCreator(reactor, UnbufferedTransport).connectTCP(remote_host, remote_port)
    a = proc.spawn(forward, remote, local)
    b = proc.spawn(forward, local, remote)
    # trap_errors=True: wait for both forwarders even if one of them fails
    proc.waitall([a, b], trap_errors=True)
    print 'closed connection to %s' % client
def start(self):
    # subscribe to system events that should trigger re-discovery
    notification_center = NotificationCenter()
    notification_center.add_observer(self, name='SystemIPAddressDidChange')
    notification_center.add_observer(self, name='SystemDidWakeUpFromSleep')
    # background workers: file watcher and command dispatcher
    self._select_proc = proc.spawn(self._process_files)
    proc.spawn(self._handle_commands)
    # activate
    self._stopped = False
    # kick off the initial discovery pass
    self._command_channel.send(Command('discover'))
def handler(local):
    # proxy a single accepted connection: open the upstream side, then
    # pump data both ways until one direction terminates
    client = str(local.getHost())
    print 'accepted connection from %s' % client
    remote = GreenClientCreator(reactor, UnbufferedTransport).connectTCP(
        remote_host, remote_port)
    a = proc.spawn(forward, remote, local)
    b = proc.spawn(forward, local, remote)
    # trap_errors=True so a failure in one direction doesn't abort the wait
    proc.waitall([a, b], trap_errors=True)
    print 'closed connection to %s' % client
def test_wait_all_exception_order(self):
    """waitall must raise the earliest of several errors.

    `b` fails immediately while `badint` fails only after a 0.1s sleep,
    so the exception raised by waitall must be b's ('second').
    """
    def badint():
        sleep(0.1)
        int('first')  # ValueError raised later than b's error
    a = proc.spawn(badint)
    b = proc.spawn(int, 'second')
    try:
        proc.waitall([a, b])
    except ValueError as ex:  # py2.6+/py3 compatible syntax
        assert 'second' in str(ex), repr(str(ex))
    else:
        # the original silently passed if waitall raised nothing at all
        raise AssertionError('proc.waitall was expected to raise ValueError')
def test_wait_all_exception_order(self):
    """If several exceptions are raised, waitall must raise the earliest.

    `b` dies immediately, `first` only after a 0.1s sleep, so waitall has
    to report 'second' (the earlier failure) rather than 'first'.
    """
    def first():
        sleep(0.1)
        raise ExpectedError('first')
    a = proc.spawn(first)
    b = proc.spawn(lambda: getcurrent().throw(ExpectedError('second')))
    try:
        proc.waitall([a, b])
    except ExpectedError as ex:  # py2.6+/py3 compatible syntax
        assert 'second' in str(ex), repr(str(ex))
    else:
        # the original silently passed if waitall raised nothing at all
        raise AssertionError('proc.waitall was expected to raise ExpectedError')
def test_wait_noerrors(self):
    """waitall returns results in spawn order; linked events get the value."""
    x = proc.spawn(lambda: 1)
    y = proc.spawn(lambda: 2)
    z = proc.spawn(lambda: 3)
    self.assertEqual(proc.waitall([x, y, z]), [1, 2, 3])
    # an event linked to an already-finished proc fires immediately
    first = _event.Event()
    x.link(first)
    self.assertEqual(first.wait(), 1)
    x.unlink(first)
    # a second event, linked after the unlink, still gets the result
    second = _event.Event()
    x.link(second)
    self.assertEqual(second.wait(), 1)
    per_proc = [proc.waitall([job]) for job in (x, y, z)]
    self.assertEqual(per_proc, [[1], [2], [3]])
def test_wait_noerrors(self):
    """waitall collects results in order; dead-proc links fire at once."""
    x = proc.spawn(lambda: 1)
    y = proc.spawn(lambda: 2)
    z = proc.spawn(lambda: 3)
    self.assertEqual(proc.waitall([x, y, z]), [1, 2, 3])
    # link/wait/unlink cycle on a finished proc
    evt = coros.event()
    x.link(evt)
    self.assertEqual(evt.wait(), 1)
    x.unlink(evt)
    # linking a fresh event afterwards still delivers the result
    evt = coros.event()
    x.link(evt)
    self.assertEqual(evt.wait(), 1)
    singles = [proc.waitall([each]) for each in (x, y, z)]
    self.assertEqual(singles, [[1], [2], [3]])
def test_wait_error(self):
    def x():
        sleep(DELAY)
        return 1
    x = proc.spawn(x)
    z = proc.spawn(lambda : 3)
    y = proc.spawn(lambda : getcurrent().throw(ExpectedError('test_wait_error')))
    # cross-link the procs so y's failure propagates into x and z
    y.link(x)
    x.link(y)
    y.link(z)
    z.link(y)
    # waiting on all three surfaces y's original error
    self.assertRaises(ExpectedError, proc.waitall, [x, y, z])
    # x was taken down by the link from y -> LinkedFailed
    self.assertRaises(proc.LinkedFailed, proc.waitall, [x])
    # z finished with 3 before the failure could affect it
    self.assertEqual(proc.waitall([z]), [3])
    # waiting on y alone re-raises its own error
    self.assertRaises(ExpectedError, proc.waitall, [y])
def test_wait_error(self):
    def x():
        sleep(DELAY)
        return 1
    x = proc.spawn(x)
    z = proc.spawn(lambda : 3)
    # int('badint') raises ValueError inside y
    y = proc.spawn(int, 'badint')
    # cross-link the procs so y's failure propagates into x and z
    y.link(x)
    x.link(y)
    y.link(z)
    z.link(y)
    # waiting on all three surfaces y's original ValueError
    self.assertRaises(ValueError, proc.waitall, [x, y, z])
    # x was taken down by the link from y -> LinkedFailed
    self.assertRaises(proc.LinkedFailed, proc.waitall, [x])
    # z finished with 3 before the failure could affect it
    self.assertEqual(proc.waitall([z]), [3])
    # waiting on y alone re-raises its own error
    self.assertRaises(ValueError, proc.waitall, [y])
def test_raise(self):
    """A proc dying of an exception notifies links with LinkedFailed."""
    self.p = failer = proc.spawn(
        lambda: getcurrent().throw(ExpectedError('test_raise')))
    self._test_raise(failer, True, proc.LinkedFailed)
    # a dead proc must keep reporting the failure to new links
    for _ in range(3):
        self._test_raise(failer, False, proc.LinkedFailed)
def execute(self, func, *args, **kwargs): """Execute func in one of the coroutines maintained by the pool, when one is free. Immediately returns a :class:`~eventlet.proc.Proc` object which can be queried for the func's result. >>> pool = Pool() >>> task = pool.execute(lambda a: ('foo', a), 1) >>> task.wait() ('foo', 1) """ # if reentering an empty pool, don't try to wait on a coroutine freeing # itself -- instead, just execute in the current coroutine if self.sem.locked() and api.getcurrent() in self.procs: p = proc.spawn(func, *args, **kwargs) try: p.wait() except: pass else: self.sem.acquire() p = self.procs.spawn(func, *args, **kwargs) # assuming the above line cannot raise p.link(lambda p: self.sem.release()) if self.results is not None: p.link(self.results) return p
def execute(self, func, *args, **kwargs): """Execute func in one of the coroutines maintained by the pool, when one is free. Immediately returns a Proc object which can be queried for the func's result. >>> pool = Pool() >>> task = pool.execute(lambda a: ('foo', a), 1) >>> task.wait() ('foo', 1) """ # if reentering an empty pool, don't try to wait on a coroutine freeing # itself -- instead, just execute in the current coroutine if self.sem.locked() and api.getcurrent() in self.procs: p = proc.spawn(func, *args, **kwargs) try: p.wait() except: pass else: self.sem.acquire() p = self.procs.spawn(func, *args, **kwargs) # assuming the above line cannot raise p.link(lambda p: self.sem.release()) if self.results is not None: p.link(self.results) return p
def set_links(self, p, first_time, kill_exc_type):
    # attach a variety of listener kinds to p (event, proc, queue,
    # current greenlet, callback) and return them so the caller can
    # verify which ones were notified
    event = _event.Event()
    self.link(p, event)
    proc_flag = []
    def receiver():
        sleep(DELAY)
        proc_flag.append('finished')
    receiver = proc.spawn(receiver)
    self.link(p, receiver)
    queue = coros.queue(1)
    self.link(p, queue)
    try:
        # linking the current greenlet: raises kill_exc_type immediately
        # only when p is already dead (i.e. not on the first call)
        self.link(p)
    except kill_exc_type:
        if first_time:
            raise
    else:
        assert first_time, 'not raising here only first time'
    callback_flag = ['initial']
    self.link(p, lambda *args: callback_flag.remove('initial'))
    # pile on extra anonymous listeners to exercise multi-link delivery
    for _ in range(10):
        self.link(p, _event.Event())
        self.link(p, coros.queue(1))
    return event, receiver, proc_flag, queue, callback_flag
def test_return(self):
    """A proc returning normally delivers its value through links."""
    def return25():
        return 25
    self.p = child = proc.spawn(return25)
    self._test_return(child, True, 25, proc.LinkedCompleted, lambda: sleep(0))
    # the dead proc must keep delivering the same result to new links
    for _ in xrange(3):
        self._test_return(child, False, 25, proc.LinkedCompleted, lambda: sleep(0))
def test_event(self):
    """An Event linked to a proc receives the proc's return value."""
    source = proc.spawn(lambda: 100)
    first = _event.Event()
    source.link(first)
    self.assertEqual(first.wait(), 100)
    # events linked after the proc is dead still get the result
    for _ in range(3):
        late = _event.Event()
        source.link(late)
        self.assertEqual(late.wait(), 100)
def test_event(self):
    """Linking an Event delivers the proc's result, even post-mortem."""
    finished = proc.spawn(lambda: 100)
    evt = _event.Event()
    finished.link(evt)
    self.assertEqual(evt.wait(), 100)
    # repeat with fresh events against the now-dead proc
    for _ in xrange(3):
        another = _event.Event()
        finished.link(another)
        self.assertEqual(another.wait(), 100)
def __init__(self, msrptransport, accept_types=None, on_incoming_cb=None):
    """Run an MSRP session on top of an established transport.

    msrptransport  -- the connected MSRP transport to drive.
    accept_types   -- list of acceptable content types; defaults to ['*'].
    on_incoming_cb -- callable invoked for incoming data; overrides the
                      class-level default when provided.

    Raises TypeError if the effective incoming callback is not callable.
    """
    self.msrp = msrptransport
    # fix: the original used a mutable default argument (['*']), which is
    # shared across all instances; build a fresh list per instance instead
    self.accept_types = ['*'] if accept_types is None else accept_types
    if on_incoming_cb is not None:
        self._on_incoming_cb = on_incoming_cb
    # fix: validate the callback *before* spawning reader/writer jobs, so a
    # bad argument does not leave orphaned greenlets running
    if not callable(self._on_incoming_cb):
        raise TypeError('on_incoming_cb must be callable: %r' % (self._on_incoming_cb, ))
    self.expected_responses = {}
    self.outgoing = coros.queue()
    self.outgoing_files = coros.queue()
    self.reader_job = proc.spawn(self._reader)
    self.writer_job = proc.spawn(self._writer)
    self.state = 'CONNECTED' # -> 'FLUSHING' -> 'CLOSING' -> 'DONE'
    # in FLUSHING writer sends only while there's something in the outgoing queue
    # then it exits and sets state to 'CLOSING' which makes reader only pay attention
    # to responses and success reports. (XXX it could now discard incoming data chunks
    # with direct write() since writer is dead)
    self.reader_job.link(self.writer_job)
    self.last_expected_response = 0
def set_links_timeout(self, link):
    # stuff that won't be touched
    event = _event.Event()
    link(event)
    proc_finished_flag = []
    def myproc():
        sleep(10)  # long enough that it never finishes during the test
        proc_finished_flag.append('finished')
        return 555
    myproc = proc.spawn(myproc)
    link(myproc)
    # zero-capacity queue: a send would block, so it stays untouched
    queue = coros.queue(0)
    link(queue)
    return event, myproc, proc_finished_flag, queue
def set_links_timeout(self, link):
    # stuff that won't be touched
    event = coros.event()
    link(event)
    proc_finished_flag = []
    def myproc():
        sleep(10)  # long enough that it never finishes during the test
        proc_finished_flag.append('finished')
        return 555
    myproc = proc.spawn(myproc)
    link(myproc)
    # zero-capacity queue: a send would block, so it stays untouched
    queue = coros.queue(0)
    link(queue)
    return event, myproc, proc_finished_flag, queue
def test_current(self):
    """link() with no argument targets the current greenlet."""
    completed = proc.spawn(lambda: 100)
    completed.link()
    # the notification arrives while we are blocked in sleep()
    self.assertRaises(proc.LinkedCompleted, sleep, 0.1)
def test_kill(self):
    """A killed proc notifies its links with LinkedKilled."""
    self.p = victim = proc.spawn(sleep, DELAY)
    self._test_kill(victim, True, proc.LinkedKilled)
    # once dead, the proc must keep reporting LinkedKilled to new links
    for _ in xrange(3):
        self._test_kill(victim, False, proc.LinkedKilled)
def test_raise(self):
    """A proc failing with ValueError notifies links via LinkedFailed."""
    self.p = failer = proc.spawn(int, 'badint')
    self._test_raise(failer, True, proc.LinkedFailed)
    # the dead proc keeps reporting the failure to links added later
    for _ in xrange(3):
        self._test_raise(failer, False, proc.LinkedFailed)
def _do_spawn(self, gtransport, protocol):
    # run the handler in its own proc and track it in self.greenlets so it
    # can be managed later; the link removes it from the set when it dies
    g = proc.spawn(self._run_handler, gtransport, protocol)
    self.greenlets.add(g)
    g.link(lambda *_: self.greenlets.remove(g))
def test_multiple_listeners_error_unlink_Source(self):
    """Unlink-during-error scenario driven by a Source fed from a proc."""
    source = proc.Source()
    # deliver the value asynchronously, as a real producer would
    proc.spawn(source.send, 6)
    self._test_multiple_listeners_error_unlink(source)
def test_multiple_listeners_error_unlink_Proc(self):
    """Unlink-during-error scenario driven by a normally-finishing Proc."""
    finisher = proc.spawn(lambda: 5)
    self._test_multiple_listeners_error_unlink(finisher)
Demonstrates how to use eventlet.green package and proc module.
"""
from eventlet import proc
from eventlet.green import socket

# this example works with both standard eventlet hubs and with twisted-based hub
# uncomment the following line to use twisted hub
#from twisted.internet import reactor

def geturl(url):
    # the blocking-looking socket calls below are cooperative thanks to
    # eventlet.green.socket
    c = socket.socket()
    ip = socket.gethostbyname(url)
    c.connect((ip, 80))
    print '%s connected' % url
    c.send('GET /\r\n\r\n')
    # return only the first kilobyte of the response
    return c.recv(1024)

urls = ['www.google.com', 'www.yandex.ru', 'www.python.org']
# spawn one worker proc per URL; they run concurrently
jobs = [proc.spawn(geturl, x) for x in urls]
print 'spawned %s jobs' % len(jobs)

# collect the results from workers
results = proc.waitall(jobs)
# Note, that any exception in the workers will be reraised by waitall
# unless trap_errors argument specifies otherwise

for url, result in zip(urls, results):
    print '%s: %s' % (url, repr(result)[:50])
def test_raise(self):
    """Dying with an exception must deliver LinkedFailed to links."""
    self.p = failer = proc.spawn(
        lambda: getcurrent().throw(ExpectedError('test_raise')))
    self._test_raise(failer, True, proc.LinkedFailed)
    # repeat against the already-dead proc
    for _ in xrange(3):
        self._test_raise(failer, False, proc.LinkedFailed)
def setup():
    """Start a repo server on localhost:4001 and connect the global client."""
    global client
    primary = squash.create_repo('.primary')
    spawn(primary.serve, ('localhost', 4001))
    client = Client(4001)
    sleep(.1)  # yield briefly so the server proc gets a chance to run
def test_multiple_listeners_error_unlink_Proc(self):
    """Run the unlink-during-error scenario against a finished Proc."""
    completed = proc.spawn(lambda: 5)
    self._test_multiple_listeners_error_unlink(completed)
def test_kill(self):
    """Killing a proc delivers LinkedKilled to linked listeners."""
    self.p = sleeper = proc.spawn(sleep, DELAY)
    self._test_kill(sleeper, True, proc.LinkedKilled)
    # repeating the same checks against the now-dead proc
    for _ in range(3):
        self._test_kill(sleeper, False, proc.LinkedKilled)
def test_current(self):
    """Calling link() without arguments links to the current greenlet."""
    finished = proc.spawn(lambda: 100)
    finished.link()
    # the LinkedCompleted notification interrupts the sleep below
    self.assertRaises(proc.LinkedCompleted, sleep, 0.1)