def handle_client(self, json_socket):
    global running
    global run_main
    self.connected = True
    api.spawn(self.read_client, json_socket)
    if running:
        json_socket.send(["state", "connected"])
    else:
        json_socket.send(["state", "idle"])
    starttime = time.time()
    while self.connected:
        if self.current:
            cmd = self.current.out_queue.get()
            if cmd[0] == "quit":
                print("quit on handle client")
                break
            else:
                json_socket.send(cmd)
        #for cmd in self.current.queue.get():
        #    json_socket.send(cmd)
        #api.sleep(0)
        api.sleep(0)
    json_socket.close()
    if running:
        running.addCmd(["quit"])
        running = None
    sys.exit()
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-l", "--listen", dest="host", default="0.0.0.0",
                      help="the ip interface to bind")
    parser.add_option("-p", "--port", default=7902, type=int,
                      help="which port to listen on")
    parser.add_option("-d", "--daemon", action="store_true",
                      help="run as a daemon", default=False)
    (options, args) = parser.parse_args()

    cfg = {"localhost:7901": range(16),
           "localhost:7902": range(16),
           "localhost:7903": range(16)}
    store = Client(cfg, 16)

    print "server listening on %s:%s" % (options.host, options.port)
    server = api.tcp_listener((options.host, options.port))
    util.set_reuse_addr(server)
    while True:
        try:
            new_sock, address = server.accept()
        except KeyboardInterrupt:
            break
        api.spawn(handler, store, new_sock)
    print 'close listener ...'
    server.close()
def test_two_simultaneous_connections(self):
    """ This test is timing-sensitive. """
    self.pool = self.create_pool(2)
    conn = self.pool.get()
    self.set_up_test_table(conn)
    self.fill_test_table(conn)
    curs = conn.cursor()
    conn2 = self.pool.get()
    self.set_up_test_table(conn2)
    self.fill_test_table(conn2)
    curs2 = conn2.cursor()
    results = []
    LONG_QUERY = "select * from test_table"
    SHORT_QUERY = "select * from test_table where row_id <= 20"

    evt = coros.event()
    def long_running_query():
        self.assert_cursor_works(curs)
        curs.execute(LONG_QUERY)
        results.append(1)
        evt.send()

    evt2 = coros.event()
    def short_running_query():
        self.assert_cursor_works(curs2)
        curs2.execute(SHORT_QUERY)
        results.append(2)
        evt2.send()

    api.spawn(long_running_query)
    api.spawn(short_running_query)
    evt.wait()
    evt2.wait()
    results.sort()
    self.assertEqual([1, 2], results)
def start_udp_proxy(self, sim_ip, sim_port):
    """ start a udp proxy, spawning two sockets which serve as the faux region and faux client """

    # if we are already running a proxy for this, simply pass its location back out
    if (sim_ip, sim_port) in self.udp_proxied_hosts:
        logger.debug("UDPProxy already exists for %s:%s at %s:%s" %
                     (sim_ip, sim_port,
                      self.udp_proxied_hosts[(sim_ip, sim_port)][0],
                      self.udp_proxied_hosts[(sim_ip, sim_port)][1]))
        return self.udp_proxied_hosts[(sim_ip, sim_port)][0], self.udp_proxied_hosts[(sim_ip, sim_port)][1]

    # signal handler to capture interrupt signals
    if not self.signal_handler:
        self.signal_handler = signal.signal(signal.SIGINT, self.sigint_handler)

    self.viewer_facing_port_seed += 1
    self.server_facing_port_seed += 1

    udp_proxy = UDPProxy(sim_ip, sim_port, self.viewer_facing_port_seed, self.server_facing_port_seed)

    # populate our trackers
    self.udp_proxied_hosts[(sim_ip, sim_port)] = (udp_proxy.hostname, udp_proxy.proxy_socket.getsockname()[1])
    self.udp_proxies.append(udp_proxy)

    api.spawn(udp_proxy.start_proxy)

    return udp_proxy.hostname, udp_proxy.proxy_socket.getsockname()[1]
def test_connect_ssl(self):
    def accept_once(listenfd):
        try:
            conn, addr = listenfd.accept()
            conn.write(b'hello\r\n')
            greenio.shutdown_safe(conn)
            conn.close()
        finally:
            greenio.shutdown_safe(listenfd)
            listenfd.close()

    server = api.ssl_listener(('0.0.0.0', 0),
                              self.certificate_file,
                              self.private_key_file)
    api.spawn(accept_once, server)

    raw_client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
    client = util.wrap_ssl(raw_client)
    fd = socket._fileobject(client, 'rb', 8192)

    assert fd.readline() == b'hello\r\n'
    try:
        self.assertEqual(b'', fd.read(10))
    except greenio.SSL.ZeroReturnError:
        # if it's a GreenSSL object it'll do this
        pass
    greenio.shutdown_safe(client)
    client.close()

    check_hub()
def test_timeout_and_final_write(self):
    # This test verifies that a write on a socket that we've
    # stopped listening for doesn't result in an incorrect switch
    rpipe, wpipe = os.pipe()
    rfile = os.fdopen(rpipe, "r", 0)
    wrap_rfile = greenio.GreenPipe(rfile)
    wfile = os.fdopen(wpipe, "w", 0)
    wrap_wfile = greenio.GreenPipe(wfile)

    def sender(evt):
        api.sleep(0.02)
        wrap_wfile.write('hi')
        evt.send('sent via event')

    from eventlet import coros
    evt = coros.event()
    api.spawn(sender, evt)
    try:
        # try and get some data off of this pipe
        # but bail before any is sent
        api.exc_after(0.01, api.TimeoutError)
        _c = wrap_rfile.read(1)
        self.fail()
    except api.TimeoutError:
        pass
    result = evt.wait()
    self.assertEquals(result, 'sent via event')
def fan(self, block, input_list):
    queue = coros.queue(0)
    results = []
    exceptional_results = 0
    for index, input_item in enumerate(input_list):
        pool_item = self.get()

        ## Fan out
        api.spawn(self._invoke, block, pool_item, input_item, index, queue)

    ## Fan back in
    for i in range(len(input_list)):
        ## Wait for all guys to send to the queue
        index, value = queue.wait()
        if isinstance(value, Exception):
            exceptional_results += 1
        results.append((index, value))
    results.sort()
    results = [value for index, value in results]
    if exceptional_results:
        if exceptional_results == len(results):
            raise AllFailed(results)
        raise SomeFailed(results)
    return results
def read_client(self, json_socket):
    global running
    while True:
        api.sleep(0)
        data = json_socket.recv()
        if not data:
            # client disconnected, bail out
            if self.current:
                self.current.out_queue.put(["quit"])
            break
        if data[0] == 'connect':
            if not running:
                # initial connect command
                running = GreenletsThread(*data[1:])
                self.current = running
                api.spawn(running.run)
                for cmd in self.deferred_cmds:
                    running.addCmd(cmd)
                self.deferred_cmds = []
                json_socket.send(["hihi"])
            else:
                running.addCmd(["bootstrap"])
        elif self.current:
            # forward command
            self.current.addCmd(data)
        else:
            if data[0] in ["throttle"]:
                self.deferred_cmds.append(data)
    print("exit read client")
    # exit
    self.connected = False
def test_waiting_for_event(self):
    evt = coros.event()
    value = 'some stuff'
    def send_to_event():
        evt.send(value)
    api.spawn(send_to_event)
    self.assertEqual(evt.wait(), value)
def test_pool_smash(self):
    # The premise is that a coroutine in a Pool tries to get a token out
    # of a token pool but times out before getting the token.  We verify
    # that neither pool is adversely affected by this situation.
    from eventlet import pools
    pool = self.klass(min_size=1, max_size=1)
    tp = pools.TokenPool(max_size=1)
    token = tp.get()  # empty pool

    def do_receive(tp):
        api.exc_after(0, RuntimeError())
        try:
            t = tp.get()
            self.fail("Shouldn't have received anything from the pool")
        except RuntimeError:
            return 'timed out'

    # the execute makes the token pool expect that coroutine, but then
    # immediately cuts bait
    e1 = pool.execute(do_receive, tp)
    self.assertEquals(e1.wait(), 'timed out')

    # the pool can get some random item back
    def send_wakeup(tp):
        tp.put('wakeup')
    api.spawn(send_wakeup, tp)

    # now we ask the pool to run something else, which should not
    # be affected by the previous send at all
    def resume():
        return 'resumed'
    e2 = pool.execute(resume)
    self.assertEquals(e2.wait(), 'resumed')

    # we should be able to get out the thing we put in there, too
    self.assertEquals(tp.get(), 'wakeup')
def test_multiple_waiters(self):
    # tests that multiple waiters get their results back
    q = coros.queue()

    def waiter(q, evt):
        evt.send(q.wait())

    sendings = ['1', '2', '3', '4']
    evts = [coros.event() for x in sendings]
    for i, x in enumerate(sendings):
        api.spawn(waiter, q, evts[i])

    api.sleep(0.01)  # get 'em all waiting

    results = set()
    def collect_pending_results():
        for i, e in enumerate(evts):
            timer = api.exc_after(0.001, api.TimeoutError)
            try:
                x = e.wait()
                results.add(x)
                timer.cancel()
            except api.TimeoutError:
                pass  # no pending result at that event
        return len(results)

    q.send(sendings[0])
    self.assertEquals(collect_pending_results(), 1)
    q.send(sendings[1])
    self.assertEquals(collect_pending_results(), 2)
    q.send(sendings[2])
    q.send(sendings[3])
    self.assertEquals(collect_pending_results(), 4)
def _enable_child_region(self, region_params):
    """ enables a child region. eligible simulators are sent in EnableSimulator over the event queue, and routed through the packet handler """

    # if this is the sim we are already connected to, skip it
    if self.region.sim_ip == region_params['IP'] and self.region.sim_port == region_params['Port']:
        #self.region.sendCompleteAgentMovement()
        logger.debug("Not enabling a region we are already connected to: %s" % (str(region_params['IP']) + ":" + str(region_params['Port'])))
        return

    child_region = Region(circuit_code = self.circuit_code,
                          sim_ip = region_params['IP'],
                          sim_port = region_params['Port'],
                          handle = region_params['Handle'],
                          agent = self,
                          settings = self.settings,
                          events_handler = self.events_handler)

    self.child_regions.append(child_region)

    logger.info("Enabling a child region with ip:port of %s" % (str(region_params['IP']) + ":" + str(region_params['Port'])))

    if self.settings.LOG_COROUTINE_SPAWNS:
        logger.info("Spawning a coroutine for connecting to a neighboring region.")

    eventlet.spawn(child_region.connect_child)
def start_caps_proxy(self, seed_cap_url):
    """ start a caps proxy, serving the seed_cap first then all subsequent caps """

    # if we are already running a proxy for this, simply pass its location back out
    if seed_cap_url in self.caps_proxied_hosts:
        logger.debug("CapabilitiesProxy already exists for %s at %s" % (seed_cap_url, self.caps_proxied_hosts[seed_cap_url]))
        # return the cached proxy location for this seed cap
        return self.caps_proxied_hosts[seed_cap_url]

    # signal handler to capture interrupt signals
    if not self.signal_handler:
        self.signal_handler = signal.signal(signal.SIGINT, self.sigint_handler)

    self.caps_proxy_port_seed += 1

    caps_proxy = CapabilitiesProxy(seed_cap_url, '127.0.0.1', self.caps_proxy_port_seed, self)

    # populate our trackers
    self.caps_proxied_hosts[seed_cap_url] = "%s:%s" % ('http://127.0.0.1', self.caps_proxy_port_seed)
    self.caps_proxies.append(caps_proxy)

    api.spawn(caps_proxy.start_caps_proxy_service)

    return '127.0.0.1', self.caps_proxy_port_seed, caps_proxy.proxy_map[seed_cap_url]
def test_reset(self):
    evt = coros.event()

    # calling reset before send should throw
    self.assertRaises(AssertionError, evt.reset)

    value = 'some stuff'
    def send_to_event():
        evt.send(value)
    api.spawn(send_to_event)
    self.assertEqual(evt.wait(), value)

    # now try it again, and we should get the same exact value,
    # and we shouldn't be allowed to resend without resetting
    value2 = 'second stuff'
    self.assertRaises(AssertionError, evt.send, value2)
    self.assertEqual(evt.wait(), value)

    # reset and everything should be happy
    evt.reset()
    def send_to_event2():
        evt.send(value2)
    api.spawn(send_to_event2)
    self.assertEqual(evt.wait(), value2)
def test_senders_that_die(self):
    q = coros.queue()
    def do_send(q):
        q.send('sent')
    api.spawn(do_send, q)
    self.assertEquals(q.wait(), 'sent')
def setup():
    global _threads
    for i in range(0, _nthreads):
        _threads[i] = threading.Thread(target=tworker)
        _threads[i].setDaemon(True)
        _threads[i].start()
    api.spawn(tpool_trampoline)
def test_spawn_is_not_cancelled(self):
    def func():
        spawn(self.lst.pop)
        # exiting immediately, but self.lst.pop must be called
    spawn(func)
    sleep(0.1)
    assert self.lst == [], self.lst
def test_timer_cancelled_upon_greenlet_exit(self):
    def func():
        call_after(0.1, self.lst.pop)
    spawn(func)
    assert self.lst == [1], self.lst
    sleep(0.2)
    assert self.lst == [1], self.lst
def login():
    """ log in to a login endpoint """

    parser = OptionParser(usage="usage: %prog [options] firstname lastname")

    logger = logging.getLogger("client.example")

    parser.add_option("-l", "--loginuri", dest="loginuri", default="https://login.aditi.lindenlab.com/cgi-bin/login.cgi",
                      help="specifies the target loginuri")
    parser.add_option("-r", "--region", dest="region", default=None,
                      help="specifies the region (regionname/x/y/z) to connect to")
    parser.add_option("-q", "--quiet", dest="verbose", default=True, action="store_false",
                      help="disable verbose mode")
    parser.add_option("-p", "--password", dest="password", default=None,
                      help="specifies password instead of being prompted for one")

    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error("Expected arguments: firstname lastname")

    if options.verbose:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)  # seems to be a no op, set it for the logger
        formatter = logging.Formatter('%(asctime)-30s%(name)-30s: %(levelname)-8s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

        # setting the level for the handler above seems to be a no-op
        # it needs to be set for the logger, here the root logger
        # otherwise it is NOTSET(=0) which means to log nothing.
        logging.getLogger('').setLevel(logging.DEBUG)
    else:
        print "Attention: This script will print nothing if you use -q. So it might be boring to use it like that ;-)"

    # example from a pure agent perspective

    # grab a password!
    if options.password:
        password = options.password
    else:
        password = getpass.getpass()

    # let's disable inventory handling for this example
    settings = Settings()
    settings.ENABLE_INVENTORY_MANAGEMENT = False
    settings.ENABLE_EQ_LOGGING = False
    settings.ENABLE_CAPS_LOGGING = False

    # First, initialize the agent
    client = Agent(settings = settings)

    # Now let's log it in
    try:
        api.spawn(client.login, options.loginuri, args[0], args[1], password,
                  start_location = options.region, connect_region = True)
    except LoginError, error:
        sys.exit()
def main():
    parser = optparse.OptionParser(usage='%prog --port=PORT')
    parser.add_option('--port', default='8080', dest='login_port', type='int',
                      help='Port to serve on (default 8080)')
    parser.add_option('--loginuri', default='https://login.aditi.lindenlab.com/cgi-bin/login.cgi',
                      dest='loginuri',
                      help='Specifies the target loginuri to connect proxy to')
    parser.add_option('-v', '--verbose', default=False, dest='verbose',
                      action="store_true",
                      help='enables logging, sets logging level to info, logs names of all packets')
    parser.add_option('-V', '--verboseverbose', default=False, dest='verboseverbose',
                      action="store_true",
                      help='enables logging, sets logging level to debug, logs contents of all packets')

    options, args = parser.parse_args()

    # init logging
    if options.verbose or options.verboseverbose:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)  # seems to be a no op, set it for the logger
        formatter = logging.Formatter('%(asctime)-30s%(name)-30s: %(levelname)-8s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        if options.verbose:
            logging.getLogger('').setLevel(logging.INFO)
        elif options.verboseverbose:
            logging.getLogger('').setLevel(logging.DEBUG)
    else:
        logging.getLogger('').setLevel(logging.WARNING)

    # init the viewer proxy
    viewer_proxy = ViewerProxyApp(options.loginuri, options.login_port)

    # spawn a coroutine which initially handles the login proxy, then lives on
    api.spawn(viewer_proxy.proxy_login)

    # keep running until we intercept a signal
    while viewer_proxy._is_running:
        api.sleep(5)
def login(self, event=None):
    """ Logs in a pyogp agent """
    start_location = self.location.get()
    if start_location == "":
        start_location = None
    api.spawn(self.parent.login_agent, self.first_name.get(),
              self.last_name.get(), self.password.get(), start_location)
    self.parent.init_chat()
    self.frame.pack_forget()
def init():
    cur = login_db.execute("select * from clients")
    for row in cur:
        if row[3] == "online":
            client = "/comet/client/" + row[1]
            _client_queues[client] = ClientMessageQueue(row[0])
            _client_queues[client].last = row[2]
    api.spawn(_check_timeout)
def test_timer_fired(self):
    def func():
        call_after(0.1, self.lst.pop)
        sleep(0.2)
    spawn(func)
    assert self.lst == [1], self.lst
    sleep(0.3)
    assert self.lst == [], self.lst
def login(self, key, loginuri, start_location=None):
    """ spawns a new agent via an eventlet coroutine """

    if self.settings.LOG_COROUTINE_SPAWNS:
        logger.info("Spawning a coroutine for agent login for %s." % (self.agents[key].Name()))

    try:
        eventlet.spawn(self.agents[key].login,
                       loginuri = loginuri,
                       start_location = start_location)
    except LoginError, error:
        logger.error("Skipping agent with failed login: %s due to %s." % (self.agents[key].Name(), error))
def main():
    """ In stand alone mode we will open a port and accept commands. """
    server = eventlet.listen(('0.0.0.0', 11112))
    #pool = eventlet.GreenPool(1000)
    while run_main:
        new_sock, address = server.accept()
        client_handler = ClientHandler()
        api.spawn(client_handler.handle_client, JsonSocket(new_sock))
        api.sleep(0)
def test_send_last(self):
    q = coros.queue()
    def waiter(q):
        timer = api.exc_after(0.1, api.TimeoutError)
        self.assertEquals(q.wait(), 'hi2')
        timer.cancel()

    api.spawn(waiter, q)
    api.sleep(0)
    api.sleep(0)
    q.send('hi2')
def test_send(self):
    event1 = Event()
    event2 = Event()

    spawn(event1.send, 'hello event1')
    eventlet.Timeout(0, ValueError('interrupted'))
    try:
        result = event1.wait()
    except ValueError:
        X = object()
        result = with_timeout(DELAY, event2.wait, timeout_value=X)
        assert result is X, 'Nobody sent anything to event2 yet it received %r' % (result, )
def test_start_and_stop(self):
    self.eq.cap = Capability('EventQueueGet', 'http://127.0.0.1')
    self.assertFalse(self.eq.stopped)
    eventlet.spawn(self.eq.start)
    eventlet.sleep(1)
    #self.eq.stop()  #stop is broken atm
    self.eq.stopped = True
    eventlet.sleep(.1)
    self.assertTrue(self.eq.stopped)
    eventlet.sleep(1)
    self.assertFalse(self.eq._running)
def main():
    try:
        server = api.tcp_listener(('0.0.0.0', 3000))
        print 'Server started!'
        while True:
            conn, addr = server.accept()
            #print 'client %s connected!' % repr(addr)
            writer = conn.makefile('w')
            api.spawn(httpd, writer, conn.makefile('r'))
    except KeyboardInterrupt:
        pass
    return
def test_013_empty_return(self):
    from eventlet import httpc
    def wsgi_app(environ, start_response):
        start_response("200 OK", [])
        return [""]
    certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
    private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
    sock = api.ssl_listener(('', 4202), certificate_file, private_key_file)
    api.spawn(wsgi.server, sock, wsgi_app)
    res = httpc.get("https://localhost:4202/foo")
    self.assertEquals(res, '')
def login(self, loginuri, firstname=None, lastname=None, password=None,
          login_params = None, start_location=None, handler=None,
          connect_region = True):
    """ login to a login endpoint using the Login() class """

    if (re.search('auth.cgi$', loginuri)):
        self.grid_type = 'OGP'
    elif (re.search('login.cgi$', loginuri)):
        self.grid_type = 'Legacy'
    else:
        logger.warning('Unable to identify the loginuri schema. Stopping')
        sys.exit(-1)

    if firstname != None:
        self.firstname = firstname
    if lastname != None:
        self.lastname = lastname
    if password != None:
        self.password = password

    # handle either login params passed in, or, account info
    if login_params == None:
        if (self.firstname == '') or (self.lastname == '') or (self.password == ''):
            raise LoginError('Unable to login an unknown agent.')
        else:
            self._login_params = self._get_login_params(self.firstname, self.lastname, self.password)
    else:
        self._login_params = login_params

    # login and parse the response
    login = Login(settings = self.settings)

    self.login_response = login.login(loginuri, self._login_params, start_location, handler = handler)
    self._parse_login_response()

    # ToDo: what to do with self.login_response['look_at']?

    if self.settings.MULTIPLE_SIM_CONNECTIONS:
        eventlet.spawn(self._monitor_for_new_regions)

    if connect_region:
        self._enable_current_region()

    eventlet.spawn(self.agent_updater)
def test_bounded(self):
    # this was originally semaphore's doctest
    sem = coros.BoundedSemaphore(2, limit=3)
    self.assertEqual(sem.acquire(), True)
    self.assertEqual(sem.acquire(), True)
    api.spawn(sem.release)
    self.assertEqual(sem.acquire(), True)
    self.assertEqual(-3, sem.balance)
    sem.release()
    sem.release()
    sem.release()
    api.spawn(sem.acquire)
    sem.release()
    self.assertEqual(3, sem.balance)
def test_waiting(self):
    def do_wait(q, evt):
        result = q.wait()
        evt.send(result)

    q = coros.queue()
    e1 = coros.event()
    api.spawn(do_wait, q, e1)
    api.sleep(0)
    self.assertEquals(1, waiting(q))
    q.send('hi')
    api.sleep(0)
    self.assertEquals(0, waiting(q))
    self.assertEquals('hi', e1.wait())
    self.assertEquals(0, waiting(q))
def test_012_ssl_server(self):
    from eventlet import httpc
    def wsgi_app(environ, start_response):
        start_response('200 OK', {})
        return [environ['wsgi.input'].read()]
    certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
    private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
    sock = api.ssl_listener(('', 4201), certificate_file, private_key_file)
    api.spawn(wsgi.server, sock, wsgi_app)
    result = httpc.post("https://localhost:4201/foo", "abc")
    self.assertEquals(result, 'abc')
def test_timeout_cancel(self):
    server = eventlet.listen(('0.0.0.0', 0))
    bound_port = server.getsockname()[1]

    done = [False]

    def client_closer(sock):
        while True:
            (conn, addr) = sock.accept()
            conn.close()

    def go():
        desc = eventlet.connect(('127.0.0.1', bound_port))
        try:
            api.trampoline(desc, read=True, timeout=0.1)
        except api.TimeoutError:
            assert False, "Timed out"

        server.close()
        desc.close()
        done[0] = True

    greenthread.spawn_after_local(0, go)

    server_coro = api.spawn(client_closer, server)
    while not done[0]:
        api.sleep(0)
    api.kill(server_coro)

    check_hub()
def test_killing_dormant(self):
    DELAY = 0.1
    state = []

    def test():
        try:
            state.append('start')
            api.sleep(DELAY)
        except:
            state.append('except')
            # catching GreenletExit
            pass
        # when switching to hub, hub makes itself the parent of this greenlet,
        # thus after the function's done, the control will go to the parent
        api.sleep(0)
        state.append('finished')

    g = api.spawn(test)
    api.sleep(DELAY / 2)
    self.assertEqual(state, ['start'])
    api.kill(g)
    # will not get there, unless switching is explicitly scheduled by kill
    self.assertEqual(state, ['start', 'except'])
    api.sleep(DELAY)
    self.assertEqual(state, ['start', 'except', 'finished'])
def test_blocks_on_pool(self):
    waiter = coros.queue(0)
    def greedy():
        self.pool.get()
        self.pool.get()
        self.pool.get()
        self.pool.get()
        # No one should be waiting yet.
        self.assertEquals(self.pool.waiting(), 0)
        # The call to the next get will unschedule this routine.
        self.pool.get()
        # So this send should never be called.
        waiter.send('Failed!')

    killable = api.spawn(greedy)

    # no one should be waiting yet.
    self.assertEquals(self.pool.waiting(), 0)

    ## Wait for greedy
    api.sleep(0)

    ## Greedy should be blocking on the last get
    self.assertEquals(self.pool.waiting(), 1)

    ## Send will never be called, so balance should be 0.
    self.assertFalse(waiter.ready())

    api.kill(killable)
def setUp(self):
    self.logfile = StringIO()
    self.victim = api.spawn(wsgi.server,
                            api.tcp_listener(('0.0.0.0', 31337)),
                            self.site_class(),
                            log=self.logfile,
                            max_size=128)
def test_close_with_makefile(self):
    def accept_close_early(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the socket prior to using the made file
        try:
            conn, addr = listener.accept()
            fd = conn.makeGreenFile()
            conn.close()
            fd.write('hello\n')
            fd.close()
            self.assertRaises(socket.error, fd.write, 'a')
            self.assertRaises(socket.error, conn.send, 'b')
        finally:
            listener.close()

    def accept_close_late(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the made file and then sending a character
        try:
            conn, addr = listener.accept()
            fd = conn.makeGreenFile()
            fd.write('hello')
            fd.close()
            conn.send('\n')
            conn.close()
            self.assertRaises(socket.error, fd.write, 'a')
            self.assertRaises(socket.error, conn.send, 'b')
        finally:
            listener.close()

    def did_it_work(server):
        client = api.connect_tcp(('127.0.0.1', server.getsockname()[1]))
        fd = client.makeGreenFile()
        client.close()
        assert fd.readline() == 'hello\n'
        assert fd.read() == ''
        fd.close()

    server = api.tcp_listener(('0.0.0.0', 0))
    killer = api.spawn(accept_close_early, server)
    did_it_work(server)
    api.kill(killer)

    server = api.tcp_listener(('0.0.0.0', 0))
    killer = api.spawn(accept_close_late, server)
    did_it_work(server)
    api.kill(killer)
def test_send_exc(self):
    log = []
    e = Event()

    def waiter():
        try:
            result = e.wait()
            log.append(('received', result))
        except Exception as ex:
            log.append(('caught', ex))
    spawn(waiter)
    sleep(0)  # let waiter block on e.wait()

    obj = Exception()
    e.send(exc=obj)
    sleep(0)
    sleep(0)
    assert log == [('caught', obj)], log