def server_proc(coro=None):
    # to illustrate 'transform' function of channel, messages are modified
    def txfm_msgs(name, msg_cid):
        msg, client_id = msg_cid
        # assert name == 'channel'
        # e.g., drop shoutings
        if msg.isupper():
            return None
        if msg == 'joined':
            msg += ' :-)'
        elif msg == 'bye':
            msg = 'left :-('
        else:
            msg = 'says: %s' % msg
        return (msg, client_id)

    channel = asyncoro.Channel('chat_channel', transform=txfm_msgs)
    channel.register()
    coro.set_daemon()
    coro.register('chat_server')
    client_id = 1
    while True:
        cmd, who = yield coro.receive()
        # join/quit messages can be sent by clients themselves, but
        # for illustration server sends them instead
        if cmd == 'join':
            channel.send(('joined', client_id))
            who.send(client_id)
            client_id += 1
        elif cmd == 'quit':
            channel.send(('bye', who))
        elif cmd == 'terminate':
            break
    channel.unregister()
    coro.unregister()
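Only the server side is shown above. A matching client would locate the registered 'chat_server' coroutine and the 'chat_channel' channel, join, subscribe, and exchange messages. The sketch below is a minimal illustration of that flow under those assumptions; the name chat_client_proc and the 'hello' message are not from the original example.

import asyncoro.disasyncoro as asyncoro

def chat_client_proc(coro=None):
    # locate the registered server coroutine and the chat channel
    server = yield asyncoro.Coro.locate('chat_server')
    channel = yield asyncoro.Channel.locate('chat_channel')
    # ask the server to announce this client; server replies with client_id
    server.send(('join', coro))
    client_id = yield coro.receive()
    # subscribe to receive (transformed) broadcasts from the channel
    yield channel.subscribe(coro)
    channel.send(('hello', client_id))
    # this client receives its own message back, transformed to 'says: hello'
    msg, sender = yield coro.receive()
    print('%s %s' % (sender, msg))
    server.send(('quit', client_id))
    yield channel.unsubscribe(coro)

asyncoro.Coro(chat_client_proc)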
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # in discoro_client6.py, data is sent to each remote coroutine; here, data
    # is broadcast over channel and remote coroutines subscribe to it
    data_channel = asyncoro.Channel('data_channel')
    # not necessary to register channel in this case, as it is sent to remote
    # coroutines; if they were to 'locate' it, it should be registered
    # data_channel.register()

    trend_coro = asyncoro.Coro(trend_proc)

    rcoro_avg = yield rcoro_scheduler.schedule(rcoro_avg_proc, data_channel,
                                               0.4, trend_coro, 10)
    assert isinstance(rcoro_avg, asyncoro.Coro)
    rcoro_save = yield rcoro_scheduler.schedule(rcoro_save_proc, data_channel)
    assert isinstance(rcoro_save, asyncoro.Coro)

    # make sure both remote coroutines have subscribed to channel ('deliver'
    # should return 2 if they both are)
    assert (yield data_channel.deliver('start', n=2)) == 2

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location, stream_send=True)
    yield asyncoro.AsynCoro.instance().peer(rcoro_save.location, stream_send=True)

    # send 1000 items of random data to remote coroutines
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote coroutines either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away
        data_channel.send(item)
        yield coro.sleep(0.02)

    item = (i, None)
    data_channel.send(item)
    yield rcoro_scheduler.finish(close=True)
    data_channel.close()
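The remote coroutines scheduled above (trend_proc, rcoro_avg_proc, rcoro_save_proc), as well as RemoteCoroScheduler and the random import, are defined elsewhere in the full example. To illustrate the consumer side of the pattern the client relies on, the sketch below (a hypothetical rcoro_consumer_proc, not from the original) subscribes to the broadcast channel, consumes (i, n) items, and stops at the (i, None) sentinel sent after the loop.

def rcoro_consumer_proc(channel, coro=None):
    # subscribe so items broadcast on the channel are delivered to this coroutine
    yield channel.subscribe(coro)
    while True:
        msg = yield coro.receive()
        if msg == 'start':
            # 'start' is delivered by the client to confirm subscriptions
            continue
        i, n = msg
        if n is None:
            # sentinel sent after the data loop: no more items
            break
        # process item (i, n) here, e.g., update a running average or save it
    yield channel.unsubscribe(coro)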
import sys
import asyncoro.disasyncoro as asyncoro

def receiver_proc(coro=None):
    coro.set_daemon()
    # until subscribed, 'deliver' in client will block
    yield coro.sleep(10)
    yield channel.subscribe(coro)
    while True:
        msg = yield coro.receive()
        if msg:
            print('Received "%s" from %s at %s' %
                  (msg['msg'], msg['sender'].name, msg['sender'].location))

asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# scheduler = asyncoro.AsynCoro()
channel = asyncoro.Channel('2clients')
# register channel so client can get a reference to it
channel.register()
asyncoro.Coro(receiver_proc)

if sys.version_info.major > 2:
    read_input = input
else:
    read_input = raw_input
while True:
    try:
        cmd = read_input()
        if cmd.strip().lower() in ('quit', 'exit'):
            break
    except:
        break
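The comment about 'deliver' above refers to a client program (not shown here) that sends messages to the registered '2clients' channel. A minimal sender sketch under that assumption follows; the name sender_proc and the message text are illustrative, but the message is shaped as the dictionary with 'msg' and 'sender' keys that the receiver above expects. 'deliver' with n=1 waits until at least one coroutine has subscribed, so it blocks until receiver_proc finishes its initial sleep and subscribes.

import asyncoro.disasyncoro as asyncoro

def sender_proc(coro=None):
    # locate the channel registered by the program above (on a peer)
    channel = yield asyncoro.Channel.locate('2clients')
    msg = {'msg': 'hello', 'sender': coro}
    # 'deliver' returns the number of recipients the message reached
    recipients = yield channel.deliver(msg, n=1)
    print('message delivered to %s recipient(s)' % recipients)

asyncoro.Coro(sender_proc)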