def handle_client(reader, writer):
    """ Get upstream text from an outside client """
    conn_no = len(conns)
    if conn_no >= cno_limit:
        sys.exit("Client reached connection limit ({})".format(cno_limit))
    conns.append(writer)
    logger.info("new client: cno {}".format(conn_no))
    while True:
        # Explicit yield point so other coroutines get a turn.
        yield from asyncio.sleep(0.00000001)
        try:
            payload = yield from reader.read(upcellmax)
        except OSError:
            # Client socket failed: tell the closer and stop serving it.
            yield from close_queue.put(conn_no)
            return
        packet = m.pack(m.CLIENT_UPSTREAM, cno=conn_no + cno_offset,
                        data=payload)
        logger.debug("client upstream: {} bytes on cno {}".format(
            len(payload), conn_no))
        yield from upstream_queue.put(packet)
        if len(payload) == 0:
            # EOF: the empty cell has been forwarded upstream; now request
            # teardown of this connection.
            yield from close_queue.put(conn_no)
            return
def open_relay(errc, relay_addr, node_id, agroup, aport):
    """ Open a connection to the relay and start processing.

    errc (bool): Whether to do error correction
    relay_addr (host (str), port (int)): The relay's address
    node_id (int): This access point's index
    agroup (str): The multicast address
    aport (int): The multicast port
    """
    loop = asyncio.get_event_loop()
    try:
        relay_reader, relay_writer = \
            yield from asyncio.open_connection(*relay_addr)
    except Exception as e:
        logger.critical("Unable to connect to relay {}: {}"
                        .format(relay_addr, e))
        # Reuse the loop handle fetched above rather than re-fetching it.
        loop.stop()
        return

    # Identify ourselves to the relay as an access point.
    node = m.pack(m.AP_CONNECT, node=node_id)
    relay_writer.write(node)

    try:
        mwriter = MulticastWriter(loop, agroup, aport)
    except OSError as e:
        # Fixed typo in the log message ("creat" -> "create").
        logger.critical("Failed to create multicast writer on {}:{}: {}"
                        .format(agroup, aport, e))
        loop.stop()
        return

    # ensure_future replaces asyncio.async(), which cannot even be parsed on
    # Python 3.7+ (async became a keyword); same function since 3.4.4.
    asyncio.ensure_future(handle_downstream(errc, relay_reader, mwriter))
def handle_downstream(errc, reader, mwriter):
    """ Forward relay traffic to clients, possibly over multicast

    errc (bool): Whether to do error correction
    reader (asyncio.StreamReader): Reader for the relay
    mwriter (net.MulticastWriter): Writer for multicast (None for no
        multicast)
    """
    # Monotonic message id; clients use it to detect gaps and re-request.
    mid = 0
    while True:
        # Explicit yield point so other coroutines get a turn.
        yield from asyncio.sleep(0.00000001)
        try:
            down = yield from m.read_stream(reader)
        except Exception as e:
            # Relay link is gone: nothing more to forward, stop everything.
            logger.critical("Could not read from relay: {}".format(e))
            asyncio.get_event_loop().stop()
            return
        # Wrap the relay's message
        rkind = down.pop('kind')
        rdwn = m.pack(rkind, **down)
        mid += 1
        dbuf = m.pack(m.AP_DOWNSTREAM, mid=mid, data=rdwn)
        if mwriter != None:
            # Broadcast the message
            mwriter.write(dbuf)
            try:
                yield from mwriter.drain()
            except Exception as e:
                logger.error("Failed to write multicast: {}".format(e))
                return
            if not errc:
                continue
            # Store the data for possible resending
            # (old-style asyncio: "yield from lock" acquires the lock)
            yield from down_set_lock
            try:
                # NOTE(review): conns is treated as a dict here (.keys()),
                # but elsewhere in this source it is appended to like a
                # list — confirm its actual type in this module.
                down_set[mid] = ({x for x in conns.keys()}, dbuf)
            finally:
                down_set_lock.release()
        else:
            yield from clients_lock
            # Lock here because the entire key set has to be static for
            # iteration to work
            try:
                for conn in conns:
                    # NOTE(review): this assumes conn.write is a coroutine;
                    # asyncio.StreamWriter.write is NOT — confirm conn's type.
                    yield from conn.write(dbuf)
            finally:
                clients_lock.release()
def main():
    """Trustee entry point: load configs, connect to the relay, and stream
    one ciphertext cell for every slot request the relay sends.
    """
    p = argparse.ArgumentParser(description="Basic DC-net trustee")
    p.add_argument("config_dir")
    p.add_argument("private_data")
    p.add_argument("-v", type=str, help="display more output (default: WARN)",
                   choices=verbosity.keys(), default="WARN", dest="verbose")
    opts = p.parse_args()
    logger.setLevel(verbosity[opts.verbose])

    # XXX error handling
    system_config = config.load(config.SystemConfig,
                                os.path.join(opts.config_dir, "system.json"))
    session_config = config.load(config.SessionConfig,
                                 os.path.join(opts.config_dir, "session.json"))
    private = config.load(config.Private, opts.private_data)
    try:
        node_id = system_config.trustees.ids.index(private.id)
    except ValueError:
        sys.exit("Trustee is not in system config")
    node = m.pack(m.TRUSTEE_CONNECT, node=node_id)

    trustee = dcnet.Trustee(private.secret, system_config.clients.keys)
    trustee.add_nyms(session_config.clients.keys)
    trustee.sync(None, None)

    # connect to the relay
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((system_config.relay.host, system_config.relay.port))
        # sendall() instead of send(): send() may transmit only part of the
        # buffer and silently truncate the handshake.
        conn.sendall(node)
    except OSError as e:
        sys.exit("Could not connect to relay: {}".format(e))

    # stream the ciphertext to the relay
    nsize = m.sizes[m.RELAY_TNEXT]
    tnxts = {}  # cache: raw request bytes -> decoded slot index
    tnxt = {}
    try:
        while True:
            buf = conn.recv(nsize, socket.MSG_WAITALL)
            if len(buf) == 0:
                # Relay closed the connection cleanly.
                break
            try:
                nxt = tnxts[buf]
            except KeyError:
                m.unpack(buf, tnxt)
                nxt = tnxt['nxt']
                tnxts[buf] = nxt
            ciphertext = trustee.produce_ciphertext(nxt)
            # sendall() guarantees the whole cell goes out; a short send()
            # would desynchronize the relay's fixed-size framing.
            conn.sendall(ciphertext)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logger.error("Could not read from relay: {}".format(e))
    conn.close()
def process_acks(writer, timeout):
    """ Periodically ping if nothing has been received from the access point.

    writer: asyncio.StreamWriter to the access point
    timeout: seconds to wait before resending an ack
    """
    while True:
        # Snapshot the global high-water mark, sleep, and see if it moved.
        old = max_mid
        yield from asyncio.sleep(timeout)
        if old == max_mid and old > 0:
            # Nothing new arrived during the window: re-send the last ack so
            # the access point knows where we are.
            writer.write(m.pack(m.CLIENT_ACK, mid=max_mid))
            # warning(): Logger.warn is a deprecated alias of warning.
            logger.warning("Acked {} after timeout".format(max_mid))
def read_ap(reader, writer):
    """ Read messages from a StreamReader and sort them for processing """
    while True:
        yield from asyncio.sleep(0.00000001)
        try:
            down = yield from m.read_stream(reader)
        except Exception as e:
            logger.critical("{} Could not read from access point: {}".format(
                cno_offset, e))
            asyncio.get_event_loop().stop()
            return
        # Unpack into a FRESH dict every iteration.  The original reused a
        # single dict, so messages already queued or parked in extras were
        # mutated by the next unpack (aliasing bug).
        rdwn = {}
        m.unpack(down['data'], rdwn)
        if writer is None:
            # No reliability layer: dispatch directly by message kind.
            yield from downstream_queues[rdwn['kind']].put(rdwn)
            continue
        global max_mid
        # Old-style asyncio: "yield from lock" acquires the lock.
        yield from max_mid_lock
        try:
            if down['mid'] == max_mid + 1:
                # If this was the next expected message, flush extras
                yield from downstream_queues[rdwn['kind']].put(rdwn)
                max_mid = down['mid']
                while max_mid + 1 in extras:
                    max_mid += 1
                    rdwn = extras.pop(max_mid)
                    yield from downstream_queues[rdwn['kind']].put(rdwn)
                if down['mid'] != max_mid:
                    # If we filled in blanks, ack
                    writer.write(m.pack(m.CLIENT_ACK, mid=max_mid))
                    yield from writer.drain()
            else:
                # If this was unexpected/out of order, save it in extras and
                # retransmit the last ack
                extras[down['mid']] = rdwn
                writer.write(m.pack(m.CLIENT_ACK, mid=max_mid))
                yield from writer.drain()
        finally:
            max_mid_lock.release()
def open_relay(client, host, rport, ap_id, node_id):
    """ Open a connection to the relay and start listening for downstream

    client (dcnet.Client): The underlying Client object
    host (str): The IP address of the relay
    rport (int): The port number of the relay
    ap_id (int): The index of the access point this client is connected to
        (-1 if none)
    node_id (int): This client's index
    """
    try:
        relay_reader, relay_writer = \
            yield from asyncio.open_connection(host, rport)
    except Exception as e:
        logger.critical("Unable to connect to relay on {}:{} - {}".format(
            host, rport, e))
        asyncio.get_event_loop().stop()
        return

    # Identify ourselves (and our access point, if any) to the relay.
    node = m.pack(m.CLIENT_CONNECT, node=node_id, ap=ap_id)
    relay_writer.write(node)
    yield from relay_writer.drain()

    # ensure_future replaces asyncio.async(), which cannot be parsed on
    # Python 3.7+ (async became a keyword); same function since 3.4.4.
    asyncio.ensure_future(process_downstream(relay_writer, client,
                                             close_queue))
    if ap_id == -1:
        # No access point: read the relay's downstream ourselves.
        asyncio.ensure_future(read_relay(relay_reader))
def _recv_exact(loop, sock, n):
    """Coroutine: read exactly n bytes from sock, looping over short reads."""
    buf = yield from loop.sock_recv(sock, n)
    while len(buf) < n:
        buf += yield from loop.sock_recv(sock, n - len(buf))
    return buf


def main_loop(relay, tsocks, crsocks, cwsocks, upstreams, downstream,
              scheduler):
    """ Handle ciphertext from trustees/clients, forwarding to clients, and
    decoding messages from clients.

    relay (dcnet.Relay): The underlying dcnet Relay
    tsocks (socket list): List of sockets to trustees
    crsocks (socket list): List of connections to read from clients
    cwsocks (socket list): List of connections to write downstream traffic
        to (a combination of elements of crsocks and sockets to access
        points)
    upstreams (int -> asyncio.Queue()): dict of queues for upstream messages
        on active connections, with connection numbers as keys
    downstream (asyncio.Queue()): queue of messages to send downstream to
        clients
    scheduler (int generator): function to generate the next client index
    """
    loop = asyncio.get_event_loop()
    # branch off two schedulers so trustees can get out ahead
    # XXX tee() can use a lot of memory if one copy gets too far ahead.  This
    # shouldn't be a problem here, but is worth noting
    client_scheduler, trustee_scheduler = itertools.tee(scheduler)
    client_window = 2
    client_inflight = 0
    trustee_window = 10
    trustee_inflight = 0
    up = {'cno': 0}
    # Pre-packed "next slot" request messages, one per client index.
    tmsgs = {i: m.pack(m.RELAY_TNEXT, nxt=i) for i in range(len(crsocks))}
    while True:
        yield from asyncio.sleep(0.00000001)

        # request future cell from trustees
        try:
            nxt = next(trustee_scheduler)
        except StopIteration:
            sys.exit("Scheduler stopped short")
        for tsock in tsocks:
            yield from loop.sock_sendall(tsock, tmsgs[nxt])
        trustee_inflight += 1
        if trustee_inflight < trustee_window:
            continue

        # see if there's anything to send down to clients
        try:
            cno, downbuf = downstream.get_nowait()
        except asyncio.QueueEmpty:
            cno, downbuf = 0, bytearray(0)
        if cno > 0:
            # Plain statement instead of the original conditional expression
            # used only for its logging side effect.
            logger.info("downstream to clients: {} bytes on cno {}".format(
                len(downbuf), cno))

        # send downstream to all clients
        try:
            nxt = next(client_scheduler)
        except StopIteration:
            sys.exit("Scheduler stopped short")
        dbuf = m.pack(m.RELAY_DOWNSTREAM, cno=cno, nxt=nxt, data=downbuf)
        for csock in cwsocks:
            yield from loop.sock_sendall(csock, dbuf)
        client_inflight += 1
        if client_inflight < client_window:
            continue

        # get trustee ciphertexts
        relay.decode_start()
        for tsock in tsocks:
            tslice = yield from _recv_exact(loop, tsock, dcnet.cell_length)
            relay.decode_trustee(tslice)

        # and client upstream ciphertexts
        for csock in crsocks:
            cslice = yield from _recv_exact(loop, csock, dcnet.cell_length)
            relay.decode_client(cslice)

        # decode the actual upstream
        outb = relay.decode_cell()
        m.unpack(outb, up)
        client_inflight -= 1
        trustee_inflight -= 1

        # Possibly set up asynchronous sending upstream
        if up['cno'] == 0:
            continue
        cno = up['cno']
        up['cno'] = 0
        conn = upstreams.get(cno)
        if conn is None:
            # new connection to local socks server
            upstream = asyncio.Queue()
            socks_reader, socks_writer = \
                yield from asyncio.open_connection(*socks_address)
            # ensure_future replaces asyncio.async(), which cannot be parsed
            # on Python 3.7+ (async became a keyword).
            asyncio.ensure_future(socks_relay_down(cno, socks_reader,
                                                   socks_writer, downstream))
            asyncio.ensure_future(socks_relay_up(cno, socks_reader,
                                                 socks_writer, upstream))
            upstreams[cno] = upstream
        yield from upstreams[cno].put(up['data'])