def dataReceived(self, data):
    """
    Add the data to the pending queue
    """

    QE2LOG.debug('Qe2ServerTop.dataReceived(): %d bytes', len(data))

    self.factory.quilt_server.pending_out.enq(data)
def prune_remote_holes(self):
    """
    Remove any holes that were reported by the opposite endpoint
    that have already been filled (according to the ack_recvs
    reported by that endpoint).  These holes might have been filled
    by messages in-flight.
    """

    remaining_holes = set()

    for (hole_start, hole_end) in sorted(self.remote_holes):

        # If the hole_end is after the remote_ack_recv, then
        # this hole has an unfilled component (from what we can
        # tell)
        #
        if hole_end > self.remote_ack_recv:

            # If the start of the hole is prior to self.remote_ack_recv,
            # adjust the hole_start so we don't re-send data that
            # the opposite endpoint already has.
            #
            if hole_start <= self.remote_ack_recv:
                hole_start = self.remote_ack_recv + 1

            remaining_holes.add((hole_start, hole_end))

    self.remote_holes = self.prune_prefix_holes(remaining_holes)

    if self.remote_holes:
        QE2LOG.debug('REMOTE HOLES: %s', str(self.remote_holes))

    return self.remote_holes
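# Illustrative, self-contained sketch (not part of the original code) of the
# pruning rule used by prune_remote_holes() above.  Holes are inclusive
# (start, end) offset ranges and remote_ack_recv is the highest offset the
# opposite endpoint has acknowledged receiving; the values are hypothetical.

def _prune_remote_holes_example():
    remote_ack_recv = 100
    reported_holes = set([(50, 80), (90, 120)])

    remaining = set()
    for (start, end) in sorted(reported_holes):
        if end > remote_ack_recv:
            # Hole still has an unfilled tail; drop the prefix the
            # opposite endpoint already has so we don't re-send it.
            if start <= remote_ack_recv:
                start = remote_ack_recv + 1
            remaining.add((start, end))

    # (50, 80) is dropped entirely; (90, 120) is truncated to (101, 120).
    assert remaining == set([(101, 120)])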
def connectionLost(self, reason=None):
    """
    twisted connectionLost
    """

    QE2LOG.debug('Qe2ChannelWorker: connectionLost')

    self.factory.qe2chan.disconnect()
def dataReceived(self, data):
    """
    We have received data from the app; queue it to be sent
    to the channels.
    """

    QE2LOG.debug('Qe2ClientTop.dataReceived: %d bytes', len(data))

    self.factory.endpoint.pending_out.enq(data)
def nudger():
    man.update()

    QE2LOG.info('running %d starting %d latent %d',
            len(man.running), len(man.starting), len(man.latent))
    QE2LOG.debug(str(man))

    if len(man.running) == 0:
        QE2LOG.warn('no channels running: starting latent channel')
        man.start_one()
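# Illustrative sketch (not part of the original code) of how a nudger like
# the one above might be driven.  Assumes 'man' is the channel manager that
# nudger() closes over; NUDGE_INTERVAL is a hypothetical polling period.

from twisted.internet import reactor
from twisted.internet.task import LoopingCall

NUDGE_INTERVAL = 5.0  # hypothetical: seconds between nudges

def start_nudger():
    # Re-run nudger() periodically so a quilt with no running channels
    # eventually starts one of its latent channels.
    looper = LoopingCall(nudger)
    looper.start(NUDGE_INTERVAL, now=True)
    return looper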
def connectionMade(self):
    QE2LOG.debug('Qe2ServerTop.connectionMade()')

    self.factory.quilt_server.top = self
    self.app_connected = True

    server = None
    transport = None

    server = self.factory.quilt_server
    transport = self.transport
def do_connect_failed(self, reason):
    """
    If we haven't already tried to reconnect too many times,
    wait a brief moment and try again
    """

    if self._connection_attempts < self._max_connection_attempts:
        self._connection_attempts += 1
        QE2LOG.debug('TCP4SocksClientEndpoint.do_connect_failed: ' +
                'trying again')
        self._reactor.callLater(0.5, self.do_connect)
    else:
        QE2LOG.warn('TCP4SocksClientEndpoint.do_connect_failed: giving up')
def request_hole_fill(self):
    """
    If we're stuck on a hole, make a request for it to be filled.

    Returns None if we're not stuck.
    """

    holes = self.pending_in.find_holes()
    # QE2LOG.info('my find_holes(): %s', str(holes))

    if not holes:
        return None

    if len(holes) == 1:
        if self.remote_ack_send > holes[0][0]:
            hole_start = holes[0][0]
            hole_end = self.remote_ack_send
        else:
            return None
    else:
        (hole_start, hole_end) = holes[0]

    """
    QE2LOG.warn('RAW HOLES: %s', str(

    self.prune_local_holes()
    if not self.local_holes:
        return None

    QE2LOG.info('MY HOLES %s', str(sorted(self.local_holes)))

    (hole_start, hole_end) = sorted(self.local_holes)[0]
    # self.local_holes.discard((hole_start, hole_end))
    """

    QE2LOG.debug('request_hole_fill: need fill for [%d %d]',
            hole_start, hole_end)

    msg = Qe2HoleMsg(self.uuid, (hole_start, 1 + hole_end - hole_start), 0, 0,
            ack_send=self.ack_send, ack_recv=self.ack_recv)

    return msg
def __init__(self, quilt_uuid):
    """
    TODO: this app_port is fictitious
    """

    # print 'Qe2Server.__init__()'

    # Error checking: make sure that local_port is sane

    super(Qe2Server, self).__init__()

    app_host = Qe2Params.get('SERVER_APP_HOST')
    app_port = Qe2Params.get('SERVER_APP_PORT')

    # self.uuid is the uuid created for this quilt by the client
    #
    self.uuid = quilt_uuid

    # self.local_uuid is used to answer back to OP_CHAN messages;
    # it identifies this endpoint uniquely, so if the quilt-server
    # crashes and reboots, or some similar disruption, the client
    # will know that the original endpoint has been lost
    #
    self.local_uuid = uuid.uuid4().bytes

    self.transport = None  # Needs to be initialized later

    # We need to create a connection to the server app
    # which will serve as our local top
    #
    self.top = None
    self.top_factory = Factory()
    self.top_factory.protocol = Qe2ServerTop
    self.top_factory.quilt_server = self

    QE2LOG.debug('Qe2ServerTop I AM CONNECTING TO (%s:%d)',
            str(app_host), app_port)
    endpoint = endpoints.TCP4ClientEndpoint(reactor, app_host, app_port,
            timeout=1)
    endpoint.connect(self.top_factory)
def connectionLost(self, reason=None):
    """
    The app has closed its connection with us.

    We need to shut down, but we need to do it in a reasonable way.
    It might make sense to leave the channels open for a few moments,
    and pass some chaff through, or it might make sense to shut them
    down altogether (i.e., if the browser exited, or the user closed
    a page in the browser)
    """

    QE2LOG.debug('Qe2ClientTop.connectionLost')
    QE2LOG.warn('SHUTTING DOWN QUILT')

    self.app_connected = False

    # Try to kill any lingering channels and their subprocesses
    #
    if self.factory.endpoint.chanman:
        self.factory.endpoint.chanman.stop_all()
def fill_remote_hole(self):
    """
    Select the "oldest" hole from remote_holes, and create a
    Qe2DataMsg to fill it.

    Return None if there is no hole to fill.
    """

    self.prune_remote_holes()
    if not self.remote_holes:
        return None

    (hole_start, hole_end) = sorted(self.remote_holes)[0]

    # If the hole is too large to stuff into one message,
    # put as much as we can
    #
    if (1 + hole_end - hole_start) > Qe2Msg.MAX_PAYLOAD_LEN:
        hole_end = hole_start + Qe2Msg.MAX_PAYLOAD_LEN - 1

    QE2LOG.debug('fill_remote_hole: filling [%d %d]', hole_start, hole_end)

    try:
        filler = self.pending_out.peek(hole_start, hole_end + 1)
        if filler:
            # print 'fill_remote_hole: filler is [%s]' % filler
            hole_msg = Qe2DataMsg(self.uuid, filler, 0,
                    send_offset=hole_start)
            return hole_msg
        else:
            QE2LOG.warn('fill_remote_hole: filler [%d, %d] not found',
                    hole_start, hole_end)
            return None
    except BaseException as exc:
        QE2LOG.warn('fill_remote_hole: exception [%s]', str(exc))
        return None
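# Illustrative sketch (not part of the original code) of the capping rule in
# fill_remote_hole() above: an oversized hole is trimmed so the fill fits in
# a single message.  The values below are hypothetical.

def _fill_remote_hole_cap_example():
    MAX_PAYLOAD_LEN = 4096              # stands in for Qe2Msg.MAX_PAYLOAD_LEN
    hole_start, hole_end = 1000, 10000  # a 9001-byte hole, too big for one msg

    if (1 + hole_end - hole_start) > MAX_PAYLOAD_LEN:
        hole_end = hole_start + MAX_PAYLOAD_LEN - 1

    # Exactly MAX_PAYLOAD_LEN bytes remain; the rest of the hole will be
    # reported again and filled by later messages.
    assert (hole_start, hole_end) == (1000, 5095)
    assert (1 + hole_end - hole_start) == MAX_PAYLOAD_LEN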
def prune_local_holes(self):
    """
    Remove any local holes that end before the current offset,
    and truncate any that are no longer completely "missing".

    Then coalesce any holes that start at the same offset.
    """

    (first_offset, first_len) = self.first_missing()

    new_set = set()
    for hole in self.local_holes:
        if hole[0] >= first_offset:
            new_set.add(hole)
        elif (hole[0] < first_offset) and (hole[1] >= first_offset):
            new_set.add((first_offset, hole[1]))

    new_set = self.prune_prefix_holes(new_set)

    if new_set:
        QE2LOG.debug('LOCAL HOLES: %s', str(sorted(new_set)))

    self.local_holes = new_set
def pending_to_app(self):
    """
    If there is pending data that is ready to forward to the top/app,
    then send some of it along (or all of it, if there's not much).

    Note that we can end up in livelock if we just send everything
    through as quickly as possible.  For this reason, we only send
    a fixed amount through at a time.

    TODO: we need a real throttling mechanism.
    """

    if (not self.top) or (not self.top.app_connected):
        QE2LOG.info('Qe2Endpoint.pending_to_app: app not connected')
        return

    ready_len = self.pending_in.data_ready()
    if ready_len > 0:

        # FIXME: this is a mistake if there's too much ready_len
        # because we could blow out the transport if it can't
        # keep up.  What we need to do is bite off what we can
        # and have a looping call that takes the rest.
        #
        data = self.pending_in.dequeue(ready_len)
        QE2LOG.debug('Qe2Channel.pending_to_app: data %d', len(data))

        # Now that we've delivered this data, we won't ask for
        # it again
        #
        self.ack_recv += len(data)

        self.top.transport.write(data)
    else:
        # print 'Qe2Channel.pending_to_app: NO data'

        (hole_base, hole_len) = self.pending_in.first_missing()
        if hole_len != -1:
            QE2LOG.debug('NEED TO FILL HOLE [base %d len %d]',
                    hole_base, hole_len)
            self.add_local_hole(hole_base, hole_base + hole_len - 1)
        else:
            # print 'pending_to_app: not missing anything'
            pass

    QE2LOG.debug('RECV THROUGH %d', self.ack_recv)
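# Illustrative sketch (not part of the original code) of the bounded delivery
# that the FIXME above suggests: dequeue at most MAX_APP_WRITE bytes per pass
# and let the looping call that drives delivery pick up the remainder later.
# MAX_APP_WRITE and the method name are hypothetical; the attributes used
# (top, pending_in, ack_recv) are the ones referenced above.

MAX_APP_WRITE = 64 * 1024  # hypothetical per-pass cap, in bytes

def pending_to_app_bounded(self):
    if (not self.top) or (not self.top.app_connected):
        return

    ready_len = self.pending_in.data_ready()
    if ready_len <= 0:
        return

    take = min(ready_len, MAX_APP_WRITE)
    data = self.pending_in.dequeue(take)

    # Mark this data as delivered so we don't ask for it again.
    self.ack_recv += len(data)
    self.top.transport.write(data)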
def pusher(self):
    """
    Example of a push worker, invoked by a looping call, to push data
    from the pending_out to the channel server.

    This is the heart of a channel, and where the customization for
    each channel takes place.

    This is a very simple pusher method that can be invoked by a
    LoopingCall.  It is fully functional in a basic way, but is
    intended to be overridden in subclasses.

    The basic mechanism is to dequeue any pending data from
    pending_out, and send it in a data message.  If there is no
    pending data, it optionally can send a chaff message, as this
    example does.  A pusher can send any combination of data and
    chaff necessary to shape the traffic as requested.

    The pusher can also service hole requests (requests from the
    opposite endpoint of the channel for missing data), or send hole
    requests if any holes have been detected locally.

    The general form of a simple pusher is:

    1. Return immediately if the channel is not available.
       (If we're not connected yet, then don't try to send any data)

    2. If it is time for this channel to close (because it has been
       open too long, or has sent the desired amount of traffic, or
       some other heuristic for deciding when a channel should be
       closed) then close it and return.

    3. Do some combination of the following:

       a) Dequeue some data from pending_out.  Note the current
          offset.  Make a data message from the queue.

          Note that when you dequeue data from pending_out, you MUST
          also update two fields in the endpoint in order to mark
          that the data is in the process of being sent:

          self.endpoint.next_offset - the next offset to dequeue

          self.endpoint.ack_send - the highest offset of any data
              sent to the opposite endpoint so far

          Right now, ack_send is always next_offset - 1, and it's
          possible that they will be combined in the future unless
          we want to get fancy with out-of-order messages.

          It is typical to avoid asking for more data than can fit
          in one message, but this is not required.  Multiple data
          messages can be composed in a single push.

       b) If there is no data, or not enough data to satisfy the
          traffic shaping requirements (if any), then pad the data
          message with chaff and/or create one or more chaff
          messages.

       c) Create a hole fill request message

       d) Create a data message that satisfies a hole request

       As each message is created, it is added to a list.

    4. Pack each Qe2Msg in the message list, place the result in a
       buffer, and write the buffer to the channel's transport.

       Note that we delay packing the message list until the end so
       we can get the most up-to-date metadata.

       If sending more than one Qe2Msg, it can be important to
       serialize them into a single write to avoid potential
       synchronization issues.  Doing one large write is more
       efficient than several small writes, unless small writes are
       required for the channel traffic shaping (in which case you
       should have dequeued less data in step #3a, or rewritten the
       pusher in order to write less data in each invocation).
    """

    now = time.time()

    if self.last_hole_response_time == 0:
        self.last_hole_response_time = now
    if self.last_hole_request_time == 0:
        self.last_hole_request_time = now

    # We'll always send messages with a data length of msg_data_size,
    # even if we don't have any data (and need to fill it with chaff)
    #
    msg_data_size = self.max_msg_size

    if self.should_close():
        self.disconnect()
        return

    # If we're not connected yet, then we can't push data
    # anywhere.  Quietly return.
    #
    if not self.is_connected():
        # print 'CHANNEL NOT CONNECTED YET'
        return

    endpoint = self.endpoint

    # msgs_to_send is the list of message instances that
    # we want to send.  We don't pack these messages
    # until we're ready to send them.
    #
    msgs_to_send = list()

    data_msg = self.create_data_msg(0, msg_data_size)
    if data_msg:
        msgs_to_send.append(data_msg)

    # Do we want to send more chaff?
    # This is a fine place to add chaff.

    # If there's a remote hole to fill, or we're already in
    # the midst of filling a remote hole, add fill msgs to the list
    #
    # We should prefer to send fill data instead of new data because
    # sending new data when there are already holes can create
    # or extend holes, causing more requests.
    #
    """ This doesn't seem to work yet
    if not self.remote_fill_msgs:
        hole = endpoint.next_remote_hole()
        if hole and (not hole in self.remote_holes_filled):
            self.remote_holes_filled.add(hole)
            self.remote_fill_msgs = endpoint.create_hole_msg_list(
                    hole[0], hole[1], msg_data_size)

    if self.remote_fill_msgs:
        fill_res = self.remote_fill_msgs.pop()
        msgs_to_send.append(fill_res)
    """

    if (now - self.last_hole_response_time) > self.hole_response_interval:
        fill_res = endpoint.fill_remote_hole()
        if fill_res:
            msgs_to_send.append(fill_res)
            self.last_hole_response_time = now
            QE2LOG.info('responding to a hole fill')

    # Are there any holes that make us stuck, waiting to be filled?
    # Ask the remote endpoint to fill them.
    #
    # Heuristic: don't ask for a hole request until at least
    # self.hole_request_interval seconds have elapsed since the
    # last request.  Give the channel a chance to fill the hole
    # before re-requesting
    #
    if (now - self.last_hole_request_time) > self.hole_request_interval:
        fill_req = endpoint.request_hole_fill()
        if fill_req:
            # print 'PENDING SEGMENTS: %s' % str(endpoint.pending_in.segments)
            msgs_to_send.append(fill_req)
            self.last_hole_request_time = now
            QE2LOG.info('requesting a hole fill')

    # Do we want to send a ping?
    # A ping will tell the endpoint our state.
    #
    ping_msg = self.create_ping_msg(msgs_to_send)
    if ping_msg:
        # print '>>>>>>>>>>> SENDING PING'
        msgs_to_send.append(ping_msg)

    # If we created any messages, then pack them.  If the result
    # is a non-empty string of packed data, then write it to the
    # transport.
    #
    packed = ''
    for msg in msgs_to_send:
        msg.ack_send = endpoint.ack_send
        msg.ack_recv = endpoint.ack_recv

        QE2LOG.debug('sending %s', str(msg))
        packed += msg.pack()

    if packed:
        # If we're sending anything, then update self.next_ping_time
        #
        self.next_ping_time = time.time() + self.max_idle_time

        transport = self.get_transport()
        transport.write(packed)

    QE2LOG.debug('ack_send is %d', endpoint.ack_send)
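# Illustrative sketch (not part of the original code): the docstring above
# says pusher() is intended to be overridden in subclasses.  This minimal
# override performs only steps 1, 2, 3a and 4 (data only; no chaff, no hole
# handling).  The class names are hypothetical; the methods and attributes
# used (is_connected, should_close, disconnect, create_data_msg,
# max_msg_size, endpoint, get_transport) are the ones used above.

class Qe2DataOnlyChannel(Qe2Channel):  # hypothetical base/subclass names

    def pusher(self):
        # Step 1: nothing to do until the channel transport is up.
        if not self.is_connected():
            return

        # Step 2: honor the channel's close heuristic.
        if self.should_close():
            self.disconnect()
            return

        # Step 3a: dequeue up to one message worth of pending data.
        data_msg = self.create_data_msg(0, self.max_msg_size)
        if not data_msg:
            return

        # Step 4: stamp current acks, pack, and do a single write.
        data_msg.ack_send = self.endpoint.ack_send
        data_msg.ack_recv = self.endpoint.ack_recv
        self.get_transport().write(data_msg.pack())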
def __init__(self):
    QE2LOG.debug('Qe2ServerTop.__init__()')
def process_msgs(self, msgs):
    """
    Process messages received by a channel
    """

    max_offset = -1
    delivered = False

    old_remote_ack_recv = self.remote_ack_recv

    for msg in msgs:
        QE2LOG.debug('RECEIVE MSG %s', str(msg))

        # update max_offset and remote_ack_recv, no
        # matter what the message type is
        #
        if max_offset < msg.ack_send:
            max_offset = msg.ack_send

        if self.remote_ack_recv < msg.ack_recv:
            self.remote_ack_recv = msg.ack_recv

        if self.remote_ack_send < msg.ack_send:
            self.remote_ack_send = msg.ack_send

        if msg.opcode == Qe2Msg.OP_DATA:
            if len(msg.data) > 0:
                self.deliver(msg.send_offset, msg.data)
                delivered = True
        elif msg.opcode == Qe2Msg.OP_PING:
            pass
        elif msg.opcode == Qe2Msg.OP_HOLE:
            if self.remote_ack_recv > msg.hole[0]:
                QE2LOG.info('UNEXPECTED hole start before remote_ack_recv')
            self.add_remote_hole(msg.hole[0],
                    msg.hole[0] + msg.hole[1] - 1)
        elif msg.opcode == Qe2Msg.OP_CHAN:
            QE2LOG.info('got OP_CHAN message')
            pass
        elif msg.opcode == Qe2Msg.OP_HALT:
            # TODO: this should close down the quilt (not just
            # one particular channel)
            #
            QE2LOG.info('got OP_HALT message')
        elif msg.opcode == Qe2Msg.OP_INIT:
            self.handle_init(msg)
        else:
            QE2LOG.error('UNHANDLED msg %d', msg.opcode)

    # If this sequence of msgs included delivered data, then
    # see if there's anything to push to the app
    #
    if delivered:
        self.pending_to_app()

    if max_offset > self.ack_recv:
        QE2LOG.info('max_offset %d > self.ack_recv %d: something missing',
                max_offset, self.ack_recv)
        self.add_local_hole(self.ack_recv + 1, max_offset)
        QE2LOG.debug('LOCAL holes %s', str(sorted(self.local_holes)))

    # We've gotten acknowledgment from the remote endpoint
    # for additional data.  Throw away our old copy.
    #
    # TODO: it is much more efficient to delete larger chunks
    # periodically rather than small chunks constantly.
    #
    if old_remote_ack_recv < self.remote_ack_recv:
        self.pending_out.discard(self.remote_ack_recv - old_remote_ack_recv)
def dataReceived(self, data):
    QE2LOG.debug('dataReceived marker %d', self.marker)

    self.recv_buf += data

    (msgs, self.recv_buf) = Qe2Msg.recv(self.recv_buf)
    if not msgs:
        return

    # The first message on a channel MUST be an OP_CHAN message
    #
    # NOTE: in a full-featured channel, this message will describe the
    # channel and include parameters that the server should use,
    # but this is not implemented.  We always use the same channel
    # parameters.
    #
    if not self.uuid:
        first_msg = msgs[0]
        if first_msg.opcode != Qe2Msg.OP_CHAN:
            QE2LOG.warn('channel started/resumed without OP_CHAN msg')
            self.transport.loseConnection()

        msgs = msgs[1:]
        self.uuid = first_msg.uuid

        listener = self.factory.server_listener
        if not (self.uuid in listener.uuid2server):
            QE2LOG.info('Creating Qe2Server for uuid %s',
                    str(self.uuid).encode('hex'))
            listener.uuid2server[self.uuid] = Qe2Server(self.uuid)

        self.server = listener.uuid2server[self.uuid]

        # Register ourselves with our server endpoint
        #
        self.server.add_bottom(self)

        # We should get the parameters in the first OP_CHAN message
        #
        # TODO: this doesn't pay attention to the first message, but
        # instead makes assumptions about the parameters
        #
        if not self.channel:
            QE2LOG.debug('CREATING CHANNEL ON SERVER')
            self.channel = Qe2SocketServerChannel(self.transport, self.server)

        # Immediately tell the client the UUID we selected for our
        # local UUID, so it can tell if we crash or the connections
        # are redirected.
        #
        # TODO: it would be better if this message didn't need to
        # happen immediately (because this gives the channel a
        # fingerprint) but instead was based on the channel parameters.
        #
        # Since we're ignoring the channel parameters in this version,
        # we don't have much choice, but a smarter channel would wait.
        #
        resp_msg = Qe2Msg(Qe2Msg.OP_INIT, self.uuid,
                self.server.local_uuid, 0)
        self.transport.write(resp_msg.pack())

    endpoint = self.server

    endpoint.process_msgs(msgs)