def _poll_handshake_file(self):
    """Scan the tunnel directory for a client handshake file.

    A handshake file is a tunnel file with direction 0 and sequence 0.
    If its contents decrypt with this proxy's private key, the session
    key and session ID are stored on self. While no handshake arrives,
    the proxy's descriptor (heartbeat) is re-uploaded when stale.
    """
    with self.xmit_lock:
        for entry in list_files(self.ftps):
            name = entry[0]
            s_id, direction, seq = parse_tunnel_filename(name)
            # only consider client->proxy handshake files: direction 0, seq 0
            if s_id is None or direction != 0 or seq != 0:
                continue
            blob = get_file_contents(self.ftps, name)
            try:
                # only a message sealed to OUR public key will decrypt;
                # anything else raises and we keep scanning
                self.session_key = SealedBox(self.private_key).decrypt(blob)
            except Exception:
                # sealed for a different proxy (or garbage) -- not ours
                continue
            self.session_id = s_id
            return
        # no handshake found; refresh our descriptor if the heartbeat is stale
        if self.heart + PROXY_HEARTBEAT_TIMEOUT < time.time():
            upload_binary_data(self.ftps, self.my_proxy_id,
                               generate_proxy_descriptor(self.public_key))
            self.heart = time.time()
def _poll_ack_file(self):
    """Check whether the proxy has written an ACK file for this session.

    Returns:
        True if any file in the tunnel directory is recognized by
        self._is_ack_file, False otherwise.
    """
    with self.xmit_lock:
        conn = self.ftps
        return any(self._is_ack_file(conn, entry[0])
                   for entry in list_files(conn))
def client_req_handler(cs, addr, d):
    """Serve one connected client's command loop until it disconnects.

    Args:
        cs: connected socket for this client.
        addr: client address tuple (used only for logging).
        d: initial working directory for file operations.

    Commands (space-separated): 'upload <name> <size>', 'download <name>',
    'cd <dir>', 'list', 'close'. Loop exits on 'close' or a connection
    error; the socket is always closed before returning.
    """
    cur_dir = d
    client_address = str(addr)
    print("Client connected from: %s" % client_address)
    bufsize = 1024
    while True:
        try:
            # this recv gets a string command; it arrives as bytes and
            # must be decoded before parsing
            payload = su.get_message(cs, bufsize)
            payload = payload.decode('utf-8')
        except (EOFError, ConnectionError, UnicodeDecodeError):
            break
        contents = re.split(' ', payload)
        print(client_address, ' request: ', contents)
        if len(contents) > 1:  # either cd, upload, download
            if contents[0] == 'upload':
                # BUG FIX: socket.recv may return fewer bytes than asked
                # for, so loop until the announced size has arrived
                # (or the peer closes) instead of a single recv call.
                expected = int(contents[2])
                chunks = []
                received = 0
                while received < expected:
                    chunk = cs.recv(expected - received)
                    if not chunk:
                        break  # peer closed mid-transfer; save what we have
                    chunks.append(chunk)
                    received += len(chunk)
                su.save_file(cur_dir, contents[1], b''.join(chunks))
            elif contents[0] == 'download':
                abs_file_name = os.path.join(cur_dir, contents[1])
                if os.path.exists(abs_file_name):
                    file_size = str(os.path.getsize(abs_file_name))
                    file_to_transfer = su.read_file(abs_file_name)
                    # announce name and size so the client knows how much to read
                    message = 'download ' + contents[1] + ' ' + file_size
                    print('\t', message)
                    su.send_msg(cs, message)
                    su.send_file(cs, file_to_transfer)
                else:
                    su.send_msg(cs, 'FileNotFound')
            elif contents[0] == 'cd':
                try:
                    # NOTE(review): os.chdir changes the cwd of the whole
                    # process, not just this client's session -- racy if
                    # several clients are handled concurrently. Kept for
                    # compatibility; confirm single-client assumption.
                    os.chdir(contents[1])
                    cur_dir = os.getcwd()
                    su.send_msg(cs, 'Directory changed.')
                except FileNotFoundError:
                    su.send_msg(cs, 'Directory not found')
        else:  # single-word commands: list and close
            if contents[0] == 'list':
                lst = str(su.list_files(cur_dir))
                su.send_msg(cs, lst)
            elif contents[0] == 'close':
                break
    cs.close()
def recv(self):
    """Receive data from the data channel and push it to the receive queue

    Runs until self._am_i_alive() goes false. Each pass: under xmit_lock,
    look for the next expected inbound tunnel file, read and delete it;
    outside the lock, decrypt the payload and enqueue it on recvbuf.
    If nothing arrived and the proxy heartbeat looks expired, re-read the
    proxy descriptor; a still-stale (or missing) heartbeat raises, which
    kills this channel via _kill_self and ends the thread.
    """
    while self._am_i_alive():
        try:
            # check if there is a valid tunnel file to read
            data = None
            with self.xmit_lock:
                ftps = self.ftps
                # check files
                files = list_files(ftps)
                for f in files:
                    if self._is_next_inbound_packet(f[0]):
                        # get file contents; bump expected sequence first
                        self.incoming_seq += 1
                        data = get_file_contents(ftps, f[0])
                        if data is None:
                            raise ValueError(
                                "get_file_contents returned None. Check debugging output."
                            )
                        # delete file so it is not re-consumed
                        delete_file(ftps, f[0])
                        break
            # write received data to recvbuf
            if data is not None:
                # decrypt data with session key
                d = self._decrypt_data(data)
                self.recvbuf.put(d)
            elif time.time() > self.heartbeat + PROXY_HEARTBEAT_TIMEOUT:
                # channel has timed out, check if proxy has updated its heartbeat
                # NOTE(review): assumes get_file_contents/parse_proxy_descriptor
                # tolerate a missing descriptor file -- confirm.
                with self.xmit_lock:
                    key, self.heartbeat = parse_proxy_descriptor(
                        get_file_contents(self.ftps, str(self.proxy_id)))
                if self.heartbeat is not None:
                    if time.time() > self.heartbeat + PROXY_HEARTBEAT_TIMEOUT:
                        raise ValueError("channel has timed out")
                else:
                    # descriptor gone or unparsable: treat as dead channel
                    raise ValueError("channel has timed out")
        except Exception as e:
            # any failure tears down the channel; _am_i_alive() then exits the loop
            self._kill_self()
            logging.error("recv thread exception: {}".format(e))
    logging.info("recv thread exit")
def recv(self):
    """Receive data from the data channel and push it to the receive queue

    Proxy-side receive loop. Under xmit_lock, pull the next expected
    inbound tunnel file (then delete it); outside the lock, decrypt and
    enqueue the payload and stamp last_recv. When idle, re-upload this
    proxy's descriptor once the heartbeat interval has elapsed. If no
    client data arrives within CLIENT_TIMEOUT the loop raises, which
    kills the channel via _kill_self and ends the thread.
    """
    while self._am_i_alive():
        try:
            # check if there is a valid tunnel file to read
            data = None
            with self.xmit_lock:
                files = list_files(self.ftps)
                for f in files:
                    if self._is_next_inbound_packet(f[0]):
                        # get file contents; bump expected sequence first
                        self.incoming_seq += 1
                        data = get_file_contents(self.ftps, f[0])
                        if data is None:
                            raise ValueError("get_file_contents returned None. Check debugging output.")
                        # delete file so it is not re-consumed
                        delete_file(self.ftps, f[0])
                        break
            # write received data to recvbuf
            if data is not None:
                d = self._decrypt_data(data)
                sys.stdout.flush()
                self.recvbuf.put(d)
                self.last_recv = time.time()
            # update heartbeat if needed
            elif self.heart + PROXY_HEARTBEAT_TIMEOUT < time.time():
                with self.xmit_lock:
                    upload_binary_data(self.ftps, self.my_proxy_id, generate_proxy_descriptor(self.public_key))
                    self.heart = time.time()
            # check if connection has timed out
            if self.last_recv + CLIENT_TIMEOUT < time.time():
                raise ValueError("client has timed out")
        except Exception as e:
            # any failure tears down the channel; _am_i_alive() then exits the loop
            self._kill_self()
            logging.error("recv thread exception: {}".format(e))
    logging.info("recv thread exit")
def __init__(self, server_addr, username, password, use_plain, tunnel_dir, pub_file=None, priv_file=None):
    """Set up the proxy end of the tunnel.

    Logs into the FTP server, publishes a proxy descriptor (negative
    filename, heartbeat-stamped), then blocks polling for a client
    handshake. Once a session key arrives, writes the <session>_1_0
    ACK file and starts the worker threads via self.start().

    Args:
        server_addr: FTP server address.
        username / password: FTP credentials.
        use_plain: flag stored for the FTP login helper.
        tunnel_dir: directory on the server used for tunnel files.
        pub_file / priv_file: optional key files -- currently unsupported;
            supplying both raises NotImplementedError, supplying only one
            exits with an error.
    """
    # basic set up
    print("Proxy started")
    sys.stdout.flush()
    super().__init__()
    self.addr = server_addr
    self.username = username
    self.password = password
    self.use_plain = use_plain
    self.tunnel_dir = tunnel_dir
    self.heart = -1            # timestamp of last descriptor upload
    self.my_proxy_id = None    # negative integer filename of our descriptor
    self.session_key = None
    self.session_box = None
    self.public_key = None
    self.private_key = None
    self.session_id = None
    self.last_recv = -1
    self.outgoing_seq = 0
    self.incoming_seq = 0
    self.alive = True
    self.alive_lock = threading.Lock()
    self.ftp_account_lock = threading.Lock()  # avoid simultaneous logins with same credentials
    self.xmit_lock = threading.Lock()  # avoid simultaneous logins with same credentials
    # process keyfile
    if pub_file is not None:
        if priv_file is not None:
            # TODO
            raise NotImplementedError("specifying public/private key used by proxy is not yet supported")
        else:
            logging.error("need both public and private key files")
            sys.exit(-1)
    elif priv_file is not None:
        logging.error("need both public and private key files")
        sys.exit(-1)
    # generate public/private key pair if no keys are specified
    if self.public_key is None and self.private_key is None:
        self.private_key = PrivateKey.generate()
        self.public_key = self.private_key.public_key
    # check for proxy descriptor files
    self.ftps = self._login_ftp()
    files = list_files(self.ftps)
    _min = 0  # all proxy descriptor filenames < 0
    for f in files:
        if is_proxy_descriptor(f[0]) and int(f[0]) < _min:
            _min = int(f[0])
    # create proxy descriptor file for this proxy (next unused negative id)
    self.my_proxy_id = _min - 1
    upload_binary_data(self.ftps, self.my_proxy_id, generate_proxy_descriptor(self.public_key))
    self.heart = time.time()
    # only have to support a single client per proxy, so just poll here until a client connects
    print("Awaiting a client connection...")
    sys.stdout.flush()
    while self.session_id is None:
        time.sleep(PROXY_POLL_FREQ)
        self._poll_handshake_file()
    # create ACK file
    self.incoming_seq += 1
    upload_binary_data(self.ftps, str(self.session_id) + "_1_0", self._encrypt_data(b"ACK"))
    self.outgoing_seq += 1
    ### Tunnel is set up, wait for client transmissions ###
    print("Tunnel setup (session ID: {})".format(self.session_id))
    self.last_recv = time.time()
    sys.stdout.flush()
    self.start()
def __init__(self, server_addr, username, password, use_plain, tunnel_dir):
    """Set up tunnel when this class is instantiated.

    Logs into the FTP server, picks the first proxy with a live
    heartbeat, determines the next free session ID, uploads a
    handshake file sealed to the proxy's public key, waits for the
    proxy's ACK, cleans up the handshake files, and starts the worker
    threads via self.start(). Exits the process if no proxy is
    available or the proxy never ACKs.

    Args:
        server_addr: FTP server address.
        username / password: FTP credentials.
        use_plain: flag stored for the FTP login helper.
        tunnel_dir: directory on the server used for tunnel files.
    """
    # basic set up
    print("Client started")
    sys.stdout.flush()
    super().__init__()
    self.alive = True
    self.alive_lock = threading.Lock()
    self.ftp_account_lock = threading.Lock()  # avoid simultaneous logins with same credentials
    self.xmit_lock = threading.Lock()  # avoid simultaneous logins with same credentials
    self.addr = server_addr
    self.username = username
    self.password = password
    self.tunnel_dir = tunnel_dir
    self.proxy_id = None
    self.proxy_key = None
    self.session_key = None
    self.session_id = None
    self.outgoing_seq = 0
    self.incoming_seq = 0
    self.heartbeat = -1
    self.use_plain = use_plain
    # check for proxy descriptor files, determine max session ID
    self.ftps = self._login_ftp()
    print("Searching for proxies...")
    sys.stdout.flush()
    files = list_files(self.ftps)
    max_id = -1
    for f in files:
        # parse session ID from tunnel files (used to determine session ID for this client)
        s_id, _, _seq = parse_tunnel_filename(f[0])
        if s_id is not None and s_id > max_id:
            max_id = s_id
        if is_proxy_descriptor(f[0]):
            # parse proxy descriptor file to see if its heartbeat is valid
            key, heartbeat = parse_proxy_descriptor(
                get_file_contents(self.ftps, f[0]))
            if key is None and heartbeat is None:
                continue
            # validate heartbeat
            if heartbeat + PROXY_HEARTBEAT_TIMEOUT < time.time():
                continue
            # accept the first proxy we find (not great for load balancing, but it works for now)
            # BUG FIX: guard so later descriptors don't overwrite the first
            # accepted proxy (a `break` would be wrong -- the loop must keep
            # scanning remaining files to compute max_id).
            if self.proxy_id is None:
                self.proxy_id = int(f[0])
                self.proxy_key = key
    # error if no proxies are available
    if self.proxy_id is None:
        logging.error("no valid proxies in tunnel directory")
        sys.exit(-1)
    # start tunnel handshake by writing file with session key encrypted with proxy public key
    print("Suitable proxy found")
    sys.stdout.flush()
    self.session_id = str(max_id + 1)
    self._generate_session_key()
    data = self._generate_handshake_file()
    filename = self.session_id + '_0_0'
    upload_binary_data(self.ftps, filename, data)
    self.outgoing_seq += 1
    # wait for proxy's ACK message
    start = time.time()
    print("Waiting for proxy ACK...")
    sys.stdout.flush()
    while not self._poll_ack_file():
        # check if proxy has timed out
        if time.time() > PROXY_HEARTBEAT_TIMEOUT + start:
            logging.error("proxy timed out before completing handshake")
            sys.exit(-1)
        time.sleep(CLIENT_POLL_FREQ)
    self.incoming_seq += 1
    # client is the last component waiting for an ACK in the handshake
    # it should perform clean up of channel init files (with the exception of the proxy descriptor)
    with self.xmit_lock:
        ftps = self.ftps
        delete_file(ftps, filename)  # <session ID>_0_0
        delete_file(ftps, self.session_id + "_1_0")  # <session ID>_1_0
    ### Tunnel is set up ###
    print("Tunnel set up (proxy id: {})".format(self.proxy_id))
    sys.stdout.flush()
    self.heartbeat = time.time()
    self.start()