def process_group_management_message(ts:                'datetime',
                                     packet:            bytes,
                                     header:            bytes,
                                     messages_to_flask: 'Queue[Tuple[Union[bytes, str], bytes]]'
                                     ) -> None:
    """Parse group management message and queue a notification to each affected contact.

    :param ts:                Timestamp of the received packet
    :param packet:            Group management payload (group ID + serialized public keys)
    :param header:            Datagram header that selects the management action
    :param messages_to_flask: Queue of messages waiting to be served to contacts
    """
    header_str       = header.decode()
    group_id, packet = separate_header(packet, GROUP_ID_LENGTH)

    if header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER]:
        pub_keys = split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        for onion_pub_key in pub_keys:
            # Each recipient receives the list of the *other* members.
            others     = [k for k in pub_keys if k != onion_pub_key]
            packet_str = header_str + b85encode(group_id + b''.join(others))
            queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)

    elif header in [GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER]:
        first_list_len_b, packet = separate_header(packet, ENCODED_INTEGER_LENGTH)
        first_list_length        = bytes_to_int(first_list_len_b)
        pub_keys                 = split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        # The serialized key list is split in two parts: the first part holds the
        # existing/remaining members, the second the added/removed members.
        before_adding = remaining = pub_keys[:first_list_length]
        new_in_group  = removable = pub_keys[first_list_length:]

        if header == GROUP_MSG_MEMBER_ADD_HEADER:
            # Notify existing members about who was added.
            packet_str = GROUP_MSG_MEMBER_ADD_HEADER.decode() + b85encode(group_id + b''.join(new_in_group))
            for onion_pub_key in before_adding:
                queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)

            # Send each newly added member a group invitation that lists everyone else.
            for onion_pub_key in new_in_group:
                other_new  = [k for k in new_in_group if k != onion_pub_key]
                packet_str = (GROUP_MSG_INVITE_HEADER.decode()
                              + b85encode(group_id + b''.join(other_new + before_adding)))
                queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)

        elif header == GROUP_MSG_MEMBER_REM_HEADER:
            # Notify the remaining members about who was removed.
            packet_str = header_str + b85encode(group_id + b''.join(removable))
            for onion_pub_key in remaining:
                queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)

    elif header == GROUP_MSG_EXIT_GROUP_HEADER:
        pub_keys   = split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        packet_str = header_str + b85encode(group_id)
        for onion_pub_key in pub_keys:
            queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)
def client_scheduler(queues:                'QueueDict',
                     gateway:               'Gateway',
                     url_token_private_key: 'X448PrivateKey',
                     unit_test:             bool = False
                     ) -> None:
    """Manage `client` processes.

    :param queues:                Dictionary of multiprocessing queues
    :param gateway:               Gateway object passed to spawned client processes
    :param url_token_private_key: X448 private key passed to spawned client processes
    :param unit_test:             When True, breaks the loop for testing
    """
    proc_dict = dict()  # type: Dict[bytes, Process]

    # Wait for Tor port from `onion_service` process.
    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while queues[TOR_DATA_QUEUE].qsize() == 0:
                time.sleep(0.1)
            tor_port, onion_addr_user = queues[TOR_DATA_QUEUE].get()
            break

    # Spawn/terminate a client process per contact as add/remove commands arrive.
    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while queues[CONTACT_MGMT_QUEUE].qsize() == 0:
                time.sleep(0.1)

            command, ser_public_keys, is_existing_contact = queues[CONTACT_MGMT_QUEUE].get()

            onion_pub_keys = split_byte_string(ser_public_keys, ONION_SERVICE_PUBLIC_KEY_LENGTH)

            if command == RP_ADD_CONTACT_HEADER:
                add_new_client_process(gateway, is_existing_contact, onion_addr_user,
                                       onion_pub_keys, proc_dict, queues, tor_port,
                                       url_token_private_key)

            elif command == RP_REMOVE_CONTACT_HEADER:
                remove_client_process(onion_pub_keys, proc_dict)

            if unit_test and queues[UNIT_TEST_QUEUE].qsize() != 0:
                break
def add_onion_data(command: bytes, queues: 'QueueDict') -> None:
    """Add Onion Service data.

    Separate onion service private key and public keys for
    pending/existing contacts and add them as contacts. The
    ONION_KEY_QUEUE is read by relay.onion.onion_service().
    """
    (os_private_key,
     confirmation_code,
     allow_req_byte,
     no_pending_bytes,
     ser_pub_keys) = separate_headers(command, [ONION_SERVICE_PRIVATE_KEY_LENGTH,
                                                CONFIRM_CODE_LENGTH,
                                                ENCODED_BOOLEAN_LENGTH,
                                                ENCODED_INTEGER_LENGTH])

    pending_count = bytes_to_int(no_pending_bytes)
    pub_key_list  = split_byte_string(ser_pub_keys, ONION_SERVICE_PUBLIC_KEY_LENGTH)

    # Keys are serialized pending-first: the count marks where existing contacts begin.
    for index, onion_pub_key in enumerate(pub_key_list):
        add_contact(onion_pub_key, index >= pending_count, queues)

    manage_contact_req(allow_req_byte, queues, notify=False)

    queues[ONION_KEY_QUEUE].put((os_private_key, confirmation_code))
def queue_command(payload: bytes, settings: 'Settings', c_queue: 'Queue') -> None:
    """Split command into assembly packets and queue them.

    :param payload:  Command's plaintext string.
    :param settings: Settings object
    :param c_queue:  Multiprocessing queue for commands
    :return:         None
    """
    compressed = zlib.compress(payload, level=9)

    if len(compressed) < 255:
        # Short command: a single padded packet suffices.
        packets = [C_S_HEADER + byte_padding(compressed)]
    else:
        # Long command: append integrity hash, pad, and split into chunks.
        compressed += hash_chain(compressed)
        chunks      = split_byte_string(byte_padding(compressed), item_len=255)
        packets     = [C_L_HEADER + chunks[0]]
        packets    += [C_A_HEADER + c for c in chunks[1:-1]]
        packets    += [C_E_HEADER + chunks[-1]]

    for packet in packets:
        c_queue.put(packet if settings.session_trickle else (packet, settings))
def mock_entry_preprocessor(message: str,
                            header:  bytes = b'',
                            group:   bool  = False) -> List[bytes]:
    """Mimic message entry: build header, compress, and split into assembly packets."""
    if not header:
        if group:
            ms_timestamp = double_to_bytes(time.time() * 1000)
            header       = GROUP_MESSAGE_HEADER + ms_timestamp + 'testgroup'.encode() + US_BYTE
        else:
            header = PRIVATE_MESSAGE_HEADER

    compressed = zlib.compress(header + message.encode(), level=9)

    if len(compressed) < 255:
        return [M_S_HEADER + byte_padding(compressed)]

    # Long message: inner encryption layer with an all-zero mock key.
    msg_key    = bytes(32)
    ciphertext = encrypt_and_sign(compressed, msg_key) + msg_key
    chunks     = split_byte_string(byte_padding(ciphertext), item_len=255)

    return ([M_L_HEADER + chunks[0]]
            + [M_A_HEADER + c for c in chunks[1:-1]]
            + [M_E_HEADER + chunks[-1]])
def _load_keys(self) -> None:
    """Load KeySets from the encrypted database.

    This function first reads and decrypts the database content. It
    then splits the plaintext into a list of 176-byte blocks. Each
    block contains the serialized data of one KeySet. Next, the
    function will remove from the list all dummy KeySets (that start
    with the `dummy_id` byte string). The function will then populate
    the `self.keysets` list with KeySet objects, the data of which is
    sliced and decoded from the dummy-free blocks.
    """
    pt_bytes  = self.database.load_database()
    blocks    = split_byte_string(pt_bytes, item_len=KEYSET_LENGTH)
    df_blocks = [b for b in blocks if not b.startswith(self.dummy_id)]

    for block in df_blocks:
        # A wrong-sized block indicates corrupted database content.
        if len(block) != KEYSET_LENGTH:
            raise CriticalError("Invalid data in key database.")

        onion_pub_key, tx_mk, rx_mk, tx_hk, rx_hk, tx_harac_bytes, rx_harac_bytes \
            = separate_headers(block, [ONION_SERVICE_PUBLIC_KEY_LENGTH]
                                      + 4*[SYMMETRIC_KEY_LENGTH]
                                      + [HARAC_LENGTH])

        self.keysets.append(KeySet(onion_pub_key=onion_pub_key,
                                   tx_mk=tx_mk,
                                   rx_mk=rx_mk,
                                   tx_hk=tx_hk,
                                   rx_hk=rx_hk,
                                   tx_harac=bytes_to_int(tx_harac_bytes),
                                   rx_harac=bytes_to_int(rx_harac_bytes),
                                   store_keys=self.store_keys))
def split_to_assembly_packets(payload: bytes, p_type: str) -> List[bytes]:
    """Split payload to assembly packets.

    Messages and commands are compressed here to reduce transmission
    time; files arriving during traffic masking were compressed earlier.

    A payload that fits one padded packet is sent as a single "short"
    packet. Otherwise an inner layer is applied per payload type before
    padding and splitting: messages get XChaCha20-Poly1305 encryption
    (sender-side control over partially transmitted data), commands get
    an appended BLAKE2b hash for integrity, and files get an 8-byte
    packet counter prepended to the first packet so both sides know how
    many packets the transmission requires. (Files already carry an
    inner encryption layer added before this function.)

    Each assembly packet is prepended with a header that tells the
    Receiver Program whether it is a short (single packet) transmission,
    or the start, continuation, or last packet of a multi-packet one.
    """
    s_header, l_header, a_header, e_header = {
        MESSAGE: (M_S_HEADER, M_L_HEADER, M_A_HEADER, M_E_HEADER),
        FILE:    (F_S_HEADER, F_L_HEADER, F_A_HEADER, F_E_HEADER),
        COMMAND: (C_S_HEADER, C_L_HEADER, C_A_HEADER, C_E_HEADER)}[p_type]

    if p_type in [MESSAGE, COMMAND]:
        payload = zlib.compress(payload, level=COMPRESSION_LEVEL)

    if len(payload) < PADDING_LENGTH:
        return [s_header + byte_padding(payload)]

    if p_type == MESSAGE:
        msg_key = csprng()
        payload = encrypt_and_sign(payload, msg_key) + msg_key
    elif p_type == FILE:
        payload = bytes(FILE_PACKET_CTR_LENGTH) + payload
    elif p_type == COMMAND:
        payload += blake2b(payload)

    p_list = split_byte_string(byte_padding(payload), item_len=PADDING_LENGTH)

    if p_type == FILE:
        # Replace the placeholder counter with the actual packet count.
        p_list[0] = int_to_bytes(len(p_list)) + p_list[0][FILE_PACKET_CTR_LENGTH:]

    return ([l_header + p_list[0]]
            + [a_header + p for p in p_list[1:-1]]
            + [e_header + p_list[-1]])
def src_incoming(queues:   'QueueDict',
                 gateway:  'Gateway',
                 unittest: bool = False
                 ) -> None:
    """\
    Redirect messages received from Source Computer to appropriate queues.

    :param queues:   Dictionary of multiprocessing queues
    :param gateway:  Gateway object used to detect transmission errors
    :param unittest: When True, breaks the loop for testing
    """
    packets_from_sc   = queues[GATEWAY_QUEUE]
    packets_to_dc     = queues[DST_MESSAGE_QUEUE]
    commands_to_dc    = queues[DST_COMMAND_QUEUE]
    messages_to_flask = queues[M_TO_FLASK_QUEUE]
    files_to_flask    = queues[F_TO_FLASK_QUEUE]
    commands_to_relay = queues[SRC_TO_RELAY_QUEUE]

    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while packets_from_sc.qsize() == 0:
                time.sleep(0.01)
            ts, packet = packets_from_sc.get()  # type: datetime, bytes
            # Timestamp serialized as integer YYYYMMDDHHMMSScc (`[:-4]` keeps
            # two digits of %f, i.e. 1/100th second accuracy).
            ts_bytes = int_to_bytes(int(ts.strftime('%Y%m%d%H%M%S%f')[:-4]))

            try:
                packet = gateway.detect_errors(packet)
            except FunctionReturn:
                continue  # Drop packets with transmission errors.

            header, packet = separate_header(packet, DATAGRAM_HEADER_LENGTH)

            if header == UNENCRYPTED_DATAGRAM_HEADER:
                commands_to_relay.put(packet)

            elif header in [COMMAND_DATAGRAM_HEADER, LOCAL_KEY_DATAGRAM_HEADER]:
                commands_to_dc.put(header + ts_bytes + packet)
                p_type = 'Command ' if header == COMMAND_DATAGRAM_HEADER else 'Local key'
                rp_print(f"{p_type} to local Receiver", ts)

            elif header in [MESSAGE_DATAGRAM_HEADER, PUBLIC_KEY_DATAGRAM_HEADER]:
                onion_pub_key, payload = separate_header(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
                packet_str             = header.decode() + b85encode(payload)
                queue_to_flask(packet_str, onion_pub_key, messages_to_flask, ts, header)
                if header == MESSAGE_DATAGRAM_HEADER:
                    # Also forward the user's own message to the local Receiver.
                    packets_to_dc.put(header + ts_bytes + onion_pub_key
                                      + ORIGIN_USER_HEADER + payload)

            elif header == FILE_DATAGRAM_HEADER:
                no_contacts_b, payload = separate_header(packet, ENCODED_INTEGER_LENGTH)
                no_contacts            = bytes_to_int(no_contacts_b)
                ser_accounts, file_ct  = separate_header(payload, no_contacts * ONION_SERVICE_PUBLIC_KEY_LENGTH)
                pub_keys               = split_byte_string(ser_accounts, item_len=ONION_SERVICE_PUBLIC_KEY_LENGTH)
                # Queue the file ciphertext once per intended recipient.
                for onion_pub_key in pub_keys:
                    queue_to_flask(file_ct, onion_pub_key, files_to_flask, ts, header)

            elif header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER,
                            GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER,
                            GROUP_MSG_EXIT_GROUP_HEADER]:
                process_group_management_message(ts, packet, header, messages_to_flask)

            if unittest:
                break
def process_group_management_message(ts:      'datetime',
                                     packet:  bytes,
                                     header:  bytes,
                                     buf_key: bytes,
                                     ) -> None:
    """Parse group management message and buffer a notification for each affected contact.

    :param ts:      Timestamp of the received packet
    :param packet:  Group management payload (group ID + serialized public keys)
    :param header:  Datagram header that selects the management action
    :param buf_key: Key passed to `buffer_to_flask` for the buffered packets
    """
    header_str       = header.decode()
    group_id, packet = separate_header(packet, GROUP_ID_LENGTH)

    if header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER]:
        pub_keys = split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        for onion_pub_key in pub_keys:
            # Each recipient receives the list of the *other* members.
            others     = [k for k in pub_keys if k != onion_pub_key]
            packet_str = header_str + b85encode(group_id + b''.join(others))
            buffer_to_flask(packet_str, onion_pub_key, ts, header, buf_key)

    elif header in [GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER]:
        first_list_len_b, packet = separate_header(packet, ENCODED_INTEGER_LENGTH)
        first_list_length        = bytes_to_int(first_list_len_b)
        pub_keys                 = split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        # The serialized key list is split in two parts: the first part holds the
        # existing/remaining members, the second the added/removed members.
        before_adding = remaining = pub_keys[:first_list_length]
        new_in_group  = removable = pub_keys[first_list_length:]

        if header == GROUP_MSG_MEMBER_ADD_HEADER:
            process_add_or_group_remove_member(ts, header, buf_key, header_str, group_id,
                                               before_adding, new_in_group)

            # Send each newly added member a group invitation that lists everyone else.
            for onion_pub_key in new_in_group:
                other_new  = [k for k in new_in_group if k != onion_pub_key]
                packet_str = (GROUP_MSG_INVITE_HEADER.decode()
                              + b85encode(group_id + b''.join(other_new + before_adding)))
                buffer_to_flask(packet_str, onion_pub_key, ts, header, buf_key)

        elif header == GROUP_MSG_MEMBER_REM_HEADER:
            process_add_or_group_remove_member(ts, header, buf_key, header_str, group_id,
                                               remaining, removable)

    elif header == GROUP_MSG_EXIT_GROUP_HEADER:
        process_group_exit_header(ts, packet, header, buf_key, header_str, group_id)
def process_group_management_message(data:              bytes,
                                     existing_contacts: List[bytes],
                                     group_id_hr:       str,
                                     header:            bytes,
                                     trunc_addr:        str) -> None:
    """Process group management message and print a notification box for the user.

    :param data:              Serialized public keys of the members concerned
    :param existing_contacts: Public keys of the user's existing contacts
    :param group_id_hr:       Human readable group ID
    :param header:            Group management message header
    :param trunc_addr:        Truncated Onion address of the sending contact
    """
    if header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER,
                  GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER]:
        pub_keys       = split_byte_string(data, ONION_SERVICE_PUBLIC_KEY_LENGTH)
        pub_key_length = ONION_SERVICE_PUBLIC_KEY_LENGTH

        # Discard trailing fragments shorter than a full public key, then
        # partition the members into known and unknown accounts.
        members = [k for k in pub_keys if len(k) == pub_key_length]
        known   = [f" * {pub_key_to_onion_address(m)}" for m in members if m in existing_contacts]
        unknown = [f" * {pub_key_to_onion_address(m)}" for m in members if m not in existing_contacts]

        line_list = []
        if known:
            line_list.extend(["Known contacts"] + known)
        if unknown:
            line_list.extend(["Unknown contacts"] + unknown)

        if header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER]:
            action  = 'invited you to' if header == GROUP_MSG_INVITE_HEADER else 'joined'
            postfix = ' with' if members else ''
            m_print([f"{trunc_addr} has {action} group {group_id_hr}{postfix}"] + line_list, box=True)

        elif header in [GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER]:
            if members:
                action, p = ("added", "to") if header == GROUP_MSG_MEMBER_ADD_HEADER else ("removed", "from")
                m_print([f"{trunc_addr} has {action} following members {p} group {group_id_hr}"]
                        + line_list, box=True)

    elif header == GROUP_MSG_EXIT_GROUP_HEADER:
        m_print([f"{trunc_addr} has left group {group_id_hr}",
                 '', "Warning",
                 "Unless you remove the contact from the group, they",
                 "can still read messages you send to the group."], box=True)
def count_number_of_packets(self) -> int:
    """Count number of packets needed for file delivery."""
    file_data = self.time_bytes + self.size + self.name + US_BYTE + self.data

    # Short transmissions fit into a single padded assembly packet.
    if len(file_data) < PADDING_LEN:
        return 1

    # Long transmissions reserve room for the packet counter before padding.
    padded = byte_padding(file_data + bytes(FILE_PACKET_CTR_LEN))
    return len(split_byte_string(padded, item_len=PADDING_LEN))
def count_number_of_packets(name:       bytes,
                            size:       bytes,
                            processed:  bytes,
                            time_bytes: bytes) -> int:
    """Count number of packets needed for file delivery."""
    file_data = time_bytes + size + name + US_BYTE + processed

    # Short transmissions fit into a single padded assembly packet.
    if len(file_data) < PADDING_LENGTH:
        return 1

    # Long transmissions reserve room for the packet counter before padding.
    padded = byte_padding(file_data + bytes(FILE_PACKET_CTR_LENGTH))
    return len(split_byte_string(padded, item_len=PADDING_LENGTH))
def process_group_exit_header(ts:                'datetime',
                              packet:            bytes,
                              header:            bytes,
                              header_str:        str,
                              group_id:          bytes,
                              messages_to_flask: 'Queue[Tuple[Union[bytes, str], bytes]]') -> None:
    """Process group exit packet."""
    notification = header_str + b85encode(group_id)

    # Queue the exit notification to every member of the group.
    for member_key in split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH):
        queue_to_flask(notification, member_key, messages_to_flask, ts, header)
def mock_file_preprocessor(payload):
    """Mimic file splitting: pad data and add counter + assembly packet headers."""
    padded = byte_padding(bytes(8) + payload)
    chunks = split_byte_string(padded, item_len=255)

    # First chunk carries the packet count in place of the 8-byte placeholder.
    first  = [F_L_HEADER + int_to_bytes(len(chunks)) + chunks[0][8:]]
    middle = [F_A_HEADER + c for c in chunks[1:-1]]
    last   = [F_E_HEADER + chunks[-1]]

    return first + middle + last
def split_to_assembly_packets(payload: bytes, p_type: str) -> List[bytes]:
    """Split payload to assembly packets.

    Messages and commands are compressed here; files were compressed at
    an earlier phase, before B85 encoding. A payload that fits one
    padded packet is sent as a single "short" packet. Otherwise an inner
    layer is applied per payload type before padding and splitting:
    messages get XSalsa20-Poly1305 encryption (sender-side control over
    partially transmitted data), commands get an appended hash for easy
    integrity checking, and files get an 8-byte packet counter prepended
    to the first packet so both sides know how many packets the
    transmission requires. (Files already carry an inner encryption
    layer added in an earlier phase.)
    """
    s_header, l_header, a_header, e_header = {
        MESSAGE: (M_S_HEADER, M_L_HEADER, M_A_HEADER, M_E_HEADER),
        FILE:    (F_S_HEADER, F_L_HEADER, F_A_HEADER, F_E_HEADER),
        COMMAND: (C_S_HEADER, C_L_HEADER, C_A_HEADER, C_E_HEADER)}[p_type]

    if p_type in [MESSAGE, COMMAND]:
        payload = zlib.compress(payload, level=COMPRESSION_LEVEL)

    if len(payload) < PADDING_LEN:
        return [s_header + byte_padding(payload)]

    if p_type == MESSAGE:
        msg_key = csprng()
        payload = encrypt_and_sign(payload, msg_key) + msg_key
    elif p_type == FILE:
        payload = bytes(FILE_PACKET_CTR_LEN) + payload
    elif p_type == COMMAND:
        payload += hash_chain(payload)

    p_list = split_byte_string(byte_padding(payload), item_len=PADDING_LEN)

    if p_type == FILE:
        # Replace the placeholder counter with the actual packet count.
        p_list[0] = int_to_bytes(len(p_list)) + p_list[0][FILE_PACKET_CTR_LEN:]

    return ([l_header + p_list[0]]
            + [a_header + p for p in p_list[1:-1]]
            + [e_header + p_list[-1]])
def test_split_byte_string(self):
    # Expected chunk lists for b'teststring' at various item lengths.
    expected = {1:  [b't', b'e', b's', b't', b's', b't', b'r', b'i', b'n', b'g'],
                2:  [b'te', b'st', b'st', b'ri', b'ng'],
                3:  [b'tes', b'tst', b'rin', b'g'],
                5:  [b'tests', b'tring'],
                10: [b'teststring'],
                15: [b'teststring']}

    for item_len, chunks in expected.items():
        self.assertEqual(split_byte_string(b'teststring', item_len), chunks)
def process_group_exit_header(ts:         'datetime',
                              packet:     bytes,
                              header:     bytes,
                              buf_key:    bytes,
                              header_str: str,
                              group_id:   bytes,
                              ) -> None:
    """Process group exit packet."""
    notification = header_str + b85encode(group_id)

    # Buffer the exit notification for every member of the group.
    for member_key in split_byte_string(packet, ONION_SERVICE_PUBLIC_KEY_LENGTH):
        buffer_to_flask(notification, member_key, ts, header, buf_key)
def mock_command_preprocessor(command):
    """Mimic command splitting: compress, pad, and add assembly packet headers."""
    compressed = zlib.compress(command, level=9)

    if len(compressed) < 255:
        return [C_S_HEADER + byte_padding(compressed)]

    # Long command: append integrity hash before padding and splitting.
    compressed += hash_chain(compressed)
    chunks      = split_byte_string(byte_padding(compressed), item_len=255)

    return ([C_L_HEADER + chunks[0]]
            + [C_A_HEADER + c for c in chunks[1:-1]]
            + [C_E_HEADER + chunks[-1]])
def write_udp_packet(self, packet: bytes) -> None:
    """Split packet to smaller parts and transmit them over the socket."""
    if self.settings.software_operation == TX:
        udp_port = QUBES_SRC_LISTEN_SOCKET
    else:
        udp_port = QUBES_DST_LISTEN_SOCKET

    encoded = base64.b85encode(packet)
    parts   = split_byte_string(encoded, SOCKET_BUFFER_SIZE)

    if self.txq_socket is not None:
        destination = (self.settings.rx_udp_ip, udp_port)
        for part in parts:
            self.txq_socket.sendto(part, destination)
            time.sleep(0.000001)
        # Terminate the transmission with the unit separator byte.
        self.txq_socket.sendto(US_BYTE, destination)
def process_file_datagram(ts:     'datetime',
                          packet: bytes,
                          header: bytes,
                          queues: 'QueueDict') -> None:
    """Process file datagram."""
    files_to_flask = queues[F_TO_FLASK_QUEUE]

    no_contacts_b, payload = separate_header(packet, ENCODED_INTEGER_LENGTH)
    recipient_count        = bytes_to_int(no_contacts_b)
    ser_accounts, file_ct  = separate_header(payload, recipient_count * ONION_SERVICE_PUBLIC_KEY_LENGTH)

    # Queue the file ciphertext once per intended recipient.
    for recipient_key in split_byte_string(ser_accounts, item_len=ONION_SERVICE_PUBLIC_KEY_LENGTH):
        queue_to_flask(file_ct, recipient_key, files_to_flask, ts, header)
def queue_message(user_input: Union['UserInput', 'Message'],
                  window:     Union['MockWindow', 'Window'],
                  settings:   'Settings',
                  m_queue:    'Queue',
                  header:     bytes = b'') -> None:
    """Convert message into set of assembly packets and queue them.

    :param user_input: UserInput object
    :param window:     Window object
    :param settings:   Settings object
    :param m_queue:    Multiprocessing message queue
    :param header:     Overrides message header with group management header
    :return:           None
    """
    if not header:
        if window.type == 'group':
            # Group messages carry a millisecond timestamp and group name in the header.
            timestamp = double_to_bytes(time.time() * 1000)
            header = GROUP_MESSAGE_HEADER + timestamp + window.name.encode() + US_BYTE
        else:
            header = PRIVATE_MESSAGE_HEADER

    plaintext = user_input.plaintext.encode()
    payload   = header + plaintext
    payload   = zlib.compress(payload, level=9)

    if len(payload) < 255:
        # Short message: a single padded packet suffices.
        padded      = byte_padding(payload)
        packet_list = [M_S_HEADER + padded]
    else:
        # Long message: inner encryption layer whose key is appended to the payload.
        msg_key  = keygen()
        payload  = encrypt_and_sign(payload, msg_key)
        payload += msg_key
        padded   = byte_padding(payload)
        p_list   = split_byte_string(padded, item_len=255)

        packet_list = ([M_L_HEADER + p_list[0]]
                       + [M_A_HEADER + p for p in p_list[1:-1]]
                       + [M_E_HEADER + p_list[-1]])

    if settings.session_trickle:
        # Trickle mode: queue packets with a per-recipient logging dictionary.
        log_m_dictionary = dict((c.rx_account, c.log_messages) for c in window)
        for p in packet_list:
            m_queue.put((p, log_m_dictionary))
    else:
        # Normal mode: queue the packets once per window contact.
        for c in window:
            log_setting = window.group.log_messages if window.type == 'group' else c.log_messages
            for p in packet_list:
                m_queue.put((p, settings, c.rx_account, c.tx_account, log_setting, window.uid))
def c_req_manager(queues: 'QueueDict', unit_test: bool = False) -> None:
    """Manage incoming contact requests.

    :param queues:    Dictionary of multiprocessing queues
    :param unit_test: When True, breaks the loop for testing
    """
    existing_contacts = []  # type: List[bytes]
    contact_requests  = []  # type: List[bytes]

    request_queue = queues[CONTACT_REQ_QUEUE]
    contact_queue = queues[C_REQ_MGMT_QUEUE]
    setting_queue = queues[C_REQ_STATE_QUEUE]
    show_requests = True

    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while request_queue.qsize() == 0:
                time.sleep(0.1)
            purp_onion_address = request_queue.get()

            # The most recently queued notification setting wins.
            while setting_queue.qsize() != 0:
                show_requests = setting_queue.get()

            # Update list of existing contacts
            while contact_queue.qsize() > 0:
                command, ser_onion_pub_keys = contact_queue.get()
                onion_pub_key_list = split_byte_string(ser_onion_pub_keys,
                                                       ONION_SERVICE_PUBLIC_KEY_LENGTH)

                if command == RP_ADD_CONTACT_HEADER:
                    existing_contacts = list(set(existing_contacts) | set(onion_pub_key_list))
                elif command == RP_REMOVE_CONTACT_HEADER:
                    existing_contacts = list(set(existing_contacts) - set(onion_pub_key_list))

            if validate_onion_addr(purp_onion_address) == '':
                onion_pub_key = onion_address_to_pub_key(purp_onion_address)
                # Silently ignore requests from existing contacts and duplicates.
                if onion_pub_key in existing_contacts:
                    continue
                if onion_pub_key in contact_requests:
                    continue

                if show_requests:
                    ts_fmt = datetime.now().strftime('%b %d - %H:%M:%S.%f')[:-4]
                    m_print([f"{ts_fmt} - New contact request from an unknown TFC account:",
                             purp_onion_address], box=True)
                contact_requests.append(onion_pub_key)

            if unit_test and queues[UNIT_TEST_QUEUE].qsize() != 0:
                break
def update_list_of_existing_contacts(contact_queue:     'Queue[Any]',
                                     existing_contacts: List[bytes]
                                     ) -> List[bytes]:
    """Update list of existing contacts."""
    # Drain the queue, applying each add/remove command in arrival order.
    while contact_queue.qsize() > 0:
        command, ser_onion_pub_keys = contact_queue.get()
        key_set = set(split_byte_string(ser_onion_pub_keys, ONION_SERVICE_PUBLIC_KEY_LENGTH))

        if command == RP_ADD_CONTACT_HEADER:
            existing_contacts = list(set(existing_contacts) | key_set)
        elif command == RP_REMOVE_CONTACT_HEADER:
            existing_contacts = list(set(existing_contacts) - key_set)

    return existing_contacts
def client_scheduler(queues:                'QueueDict',
                     gateway:               'Gateway',
                     url_token_private_key: X448PrivateKey,
                     unittest:              bool = False) -> None:
    """Manage `client` processes.

    :param queues:                Dictionary of multiprocessing queues
    :param gateway:               Gateway object passed to spawned client processes
    :param url_token_private_key: X448 private key passed to spawned client processes
    :param unittest:              When True, breaks the loop for testing
    """
    proc_dict = dict()  # type: Dict[bytes, Process]

    # Wait for Tor port from `onion_service` process.
    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while queues[TOR_DATA_QUEUE].qsize() == 0:
                time.sleep(0.1)
            tor_port, onion_addr_user = queues[TOR_DATA_QUEUE].get()
            break

    # Spawn/terminate one client process per contact as commands arrive.
    while True:
        with ignored(EOFError, KeyboardInterrupt):
            while queues[CONTACT_KEY_QUEUE].qsize() == 0:
                time.sleep(0.1)
            command, ser_public_keys, is_existing_contact = queues[CONTACT_KEY_QUEUE].get()

            onion_pub_keys = split_byte_string(ser_public_keys, ONION_SERVICE_PUBLIC_KEY_LENGTH)

            if command == RP_ADD_CONTACT_HEADER:
                for onion_pub_key in onion_pub_keys:
                    if onion_pub_key not in proc_dict:
                        # For existing contacts an empty user address is passed to `client`.
                        onion_addr_user = '' if is_existing_contact else onion_addr_user
                        proc_dict[onion_pub_key] = Process(target=client,
                                                           args=(onion_pub_key, queues,
                                                                 url_token_private_key,
                                                                 tor_port, gateway,
                                                                 onion_addr_user))
                        proc_dict[onion_pub_key].start()

            elif command == RP_REMOVE_CONTACT_HEADER:
                for onion_pub_key in onion_pub_keys:
                    if onion_pub_key in proc_dict:
                        process = proc_dict[onion_pub_key]  # type: Process
                        process.terminate()
                        proc_dict.pop(onion_pub_key)
                        rp_print(f"Removed {pub_key_to_short_address(onion_pub_key)}", bold=True)

            if unittest and queues[UNITTEST_QUEUE].qsize() != 0:
                break
def process_file_datagram(ts:      'datetime',
                          packet:  bytes,
                          header:  bytes,
                          buf_key: bytes,
                          ) -> None:
    """Process file datagram."""
    no_contacts_b, payload = separate_header(packet, ENCODED_INTEGER_LENGTH)
    recipient_count        = bytes_to_int(no_contacts_b)
    ser_accounts, file_ct  = separate_header(payload, recipient_count * ONION_SERVICE_PUBLIC_KEY_LENGTH)

    # Buffer the file ciphertext once per intended recipient.
    for recipient_key in split_byte_string(ser_accounts, item_len=ONION_SERVICE_PUBLIC_KEY_LENGTH):
        buffer_to_flask(file_ct, recipient_key, ts, header, buf_key, file=True)
def group_add(cmd_data:     bytes,
              ts:           'datetime',
              window_list:  'WindowList',
              contact_list: 'ContactList',
              group_list:   'GroupList',
              settings:     'Settings') -> None:
    """Add member(s) to group.

    :raises SoftError: If the group ID is unknown or the member limit would be exceeded.
    """
    group_id, ser_members = separate_header(cmd_data, GROUP_ID_LENGTH)
    purp_pub_keys         = set(split_byte_string(ser_members, ONION_SERVICE_PUBLIC_KEY_LENGTH))

    try:
        group_name = group_list.get_group_by_id(group_id).name
    except StopIteration:
        raise SoftError(f"Error: No group with ID '{b58encode(group_id)}' found.")

    pub_keys      = set(contact_list.get_list_of_pub_keys())
    before_adding = set(group_list.get_group(group_name).get_list_of_member_pub_keys())
    # Only known contacts (ok_accounts) that are not already members can be added.
    ok_accounts      = set(pub_keys & purp_pub_keys)
    new_in_group_set = set(ok_accounts - before_adding)

    end_assembly = list(before_adding | new_in_group_set)  # Final member list after adding
    already_in_g = list(purp_pub_keys & before_adding)     # Requested keys already in the group
    rejected     = list(purp_pub_keys - pub_keys)          # Requested keys with no matching contact
    new_in_group = list(new_in_group_set)

    if len(end_assembly) > settings.max_number_of_group_members:
        raise SoftError(f"Error: TFC settings only allow {settings.max_number_of_group_members} members per group.")

    group = group_list.get_group(group_name)
    group.add_members([contact_list.get_contact_by_pub_key(k) for k in new_in_group])

    window = window_list.get_window(group.group_id)
    window.add_contacts(new_in_group)
    window.create_handle_dict()

    group_management_print(ADDED_MEMBERS,    new_in_group, contact_list, group_name)
    group_management_print(ALREADY_MEMBER,   already_in_g, contact_list, group_name)
    group_management_print(UNKNOWN_ACCOUNTS, rejected,     contact_list, group_name)

    cmd_win = window_list.get_window(WIN_UID_COMMAND)
    cmd_win.add_new(ts, f"Added members to group {group_name}.")
def group_create(cmd_data:     bytes,
                 ts:           'datetime',
                 window_list:  'WindowList',
                 contact_list: 'ContactList',
                 group_list:   'GroupList',
                 settings:     'Settings') -> None:
    """Create a new group.

    :raises SoftError: If the member or group limits in settings would be exceeded.
    """
    group_id, variable_len_data = separate_header(cmd_data, GROUP_ID_LENGTH)
    # Group name and serialized member keys are delimited by the unit separator byte.
    group_name_bytes, ser_members = variable_len_data.split(US_BYTE, 1)
    group_name                    = group_name_bytes.decode()

    purp_pub_keys = set(split_byte_string(ser_members, ONION_SERVICE_PUBLIC_KEY_LENGTH))
    pub_keys      = set(contact_list.get_list_of_pub_keys())
    accepted      = list(purp_pub_keys & pub_keys)  # Requested keys with a matching contact
    rejected      = list(purp_pub_keys - pub_keys)  # Requested keys with no matching contact

    if len(accepted) > settings.max_number_of_group_members:
        raise SoftError(f"Error: TFC settings only allow {settings.max_number_of_group_members} members per group.")

    if len(group_list) == settings.max_number_of_groups:
        raise SoftError(f"Error: TFC settings only allow {settings.max_number_of_groups} groups.")

    accepted_contacts = [contact_list.get_contact_by_pub_key(k) for k in accepted]
    group_list.add_group(group_name,
                         group_id,
                         settings.log_messages_by_default,
                         settings.show_notifications_by_default,
                         accepted_contacts)

    group  = group_list.get_group(group_name)
    window = window_list.get_window(group.group_id)
    window.window_contacts = accepted_contacts
    window.message_log     = []
    window.unread_messages = 0
    window.create_handle_dict()

    group_management_print(NEW_GROUP,        accepted, contact_list, group_name)
    group_management_print(UNKNOWN_ACCOUNTS, rejected, contact_list, group_name)

    cmd_win = window_list.get_window(WIN_UID_COMMAND)
    cmd_win.add_new(ts, f"Created new group {group_name}.")
def test_decryption_error_raises_fr(self):
    # Setup
    packet  = Packet('*****@*****.**', self.contact, ORIGIN_USER_HEADER, MESSAGE, self.settings)
    msg_key = bytes(KEY_LENGTH)

    # Reverse the ciphertext so that decryption is guaranteed to fail.
    ct     = encrypt_and_sign(zlib.compress(self.msg.encode(), level=COMPRESSION_LEVEL), msg_key)[::-1]
    chunks = split_byte_string(byte_padding(ct + msg_key), item_len=PADDING_LEN)

    for p in ([M_L_HEADER + chunks[0]]
              + [M_A_HEADER + c for c in chunks[1:-1]]
              + [M_E_HEADER + chunks[-1]]):
        packet.add_packet(p)

    # Test
    self.assertFR("Error: Decryption of message failed.", packet.assemble_message_packet)
def test_long_command_compression_error_raises_fr(self):
    # Setup
    packet  = Packet(LOCAL_ID, self.contact, ORIGIN_CONTACT_HEADER, COMMAND, self.settings)
    command = os.urandom(500) + b'a'

    # Reverse the compressed data so that decompression is guaranteed to fail.
    payload  = zlib.compress(command, level=COMPRESSION_LEVEL)[::-1]
    payload += hash_chain(payload)
    chunks   = split_byte_string(byte_padding(payload), item_len=PADDING_LEN)

    for p in ([C_L_HEADER + chunks[0]]
              + [C_A_HEADER + c for c in chunks[1:-1]]
              + [C_E_HEADER + chunks[-1]]):
        packet.add_packet(p)

    # Test
    self.assertFR("Error: Decompression of command failed.", packet.assemble_command_packet)
    self.assertEqual(packet.log_masking_ctr, 0)
def _load_contacts(self) -> None:
    """Load contacts from the encrypted database.

    This function first reads and decrypts the database content. It
    then splits the plaintext into a list of 1124-byte blocks: each
    block contains the serialized data of one contact. Next, the
    function will remove from the list all dummy contacts (that start
    with dummy contact's public key). The function will then populate
    the `self.contacts` list with Contact objects, the data of which
    is sliced and decoded from the dummy-free blocks.
    """
    with open(self.file_name, 'rb') as f:
        ct_bytes = f.read()

    pt_bytes  = auth_and_decrypt(ct_bytes, self.master_key.master_key, database=self.file_name)
    blocks    = split_byte_string(pt_bytes, item_len=CONTACT_LENGTH)
    df_blocks = [b for b in blocks if not b.startswith(self.dummy_contact.onion_pub_key)]

    for block in df_blocks:
        # A wrong-sized block indicates corrupted database content.
        if len(block) != CONTACT_LENGTH:
            raise CriticalError("Invalid data in contact database.")

        (onion_pub_key, tx_fingerprint, rx_fingerprint,
         kex_status_byte, log_messages_byte,
         file_reception_byte, notifications_byte,
         nick_bytes) = separate_headers(block,
                                        [ONION_SERVICE_PUBLIC_KEY_LENGTH]
                                        + 2 * [FINGERPRINT_LENGTH]
                                        + [KEX_STATUS_LENGTH]
                                        + 3 * [ENCODED_BOOLEAN_LENGTH])

        self.contacts.append(Contact(onion_pub_key=onion_pub_key,
                                     tx_fingerprint=tx_fingerprint,
                                     rx_fingerprint=rx_fingerprint,
                                     kex_status=kex_status_byte,
                                     log_messages=bytes_to_bool(log_messages_byte),
                                     file_reception=bytes_to_bool(file_reception_byte),
                                     notifications=bytes_to_bool(notifications_byte),
                                     nick=bytes_to_str(nick_bytes)))