def test_onion_service(self, *_):
    """Test that onion_service() serves the Onion Service and exits on close command.

    A helper thread feeds the private key twice (second put exercises the
    re-read path) and then a close command; afterwards the Tor data queue
    must hold a valid (port, onion address) pair and the exit queue EXIT.
    """
    # Setup
    queues = gen_queue_dict()

    def queue_delayer():
        """Place Onion Service data into queue after delay."""
        time.sleep(0.5)
        queues[ONION_KEY_QUEUE].put((bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01'))
        queues[ONION_KEY_QUEUE].put((bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01'))
        time.sleep(0.1)
        queues[ONION_CLOSE_QUEUE].put(EXIT)

    threading.Thread(target=queue_delayer).start()

    # Test
    # Patch out real sleeping inside onion_service() so the test runs fast.
    with mock.patch("time.sleep", return_value=None):
        self.assertIsNone(onion_service(queues))

    port, address = queues[TOR_DATA_QUEUE].get()
    self.assertIsInstance(port, int)
    # validate_onion_addr() returns '' when the address is valid.
    self.assertEqual(validate_onion_addr(address), '')
    self.assertEqual(queues[EXIT_QUEUE].get(), EXIT)

    # Teardown
    tear_queues(queues)
def test_loop(self):
    """Test that dst_outgoing() forwards queued packets to the gateway and exits on EXIT."""
    # Setup
    packet = b'test_packet'
    queues = gen_queue_dict()
    gateway = Gateway()

    def queue_delayer():
        """Place packets into queue after delay."""
        time.sleep(0.015)
        queues[DST_COMMAND_QUEUE].put(packet)
        time.sleep(0.015)
        queues[DST_MESSAGE_QUEUE].put(packet)
        time.sleep(0.015)
        queues[UNIT_TEST_QUEUE].put(EXIT)

    threading.Thread(target=queue_delayer).start()

    # Test
    # First two side effects make time.sleep raise EOFError/KeyboardInterrupt
    # to exercise the loop's exception handling; the remaining no-ops let the
    # loop run until the EXIT command arrives.
    side_effects = [EOFError, KeyboardInterrupt, None] + [None] * 100_000

    with unittest.mock.patch('time.sleep', side_effect=side_effects):
        self.assertIsNone(dst_outgoing(queues, gateway, unit_test=True))
    self.assertEqual(packet, gateway.packets[0])

    # Teardown
    tear_queues(queues)
def test_function_logs_traffic_masking_data(self):
    """Test that log_writer_loop() logs the loggable packets during traffic masking.

    Of the four packets queued before EXIT, three have their logging flag set
    (the command, the file placeholder, and one message); the noise packet
    queued after EXIT must not be written. Expected log size: 3 entries.
    """
    # Setup
    settings = Settings(log_file_masking=True, traffic_masking=False)
    master_key = MasterKey()
    queues = gen_queue_dict()
    queues[TRAFFIC_MASKING_QUEUE].put(True)

    def queue_delayer():
        """Place messages to queue one at a time."""
        for p in [(nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), False, False, master_key),
                  (None,                     C_S_HEADER + bytes(PADDING_LENGTH), True,  False, master_key),
                  (nick_to_pub_key('Alice'), F_S_HEADER + bytes(PADDING_LENGTH), True,  True,  master_key),
                  (nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), True,  False, master_key)]:
            queues[LOG_PACKET_QUEUE].put(p)
            time.sleep(0.02)

        queues[UNITTEST_QUEUE].put(EXIT)
        time.sleep(0.02)

        # Queued after EXIT: must not end up in the log file.
        queues[LOG_PACKET_QUEUE].put(
            (nick_to_pub_key('Alice'), P_N_HEADER + bytes(PADDING_LENGTH), True, True, master_key))
        time.sleep(0.02)

    # Test
    threading.Thread(target=queue_delayer).start()
    log_writer_loop(queues, settings, unittest=True)
    self.assertEqual(os.path.getsize(f'{DIR_USER_DATA}{settings.software_operation}_logs'),
                     3*LOG_ENTRY_LENGTH)

    # Teardown
    tear_queues(queues)
def test_loop(self):
    """Test that traffic_masking_loop() sends all queued packets and returns new Settings.

    Seven packets (two commands, two messages, one noise command and two
    unencrypted relay commands) are queued; the loop must deliver all of
    them to the gateway before returning the Settings object placed into
    the sender mode queue.
    """
    # Setup
    queues = gen_queue_dict()
    settings = Settings(traffic_masking=True, tm_static_delay=0.001, tm_random_delay=0.001)
    gateway = Gateway()
    key_list = KeyList(nicks=['Alice', LOCAL_ID])
    window = TxWindow(log_messages=True)
    contact_list = ContactList(nicks=['Alice', LOCAL_ID])
    window.contact_list = contact_list
    window.window_contacts = [contact_list.get_contact_by_address_or_nick('Alice')]
    user_input = UserInput(plaintext='test')

    def queue_delayer():
        """Place packets to queue after delay."""
        time.sleep(0.01)
        queues[WINDOW_SELECT_QUEUE].put(window.window_contacts)
        time.sleep(0.01)
        queue_command(b'test', settings, queues)                                            # 1
        queue_message(user_input, window, settings, queues)                                 # 2
        queue_message(user_input, window, settings, queues)                                 # 3
        queue_command(b'test', settings, queues)                                            # 4
        queues[TM_NOISE_COMMAND_QUEUE].put((C_N_HEADER + bytes(PADDING_LENGTH)))            # 5
        queue_to_nc(UNENCRYPTED_DATAGRAM_HEADER + UNENCRYPTED_EXIT_COMMAND,
                    queues[RELAY_PACKET_QUEUE])                                             # 6
        queue_to_nc(UNENCRYPTED_DATAGRAM_HEADER + UNENCRYPTED_WIPE_COMMAND,
                    queues[RELAY_PACKET_QUEUE])                                             # 7
        # Queuing new Settings makes the loop return.
        queues[SENDER_MODE_QUEUE].put(settings)

    # Test
    threading.Thread(target=queue_delayer).start()
    self.assertIsInstance(traffic_masking_loop(queues, settings, gateway, key_list), Settings)
    self.assertEqual(len(gateway.packets), 7)

    # Teardown
    tear_queues(queues)
def test_wipe(self, mock_os_system, *_: Any) -> None:
    """Test that a WIPE packet removes user data directories and powers off the system."""
    queues = gen_queue_dict()
    process_list = [Process(target=self.mock_process)]

    # Create the directories the wipe is expected to remove.
    os.mkdir(DIR_USER_DATA)
    os.mkdir(DIR_RECV_FILES)
    self.assertTrue(os.path.isdir(DIR_USER_DATA))
    self.assertTrue(os.path.isdir(DIR_RECV_FILES))

    for p in process_list:
        p.start()

    def queue_delayer() -> None:
        """Place WIPE packet to queue after delay."""
        time.sleep(0.01)
        queues[EXIT_QUEUE].put(WIPE)

    threading.Thread(target=queue_delayer).start()

    with self.assertRaises(SystemExit):
        monitor_processes(process_list, RX, queues)

    # The wipe must have removed both data directories and powered off.
    self.assertFalse(os.path.isdir(DIR_USER_DATA))
    self.assertFalse(os.path.isdir(DIR_RECV_FILES))
    mock_os_system.assert_called_with('systemctl poweroff')

    tear_queues(queues)
def test_client_scheduler(self):
    """Test that client_scheduler() processes contact add/remove commands and exits cleanly."""
    queues = gen_queue_dict()
    gateway = Gateway()
    server_private_key = X448.generate_private_key()

    def queue_delayer():
        """Place messages to queue one at a time."""
        time.sleep(0.1)
        queues[TOR_DATA_QUEUE].put(
            ('1234', nick_to_onion_address('Alice')))
        queues[CONTACT_KEY_QUEUE].put(
            (RP_ADD_CONTACT_HEADER,
             b''.join([nick_to_pub_key('Alice'), nick_to_pub_key('Bob')]),
             True))
        time.sleep(0.1)
        queues[CONTACT_KEY_QUEUE].put(
            (RP_REMOVE_CONTACT_HEADER,
             b''.join([nick_to_pub_key('Alice'), nick_to_pub_key('Bob')]),
             True))
        time.sleep(0.1)
        queues[UNITTEST_QUEUE].put(EXIT)
        time.sleep(0.1)
        # Final command unblocks the scheduler so it can observe EXIT.
        queues[CONTACT_KEY_QUEUE].put((EXIT, EXIT, EXIT))

    threading.Thread(target=queue_delayer).start()

    self.assertIsNone(
        client_scheduler(queues, gateway, server_private_key, unittest=True))

    tear_queues(queues)
def test_wipe_tails(self, mock_os_system, *_):
    """Test that WIPE on Tails powers off without removing the user data directory."""
    queues = gen_queue_dict()
    process_list = [Process(target=self.mock_process)]

    os.mkdir(DIR_USER_DATA)
    self.assertTrue(os.path.isdir(DIR_USER_DATA))

    for p in process_list:
        p.start()

    def queue_delayer():
        """Place WIPE packet to queue after delay."""
        time.sleep(0.01)
        queues[EXIT_QUEUE].put(WIPE)

    threading.Thread(target=queue_delayer).start()

    with self.assertRaises(SystemExit):
        monitor_processes(process_list, RX, queues)

    # On Tails, plain 'poweroff' is used instead of 'systemctl poweroff'.
    mock_os_system.assert_called_with('poweroff')

    # Test that user data wasn't removed
    self.assertTrue(os.path.isdir(DIR_USER_DATA))

    tear_queues(queues)
def test_function_logs_traffic_masking_data(self) -> None:
    """Test that log_writer_loop() logs loggable packets during traffic masking.

    Three of the four packets queued before EXIT carry the logging flag;
    the noise packet queued after EXIT must not be processed.
    """
    # Setup
    settings = Settings(log_file_masking=True, traffic_masking=False)
    master_key = MasterKey()
    queues = gen_queue_dict()
    queues[TRAFFIC_MASKING_QUEUE].put(True)

    def queue_delayer() -> None:
        """Place messages to the logging queue one at a time."""
        for p in [(nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), False, False, master_key),
                  (None,                     C_S_HEADER + bytes(PADDING_LENGTH), True,  False, master_key),
                  (nick_to_pub_key('Alice'), F_S_HEADER + bytes(PADDING_LENGTH), True,  True,  master_key),
                  (nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), True,  False, master_key)]:
            queues[LOG_PACKET_QUEUE].put(p)
            time.sleep(SLEEP_DELAY)

        queues[UNIT_TEST_QUEUE].put(EXIT)
        time.sleep(SLEEP_DELAY)

        # Queued after EXIT: must not end up in the log file.
        queues[LOG_PACKET_QUEUE].put(
            (nick_to_pub_key('Alice'), P_N_HEADER + bytes(PADDING_LENGTH), True, True, master_key))
        time.sleep(SLEEP_DELAY)

    # Test
    threading.Thread(target=queue_delayer).start()
    self.assertIsNone(log_writer_loop(queues, settings, self.message_log, unit_test=True))

    # Teardown
    tear_queues(queues)
def test_flask_server(self) -> None:
    """Test the Flask server's root, contact request and URL token endpoints.

    Verifies that the root path serves the server's public key, that a
    contact request is queued, that an invalid URL token yields an empty
    response, and that a valid token drains the queued messages/files
    exactly once.
    """
    # Setup
    queues = gen_queue_dict()
    url_token_private_key = X448.generate_private_key()
    url_token_public_key = X448.derive_public_key(url_token_private_key).hex()
    url_token = 'a450987345098723459870234509827340598273405983274234098723490285'
    url_token_old = 'a450987345098723459870234509827340598273405983274234098723490286'
    url_token_invalid = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
    onion_pub_key = nick_to_pub_key('Alice')
    onion_address = nick_to_onion_address('Alice')
    packet1 = "packet1"
    packet2 = "packet2"
    packet3 = b"packet3"

    # Test
    app = flask_server(queues, url_token_public_key, unit_test=True)

    with app.test_client() as c:
        # Test root domain returns public key of server.
        resp = c.get('/')
        self.assertEqual(resp.data, url_token_public_key.encode())

        resp = c.get(f'/contact_request/{onion_address}')
        self.assertEqual(b'OK', resp.data)
        self.assertEqual(queues[CONTACT_REQ_QUEUE].qsize(), 1)

        # Test invalid URL token returns empty response
        resp = c.get(f'/{url_token_invalid}/messages/')
        self.assertEqual(b'', resp.data)

        resp = c.get(f'/{url_token_invalid}/files/')
        self.assertEqual(b'', resp.data)

    # Test valid URL token returns all queued messages
    queues[URL_TOKEN_QUEUE].put((onion_pub_key, url_token_old))
    queues[URL_TOKEN_QUEUE].put((onion_pub_key, url_token))
    queues[M_TO_FLASK_QUEUE].put((packet1, onion_pub_key))
    queues[M_TO_FLASK_QUEUE].put((packet2, onion_pub_key))
    queues[F_TO_FLASK_QUEUE].put((packet3, onion_pub_key))

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/messages/')
        self.assertEqual(b'packet1\npacket2', resp.data)

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/files/')
        self.assertEqual(b'packet3', resp.data)

    # Test valid URL token returns nothing as queues are empty
    with app.test_client() as c:
        resp = c.get(f'/{url_token}/messages/')
        self.assertEqual(b'', resp.data)

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/files/')
        self.assertEqual(b'', resp.data)

    # Teardown
    tear_queues(queues)
def tearDown(self) -> None:
    """Post-test actions."""
    cleanup(self.unit_test_dir)

    # Remove the exported PSK file if the test created one.
    psk_file = (f"{self.onion_service.user_short_address}.psk"
                f" - Give to {nick_to_short_address('Alice')}")
    with ignored(OSError):
        os.remove(psk_file)

    tear_queues(self.queues)
def test_exception_during_onion_service_setup_returns(self, *_):
    """Test that onion_service() returns gracefully when setup fails."""
    # Setup
    queues = gen_queue_dict()
    key_data = (bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01')
    queues[ONION_KEY_QUEUE].put(key_data)

    # Test
    self.assertIsNone(onion_service(queues))

    # Teardown
    tear_queues(queues)
def test_returns_with_keyboard_interrupt(self, *_):
    """Test that onion_service() returns gracefully on KeyboardInterrupt."""
    # Setup
    queues = gen_queue_dict()
    key_data = (bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01')
    queues[ONION_KEY_QUEUE].put(key_data)

    # Test
    self.assertIsNone(onion_service(queues))

    # Teardown
    tear_queues(queues)
def test_socket_closed_returns(self, *_):
    """Test that onion_service() returns gracefully when the controller socket closes."""
    # Setup
    queues = gen_queue_dict()

    # Stub out the ephemeral hidden service creation on the Stem controller.
    stem.control.Controller.create_ephemeral_hidden_service = MagicMock()

    key_data = (bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01')
    queues[ONION_KEY_QUEUE].put(key_data)

    # Test
    self.assertIsNone(onion_service(queues))

    # Teardown
    tear_queues(queues)
def test_function_log_file_masking_queue_controls_log_file_masking(self):
    """Test that the log file masking queue toggles noise file packet logging at runtime.

    With log_file_masking initially False, the first noise file packet is not
    masked-logged; after True is queued, the two subsequent noise file packets
    are. Together with the command packet this yields 3 log entries.
    """
    # Setup
    settings = Settings(log_file_masking=False, traffic_masking=True)
    master_key = MasterKey()
    queues = gen_queue_dict()

    def queue_delayer():
        """Place messages to queue one at a time."""
        for p in [(None,                     C_S_HEADER + bytes(PADDING_LENGTH), True,  False, master_key),
                  (nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), False, False, master_key),
                  (nick_to_pub_key('Alice'), F_S_HEADER + bytes(PADDING_LENGTH), True,  True,  master_key)]:
            queues[LOG_PACKET_QUEUE].put(p)
            time.sleep(SLEEP_DELAY)

        queues[LOGFILE_MASKING_QUEUE].put(True)  # Start logging noise packets
        time.sleep(SLEEP_DELAY)

        for _ in range(2):
            queues[LOG_PACKET_QUEUE].put(
                (nick_to_pub_key('Alice'), F_S_HEADER + bytes(PADDING_LENGTH), True, True, master_key))
            time.sleep(SLEEP_DELAY)

        queues[UNIT_TEST_QUEUE].put(EXIT)
        time.sleep(SLEEP_DELAY)

        # Queued after EXIT: must not end up in the log file.
        queues[LOG_PACKET_QUEUE].put(
            (nick_to_pub_key('Alice'), M_S_HEADER + bytes(PADDING_LENGTH), True, False, master_key))
        time.sleep(SLEEP_DELAY)

    # Test
    threading.Thread(target=queue_delayer).start()
    log_writer_loop(queues, settings, unit_test=True)
    self.assertEqual(os.path.getsize(f'{DIR_USER_DATA}{settings.software_operation}_logs'),
                     3 * LOG_ENTRY_LENGTH)

    # Teardown
    tear_queues(queues)
def test_dying_process(self, *_):
    """Test that monitor_processes() raises SystemExit when a monitored process dies."""
    def short_lived_process():
        """Function that returns after a moment."""
        time.sleep(0.01)

    queues = gen_queue_dict()
    process_list = [Process(target=short_lived_process)]
    for process in process_list:
        process.start()

    with self.assertRaises(SystemExit):
        monitor_processes(process_list, RX, queues)

    tear_queues(queues)
def test_group_manager(self):
    """Test that g_msg_manager() handles group management messages and bad input.

    Feeds an undersized group ID (recovery path), contact add/remove
    commands, all four group message headers, and exit-group messages
    before terminating the manager with EXIT.
    """
    queues = gen_queue_dict()

    def queue_delayer():
        """Place messages to queue one at a time."""
        time.sleep(0.1)

        # Test function recovers from incorrect group ID size
        queues[GROUP_MSG_QUEUE].put(
            (GROUP_MSG_EXIT_GROUP_HEADER,
             bytes((GROUP_ID_LENGTH - 1)),
             pub_key_to_short_address(bytes(ONION_SERVICE_PUBLIC_KEY_LENGTH))))

        # Test group invite for added and removed contacts
        queues[GROUP_MGMT_QUEUE].put(
            (RP_ADD_CONTACT_HEADER, nick_to_pub_key('Alice') + nick_to_pub_key('Bob')))
        queues[GROUP_MGMT_QUEUE].put(
            (RP_REMOVE_CONTACT_HEADER, nick_to_pub_key('Alice')))

        for header in [GROUP_MSG_INVITE_HEADER, GROUP_MSG_JOIN_HEADER,
                       GROUP_MSG_MEMBER_ADD_HEADER, GROUP_MSG_MEMBER_REM_HEADER]:
            queues[GROUP_MSG_QUEUE].put(
                (header,
                 bytes(GROUP_ID_LENGTH) + nick_to_pub_key('Bob') + nick_to_pub_key('Charlie'),
                 pub_key_to_short_address(bytes(ONION_SERVICE_PUBLIC_KEY_LENGTH))))

        queues[GROUP_MSG_QUEUE].put(
            (GROUP_MSG_EXIT_GROUP_HEADER,
             bytes(GROUP_ID_LENGTH),
             pub_key_to_short_address(bytes(ONION_SERVICE_PUBLIC_KEY_LENGTH))))

        # Exit test
        time.sleep(0.2)
        queues[UNITTEST_QUEUE].put(EXIT)
        # Final message unblocks the manager so it can observe EXIT.
        queues[GROUP_MSG_QUEUE].put(
            (GROUP_MSG_EXIT_GROUP_HEADER,
             bytes(GROUP_ID_LENGTH),
             pub_key_to_short_address(bytes(ONION_SERVICE_PUBLIC_KEY_LENGTH))))

    # Test
    threading.Thread(target=queue_delayer).start()
    self.assertIsNone(g_msg_manager(queues, unittest=True))
    tear_queues(queues)
def test_function_allows_control_of_noise_packets_based_on_log_setting_queue(
        self):
    """Test that the log setting queue controls whether noise packets are logged.

    Logging starts disabled (5 packets dropped), is enabled (2 logged),
    disabled again (3 dropped), and re-enabled just before EXIT (1 logged)
    for a total of 3 log entries.
    """
    # Setup
    settings = Settings(log_file_masking=True, traffic_masking=True)
    master_key = MasterKey()
    queues = gen_queue_dict()

    noise_tuple = (nick_to_pub_key('Alice'),
                   P_N_HEADER + bytes(PADDING_LENGTH), True, True, master_key)

    def queue_delayer():
        """Place packets to log into queue after delay."""
        for _ in range(5):
            queues[LOG_PACKET_QUEUE].put(
                noise_tuple)  # Not logged because logging_state is False by default
        time.sleep(0.02)

        queues[LOG_SETTING_QUEUE].put(True)
        for _ in range(2):
            queues[LOG_PACKET_QUEUE].put(noise_tuple)  # Log two packets
        time.sleep(0.02)

        queues[LOG_SETTING_QUEUE].put(False)
        for _ in range(3):
            queues[LOG_PACKET_QUEUE].put(
                noise_tuple)  # Not logged because logging_state is False
        time.sleep(0.02)

        queues[UNITTEST_QUEUE].put(EXIT)
        queues[LOG_SETTING_QUEUE].put(True)
        queues[LOG_PACKET_QUEUE].put(noise_tuple)  # Log third packet

    # Test
    threading.Thread(target=queue_delayer).start()
    log_writer_loop(queues, settings, unittest=True)
    self.assertEqual(os.path.getsize(f'{DIR_USER_DATA}{settings.software_operation}_logs'),
                     3 * LOG_ENTRY_LENGTH)

    # Teardown
    tear_queues(queues)
def test_missing_tor_controller_raises_critical_error(self, *_):
    """Test that a missing Tor controller terminates onion_service() via SystemExit."""
    # Setup
    queues = gen_queue_dict()

    # Make Tor.connect yield no controller, and stub hidden service creation.
    orig_tor_connect = Tor.connect
    Tor.connect = MagicMock(return_value=None)
    stem.control.Controller.create_ephemeral_hidden_service = MagicMock()

    key_data = (bytes(ONION_SERVICE_PRIVATE_KEY_LENGTH), b'\x01')
    queues[ONION_KEY_QUEUE].put(key_data)

    # Test
    with self.assertRaises(SystemExit):
        onion_service(queues)

    # Teardown
    tear_queues(queues)
    Tor.connect = orig_tor_connect
def test_exit(self, *_):
    """Test that an EXIT packet in the exit queue terminates monitoring via SystemExit."""
    queues = gen_queue_dict()
    process_list = [Process(target=self.mock_process)]
    for process in process_list:
        process.start()

    def queue_delayer():
        """Place EXIT packet into queue after delay."""
        time.sleep(0.01)
        queues[EXIT_QUEUE].put(EXIT)

    threading.Thread(target=queue_delayer).start()

    with self.assertRaises(SystemExit):
        monitor_processes(process_list, RX, queues)

    tear_queues(queues)
def test_function_allows_control_of_noise_packets_based_on_log_setting_queue(
        self) -> None:
    """Test that the log setting queue controls whether noise packets are logged.

    Logging starts disabled (5 packets dropped), is enabled (2 logged),
    disabled again (3 dropped), and re-enabled just before EXIT (1 logged).
    """
    # Setup
    settings = Settings(log_file_masking=True, traffic_masking=True)
    master_key = MasterKey()
    queues = gen_queue_dict()

    noise_tuple = (nick_to_pub_key('Alice'),
                   P_N_HEADER + bytes(PADDING_LENGTH), True, True, master_key)

    def queue_delayer() -> None:
        """Place packets to log into queue after delay."""
        for _ in range(5):
            queues[LOG_PACKET_QUEUE].put(
                noise_tuple)  # Not logged because logging_state is False by default
        time.sleep(SLEEP_DELAY)

        queues[LOG_SETTING_QUEUE].put(True)
        for _ in range(2):
            queues[LOG_PACKET_QUEUE].put(noise_tuple)  # Log two packets
        time.sleep(SLEEP_DELAY)

        queues[LOG_SETTING_QUEUE].put(False)
        for _ in range(3):
            queues[LOG_PACKET_QUEUE].put(
                noise_tuple)  # Not logged because logging_state is False
        time.sleep(SLEEP_DELAY)

        queues[UNIT_TEST_QUEUE].put(EXIT)
        queues[LOG_SETTING_QUEUE].put(True)
        queues[LOG_PACKET_QUEUE].put(noise_tuple)  # Log third packet

    # Test
    threading.Thread(target=queue_delayer).start()
    self.assertIsNone(
        log_writer_loop(queues, settings, self.message_log, unit_test=True))

    # Teardown
    tear_queues(queues)
def test_contact_request_manager(self):
    """Test that c_req_manager() filters existing contacts and deduplicates requests."""
    queues = gen_queue_dict()

    def queue_delayer():
        """Place messages to queue one at a time."""
        time.sleep(0.1)
        queues[F_REQ_MGMT_QUEUE].put(
            (RP_ADD_CONTACT_HEADER,
             b''.join(list(map(nick_to_pub_key, ['Alice', 'Bob'])))))
        time.sleep(0.1)

        # Test that request from Alice does not appear
        queues[CONTACT_REQ_QUEUE].put((nick_to_onion_address('Alice')))
        time.sleep(0.1)

        # Test that request from Charlie appears
        queues[CONTACT_REQ_QUEUE].put((nick_to_onion_address('Charlie')))
        time.sleep(0.1)

        # Test that another request from Charlie does not appear
        queues[CONTACT_REQ_QUEUE].put((nick_to_onion_address('Charlie')))
        time.sleep(0.1)

        # Remove Alice
        queues[F_REQ_MGMT_QUEUE].put(
            (RP_REMOVE_CONTACT_HEADER, nick_to_pub_key('Alice')))
        time.sleep(0.1)

        # Load settings from queue
        queues[C_REQ_MGR_QUEUE].put(False)
        queues[C_REQ_MGR_QUEUE].put(True)

        # Test that request from Alice is accepted
        queues[CONTACT_REQ_QUEUE].put((nick_to_onion_address('Alice')))
        time.sleep(0.1)

        # Exit test
        queues[UNITTEST_QUEUE].put(EXIT)
        # Final request unblocks the manager so it can observe EXIT.
        queues[CONTACT_REQ_QUEUE].put(nick_to_pub_key('Charlie'))

    threading.Thread(target=queue_delayer).start()
    self.assertIsNone(c_req_manager(queues, unittest=True))
    tear_queues(queues)
def tearDown(self):
    """Post-test actions."""
    tear_queues(self.queues)
def tearDown(self) -> None:
    """Post-test actions."""
    # Best-effort removal of the PSK file a test may have exported.
    # Note: the filename is a fixed literal (no placeholders), so the
    # original f-prefix was unnecessary (flake8 F541) and is dropped.
    with ignored(OSError):
        os.remove('v4dkh.psk - Give to hpcra')
    tear_queues(self.queues)
def tearDown(self) -> None:
    """Post-test actions."""
    # Tear down the queue dictionary created for the test.
    tear_queues(self.queues)
def tearDown(self) -> None:
    """Post-test actions."""
    # Remove the unit test directory, then tear down the test's queues.
    cleanup(self.unit_test_dir)
    tear_queues(self.queues)
def test_flask_server(self) -> None:
    """Test the Flask server endpoints against file-buffered outgoing data.

    Outgoing message/file packets are stored into encrypted buffer files
    (keyed with a test key delivered via the buffer key queue); the server
    must serve them exactly once for a valid URL token and serve nothing
    for an invalid one.
    """
    # Setup
    queues = gen_queue_dict()
    url_token_private_key = X448.generate_private_key()
    url_token_public_key = X448.derive_public_key(url_token_private_key).hex()
    url_token = 'a450987345098723459870234509827340598273405983274234098723490285'
    url_token_old = 'a450987345098723459870234509827340598273405983274234098723490286'
    url_token_invalid = 'ääääääääääääääääääääääääääääääääääääääääääääääääääääääääääääääää'
    onion_pub_key = nick_to_pub_key('Alice')
    onion_address = nick_to_onion_address('Alice')
    packet1 = b"packet1"
    packet2 = b"packet2"
    packet3 = b"packet3"

    test_key = SYMMETRIC_KEY_LENGTH * b'a'
    # Buffer sub-directory name is a keyed BLAKE2b digest of the recipient key.
    sub_dir = hashlib.blake2b(onion_pub_key, key=test_key,
                              digest_size=BLAKE2_DIGEST_LENGTH).hexdigest()

    buf_dir_m = f"{RELAY_BUFFER_OUTGOING_M_DIR}/{sub_dir}"
    buf_dir_f = f"{RELAY_BUFFER_OUTGOING_F_DIR}/{sub_dir}"
    ensure_dir(f"{buf_dir_m}/")
    ensure_dir(f"{buf_dir_f}/")

    packet_list = [packet1, packet2]
    for i, packet in enumerate(packet_list):
        TestFlaskServer.store_test_packet(packet, buf_dir_m,
                                          RELAY_BUFFER_OUTGOING_MESSAGE + f".{i}",
                                          test_key)
    TestFlaskServer.store_test_packet(packet3, buf_dir_f,
                                      RELAY_BUFFER_OUTGOING_FILE + '.0',
                                      test_key)

    def queue_delayer() -> None:
        """Place buffer key to queue after a delay."""
        time.sleep(0.1)
        queues[RX_BUF_KEY_QUEUE].put(test_key)

    threading.Thread(target=queue_delayer).start()

    # Test
    app = flask_server(queues, url_token_public_key, unit_test=True)

    # Test valid URL token returns all queued messages
    queues[URL_TOKEN_QUEUE].put((onion_pub_key, url_token_old))
    queues[URL_TOKEN_QUEUE].put((onion_pub_key, url_token))

    with app.test_client() as c:
        # Test root domain returns public key of server.
        resp = c.get('/')
        self.assertEqual(resp.data, url_token_public_key.encode())

        resp = c.get(f'/contact_request/{onion_address}')
        self.assertEqual(b'OK', resp.data)
        self.assertEqual(queues[CONTACT_REQ_QUEUE].qsize(), 1)

        # Test invalid URL token returns empty response
        resp = c.get(f'/{url_token_invalid}/messages/')
        self.assertEqual(b'', resp.data)

        resp = c.get(f'/{url_token_invalid}/files/')
        self.assertEqual(b'', resp.data)

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/messages/')
        self.assertEqual(b'packet1\npacket2', resp.data)

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/files/')
        self.assertEqual(b'packet3', resp.data)

    # Test valid URL token returns nothing as buffers are empty
    with app.test_client() as c:
        resp = c.get(f'/{url_token}/messages/')
        self.assertEqual(b'', resp.data)

    with app.test_client() as c:
        resp = c.get(f'/{url_token}/files/')
        self.assertEqual(b'', resp.data)

    # Teardown
    tear_queues(queues)
def tearDown(self):
    """Post-test actions."""
    # Tear down queues first, then remove the unit test directory.
    tear_queues(self.queues)
    cleanup(self.unit_test_dir)
def tearDown(self):
    """Post-test actions."""
    # Best-effort removal of the PSK file a test may have exported.
    # Note: the filename is a fixed literal (no placeholders), so the
    # original f-prefix was unnecessary (flake8 F541) and is dropped.
    with ignored(OSError):
        os.remove('v4dkh.psk - Give to hpcra')
    tear_queues(self.queues)
def tearDown(self):
    """Post-test actions."""
    # Remove the unit test directory, then tear down the test's queues.
    cleanup(self.unittest_dir)
    tear_queues(self.queues)
def tearDown(self):
    """Post-test actions."""
    # Restore the original requests.session the test replaced in setUp.
    requests.session = self.o_session
    tear_queues(self.queues)