class TestExtraction(unittest.TestCase):
    def setUp(self):
        self.logfile = "test.log"
        log_content = "A,B\n1,0\n0,1\n1,0\n0,1\n0,1\n0,1\n0,1\n1,0\n1,0\n1,0\n"
        self.expected = (
            "B,0.000747009,0.000920462,0.001327954,0.000904547,0.000768453,"
            "0.000752226,0.000862102,0.000706491,0.000668237,0.000992733\n"
            "A,0.000758130,0.000696718,0.000980080,0.000988899,0.000875510,"
            "0.000734843,0.000754852,0.000667378,0.000671230,0.000790935\n")
        # fix mock not supporting iterators
        self.mock_log = mock.mock_open(read_data=log_content)
        self.mock_log.return_value.__iter__ = lambda s: iter(s.readline, '')
        with mock.patch('__main__.__builtins__.open', self.mock_log):
            self.log = Log(self.logfile)
            self.log.read_log()

    def test_extraction(self):
        extract = Extract(self.log,
                          join(dirname(abspath(__file__)), "capture.pcap"),
                          "/tmp", "localhost", 4433)
        extract.parse()
        with mock.patch('__main__.__builtins__.open',
                        mock.mock_open()) as mock_file:
            mock_file.return_value.write.side_effect = \
                lambda s: self.assertTrue(
                    s.strip() in self.expected.splitlines())
            extract.write_csv('timing.csv')
def __init__(self, name, tests, out_dir, ip_address, port, interface,
             affinity=None):
    """
    Check if tcpdump is present and setup instance parameters.

    :param str name: Test name
    :param list tests: List of test tuples (name, conversation) to be run
    :param str out_dir: Directory where results should be stored
    :param str ip_address: Server IP address
    :param int port: Server port
    :param str interface: Network interface to run tcpdump on
    :param str affinity: The processor IDs to use for affinity of
        the `tcpdump` process. See taskset man page for description
        of --cpu-list option.
    """
    # first check tcpdump presence
    if not self.check_tcpdump():
        raise Exception("Could not find tcpdump, aborting timing tests")

    self.tests = tests
    self.out_dir = out_dir
    self.out_dir = self.create_output_directory(name)
    self.ip_address = ip_address
    self.port = port
    self.interface = interface
    self.log = Log(os.path.join(self.out_dir, "log.csv"))
    self.affinity = affinity
    self.tcpdump_running = True
def main():
    """Process arguments and start extraction."""
    logfile = None
    capture = None
    output = None
    ip_address = None
    port = None

    argv = sys.argv[1:]
    opts, args = getopt.getopt(argv, "l:c:h:p:o:t:", ["help"])

    for opt, arg in opts:
        if opt == '-l':
            logfile = arg
        elif opt == '-c':
            capture = arg
        elif opt == '-o':
            output = arg
        elif opt == '-h':
            ip_address = arg
        elif opt == '-p':
            port = int(arg)
        elif opt == "--help":
            help_msg()
            sys.exit(0)

    if not all([logfile, capture, output, ip_address, port]):
        raise ValueError("Some arguments are missing!")

    log = Log(logfile)
    log.read_log()
    analysis = Extract(log, capture, output, ip_address, port)
    analysis.parse()
    analysis.write_csv('timing.csv')
def main():
    """Process arguments and start extraction."""
    logfile = None
    capture = None
    output = None
    ip_address = None
    port = None
    raw_times = None

    argv = sys.argv[1:]

    if not argv:
        help_msg()
        sys.exit(1)

    opts, args = getopt.getopt(argv, "l:c:h:p:o:t:", ["help", "raw-times="])

    for opt, arg in opts:
        if opt == '-l':
            logfile = arg
        elif opt == '-c':
            capture = arg
        elif opt == '-o':
            output = arg
        elif opt == '-h':
            ip_address = arg
        elif opt == '-p':
            port = int(arg)
        elif opt == "--raw-times":
            raw_times = arg
        elif opt == "--help":
            help_msg()
            sys.exit(0)

    if raw_times and capture:
        raise ValueError(
            "Can't specify both a capture file and external timing log")

    if not all([logfile, output]):
        raise ValueError(
            "Specifying logfile and output is mandatory")

    if capture and not all([logfile, output, ip_address, port]):
        raise ValueError("Some arguments are missing!")

    log = Log(logfile)
    log.read_log()
    analysis = Extract(log, capture, output, ip_address, port, raw_times)
    analysis.parse()
    analysis.write_csv('timing.csv')
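# Illustrative sketch (not part of the module): driving Extract directly
# from Python instead of through main(). The file names and the output
# directory below ("log.csv", "times.csv", "/tmp/results") are placeholder
# assumptions, and the import path for Log is assumed; only Log, read_log,
# Extract, parse and write_csv are taken from the code above.
from tlsfuzzer.extract import Extract
from tlsfuzzer.utils.log import Log

log = Log("log.csv")  # order in which the test classes were run
log.read_log()

# variant 1: timings recovered from a tcpdump capture of the server traffic
extraction = Extract(log, "capture.pcap", "/tmp/results", "localhost", 4433)
extraction.parse()
extraction.write_csv("timing.csv")

# variant 2: timings taken from an external (raw) timing log, no capture
extraction = Extract(log, None, "/tmp/results", None, None, "times.csv")
extraction.parse()
extraction.write_csv("timing.csv")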
def setUp(self):
    self.logfile = "test.log"
    log_content = "A,B\n1,0\n0,1\n1,0\n0,1\n0,1\n0,1\n0,1\n1,0\n1,0\n1,0\n"
    self.expected = (
        "B,0.000747009,0.000920462,0.001327954,0.000904547,0.000768453,"
        "0.000752226,0.000862102,0.000706491,0.000668237,0.000992733\n"
        "A,0.000758130,0.000696718,0.000980080,0.000988899,0.000875510,"
        "0.000734843,0.000754852,0.000667378,0.000671230,0.000790935\n")
    # fix mock not supporting iterators
    self.mock_log = mock.mock_open(read_data=log_content)
    self.mock_log.return_value.__iter__ = lambda s: iter(s.readline, '')
    with mock.patch('__main__.__builtins__.open', self.mock_log):
        self.log = Log(self.logfile)
        self.log.read_log()
class TestExtraction(unittest.TestCase):
    def setUp(self):
        self.logfile = join(dirname(abspath(__file__)), "test.log")
        log_content = "A,B\n1,0\n0,1\n1,0\n0,1\n0,1\n0,1\n0,1\n1,0\n1,0\n1,0\n"
        self.expected = (
            "A,B\n"
            "0.000758130,0.000747009\n"
            "0.000696718,0.000920462\n"
            "0.000980080,0.001327954\n"
            "0.000988899,0.000904547\n"
            "0.000875510,0.000768453\n"
            "0.000734843,0.000752226\n"
            "0.000754852,0.000862102\n"
            "0.000667378,0.000706491\n"
            "0.000671230,0.000668237\n"
            "0.000790935,0.000992733\n")
        self.time_vals = "\n".join(
            ["some random header"] + list(str(i) for i in range(20)))
        # fix mock not supporting iterators
        self.mock_log = mock.mock_open(read_data=log_content)
        self.mock_log.return_value.__iter__ = lambda s: iter(s.readline, '')
        with mock.patch('__main__.__builtins__.open', self.mock_log):
            self.log = Log(self.logfile)
            self.log.read_log()

    def test_extraction_from_external_time_source(self):
        extract = Extract(self.log, None, "/tmp", None, None,
                          join(dirname(abspath(__file__)), "times-log.csv"))
        extract.parse()
        with mock.patch('__main__.__builtins__.open',
                        mock.mock_open()) as mock_file:
            mock_file.return_value.write.side_effect = \
                lambda s: self.assertIn(
                    s.strip(), self.expected.splitlines())
            extract.write_csv('timing.csv')

    def test_extraction(self):
        extract = Extract(self.log,
                          join(dirname(abspath(__file__)), "capture.pcap"),
                          "/tmp", "localhost", 4433)
        extract.parse()
        with mock.patch('__main__.__builtins__.open',
                        mock.mock_open()) as mock_file:
            mock_file.return_value.write.side_effect = \
                lambda s: self.assertIn(
                    s.strip(), self.expected.splitlines())
            extract.write_csv('timing.csv')
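# Illustrative sketch (an assumption, not shipped test data): judging from
# the time_vals fixture above (a throw-away header line followed by one raw
# timing value per line), an external timing log like "times-log.csv" could
# be produced as below. The exact format Extract expects is not shown in
# this excerpt, so treat the header text and the one-value-per-line layout
# as a guess.
with open("times-log.csv", "w") as raw_log:
    raw_log.write("raw times\n")  # header line, assumed to be skipped
    for value in [0.000747009, 0.000758130, 0.000920462, 0.000696718]:
        raw_log.write("{0:.9f}\n".format(value))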
def setUp(self):
    self.logfile = join(dirname(abspath(__file__)), "test.log")
    log_content = "A,B\n1,0\n0,1\n1,0\n0,1\n0,1\n0,1\n0,1\n1,0\n1,0\n1,0\n"
    self.expected = (
        "B,0.000747009,0.000920462,0.001327954,0.000904547,0.000768453,"
        "0.000752226,0.000862102,0.000706491,0.000668237,0.000992733\n"
        "A,0.000758130,0.000696718,0.000980080,0.000988899,0.000875510,"
        "0.000734843,0.000754852,0.000667378,0.000671230,0.000790935\n")
    self.time_vals = "\n".join(
        ["some random header"] + list(str(i) for i in range(20)))
    # fix mock not supporting iterators
    self.mock_log = mock.mock_open(read_data=log_content)
    self.mock_log.return_value.__iter__ = lambda s: iter(s.readline, '')
    with mock.patch('__main__.__builtins__.open', self.mock_log):
        self.log = Log(self.logfile)
        self.log.read_log()
class TimingRunner:
    """Repeatedly runs tests and captures timing information."""

    def __init__(self, name, tests, out_dir, ip_address, port, interface,
                 affinity=None):
        """
        Check if tcpdump is present and setup instance parameters.

        :param str name: Test name
        :param list tests: List of test tuples (name, conversation) to be run
        :param str out_dir: Directory where results should be stored
        :param str ip_address: Server IP address
        :param int port: Server port
        :param str interface: Network interface to run tcpdump on
        :param str affinity: The processor IDs to use for affinity of
            the `tcpdump` process. See taskset man page for description
            of --cpu-list option.
        """
        # first check tcpdump presence
        if not self.check_tcpdump():
            raise Exception("Could not find tcpdump, aborting timing tests")

        self.tests = tests
        self.out_dir = out_dir
        self.out_dir = self.create_output_directory(name)
        self.ip_address = ip_address
        self.port = port
        self.interface = interface
        self.log = Log(os.path.join(self.out_dir, "log.csv"))
        self.affinity = affinity
        self.tcpdump_running = True

    def generate_log(self, run_only, run_exclude, repetitions):
        """
        Creates log with number of requested shuffled runs.

        :param set run_only: List of tests to be run exclusively
        :param set run_exclude: List of tests to exclude
        :param int repetitions: How many times to repeat each test
        """
        # first filter out what is really going to be run
        actual_tests = []
        test_dict = {}
        for c_name, c_test in self.tests:
            if run_only and c_name not in run_only or c_name in run_exclude:
                continue
            if not c_name.startswith("sanity"):
                actual_tests.append(c_name)
                # also convert internal test structure to dict for lookup
                test_dict[c_name] = c_test
        self.tests = test_dict
        self.log.start_log(actual_tests)

        # generate requested number of random order test runs
        for _ in range(repetitions):
            self.log.shuffle_new_run()

        self.log.write()

    def run(self):
        """
        Run test the specified number of times and start analysis

        :return: int 0 for no difference, 1 for difference, 2 if unavailable
        """
        sniffer = self.sniff()
        status = Thread(target=self.tcpdump_status, args=(sniffer,))
        status.setDaemon(True)
        status.start()

        try:
            # run the conversations
            test_classes = self.log.get_classes()
            # prepend the conversations with few warm-up ones
            exp_len = WARM_UP + sum(1 for _ in self.log.iterate_log())
            self.log.read_log()
            queries = chain(repeat(0, WARM_UP), self.log.iterate_log())
            print("Starting timing info collection. "
                  "This might take a while...")
            for executed, index in enumerate(queries):
                if executed % 20 == 0:
                    print("Done: {0:6.2f}%".format(
                        executed * 100.0 / exp_len), end="\r")
                if self.tcpdump_running:
                    c_name = test_classes[index]
                    c_test = self.tests[c_name]

                    runner = Runner(c_test)
                    res = True
                    try:
                        runner.run()
                    except Exception:
                        print("Error while processing")
                        print(traceback.format_exc())
                        res = False

                    if not res:
                        raise AssertionError(
                            "Test must pass in order to be timed")
                else:
                    sys.exit(1)
        finally:
            # stop sniffing and give tcpdump time to write all
            # buffered packets
            self.tcpdump_running = False
            time.sleep(2)
            sniffer.terminate()
            sniffer.wait()

        # start extraction and analysis
        print("Starting extraction...")
        if self.extract():
            print("Starting analysis...")
            return self.analyse()
        return 2

    def extract(self):
        """Starts the extraction if available."""
        if self.check_extraction_availability():
            from tlsfuzzer.extract import Extract
            self.log.read_log()
            extraction = Extract(self.log,
                                 os.path.join(self.out_dir, "capture.pcap"),
                                 self.out_dir,
                                 self.ip_address,
                                 self.port)
            extraction.parse()
            extraction.write_csv(os.path.join(self.out_dir, "timing.csv"))
            return True

        print("Extraction is not available. "
              "Install required packages to enable.")
        return False

    def analyse(self):
        """
        Starts analysis if available

        :return: int 0 for no difference, 1 for difference, 2 unavailable
        """
        if self.check_analysis_availability():
            from tlsfuzzer.analysis import Analysis
            analysis = Analysis(self.out_dir)
            return analysis.generate_report()

        print("Analysis is not available. "
              "Install required packages to enable.")
        return 2

    def sniff(self):
        """Start tcpdump with filter on communication to/from server"""
        # check privileges for tcpdump to work
        if os.geteuid() != 0:
            print('WARNING: Timing tests should run with root privileges, '
                  'as it improves accuracy and might be needed for tcpdump.')

        packet_filter = "host {0} and port {1} and tcp".format(
            self.ip_address, self.port)
        flags = ['-i', self.interface, '-s', '0',
                 '--time-stamp-precision', 'nano']
        output_file = os.path.join(self.out_dir, "capture.pcap")
        cmd = []
        if self.affinity:
            cmd += ['taskset', '--cpu-list', self.affinity]
        cmd += ['tcpdump', packet_filter, '-w', output_file] + flags
        process = subprocess.Popen(cmd, stderr=subprocess.PIPE)

        # detect when tcpdump starts capturing
        self.tcpdump_running = False
        for row in iter(process.stderr.readline, b''):
            line = row.rstrip()
            if 'listening' in line.decode():
                # tcpdump is ready
                print("tcpdump ready...")
                self.tcpdump_running = True
                break
        if not self.tcpdump_running:
            print('tcpdump could not be started.'
                  ' Do you have the correct permissions?')
            sys.exit(1)
        return process

    @staticmethod
    def check_tcpdump():
        """
        Checks if tcpdump is installed.

        :return: boolean value indicating if tcpdump is present
        """
        try:
            subprocess.check_call(['tcpdump', '--version'],
                                  stderr=subprocess.PIPE)
        except (subprocess.CalledProcessError, OSError):
            # either tcpdump returned an error or the binary is missing
            return False
        return True

    def tcpdump_status(self, process):
        """
        Checks if tcpdump is running.

        Intended to be run as a separate thread.

        :param Popen process: A process with running tcpdump attached
        """
        _, stderr = process.communicate()
        if self.tcpdump_running:
            self.tcpdump_running = False
            print("tcpdump unexpectedly exited with return code {0}".format(
                process.returncode))
            if stderr:
                print(stderr.decode())

    @staticmethod
    def check_extraction_availability():
        """
        Checks if additional packages are installed so extraction can run.

        :return: bool Indicating if it is okay to run
        """
        try:
            from tlsfuzzer.extract import Extract
        except ImportError:
            return False
        return True

    @staticmethod
    def check_analysis_availability():
        """
        Checks if additional packages are installed so analysis can run.

        :return: bool Indicating if it is okay to run
        """
        try:
            from tlsfuzzer.analysis import Analysis
        except ImportError:
            return False
        return True

    def create_output_directory(self, name):
        """
        Creates a new directory in the specified path to store results in.

        :param str name: Name of the test being run
        :return: str Path to newly created directory
        """
        test_name = os.path.basename(name)
        out_dir = os.path.join(os.path.abspath(self.out_dir),
                               "{0}_{1}".format(test_name, int(time.time())))
        os.mkdir(out_dir)
        return out_dir
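# Illustrative sketch (not part of the module): how a timing-enabled test
# script might drive TimingRunner. The test names, the placeholder
# conversation objects and all paths below are assumptions; only the
# constructor signature, generate_log() and run() come from the class above.
import sys

# In a real test script these would be tlsfuzzer conversation trees
# (the same objects passed to Runner); None is only a stand-in here.
tests = [("sanity", None), ("zero_byte_padding", None)]

runner = TimingRunner("example-test", tests, out_dir="/tmp/results",
                      ip_address="localhost", port=4433, interface="lo",
                      affinity="0")
# write a log with 100 randomly shuffled runs of the non-sanity tests
runner.generate_log(run_only=set(), run_exclude=set(), repetitions=100)
# capture traffic, run the conversations, then extract and analyse timings;
# run() returns 0 for no difference, 1 for difference, 2 if unavailable
sys.exit(runner.run())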
def setUp(self):
    self.logfile = "test.log"
    self.log = Log(self.logfile)
class TestLog(unittest.TestCase):
    def setUp(self):
        self.logfile = "test.log"
        self.log = Log(self.logfile)

    @staticmethod
    def _mock_open(*args, **kwargs):
        """Fix mock not supporting iterators in all Python versions."""
        mock_open = mock.mock_open(*args, **kwargs)
        mock_open.return_value.__iter__ = lambda s: iter(s.readline, '')
        return mock_open

    def test_write_classes(self):
        with mock.patch('__main__.__builtins__.open',
                        self._mock_open()) as mock_file:
            self.log.start_log(["A", "B", "C"])
            self.log.write()
            mock_file.return_value.write.assert_called_once_with("A,B,C\r\n")
            mock_file.return_value.close.assert_called_once_with()

    def test_read_classes(self):
        with mock.patch('__main__.__builtins__.open',
                        self._mock_open(read_data="A,B,C\r\n")):
            classes = self.log.get_classes()
            self.assertEqual(classes, ["A", "B", "C"])

    def test_add_run(self):
        with mock.patch('__main__.__builtins__.open',
                        self._mock_open()) as mock_file:
            classes = ["A", "B", "C"]
            self.log.start_log(classes)
            mock_file.return_value.write.assert_called_with("A,B,C\r\n")

            # add regular runs
            runs = [0, 2, 1, 2, 0, 1, 2, 1, 0]
            self.log.add_run(runs[0:3])
            mock_file.return_value.write.assert_called_with("0,2,1\r\n")
            self.log.add_run(runs[3:6])
            mock_file.return_value.write.assert_called_with("2,0,1\r\n")
            self.log.add_run(runs[6:9])
            mock_file.return_value.write.assert_called_with("2,1,0\r\n")

            self.log.write()
            mock_file.return_value.close.assert_called_once()

    def test_read_run(self):
        runs = [0, 2, 1, 2, 0, 1, 2, 1, 0]
        i = 0
        with mock.patch(
                '__main__.__builtins__.open',
                self._mock_open(
                    read_data="A,B,C\r\n0,2,1\r\n2,0,1\r\n2,1,0\r\n")):
            for index in self.log.iterate_log():
                self.assertEqual(index, runs[i])
                i += 1
            self.assertEqual(i, len(runs))

    def test_shuffled_run(self):
        def check_indexes(class_count, line):
            indexes = line.strip().split(',')
            # every written index must fall in the valid class range
            self.assertTrue(
                all(int(i) in range(0, class_count) for i in indexes))

        with mock.patch('__main__.__builtins__.open',
                        self._mock_open()) as mock_file:
            classes = ["A", "B", "C"]
            self.log.start_log(classes)
            mock_file.return_value.write.side_effect = \
                lambda s: check_indexes(len(classes), s)
            num = 3
            for _ in range(num):
                self.log.shuffle_new_run()
            self.assertEqual(mock_file.return_value.write.call_count, 4)
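# Illustrative sketch (not part of the test file): the Log round trip the
# tests above exercise. The file name "log.csv" is a placeholder and the
# import path for Log is assumed; start_log, add_run, shuffle_new_run,
# write, read_log, get_classes and iterate_log are the methods used above.
from tlsfuzzer.utils.log import Log

log = Log("log.csv")
log.start_log(["A", "B", "C"])   # header row with the test class names
log.add_run([0, 2, 1])           # one explicitly ordered run
log.shuffle_new_run()            # one randomly ordered run
log.write()                      # finish the log (the tests expect the
                                 # file to be closed at this point)

log.read_log()
print(log.get_classes())         # ['A', 'B', 'C']
for index in log.iterate_log():  # class indexes, row by row
    print(index)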