def test_log_file(self):
    """When swift-repo is set, log is tied to Git branch and revision."""
    # No output directory -> no log file at all.
    self.assertIsNone(
        BenchmarkDriver(
            Stub(output_dir=None, tests='/bin/'), tests=['ignored']
        ).log_file)

    now = time.strftime('%Y%m%d%H%M%S', time.localtime())
    driver = BenchmarkDriver(Stub(
        output_dir='/path',
        tests='/bin/',
        optimization='Suffix',
        swift_repo=None,
    ), tests=['ignored'])
    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use `assertEqual`.
    self.assertEqual(driver.log_file,
                     '/path/Benchmark_Suffix-' + now + '.log')

    # With a swift repo, branch and short hash are queried via git and
    # become part of the log path/name.
    r = '/repo/'
    subprocess_mock = SubprocessMock(responses=[
        ('git -C {0} rev-parse --abbrev-ref HEAD'.format(r).split(' '),
         'branch\n'),
        ('git -C {0} rev-parse --short HEAD'.format(r).split(' '),
         'short_hash\n'),
    ])
    driver = BenchmarkDriver(
        Stub(output_dir='/log/', tests='', optimization='S', swift_repo=r),
        tests=['ignored'],
        _subprocess=subprocess_mock)
    self.assertEqual(driver.log_file,
                     '/log/branch/Benchmark_S-' + now + '-short_hash.log')
    subprocess_mock.assert_called_all_expected()
def test_log_results(self):
    """Create log directory if it doesn't exist and write the log file."""

    def assert_log_written(out, log_file, content):
        self.assertEqual(
            out.getvalue(), 'Logging results to: ' + log_file + '\n')
        # Plain 'r': the 'U' flag is rejected by Python 3.11+ and
        # universal newlines are the default in Python 3 anyway.
        with open(log_file, 'r') as f:
            text = f.read()
        # Compare against the caller-supplied content rather than a
        # duplicated literal (the parameter was previously unused).
        self.assertEqual(text, content)

    import tempfile  # setUp
    import shutil  # tearDown

    # Create the temp dir before entering `try` so the `finally` cleanup
    # cannot hit a NameError if mkdtemp itself fails.
    temp_dir = tempfile.mkdtemp()
    try:
        log_dir = os.path.join(temp_dir, 'sub-dir/')
        driver = BenchmarkDriver(Stub(), tests=[''])

        # First write must create the missing directory.
        self.assertFalse(os.path.exists(log_dir))
        content = 'formatted output'
        log_file = os.path.join(log_dir, '1.log')
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)

        # Second write reuses the now-existing directory.
        self.assertTrue(os.path.exists(log_dir))
        log_file = os.path.join(log_dir, '2.log')
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)
    finally:
        shutil.rmtree(temp_dir)  # tearDown
def test_run_and_log(self):
    """run_and_log prints quantile results as CSV or a console table."""

    def mock_run(test):
        self.assertEqual(test, 'b1')
        return PerformanceTestResult(
            '3,b1,5,101,1,1,1,1,888'.split(','),
            quantiles=True, delta=True, memory=True)

    driver = BenchmarkDriver(tests=['b1'], args=Stub(output_dir=None))
    driver.run_independent_samples = mock_run  # patching

    with captured_output() as (out, _):
        log = driver.run_and_log()

    header = '#,TEST,SAMPLES,MIN(μs),Q1(μs),MEDIAN(μs),Q3(μs),MAX(μs),' +\
        'MAX_RSS(B)\n'
    csv_log = '3,b1,5,101,102,103,104,105,888\n'
    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use `assertEqual`.
    self.assertEqual(log, None)
    self.assertEqual(
        out.getvalue(),
        header + csv_log + '\n' +
        'Total performance tests executed: 1\n')

    with captured_output() as (out, _):
        log = driver.run_and_log(csv_console=False)

    # csv_console=False returns the CSV and prints a formatted table.
    self.assertEqual(log, header + csv_log)
    # NOTE(review): the table's column spacing below looks collapsed to
    # single spaces — verify against the formatter's fixed-width output.
    self.assertEqual(
        out.getvalue(),
        ' # TEST SAMPLES MIN(μs) Q1(μs)' +
        ' MEDIAN(μs) Q3(μs) MAX(μs) MAX_RSS(B)\n' +
        ' 3 b1 5 101 102' +
        ' 103 104 105 888\n' +
        '\n' +
        'Total performance tests executed: 1\n')
def test_run_and_log(self):
    """run_and_log prints legacy-format results as CSV or console table."""

    def mock_run(test):
        self.assertEqual(test, 'b1')
        return PerformanceTestResult(
            '3,b1,1,123,123,123,0,123,888'.split(','))

    driver = BenchmarkDriver(tests=['b1'], args=Stub(output_dir=None))
    driver.run_independent_samples = mock_run  # patching

    with captured_output() as (out, _):
        log = driver.run_and_log()

    csv_log = '3,b1,1,123,123,123,0,123,888\n'
    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use `assertEqual`.
    self.assertEqual(log, None)
    self.assertEqual(
        out.getvalue(),
        '#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),' +
        'MAX_RSS(B)\n' +
        csv_log + '\n' +
        'Total performance tests executed: 1\n')

    with captured_output() as (out, _):
        log = driver.run_and_log(csv_console=False)

    # csv_console=False returns the CSV and prints a formatted table.
    self.assertEqual(log, csv_log)
    # NOTE(review): the table's column spacing below looks collapsed to
    # single spaces — verify against the formatter's fixed-width output.
    self.assertEqual(
        out.getvalue(),
        ' # TEST SAMPLES MIN(μs) MAX(μs)' +
        ' MEAN(μs) SD(μs) MEDIAN(μs) MAX_RSS(B)\n' +
        ' 3 b1 1 123 123' +
        ' 123 0 123 888\n' +
        '\n' +
        'Total performance tests executed: 1\n')
def test_run_bechmarks(self):
    """run_benchmarks prints the CSV log followed by a totals line."""
    # NOTE(review): the "bechmarks" typo in the method name is kept —
    # renaming it would change the test ID used by unittest discovery
    # and any test filters.

    def mock_run(test):
        self.assertEqual(test, 'b1')
        return PerformanceTestResult(
            '3,b1,1,123,123,123,0,123,888'.split(','))

    driver = Stub(tests=['b1'])
    driver.run_independent_samples = mock_run
    run_benchmarks = Benchmark_Driver.run_benchmarks

    with captured_output() as (out, _):
        run_benchmarks(driver)

    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use `assertEqual`.
    self.assertEqual('\n'.join("""
#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),MAX_RSS(B)
3,b1,1,123,123,123,0,123,888
Totals,1
""".splitlines()[1:]),
        out.getvalue())  # removes 1st \n from multiline string
def test_log_file(self):
    """When swift-repo is set, log is tied to Git branch and revision."""
    # Without an output directory there is nothing to log to.
    no_output = BenchmarkDriver(
        Stub(output_dir=None, tests="/bin/"), tests=["ignored"]
    )
    self.assertIsNone(no_output.log_file)

    timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    args = Stub(
        output_dir="/path",
        tests="/bin/",
        optimization="Suffix",
        swift_repo=None,
    )
    driver = BenchmarkDriver(
        args, tests=["ignored"], _subprocess=self.subprocess_mock
    )
    self.assertEqual(
        driver.log_file, "/path/Benchmark_Suffix-{0}.log".format(timestamp)
    )

    # With a swift repo, the branch and short hash reported by git are
    # baked into the log path and file name.
    repo = "/repo/"
    git_mock = SubprocessMock(
        responses=[
            (
                "git -C {0} rev-parse --abbrev-ref HEAD".format(repo).split(" "),
                "branch\n",
            ),
            (
                "git -C {0} rev-parse --short HEAD".format(repo).split(" "),
                "short_hash\n",
            ),
        ]
    )
    driver = BenchmarkDriver(
        Stub(output_dir="/log/", tests="", optimization="S", swift_repo=repo),
        tests=["ignored"],
        _subprocess=git_mock,
    )
    self.assertEqual(
        driver.log_file,
        "/log/branch/Benchmark_S-{0}-short_hash.log".format(timestamp),
    )
    git_mock.assert_called_all_expected()
def test_run_and_log(self):
    """run_and_log prints quantile results as CSV or a console table."""

    def fake_runner(test):
        self.assertEqual(test, "b1")
        return PerformanceTestResult(
            "3,b1,5,101,1,1,1,1,888".split(","),
            quantiles=True,
            delta=True,
            memory=True,
        )

    driver = BenchmarkDriver(tests=["b1"], args=Stub(output_dir=None))
    driver.run_independent_samples = fake_runner  # patching

    header = (
        "#,TEST,SAMPLES,MIN(μs),Q1(μs),MEDIAN(μs),Q3(μs),MAX(μs)," +
        "MAX_RSS(B)\n"
    )
    csv_log = "3,b1,5,101,102,103,104,105,888\n"
    footer = "Total performance tests executed: 1\n"

    # Default mode: CSV is echoed to the console and nothing is returned.
    with captured_output() as (out, _):
        log = driver.run_and_log()
    self.assertEqual(log, None)
    self.assertEqual(out.getvalue(), header + csv_log + "\n" + footer)

    # csv_console=False: formatted table on console, CSV returned.
    with captured_output() as (out, _):
        log = driver.run_and_log(csv_console=False)
    self.assertEqual(log, header + csv_log)
    self.assertEqual(
        out.getvalue(),
        " # TEST SAMPLES MIN(μs)"
        + " Q1(μs) MEDIAN(μs) Q3(μs) MAX(μs) MAX_RSS(B)\n"
        + " 3 b1 5 101"
        + " 102 103 104 105 888\n"
        + "\n"
        + footer,
    )
def test_log_results(self):
    """Create log directory if it doesn't exist and write the log file."""

    def assert_log_written(out, log_file, content):
        self.assertEqual(
            out.getvalue(), "Logging results to: " + log_file + "\n"
        )
        if sys.version_info < (3, 0):
            openmode = "rU"
        else:
            openmode = "r"  # 'U' mode is deprecated in Python 3
        with open(log_file, openmode) as f:
            text = f.read()
        # Compare against the caller-supplied content rather than a
        # duplicated literal (the parameter was previously unused).
        self.assertEqual(text, content)

    import tempfile  # setUp
    import shutil  # tearDown

    # Create the temp dir before entering `try` so the `finally` cleanup
    # cannot hit a NameError if mkdtemp itself fails.
    temp_dir = tempfile.mkdtemp()
    try:
        log_dir = os.path.join(temp_dir, "sub-dir/")
        driver = BenchmarkDriver(Stub(), tests=[""])

        # First write must create the missing directory.
        self.assertFalse(os.path.exists(log_dir))
        content = "formatted output"
        log_file = os.path.join(log_dir, "1.log")
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)

        # Second write reuses the now-existing directory.
        self.assertTrue(os.path.exists(log_dir))
        log_file = os.path.join(log_dir, "2.log")
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)
    finally:
        shutil.rmtree(temp_dir)  # tearDown
def test_supports_verbose_output(self):
    """A verbose doctor reports which tests it is checking."""
    mock_driver = BenchmarkDriverMock(tests=['B1', 'B2'])
    mock_driver.verbose = True
    with captured_output() as (out, _):
        BenchmarkDoctor(Stub(verbose=True), mock_driver)
    self.assert_contains(['Checking tests: B1, B2'], out.getvalue())
def setUp(self):
    """Start each test with a fresh args stub and an empty doctor log."""
    super(TestBenchmarkDoctor, self).setUp()
    self.args = Stub(verbose=False)
    handler = self._doctor_log_handler
    handler.reset()
    self.logs = handler.messages
def _PTR(min=700, mem_pages=1000, setup=None):
    """Create PerformanceTestResult Stub."""
    # `samples` only needs a `.min` attribute for the code under test.
    samples = Stub(min=min)
    return Stub(samples=samples, mem_pages=mem_pages, setup=setup)
def _PTR(min=700, mem_pages=1000):
    """Create PerformanceTestResult Stub."""
    return Stub(
        min=min,
        mem_pages=mem_pages,
    )