Example #1
 def test_parse_output_with_subtests(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'some test: [1, 2, 3, 4, 5]',
         'other test = else: [6, 7, 8, 9, 10]',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
         test._filter_output(output)
         self.assertEqual(test.parse_output(output),
             {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                 'values': [i for i in range(1, 20)]}})
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, '')
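The snippets in these examples appear to be test methods lifted out of WebKit's webkitpy performance-test suite across several revisions, so they omit their enclosing test class and imports. A minimal sketch of the imports they rely on, assuming webkitpy's usual module layout (exact paths may differ between revisions, and helpers such as MockPort and MockPrinter are small stubs defined in the test module itself):

# Sketch only: module paths follow webkitpy conventions and may vary by
# revision; MockPort and MockPrinter are stubs local to the test file.
import json
import logging
import math
import StringIO  # Python 2; some older snippets below call StringIO.StringIO()

from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest, PerfTest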
Example #2
 def test_parse_output_with_failing_line(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'some-unrecognizable-line',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
         test._filter_output(output)
         self.assertEqual(test.parse_output(output), None)
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #3
    def test_parse_output_with_failing_line(self):
        output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

some-unrecognizable-line

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test._filter_output(output)
            self.assertEqual(test.parse_output(output), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #4
    def test_parse_output_with_failing_line(self):
        output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

some-unrecognizable-line

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""",
                              image=None,
                              image_hash=None,
                              audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test',
                            '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertFalse(test._run_with_driver(None, None))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #5
 def test_parse_output(self):
     output = DriverOutput('\n'.join([
         'Running 20 times', 'Ignoring warm-up run (1115)', '', 'avg 1100',
         'median 1101', 'stdev 11', 'min 1080', 'max 1120'
     ]),
                           image=None,
                           image_hash=None,
                           audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest('some-test', '/path/some-dir/some-test')
         self.assertEqual(
             test.parse_output(output), {
                 'some-test': {
                     'avg': 1100.0,
                     'median': 1101.0,
                     'min': 1080.0,
                     'max': 1120.0,
                     'stdev': 11.0,
                     'unit': 'ms'
                 }
             })
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(
         actual_logs,
         'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n'
     )
Example #6
 def test_parse_output(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output),
             {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                 'values': [i for i in range(1, 20)]}})
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example #7
    def test_parse_output_with_description(self):
        output = DriverOutput("""
Description: this is a test description.

:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
        self.assertEqual(test.description(), 'this is a test description.')
Example #8
    def test_parse_output_with_subtests(self):
        output = DriverOutput("""
Description: this is a test description.
some test:Time -> [1, 2, 3, 4, 5] ms
some other test = else:Time -> [6, 7, 8, 9, 10] ms
some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms

:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertTrue(test.run(10))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        subtests = test._metrics
        self.assertEqual(map(lambda test: test['name'], subtests), ['some test', 'some other test = else',
            'Array Construction, []', 'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
            'Dojo - div:nth-child(2n+1)', 'Dojo - div > div', 'Dojo - div ~ div', None])

        some_test_metrics = subtests[0]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

        some_other_test_metrics = subtests[1]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_other_test_metrics), ['Time', 'Malloc'])
        self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
        self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
        self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
        self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)

        main_metrics = subtests[len(subtests) - 1]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
        self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
        self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
RESULT some-dir: some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example #9
 def _assert_failed_on_line(self, output_text, expected_log):
     output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
         test.run_single = lambda driver, path, time_out_ms: output
         self.assertFalse(test._run_with_driver(None, None))
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, expected_log)
Example #10
 def test_parse_output_with_description(self):
     output = DriverOutput('\n'.join([
         'Description: this is a test description.',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
     self.assertTrue(test.parse_output(output))
     self.assertEqual(test.description(), 'this is a test description.')
Example #11
    def test_ignored_stderr_lines(self):
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
Unknown option: --foo-bar
Should not be ignored
[WARNING:proxy_service.cc] bad moon a-rising
[WARNING:chrome.cc] Something went wrong
[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
[ERROR:main.cc] The sky has fallen""")
        test._filter_output(output_with_lines_to_ignore)
        self.assertEqual(output_with_lines_to_ignore.error,
                         "Should not be ignored\n"
                         "[WARNING:chrome.cc] Something went wrong\n"
                         "[ERROR:main.cc] The sky has fallen")
Example #12
    def _assert_failed_on_line(self, output_text, expected_log):
        output = DriverOutput(output_text,
                              image=None,
                              image_hash=None,
                              audio=None)
        with OutputCapture(level=logging.INFO) as captured:
            test = PerfTest(MockPort(), 'some-test',
                            '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertFalse(test._run_with_driver(None, None))

        self.assertEqual(captured.stdout.getvalue(), '')
        self.assertEqual(captured.stderr.getvalue(), '')
        self.assertEqual(captured.root.log.getvalue(), expected_log)
Example #13
    def test_ignored_stderr_lines(self):
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
Unknown option: --foo-bar
Should not be ignored
[WARNING:proxy_service.cc] bad moon a-rising
[WARNING:chrome.cc] Something went wrong
[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
[ERROR:main.cc] The sky has fallen""")
        test._filter_output(output_with_lines_to_ignore)
        self.assertEqual(output_with_lines_to_ignore.error,
                         "Should not be ignored\n"
                         "[WARNING:chrome.cc] Something went wrong\n"
                         "[ERROR:main.cc] The sky has fallen")
Example #14
    def test_parse_output_with_subtests_and_total(self):
        output = DriverOutput("""
:Time:Total -> [2324, 2328, 2345, 2314, 2312] ms
EmberJS-TodoMVC:Time:Total -> [1462, 1473, 1490, 1465, 1458] ms
EmberJS-TodoMVC/a:Time -> [1, 2, 3, 4, 5] ms
BackboneJS-TodoMVC:Time -> [862, 855, 855, 849, 854] ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertTrue(test.run(10))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        subtests = test._metrics
        self.assertEqual(map(lambda test: test['name'], subtests), [None, 'EmberJS-TodoMVC', 'EmberJS-TodoMVC/a', 'BackboneJS-TodoMVC'])

        main_metrics = subtests[0]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
        self.assertEqual(main_metrics[0].aggregator(), 'Total')
        self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
        self.assertEqual(main_metrics[0].flattened_iteration_values(), [2324, 2328, 2345, 2314, 2312] * 4)

        some_test_metrics = subtests[1]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
        self.assertEqual(some_test_metrics[0].aggregator(), 'Total')
        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC'])
        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1462, 1473, 1490, 1465, 1458] * 4)

        some_test_metrics = subtests[2]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
        self.assertEqual(some_test_metrics[0].aggregator(), None)
        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a'])
        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

        some_test_metrics = subtests[3]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
        self.assertEqual(some_test_metrics[0].aggregator(), None)
        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'BackboneJS-TodoMVC'])
        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [862, 855, 855, 849, 854] * 4)

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, """RESULT some-dir: some-test: Time= 2324.6 ms
median= 2324.0 ms, stdev= 12.1326007105 ms, min= 2312.0 ms, max= 2345.0 ms
""")
Example #15
    def test_parse_output_with_ignored_stderr(self):
        output = DriverOutput(":Time -> [1080, 1120, 1095, 1101, 1104] ms",
                              image=None,
                              image_hash=None,
                              audio=None,
                              error="""
Jan 22 14:09:24  WebKitTestRunner[1296] <Error>: CGContextSetFillColorWithColor: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24  WebKitTestRunner[1296] <Error>: CGContextSetStrokeColorWithColor: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24  WebKitTestRunner[1296] <Error>: CGContextGetCompositeOperation: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24  WebKitTestRunner[1296] <Error>: CGContextSetCompositeOperation: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24  WebKitTestRunner[1296] <Error>: CGContextFillRects: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
""")

        class MockPortWithSierraName(MockPort):
            def name(self):
                return "mac-sierra"

        with OutputCapture(level=logging.INFO) as captured:
            test = PerfTest(MockPortWithSierraName(), 'some-test',
                            '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)

        self.assertEqual(captured.stdout.getvalue(), '')
        self.assertEqual(captured.stderr.getvalue(), '')
        self.assertEqual(
            captured.root.log.getvalue(), """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example #16
 def test_parse_output(self):
     printer = MockPrinter()
     buildbot_output = StringIO.StringIO()
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'avg 1100',
         'median 1101',
         'stdev 11',
         'min 1080',
         'max 1120']), image=None, image_hash=None, audio=None)
     test = PerfTest('some-test', 'some-dir/some-test', '/path/some-dir/some-test')
     self.assertEqual(test.parse_output(output, printer, buildbot_output),
         {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
     self.assertEqual(printer.written_lines, [])
Example #17
    def test_parse_output_with_subtests(self):
        output = DriverOutput("""
Running 20 times
some test: [1, 2, 3, 4, 5]
other test = else: [6, 7, 8, 9, 10]
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""",
                              image=None,
                              image_hash=None,
                              audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test',
                            '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
Example #18
 def run_test(self, test_name):
     runner, port = self.create_runner()
     tests = [
         PerfTest(port, test_name,
                  runner._host.filesystem.join('some-dir', test_name))
     ]
     return runner._run_tests_set(tests) == 0
Example #19
    def test_ignored_stderr_lines(self):
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        ignored_lines = [
            "Unknown option: --foo-bar",
            "[WARNING:proxy_service.cc] bad moon a-rising",
            "[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/",
        ]
        for line in ignored_lines:
            self.assertTrue(test._should_ignore_line_in_stderr(line))

        non_ignored_lines = [
            "Should not be ignored",
            "[WARNING:chrome.cc] Something went wrong",
            "[ERROR:main.cc] The sky has fallen",
        ]
        for line in non_ignored_lines:
            self.assertFalse(test._should_ignore_line_in_stderr(line))
Example #20
    def test_ignored_stderr_lines(self):
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        ignored_lines = [
            "Unknown option: --foo-bar",
            "[WARNING:proxy_service.cc] bad moon a-rising",
            "[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/",
        ]
        for line in ignored_lines:
            self.assertTrue(test._should_ignore_line_in_stderr(line))

        non_ignored_lines = [
            "Should not be ignored",
            "[WARNING:chrome.cc] Something went wrong",
            "[ERROR:main.cc] The sky has fallen",
        ]
        for line in non_ignored_lines:
            self.assertFalse(test._should_ignore_line_in_stderr(line))
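Examples #19 and #20 exercise the same stderr-filtering predicate. As an illustration only, a pattern-based filter of the following shape would satisfy those assertions; the ignore patterns here are inferred from the fixtures above, not taken from perftest.py, whose real list may differ:

import re

# Illustrative sketch, not the real implementation: the ignore patterns
# are inferred from the ignored_lines fixture above.
_IGNORED_STDERR_PATTERNS = [
    re.compile(r'^Unknown option:'),
    re.compile(r'^\[WARNING:proxy_service\.cc\]'),
    re.compile(r'^\[INFO:SkFontHost_android\.cpp'),
]


def _should_ignore_line_in_stderr(line):
    return any(pattern.search(line) for pattern in _IGNORED_STDERR_PATTERNS)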
Example #21
    def test_parse_output_with_description(self):
        output = DriverOutput("""
Description: this is a test description.

Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
        self.assertEqual(test.description(), 'this is a test description.')
Example #22
 def test_parse_output_with_failing_line(self):
     printer = MockPrinter()
     buildbot_output = StringIO.StringIO()
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'some-unrecognizable-line',
         '',
         'avg 1100',
         'median 1101',
         'stdev 11',
         'min 1080',
         'max 1120']), image=None, image_hash=None, audio=None)
     test = PerfTest('some-test', 'some-dir/some-test', '/path/some-dir/some-test')
     self.assertEqual(test.parse_output(output, printer, buildbot_output), None)
     self.assertEqual(printer.written_lines, ['some-unrecognizable-line'])
Example #23
    def test_parse_output_with_description(self):
        output = DriverOutput("""
Description: this is a test description.

Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
        self.assertEqual(test.description(), 'this is a test description.')
Example #24
 def _tests_for_runner(self, runner, test_names):
     filesystem = runner._host.filesystem
     tests = []
     for test in test_names:
         path = filesystem.join(runner._base_path, test)
         dirname = filesystem.dirname(path)
         tests.append(PerfTest(runner._port, test, path))
     return tests
Example #25
    def test_compute_statistics(self):
        def compute_statistics(values):
            statistics = PerfTest.compute_statistics(map(lambda x: float(x), values))
            return json.loads(json.dumps(statistics))

        statistics = compute_statistics([10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11])
        self.assertEqual(sorted(statistics.keys()), ['avg', 'max', 'median', 'min', 'stdev'])
        self.assertEqual(statistics['avg'], 10.5)
        self.assertEqual(statistics['min'], 1)
        self.assertEqual(statistics['max'], 20)
        self.assertEqual(statistics['median'], 10.5)
        self.assertEqual(compute_statistics([8, 9, 10, 11, 12])['avg'], 10)
        self.assertEqual(compute_statistics([8, 9, 10, 11, 12] * 4)['avg'], 10)
        self.assertEqual(compute_statistics([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])['avg'], 10)
        self.assertEqual(PerfTest.compute_statistics([1, 5, 2, 8, 7])['median'], 5)
        self.assertEqual(PerfTest.compute_statistics([1, 6, 2, 8, 7, 2])['median'], 4)
        self.assertAlmostEqual(statistics['stdev'], math.sqrt(35))
        self.assertAlmostEqual(compute_statistics([1, 2, 3, 4, 5, 6])['stdev'], math.sqrt(3.5))
        self.assertAlmostEqual(compute_statistics([4, 2, 5, 8, 6])['stdev'], math.sqrt(5))
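The expected values in Example #25 pin down the definitions PerfTest.compute_statistics is tested against: for the values 1 through 20 the asserted stdev is sqrt(665/19) = sqrt(35), i.e. the sample (Bessel-corrected) standard deviation, and the median of an even-length list is the mean of the two middle values. A quick cross-check of those expectations using only Python 3's standard library (a sanity check, not part of webkitpy):

import math
import statistics  # Python 3 standard library, used here only to cross-check

values = [float(x) for x in range(1, 21)]
assert statistics.mean(values) == 10.5
assert statistics.median(values) == 10.5
assert math.isclose(statistics.stdev(values), math.sqrt(35))  # sample stdev (n - 1)
assert math.isclose(statistics.stdev([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), math.sqrt(3.5))
assert statistics.median([1, 6, 2, 8, 7, 2]) == 4  # mean of the two middle values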
Example #26
 def _tests_for_runner(self, runner, test_names):
     filesystem = runner._host.filesystem
     tests = []
     for test in test_names:
         path = filesystem.join(runner._base_path, test)
         if test.startswith('inspector/'):
             tests.append(ChromiumStylePerfTest(runner._port, test, path))
         else:
             tests.append(PerfTest(runner._port, test, path))
     return tests
Example #27
 def test_parse_output_with_failing_line(self):
     output = DriverOutput('\n'.join([
         'Running 20 times', 'Ignoring warm-up run (1115)', '',
         'some-unrecognizable-line', '', 'avg 1100', 'median 1101',
         'stdev 11', 'min 1080', 'max 1120'
     ]),
                           image=None,
                           image_hash=None,
                           audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest('some-test', '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output), None)
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
Example #28
 def test_parse_output(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'avg 1100',
         'median 1101',
         'stdev 11',
         'min 1080',
         'max 1120']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest('some-test', '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output),
             {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example #29
 def test_parse_output_with_failing_line(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'some-unrecognizable-line',
         '',
         'avg 1100',
         'median 1101',
         'stdev 11',
         'min 1080',
         'max 1120']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest('some-test', '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output), None)
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
Example #30
 def test_parse_output_with_subtests(self):
     output = DriverOutput('\n'.join([
         'Running 20 times', 'some test: [1, 2, 3, 4, 5]',
         'other test = else: [6, 7, 8, 9, 10]', '', 'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms', 'median 1101 ms', 'stdev 11 ms', 'min 1080 ms',
         'max 1120 ms'
     ]),
                           image=None,
                           image_hash=None,
                           audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test',
                         '/path/some-dir/some-test')
         self.assertEqual(
             test.parse_output(output), {
                 'some-test': {
                     'avg': 1100.0,
                     'median': 1101.0,
                     'min': 1080.0,
                     'max': 1120.0,
                     'stdev': 11.0,
                     'unit': 'ms',
                     'values': [i for i in range(1, 20)]
                 }
             })
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(
         actual_logs,
         'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n'
     )
Example #31
 def test_parse_output_with_failing_line(self):
     output = DriverOutput('\n'.join([
         'Running 20 times', 'Ignoring warm-up run (1115)', '',
         'some-unrecognizable-line', '', 'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms', 'median 1101 ms', 'stdev 11 ms', 'min 1080 ms',
         'max 1120 ms'
     ]),
                           image=None,
                           image_hash=None,
                           audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test',
                         '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output), None)
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
Example #32
    def test_parse_output(self):
        output = DriverOutput("""
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example #33
    def test_parse_output(self):
        output = DriverOutput("""
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""",
                              image=None,
                              image_hash=None,
                              audio=None)
        with OutputCapture(level=logging.INFO) as captured:
            test = PerfTest(MockPort(), 'some-test',
                            '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)

        self.assertEqual(captured.stdout.getvalue(), '')
        self.assertEqual(captured.stderr.getvalue(), '')
        self.assertEqual(
            captured.root.log.getvalue(), """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example #34
 def compute_statistics(values):
     statistics = PerfTest.compute_statistics(map(lambda x: float(x), values))
     return json.loads(json.dumps(statistics))
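For reference, a hypothetical use of the helper in Example #34: it only forces the dictionary returned by PerfTest.compute_statistics through a JSON round-trip so the numbers compare as plain floats. The expected keys and values below are taken from Examples #25 and #33:

# Hypothetical usage; assumes PerfTest and json are imported as in the
# import sketch after Example #1.
stats = compute_statistics([1080, 1120, 1095, 1101, 1104])
assert sorted(stats.keys()) == ['avg', 'max', 'median', 'min', 'stdev']
assert stats['avg'] == 1100.0
assert stats['median'] == 1101.0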