def main(): test_cases, _ = gtest_fake_base.parse_args(TESTS, 0) for test_case in test_cases: print gtest_fake_base.get_test_output(test_case, False) print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return 1
def main(): parser = optparse.OptionParser() parser.add_option('--gtest_list_tests', action='store_true') parser.add_option('--gtest_filter') options, args = parser.parse_args() if args: parser.error('Failed to process args %s' % args) if options.gtest_list_tests: for fixture, cases in TESTS.iteritems(): print '%s.' % fixture for case in cases: print ' ' + case print ' YOU HAVE 2 tests with ignored failures (FAILS prefix)' print '' return 0 if options.gtest_filter: # Simulate running one test. print 'Note: Google Test filter = %s\n' % options.gtest_filter print gtest_fake_base.get_test_output(options.gtest_filter) print gtest_fake_base.get_footer(1, 1) return 0 for fixture, cases in TESTS.iteritems(): for case in cases: print gtest_fake_base.get_test_output('%s.%s' % (fixture, case)) print gtest_fake_base.get_footer(TOTAL, TOTAL) return 0
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) duration = int(args[0]) for i in test_cases: time.sleep(float(duration) / 1000.) print gtest_fake_base.get_test_output(i, False, duration=str(duration)) print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return 0
def main():
  # Fake browser_tests-style gtest binary: besides the standard gtest flags it
  # accepts several options only to mimic the real binary's command line, and
  # it writes canned XML result files into --gtest_output.
  parser = optparse.OptionParser()
  parser.add_option('--gtest_list_tests', action='store_true')
  parser.add_option('--gtest_filter')
  parser.add_option('--gtest_output')
  # Ignored, exists to fake browser_tests.
  parser.add_option('--gtest_print_time')
  # Ignored, exists to fake browser_tests.
  parser.add_option('--lib')
  parser.add_option('--trim-xml', action='store_true')
  options, args = parser.parse_args()
  if args:
    parser.error('Failed to process args %s' % args)
  if options.trim_xml:
    # Special mode: post-process the XML files and exit.
    trim_xml()
    return 0
  if options.gtest_list_tests:
    for fixture, cases in TESTS.iteritems():
      print '%s.' % fixture
      for case in cases:
        print ' ' + case
    # Print junk.
    print ' YOU HAVE 2 tests with ignored failures (FAILS prefix)'
    print ''
    return 0
  if options.gtest_filter:
    # Simulate running one test.
    print 'Note: Google Test filter = %s\n' % options.gtest_filter
    assert 'PRE_' not in options.gtest_filter
    # To mimic better the actual output, it should output the PRE_ tests but
    # it's not important for this test.
    print gtest_fake_base.get_test_output(options.gtest_filter)
    print gtest_fake_base.get_footer(1, 1)
    # Copy the canned XML named after the case into --gtest_output.
    case = options.gtest_filter.split('.', 1)[1]
    filename = case + '.xml'
    out = os.path.join(options.gtest_output, filename)
    if case != 'Javascript':
      # Every case except 'Javascript' always succeeds (implicit return None
      # makes the process exit 0).
      shutil.copyfile(os.path.join(ROOT_DIR, filename), out)
      return
    # Fails on first run, succeeds on the second.
    # NOTE(review): assumes ROOT_DIR contains both Javascript_fail.xml and
    # Javascript.xml fixtures — confirm.
    filename = case + '_fail.xml'
    out = os.path.join(options.gtest_output, filename)
    if not os.path.exists(out):
      shutil.copyfile(os.path.join(ROOT_DIR, filename), out)
      return 1
    else:
      filename = case + '.xml'
      out = os.path.join(options.gtest_output, filename)
      shutil.copyfile(os.path.join(ROOT_DIR, filename), out)
      return 0
  # No recognized mode was selected.
  return 2
def main(): test_cases, _ = gtest_fake_base.parse_args(TESTS, 0) result = 0 for test_case in test_cases: # Make Baz.Fail fail. should_fail = test_case == 'Baz.Fail' result = result or int(should_fail) print gtest_fake_base.get_test_output(test_case, should_fail) print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return result
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) temp_dir = args[0] result = 0 for test_case in test_cases: filename = os.path.join(temp_dir, test_case) # Fails on first run, succeeds on the second. should_fail = not os.path.isfile(filename) result = result or int(should_fail) print gtest_fake_base.get_test_output(test_case, should_fail) if should_fail: with open(filename, "wb") as f: f.write("bang") print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return result
def test_simple_fail(self):
  """Checks console output and results file when Baz.Fail fails 3 times."""
  out, err, return_code = RunTest(
      "gtest_fake_fail.py", ["--result", self.filename])
  self.assertEqual(1, return_code)
  # Six progress lines come first.
  expected_out_re = [r"\[\d/\d\] \d\.\d\ds .+"] * 6
  # Then the lone rerun of the failing test...
  expected_out_re += [
    re.escape("Note: Google Test filter = Baz.Fail"),
    r"",
  ]
  expected_out_re += [
    re.escape(l)
    for l in gtest_fake_base.get_test_output("Baz.Fail").splitlines()
  ]
  expected_out_re.append("")
  expected_out_re += [
    re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
  ]
  # ...and finally the summary section.
  expected_out_re += [
    "",
    re.escape("Summary:"),
    re.escape("Baz.Fail failed"),
    re.escape("Success: 3 75.00%"),
    re.escape("Flaky: 0 0.00%"),
    re.escape("Fail: 1 25.00%"),
    r"\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s",
  ]
  self._check_results(expected_out_re, out, err)
  expected_result_file_entries = [
    ("Foo.Bar1", 1),
    ("Foo.Bar2", 1),
    ("Foo.Bar3", 1),
    ("Baz.Fail", 3),
  ]
  self._check_results_file(expected_result_file_entries)
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) temp_dir = args[0] result = 0 for test_case in test_cases: filename = os.path.join(temp_dir, test_case) # Fails on first run, succeeds on the second. should_fail = not os.path.isfile(filename) result = result or int(should_fail) print gtest_fake_base.get_test_output(test_case, should_fail) if should_fail: with open(filename, 'wb') as f: f.write('bang') print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return result
def test_simple_fail(self):
  """Checks the run output when Baz.Fail fails; exit code must be 1.

  Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
  (the alias has been deprecated since Python 2.7 and is removed in 3.12).
  """
  out, err, return_code = RunTest('gtest_fake_fail.py')
  self.assertEqual(1, return_code)
  # Six progress lines, the rerun of Baz.Fail, then the summary lines.
  expected_out_re = [
    r'\[\d/\d\] \d\.\d\ds .+',
    r'\[\d/\d\] \d\.\d\ds .+',
    r'\[\d/\d\] \d\.\d\ds .+',
    r'\[\d/\d\] \d\.\d\ds .+',
    r'\[\d/\d\] \d\.\d\ds .+',
    r'\[\d/\d\] \d\.\d\ds .+',
    re.escape('Note: Google Test filter = Baz.Fail'),
    r'',
  ] + [
    re.escape(l)
    for l in gtest_fake_base.get_test_output('Baz.Fail').splitlines()
  ] + [
    '',
  ] + [
    re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
  ] + [
    '',
    re.escape('Success: 3 75.00%'),
    re.escape('Flaky: 0 0.00%'),
    re.escape('Fail: 1 25.00%'),
    r'\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) temp_dir = args[0] result = 0 for test_case in test_cases: filename = os.path.join(temp_dir, test_case) # Fails on first run, succeeds on the second. should_fail = not os.path.isfile(filename) # But it still prints it succeeded. print gtest_fake_base.get_test_output(test_case, False) result = result or int(should_fail) if should_fail: with open(filename, 'wb') as f: f.write('bang') print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) if result: print('OMG I crashed') print('Here\'s a stack trace') return result
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) temp_dir = args[0] result = 0 for test_case in test_cases: if test_case == "Foo.Bar2": # Never run it, don't fail either. continue should_fail = False if test_case == "Foo.Bar1": filename = os.path.join(temp_dir, test_case) should_fail = not os.path.isfile(filename) result = result or int(should_fail) print gtest_fake_base.get_test_output(test_case, should_fail) if should_fail: with open(filename, "wb") as f: f.write("bang") print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return result
def main(): test_cases, args = gtest_fake_base.parse_args(TESTS, 1) temp_dir = args[0] result = 0 for test_case in test_cases: if test_case == 'Foo.Bar2': # Never run it, don't fail either. continue should_fail = False if test_case == 'Foo.Bar1': filename = os.path.join(temp_dir, test_case) should_fail = not os.path.isfile(filename) result = result or int(should_fail) print gtest_fake_base.get_test_output(test_case, should_fail) if should_fail: with open(filename, 'wb') as f: f.write('bang') print gtest_fake_base.get_footer(len(test_cases), len(test_cases)) return result
def main(): parser = optparse.OptionParser() parser.add_option('--gtest_list_tests', action='store_true') parser.add_option('--gtest_filter') options, args = parser.parse_args() if len(args) != 1: parser.error('Need to pass a temporary directory path') temp_dir = args[0] if options.gtest_list_tests: for fixture, cases in TESTS.iteritems(): print '%s.' % fixture for case in cases: print ' ' + case print ' YOU HAVE 2 tests with ignored failures (FAILS prefix)' print '' return 0 if options.gtest_filter: # Simulate running one test. print 'Note: Google Test filter = %s\n' % options.gtest_filter print gtest_fake_base.get_test_output(options.gtest_filter) print gtest_fake_base.get_footer(1, 1) filename = os.path.join(temp_dir, options.gtest_filter) # Fails on first run, succeeds on the second. if not os.path.isfile(filename): with open(filename, 'w') as f: f.write('bang') return 1 return 0 for fixture, cases in TESTS.iteritems(): for case in cases: print gtest_fake_base.get_test_output('%s.%s' % (fixture, case)) print gtest_fake_base.get_footer(TOTAL, TOTAL) return 1
def test_simple_fail(self):
  """Verifies console output and the results file when Baz.Fail fails."""
  out, err, return_code = RunTest(
      [
        '--result', self.filename,
        os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
      ])
  self.assertEqual(1, return_code)
  # Six progress lines come first.
  expected_out_re = [r'\[\d/\d\] \d\.\d\ds .+'] * 6
  # Then the lone rerun of the failing test...
  expected_out_re += [
    re.escape('Note: Google Test filter = Baz.Fail'),
    r'',
  ]
  expected_out_re += [
    re.escape(l)
    for l in gtest_fake_base.get_test_output('Baz.Fail').splitlines()
  ]
  expected_out_re.append('')
  expected_out_re += [
    re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
  ]
  # ...and finally the failed-tests / summary section with timings.
  expected_out_re += [
    '',
    re.escape('Failed tests:'),
    re.escape(' Baz.Fail'),
    re.escape('Summary:'),
    re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
    re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
    re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
    r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  test_cases = [
    ('Foo.Bar1', 1),
    ('Foo.Bar2', 1),
    ('Foo.Bar3', 1),
    ('Baz.Fail', 3),
  ]
  self._check_results_file(
      fail=['Baz.Fail'],
      flaky=[],
      success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
      test_cases=test_cases)
def get_test_re(test_name, failed, duration):
  """Returns regex-escaped lines matching one fake test's output.

  A trailing empty string is appended to match the blank line that follows
  each test's output block.
  """
  raw = gtest_fake_base.get_test_output(test_name, failed, duration)
  escaped = [re.escape(line) for line in raw.splitlines()]
  escaped.append('')
  return escaped
def test_simple(self):
  """Traces the fake gtest via trace_test_cases.py and validates the JSON.

  Runs the tracer as a subprocess with 4 parallel jobs, checks its console
  output line-by-line against regexes, then verifies the per-test-case
  entries (duration, output, returncode, trace) in the emitted JSON file.
  """
  file_handle, self.temp_file = tempfile.mkstemp(
      prefix='trace_test_cases_test')
  os.close(file_handle)
  cmd = [
    sys.executable, os.path.join(ROOT_DIR, 'trace_test_cases.py'),
    # Forces 4 parallel jobs.
    '--jobs', '4',
    '--out', self.temp_file,
  ]
  if VERBOSE:
    cmd.extend(['-v'] * 3)
  cmd.append(TARGET_PATH)
  logging.debug(' '.join(cmd))
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      universal_newlines=True, cwd=ROOT_DIR)
  out, err = proc.communicate() or ('', '')  # pylint is confused.
  self.assertEqual(0, proc.returncode, (out, err))
  lines = out.splitlines()
  # One progress line per test case plus the tracing banner lines.
  expected_out_re = [
    r'Tracing\.\.\.',
    r'\[1/4\] +\d+\.\d\ds .+',
    r'\[2/4\] +\d+\.\d\ds .+',
    r'\[3/4\] +\d+\.\d\ds .+',
    r'\[4/4\] +\d+\.\d\ds .+',
    r'Reading trace logs\.\.\.',
  ]
  self.assertEqual(len(expected_out_re), len(lines), lines)
  for index in range(len(expected_out_re)):
    self.assertTrue(
        re.match('^%s$' % expected_out_re[index], lines[index]),
        '%d: %s\n%r\n%s' % (
          index, expected_out_re[index], lines[index], out))
  # Junk is printed on win32.
  if sys.platform != 'win32' and not VERBOSE:
    self.assertEqual('', err)
  with open(self.temp_file, 'r') as f:
    content = f.read()
  try:
    result = json.loads(content)
  except:
    # Dump the raw content to ease debugging malformed output, then re-raise.
    print repr(content)
    raise
  # Expected returncode per test case (Baz.Fail is the designated failure).
  test_cases = {
    'Baz.Fail': 1,
    'Foo.Bar1': 0,
    'Foo.Bar2': 0,
    'Foo.Bar3': 0,
  }
  self.assertEqual(dict, result.__class__)
  self.assertEqual(sorted(test_cases), sorted(result))
  for index, test_case in enumerate(sorted(result)):
    actual = result[test_case]
    self.assertEqual(
        [u'duration', u'output', u'returncode', u'trace'], sorted(actual))
    self.assertGreater(actual['duration'], 0.0000001)
    self.assertEqual(test_cases[test_case], actual['returncode'])
    expected_output = (
        'Note: Google Test filter = %s\n' % test_case +
        '\n' +
        gtest_fake_base.get_test_output(test_case) +
        '\n' +
        gtest_fake_base.get_footer(1, 1) +
        '\n')
    # On Windows, actual['output'] is unprocessed so it will contain CRLF.
    output = actual['output']
    if sys.platform == 'win32':
      output = output.replace('\r\n', '\n')
    self.assertEqual(expected_output, output, repr(output))
    expected_trace = {
      u'root': {
        u'children': [],
        u'command': [
          self.executable, TARGET_PATH, '--gtest_filter=' + test_case,
        ],
        u'executable': trace_inputs.get_native_path_case(
            unicode(self.executable)),
        u'initial_cwd': ROOT_DIR,
      },
    }
    # NOTE(review): initial_cwd is apparently not tracked on Windows.
    if sys.platform == 'win32':
      expected_trace['root']['initial_cwd'] = None
    # pid and files vary per run; pop them after sanity checks.
    self.assertGreater(actual['trace']['root'].pop('pid'), 1)
    self.assertGreater(len(actual['trace']['root'].pop('files')), 10)
    self.assertEqual(expected_trace, actual['trace'])
def test_simple(self):
  """Traces the fake gtest via trace_test_cases.py and validates the JSON.

  Variant using GOOGLETEST_DIR and file_path; also expects an initial
  [0/4] progress line before the per-test lines.
  """
  file_handle, self.temp_file = tempfile.mkstemp(
      prefix='trace_test_cases_test')
  os.close(file_handle)
  cmd = [
    sys.executable, os.path.join(GOOGLETEST_DIR, 'trace_test_cases.py'),
    # Forces 4 parallel jobs.
    '--jobs', '4',
    '--out', self.temp_file,
  ]
  if VERBOSE:
    cmd.extend(['-v'] * 3)
  cmd.append(TARGET_PATH)
  logging.debug(' '.join(cmd))
  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          universal_newlines=True, cwd=GOOGLETEST_DIR)
  out, err = proc.communicate() or ('', '')  # pylint is confused.
  self.assertEqual(0, proc.returncode, (out, err))
  lines = out.splitlines()
  # [0/4] header plus one progress line per test case.
  expected_out_re = [
    r'Tracing\.\.\.',
    r'\[0/4\] +\d+\.\d\ds ',
    r'\[1/4\] +\d+\.\d\ds .+',
    r'\[2/4\] +\d+\.\d\ds .+',
    r'\[3/4\] +\d+\.\d\ds .+',
    r'\[4/4\] +\d+\.\d\ds .+',
    r'Reading trace logs\.\.\.',
  ]
  self.assertEqual(len(expected_out_re), len(lines), lines)
  for index in range(len(expected_out_re)):
    self.assertTrue(
        re.match('^%s$' % expected_out_re[index], lines[index]),
        '%d: %s\n%r\n%s' % (index, expected_out_re[index], lines[index], out))
  # Junk is printed on win32.
  if sys.platform != 'win32' and not VERBOSE:
    self.assertEqual('', err)
  with open(self.temp_file, 'r') as f:
    content = f.read()
  try:
    result = json.loads(content)
  except:
    # Dump the raw content to ease debugging malformed output, then re-raise.
    print repr(content)
    raise
  # Expected returncode per test case (Baz.Fail is the designated failure).
  test_cases = {
    'Baz.Fail': 1,
    'Foo.Bar1': 0,
    'Foo.Bar2': 0,
    'Foo.Bar3': 0,
  }
  self.assertEqual(dict, result.__class__)
  self.assertEqual(sorted(test_cases), sorted(result))
  for index, test_case in enumerate(sorted(result)):
    actual = result[test_case]
    self.assertEqual([u'duration', u'output', u'returncode', u'trace'],
                     sorted(actual))
    self.assertGreater(actual['duration'], 0.0000001)
    self.assertEqual(test_cases[test_case], actual['returncode'])
    expected_output = ('Note: Google Test filter = %s\n' % test_case +
                       '\n' +
                       gtest_fake_base.get_test_output(
                           test_case, 'Fail' in test_case) +
                       '\n' +
                       gtest_fake_base.get_footer(1, 1) +
                       '\n')
    # On Windows, actual['output'] is unprocessed so it will contain CRLF.
    output = actual['output']
    if sys.platform == 'win32':
      output = output.replace('\r\n', '\n')
    self.assertEqual(expected_output, output, repr(output))
    expected_trace = {
      u'root': {
        u'children': [],
        u'command': [
          self.executable, TARGET_PATH, '--gtest_filter=' + test_case,
        ],
        u'executable':
            file_path.get_native_path_case(unicode(self.executable)),
        u'initial_cwd': GOOGLETEST_DIR,
      },
    }
    # NOTE(review): initial_cwd is apparently not tracked on Windows.
    if sys.platform == 'win32':
      expected_trace['root']['initial_cwd'] = None
    # pid and files vary per run; pop them after sanity checks.
    self.assertGreater(actual['trace']['root'].pop('pid'), 1)
    self.assertGreater(len(actual['trace']['root'].pop('files')), 10)
    self.assertEqual(expected_trace, actual['trace'])