def main_run(args):
  """Runs the mojo apptests and writes a summary JSON to args.output.

  Returns the runner's exit code.
  """
  runner = os.path.join(common.SRC_DIR, 'mojo', 'tools', 'apptest_runner.py')
  build_dir = os.path.join(common.SRC_DIR, 'out', args.build_config_fs)
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        runner, build_dir, '--verbose',
        '--write-full-results-to', tempfile_path
    ])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            # Valid when the runner exited within the tolerated range and
            # either succeeded outright or reported concrete failures.
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3,
            # where dict.keys() returns a non-serializable view object.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main_run(args):
  """Runs telemetry_perf_unittests; writes a summary JSON to args.output.

  Honors an optional JSON filter file and picks the browser from the
  target platform (android-chromium on Android bots).
  """
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)
  test_args = ['--retry-limit', '3']
  if 'android' == args.properties.get('target_platform'):
    test_args += ['--browser', 'android-chromium', '--device', 'android']
  else:
    test_args += ['--browser', args.build_config_fs.lower()]
  with common.temporary_file() as tempfile_path:
    test_args += ['--write-full-results-to', tempfile_path]
    rc = common.run_runtest(args, [
        '--test-type', 'telemetry_perf_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
    ] + test_args + filter_tests)
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main_run(args):
  """Runs telemetry_unittests; writes a summary JSON to args.output."""
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)
  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--annotate', 'gtest',
        '--test-type', 'telemetry_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'telemetry', 'run_tests'),
        '--browser', args.build_config_fs.lower(),
        '--retry-limit', '3',
        '--write-full-results-to', tempfile_path,
    ] + filter_tests)
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main_run(args):
  """Executes the tools/metrics Python tests and records the results."""
  out_dir = os.path.join(common.SRC_DIR, 'out', args.build_config_fs)
  wrapper = os.path.join(common.SRC_DIR, 'testing', 'test_env.py')
  test_script = os.path.join(common.SRC_DIR, 'tools', 'metrics',
                             'metrics_python_tests.py')
  with common.temporary_file() as results_path:
    exit_code = common.run_command([
        'vpython', wrapper, test_script,
        '--isolated-script-test-output', results_path,
        '--skip-set-lpac-acls=1',
    ], cwd=out_dir)
    with open(results_path) as fp:
      isolated_results = json.load(fp)
    parsed = common.parse_common_test_results(isolated_results,
                                              test_separator='.')
    # One "name: detail" string per unexpected failure.
    failure_messages = [
        '%s: %s' % (name, detail)
        for name, detail in parsed['unexpected_failures'].items()
    ]
    common.record_local_script_results('metrics_python_tests', args.output,
                                       failure_messages, True)
  return exit_code
def main_run(args):
  """Runs the headless devtools_api tests via typ (from catapult).

  Writes a summary JSON to args.output and returns typ's exit code.
  """
  typ_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
                   'third_party', 'catapult', 'third_party', 'typ'))
  _AddToPathIfNeeded(typ_path)
  # Imported lazily: typ only becomes importable after the path insertion.
  import typ
  top_level_dir = os.path.join(common.SRC_DIR, 'headless', 'lib', 'browser',
                               'devtools_api')
  with common.temporary_file() as tempfile_path:
    rc = typ.main(argv=[],
                  top_level_dir=top_level_dir,
                  write_full_results_to=tempfile_path,
                  coverage_source=[top_level_dir])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main_run(args):
  """Runs telemetry_perf_unittests; writes a summary JSON to args.output.

  Uses the android-chrome-shell browser on Android bots, otherwise the
  browser matching the build configuration.
  """
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)
  test_args = ['--retry-limit', '3']
  if 'android' == args.properties.get('target_platform'):
    test_args += ['--browser', 'android-chrome-shell', '--device', 'android']
  else:
    test_args += ['--browser', args.build_config_fs.lower()]
  with common.temporary_file() as tempfile_path:
    test_args += ['--write-full-results-to', tempfile_path]
    rc = common.run_runtest(args, [
        '--test-type', 'telemetry_perf_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
    ] + test_args + filter_tests)
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main():
  """Runs an isolated test script, optionally under xvfb.

  Forwards unknown args to the wrapped script and writes a summary JSON
  to --isolated-script-test-output. Returns the script's exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument("--isolated-script-test-output",
                      type=argparse.FileType("w"),
                      required=True)
  parser.add_argument("--xvfb", help="Start xvfb.", action="store_true")
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir=".")
    assert xvfb_proc and openbox_proc, "Failed to start xvfb"
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable] + rest_args +
          ["--write-full-results-to", tempfile_path],
          env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator=".")
      failures = parsed_results["unexpected_failures"]
      json.dump(
          {
              "valid":
                  bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                       ((rc == 0) or failures)),
              # list() keeps the payload JSON-serializable on Python 3.
              "failures": list(failures.keys()),
          },
          args.isolated_script_test_output,
      )
    return rc
  finally:
    # kill() is assumed to tolerate None when xvfb never started.
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
def main_run(args):
  """Runs the webkitpy test suite; writes a summary JSON to args.output."""
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'third_party', 'WebKit', 'Tools',
                     'Scripts', 'test-webkitpy'),
        '--write-full-results-to', tempfile_path,
    ], cwd=args.paths['checkout'])
    with open(tempfile_path) as f:
      results = json.load(f)
    # Note: no test_separator here, unlike the sibling runners.
    parsed_results = common.parse_common_test_results(results)
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main():
  """Runs a test script, writing a summary to --test-launcher-summary-output.

  Forwards unknown args to the wrapped script; returns its exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--test-launcher-summary-output',
                      type=argparse.FileType('w'),
                      required=True)
  args, rest_args = parser.parse_known_args()
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([sys.executable] + rest_args + [
        '--write-full-results-to', tempfile_path,
    ])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.test_launcher_summary_output)
  return rc
def main_run(args):
  """Runs the headless devtools_api tests via third_party/typ.

  Writes a summary JSON to args.output and returns typ's exit code.
  """
  typ_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
                   'third_party', 'typ'))
  _AddToPathIfNeeded(typ_path)
  # Imported lazily: typ only becomes importable after the path insertion.
  import typ
  top_level_dir = os.path.join(common.SRC_DIR, 'headless', 'lib', 'browser',
                               'devtools_api')
  with common.temporary_file() as tempfile_path:
    rc = typ.main(argv=[],
                  top_level_dir=top_level_dir,
                  write_full_results_to=tempfile_path,
                  coverage_source=[top_level_dir])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main():
  """Runs an isolated test script under optional xvfb with gtest sharding.

  Translates GTEST_TOTAL_SHARDS/GTEST_SHARD_INDEX env vars into
  --total-shards/--shard-index flags for the wrapped script. Writes a
  summary JSON to --isolated-script-test-output; returns the exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output',
                      type=argparse.FileType('w'),
                      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding: the env vars are consumed here
  # (and removed so the child does not double-shard) and re-expressed as
  # command-line flags.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
        '--total-shards=%d' % total_shards,
        '--shard-index=%d' % shard_index
    ]
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable] + rest_args + sharding_args + [
              '--write-full-results-to', tempfile_path,
          ],
          env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']
      json.dump(
          {
              'valid':
                  bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                       ((rc == 0) or failures)),
              # list() keeps the payload JSON-serializable on Python 3.
              'failures': list(failures.keys()),
          }, args.isolated_script_test_output)
    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
def main_run(args):
  """Runs the mojo apptests; writes a summary JSON to args.output."""
  runner = os.path.join(common.SRC_DIR, 'mojo', 'tools', 'apptest_runner.py')
  build_dir = os.path.join(common.SRC_DIR, 'out', args.build_config_fs)
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        runner, build_dir, '--verbose',
        '--write-full-results-to', tempfile_path
    ])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main():
  """Runs an isolated test script, optionally under xvfb.

  Writes a summary JSON to --isolated-script-test-output; returns the
  wrapped script's exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output',
                      type=argparse.FileType('w'),
                      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir='.')
    assert xvfb_proc and openbox_proc, 'Failed to start xvfb'
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable] + rest_args + [
              '--write-full-results-to', tempfile_path,
          ],
          env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']
      json.dump(
          {
              'valid':
                  bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                       ((rc == 0) or failures)),
              # list() keeps the payload JSON-serializable on Python 3.
              'failures': list(failures.keys()),
          }, args.isolated_script_test_output)
    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
def main_run(args):
  """Runs the webkitpy test suite; writes a summary JSON to args.output."""
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'third_party', 'WebKit', 'Tools',
                     'Scripts', 'test-webkitpy'),
        '--write-full-results-to', tempfile_path,
    ], cwd=args.paths['checkout'])
    with open(tempfile_path) as f:
      results = json.load(f)
    # Note: no test_separator here, unlike the sibling runners.
    parsed_results = common.parse_common_test_results(results)
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.output)
  return rc
def main():
  """Runs an isolated test script under optional xvfb (with xcompmgr).

  Writes a summary JSON to --isolated-script-test-output; returns the
  wrapped script's exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output',
                      type=argparse.FileType('w'),
                      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable] + rest_args + [
              '--write-full-results-to', tempfile_path,
          ],
          env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']
      json.dump(
          {
              'valid':
                  bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                       ((rc == 0) or failures)),
              # list() keeps the payload JSON-serializable on Python 3.
              'failures': list(failures.keys()),
          }, args.isolated_script_test_output)
    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
def main():
  """Runs an isolated test script and summarizes its results.

  Writes a summary JSON to --isolated-script-test-output; returns the
  wrapped script's exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output',
                      type=argparse.FileType('w'),
                      required=True)
  args, rest_args = parser.parse_known_args()
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([sys.executable] + rest_args + [
        '--write-full-results-to', tempfile_path,
    ])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']
    json.dump(
        {
            'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
            # list() keeps the payload JSON-serializable on Python 3.
            'failures': list(failures.keys()),
        }, args.isolated_script_test_output)
  return rc
def main():
  """Runs an isolated test script under optional xvfb with gtest sharding.

  Strips any --isolated-script-test-chartjson-output argument (chartjson
  results are not consumed here), translates GTEST_* sharding env vars
  into flags, writes a summary JSON, and returns the exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output',
                      type=argparse.FileType('w'),
                      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  # Remove the chartjson extra arg until this script cares about chartjson
  # results from telemetry.
  for index, arg in enumerate(rest_args):
    if '--isolated-script-test-chartjson-output' in arg:
      rest_args.pop(index)
      break
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding: consume the env vars (so the
  # child does not double-shard) and re-express them as flags.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
        '--total-shards=%d' % total_shards,
        '--shard-index=%d' % shard_index
    ]
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable] + rest_args + sharding_args + [
              '--write-full-results-to', tempfile_path,
          ],
          env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']
      json.dump(
          {
              'valid':
                  bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                       ((rc == 0) or failures)),
              # list() keeps the payload JSON-serializable on Python 3.
              'failures': list(failures.keys()),
          }, args.isolated_script_test_output)
    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)