def run(venv, wpt_args, **kwargs):
    do_delayed_imports()

    retcode = 0

    wpt_args = create_parser().parse_args(wpt_args)

    with open(kwargs["config_file"], 'r') as config_fp:
        config = SafeConfigParser()
        config.readfp(config_fp)
        skip_tests = config.get("file detection", "skip_tests").split()
        ignore_changes = set(config.get("file detection", "ignore_changes").split())

    if kwargs["output_bytes"] is not None:
        # Truncate output once the log grows past the requested cap.
        replace_streams(kwargs["output_bytes"],
                        "Log reached capacity (%s bytes); output disabled." %
                        kwargs["output_bytes"])

    wpt_args.metadata_root = kwargs["metadata_root"]
    try:
        os.makedirs(wpt_args.metadata_root)
    except OSError:
        pass

    setup_logging()

    pr_number = pr()

    with TravisFold("browser_setup"):
        logger.info(markdown.format_comment_title(wpt_args.product))

        if pr is not None:
            deepen_checkout(kwargs["user"])

        # Ensure we have a branch called "master"
        fetch_wpt(kwargs["user"], "master:master")

        head_sha1 = get_sha1()
        logger.info("Testing web-platform-tests at revision %s" % head_sha1)

        wpt_kwargs = Kwargs(vars(wpt_args))

        # Initialise so the "no results" branch below cannot hit a NameError
        # when an explicit test list was supplied.
        tests_changed = set()

        if not wpt_kwargs["test_list"]:
            # No explicit test list: run only the tests affected by the change.
            manifest_path = os.path.join(wpt_kwargs["metadata_root"], "MANIFEST.json")
            tests_changed, files_affected = get_changed_files(manifest_path, kwargs["rev"],
                                                              ignore_changes, skip_tests)

            if not (tests_changed or files_affected):
                logger.info("No tests changed")
                return 0

            if tests_changed:
                logger.debug("Tests changed:\n%s" %
                             "".join(" * %s\n" % item for item in tests_changed))

            if files_affected:
                logger.debug("Affected tests:\n%s" %
                             "".join(" * %s\n" % item for item in files_affected))

            wpt_kwargs["test_list"] = list(tests_changed | files_affected)

        do_delayed_imports()

        wpt_kwargs["prompt"] = False
        wpt_kwargs["install_browser"] = wpt_kwargs["product"].split(":")[0] == "firefox"

        wpt_kwargs["pause_after_test"] = False
        wpt_kwargs["verify_log_full"] = False
        # Repeat each test to detect intermittent (unstable) results.
        if wpt_kwargs["repeat"] == 1:
            wpt_kwargs["repeat"] = 10

        wpt_kwargs["headless"] = False
        wpt_kwargs["log_tbpl"] = [sys.stdout]

        wpt_kwargs = setup_wptrunner(venv, **wpt_kwargs)

        logger.info("Using binary %s" % wpt_kwargs["binary"])

    with TravisFold("running_tests"):
        logger.info("Starting tests")

        wpt_logger = wptrunner.logger
        results, inconsistent, slow, iterations = run_step(wpt_logger, wpt_kwargs["repeat"],
                                                           True, {}, **wpt_kwargs)

    if results:
        if inconsistent:
            write_inconsistent(logger.error, inconsistent, iterations)
            retcode = 2
        elif slow:
            write_slow_tests(logger.error, slow)
            retcode = 2
        else:
            logger.info("All results were stable\n")
        with TravisFold("full_results"):
            write_results(logger.info, results, iterations,
                          pr_number=pr_number,
                          use_details=True)
    else:
        logger.info("No tests run.")
        # Be conservative and only return errors when we know for sure tests are changed.
        if tests_changed:
            retcode = 3

    return retcode