Example #1
def test_affected_testfiles():
    manifest_json = {
        "items": {
            "crashtest": {
                "a/b/c/foo-crash.html": [
                    ["a/b/c/foo-crash.html", {}],
                ]
            }
        },
        "paths": {
            "a/b/c/foo-crash.html": [
                "acdefgh123456",
                "crashtest",
            ]
        },
        "url_base": "/",
        "version": 7,
    }
    manifest = Manifest.from_json("/", manifest_json)
    with patch("tools.wpt.testfiles.load_manifest", return_value=manifest):
        # Dependent affected tests are determined by walking the filesystem,
        # which doesn't work in our test setup. We would need to refactor
        # testfiles.affected_testfiles or have a more complex test setup to
        # support testing those.
        full_test_path = os.path.join(testfiles.wpt_root, "a", "b", "c",
                                      "foo-crash.html")
        tests_changed, _ = testfiles.affected_testfiles([full_test_path])
        assert tests_changed == set([full_test_path])
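As a side note, the patching pattern above generalizes beyond the wpt manifest: any module-level loader can be swapped out with unittest.mock.patch so a test never touches the filesystem. The minimal sketch below uses hypothetical names (load_config and read_timeout are made up, not part of tools.wpt.testfiles) purely to show the shape of the technique.

from unittest.mock import patch


def load_config():
    # Hypothetical loader that would normally read from disk.
    raise RuntimeError("should not be reached in tests")


def read_timeout():
    # Hypothetical consumer of load_config().
    return load_config()["timeout"]


def test_read_timeout():
    # Patch the loader by its import path for the duration of the test,
    # just as the example above patches tools.wpt.testfiles.load_manifest.
    with patch(f"{__name__}.load_config", return_value={"timeout": 30}):
        assert read_timeout() == 30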
Example #2
def get_changed_files(manifest_path, rev, ignore_changes, skip_tests):
    if not rev:
        branch_point = testfiles.branch_point()
        revish = "%s..HEAD" % branch_point
    else:
        revish = rev

    files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)

    if files_ignored:
        logger.info("Ignoring %s changed files:\n%s" %
                    (len(files_ignored), "".join(" * %s\n" % item for item in files_ignored)))

    tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
                                                                 manifest_path=manifest_path)

    return tests_changed, files_affected
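For context, the revish string built above ("<branch_point>..HEAD") is an ordinary git revision range, which testfiles.files_changed presumably resolves through git. A rough sketch of that resolution is below; it assumes git is on PATH and the process runs inside the repository, and changed_files is illustrative, not the real implementation.

import subprocess


def changed_files(revish):
    # `git diff --name-only A..B` lists the paths that differ between the
    # two endpoints of the range, one per line.
    out = subprocess.check_output(["git", "diff", "--name-only", revish],
                                  text=True)
    return [line for line in out.splitlines() if line]


# e.g. changed_files("1a2b3c4..HEAD")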
Example #3
def get_changed_files(manifest_path, rev, ignore_changes, skip_tests):
    if not rev:
        branch_point = testfiles.branch_point()
        revish = "%s..HEAD" % branch_point
    else:
        revish = rev

    files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)

    if files_ignored:
        logger.info("Ignoring %s changed files:\n%s" %
                    (len(files_ignored), "".join(" * %s\n" % item for item in files_ignored)))

    tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
                                                                 manifest_path=manifest_path)

    return tests_changed, files_affected
def run(venv, wpt_args, **kwargs):
    global logger

    do_delayed_imports()

    retcode = 0
    parser = get_parser()

    wpt_args = create_parser().parse_args(wpt_args)

    with open(kwargs["config_file"], 'r') as config_fp:
        config = SafeConfigParser()
        config.readfp(config_fp)
        skip_tests = config.get("file detection", "skip_tests").split()
        ignore_changes = set(config.get("file detection", "ignore_changes").split())

    if kwargs["output_bytes"] is not None:
        replace_streams(kwargs["output_bytes"],
                        "Log reached capacity (%s bytes); output disabled." % kwargs["output_bytes"])


    wpt_args.metadata_root = kwargs["metadata_root"]
    try:
        os.makedirs(wpt_args.metadata_root)
    except OSError:
        pass

    logger = logging.getLogger(os.path.splitext(__file__)[0])

    setup_logging()

    browser_name = wpt_args.product.split(":")[0]

    if browser_name == "sauce" and not wpt_args.sauce_key:
        logger.warning("Cannot run tests on Sauce Labs. No access key.")
        return retcode

    pr_number = pr()

    with TravisFold("browser_setup"):
        logger.info(markdown.format_comment_title(wpt_args.product))

        # Only deepen the checkout when running against a pull request.
        if pr_number is not None:
            deepen_checkout(kwargs["user"])

        # Ensure we have a branch called "master"
        fetch_wpt(kwargs["user"], "master:master")

        head_sha1 = get_sha1()
        logger.info("Testing web-platform-tests at revision %s" % head_sha1)

        if not kwargs["rev"]:
            branch_point = testfiles.branch_point()
            revish = "%s..HEAD" % branch_point
        else:
            revish = kwargs["rev"]

        files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)

        if files_ignored:
            logger.info("Ignoring %s changed files:\n%s" % (len(files_ignored),
                                                            "".join(" * %s\n" % item for item in files_ignored)))

        tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
                                                                     manifest_path=os.path.join(
                                                                         wpt_args.metadata_root,
                                                                         "MANIFEST.json"))

        if not (tests_changed or files_affected):
            logger.info("No tests changed")
            return 0

        wpt_kwargs = Kwargs(vars(wpt_args))
        wpt_kwargs["test_list"] = list(tests_changed | files_affected)
        set_default_args(wpt_kwargs)

        do_delayed_imports()

        wpt_kwargs["stability"] = True
        wpt_kwargs["prompt"] = False
        wpt_kwargs["install_browser"] = True
        wpt_kwargs["install"] = wpt_kwargs["product"].split(":")[0] == "firefox"

        wpt_kwargs = setup_wptrunner(venv, **wpt_kwargs)

        logger.info("Using binary %s" % wpt_kwargs["binary"])

        if tests_changed:
            logger.debug("Tests changed:\n%s" % "".join(" * %s\n" % item for item in tests_changed))

        if files_affected:
            logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in files_affected))


    with TravisFold("running_tests"):
        logger.info("Starting tests")


        wpt_logger = wptrunner.logger
        iterations, results, inconsistent = run(venv, wpt_logger, **wpt_kwargs)

    if results:
        if inconsistent:
            write_inconsistent(logger.error, inconsistent, iterations)
            retcode = 2
        else:
            logger.info("All results were stable\n")
        with TravisFold("full_results"):
            write_results(logger.info, results, iterations,
                          pr_number=kwargs["comment_pr"],
                          use_details=True)
    else:
        logger.info("No tests run.")

    return retcode
def run(venv, wpt_args, **kwargs):
    global logger

    do_delayed_imports()

    retcode = 0
    parser = get_parser()

    wpt_args = create_parser().parse_args(wpt_args)

    with open(kwargs["config_file"], 'r') as config_fp:
        config = SafeConfigParser()
        config.readfp(config_fp)
        skip_tests = config.get("file detection", "skip_tests").split()
        ignore_changes = set(
            config.get("file detection", "ignore_changes").split())
        results_url = config.get("file detection", "results_url")

    if kwargs["output_bytes"] is not None:
        replace_streams(
            kwargs["output_bytes"],
            "Log reached capacity (%s bytes); output disabled." %
            kwargs["output_bytes"])

    wpt_args.metadata_root = kwargs["metadata_root"]
    try:
        os.makedirs(wpt_args.metadata_root)
    except OSError:
        pass

    logger = logging.getLogger(os.path.splitext(__file__)[0])

    setup_logging()

    browser_name = wpt_args.product.split(":")[0]

    if browser_name == "sauce" and not wpt_args.sauce_key:
        logger.warning("Cannot run tests on Sauce Labs. No access key.")
        return retcode

    pr_number = pr()

    with TravisFold("browser_setup"):
        logger.info(markdown.format_comment_title(wpt_args.product))

        # Only deepen the checkout when running against a pull request.
        if pr_number is not None:
            deepen_checkout(kwargs["user"])

        # Ensure we have a branch called "master"
        fetch_wpt(kwargs["user"], "master:master")

        head_sha1 = get_sha1()
        logger.info("Testing web-platform-tests at revision %s" % head_sha1)

        if not kwargs["rev"]:
            branch_point = testfiles.branch_point()
            revish = "%s..HEAD" % branch_point
        else:
            revish = kwargs["rev"]

        files_changed, files_ignored = testfiles.files_changed(
            revish, ignore_changes)

        if files_ignored:
            logger.info(
                "Ignoring %s changed files:\n%s" %
                (len(files_ignored), "".join(" * %s\n" % item
                                             for item in files_ignored)))

        tests_changed, files_affected = testfiles.affected_testfiles(
            files_changed,
            skip_tests,
            manifest_path=os.path.join(wpt_args.metadata_root,
                                       "MANIFEST.json"))

        if not (tests_changed or files_affected):
            logger.info("No tests changed")
            return 0

        wpt_kwargs = Kwargs(vars(wpt_args))
        wpt_kwargs["test_list"] = list(tests_changed | files_affected)
        set_default_args(wpt_kwargs)

        do_delayed_imports()

        wpt_kwargs["stability"] = True
        wpt_kwargs["prompt"] = False
        wpt_kwargs["install_browser"] = True
        wpt_kwargs["install"] = wpt_kwargs["product"].split(
            ":")[0] == "firefox"

        wpt_kwargs = setup_wptrunner(venv, **wpt_kwargs)

        logger.info("Using binary %s" % wpt_kwargs["binary"])

        if tests_changed:
            logger.debug("Tests changed:\n%s" %
                         "".join(" * %s\n" % item for item in tests_changed))

        if files_affected:
            logger.debug("Affected tests:\n%s" %
                         "".join(" * %s\n" % item for item in files_affected))

    with TravisFold("running_tests"):
        logger.info("Starting tests")

        wpt_logger = wptrunner.logger
        iterations, results, inconsistent = run(venv, wpt_logger, **wpt_kwargs)

    if results:
        if inconsistent:
            write_inconsistent(logger.error, inconsistent, iterations)
            retcode = 2
        else:
            logger.info("All results were stable\n")
        with TravisFold("full_results"):
            write_results(logger.info,
                          results,
                          iterations,
                          pr_number=pr_number,
                          use_details=True)
            if pr_number:
                post_results(results,
                             iterations=iterations,
                             url=results_url,
                             product=wpt_args.product,
                             pr_number=pr_number,
                             status="failed" if inconsistent else "passed")
    else:
        logger.info("No tests run.")

    return retcode
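Finally, a sketch of the kind of INI file the config_file argument points at, reconstructed from the config.get("file detection", ...) calls in run(). The section and key names come from the code above; the values are invented for illustration, and the sketch uses the modern configparser names (ConfigParser.read_file) rather than the deprecated SafeConfigParser/readfp used in the snippets.

from configparser import ConfigParser
from io import StringIO

SAMPLE_CONFIG = """
[file detection]
skip_tests = tools/ docs/
ignore_changes = README.md
results_url = https://example.invalid/results
"""

config = ConfigParser()
config.read_file(StringIO(SAMPLE_CONFIG))
skip_tests = config.get("file detection", "skip_tests").split()
ignore_changes = set(config.get("file detection", "ignore_changes").split())
results_url = config.get("file detection", "results_url")
print(skip_tests, ignore_changes, results_url)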