Example no. 1
0
def generate_exclude_yaml(task_path_suffix: str, output: str) -> None:
    # pylint: disable=too-many-locals
    """
    Create a tag file associating multiversion tests to tags for exclusion.

    Compares the BACKPORTS_REQUIRED_FILE on the current branch with the same file on the
    last-lts branch to determine which tests should be blacklisted.
    """

    enable_logging()

    backports_required_latest = generate_resmoke.read_yaml(ETC_DIR, BACKPORTS_REQUIRED_FILE)

    # Get the state of the backports_required_for_multiversion_tests.yml file for the last-lts
    # binary we are running tests against. We do this by using the commit hash from the last-lts
    # mongo shell executable.
    last_lts_commit_hash = get_backports_required_last_lts_hash(task_path_suffix)

    # Get the yaml contents from the last-lts commit.
    backports_required_last_lts = get_last_lts_yaml(last_lts_commit_hash)

    def diff(list1, list2):
        return [elem for elem in (list1 or []) if elem not in (list2 or [])]

    suites_latest = backports_required_latest["suites"] or {}
    # Check if the changed syntax for etc/backports_required_multiversion.yml has been backported.
    # This variable and all branches where it's not set can be deleted after backporting the change.
    change_backported = "all" in backports_required_last_lts.keys()
    if change_backported:
        always_exclude = diff(backports_required_latest["all"], backports_required_last_lts["all"])
        suites_last_lts: defaultdict = defaultdict(list, backports_required_last_lts["suites"])
    else:
        always_exclude = backports_required_latest["all"] or []
        suites_last_lts = defaultdict(list, backports_required_last_lts)

    tags = _tags.TagsConfig()

    # Tag tests that are excluded from every suite.
    for elem in always_exclude:
        tags.add_tag("js_test", elem["test_file"], BACKPORT_REQUIRED_TAG)

    # Tag tests that are excluded on a suite-by-suite basis.
    for suite in suites_latest.keys():
        test_set = set()
        for elem in diff(suites_latest[suite], suites_last_lts[suite]):
            test_set.add(elem["test_file"])
        for test in test_set:
            tags.add_tag("js_test", test, f"{suite}_{BACKPORT_REQUIRED_TAG}")

    LOGGER.info(f"Writing exclude tags to {output}.")
    tags.write_file(filename=output,
                    preamble="Tag file that specifies exclusions from multiversion suites.")
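
A minimal sketch of how the diff helper above behaves (the entries here are hypothetical, in the shape the backports file uses): it keeps items of the first list that do not appear in the second, treating None as an empty list.

def diff(list1, list2):
    # Keep entries of list1 that are absent from list2; None counts as empty.
    return [elem for elem in (list1 or []) if elem not in (list2 or [])]


latest = [{"test_file": "jstests/core/a.js"}, {"test_file": "jstests/core/b.js"}]
last_lts = [{"test_file": "jstests/core/a.js"}]

print(diff(latest, last_lts))  # [{'test_file': 'jstests/core/b.js'}]
print(diff(latest, None))      # both entries, since None is treated as an empty list
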
Example no. 2
0
    def test_tag_order_custom_cmp(self):
        test_kind = "js_test"
        test_pattern = "jstests/core/example.js"

        def custom_cmp(tag_a, tag_b):
            return cmp(tag_a.split("|"), tag_b.split("|"))

        conf = _tags.TagsConfig(TEST_FILE_PATH, cmp_func=custom_cmp)
        tags = conf.get_tags(test_kind, test_pattern)

        self.assertEqual(["tag1", "tag2", "tag3"], tags)

        # Add a tag that should be at the start.
        conf.add_tag(test_kind, test_pattern, "ta|g2")
        tags = conf.get_tags(test_kind, test_pattern)
        self.assertEqual(["ta|g2", "tag1", "tag2", "tag3"], tags)

        # Add a tag that should be in the middle.
        conf.add_tag(test_kind, test_pattern, "tag1|aaa")
        tags = conf.get_tags(test_kind, test_pattern)
        self.assertEqual(["ta|g2", "tag1", "tag1|aaa", "tag2", "tag3"], tags)
    def setUp(self):
        lifecycle = ci_tags.TagsConfig({"selector": {}})
        self.summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
def main():

    required_options = [
        "project",
        "reliable_test_min_run",
        "unreliable_test_min_run",
        "test_fail_rates",
    ]
    parser = optparse.OptionParser(
        description=__doc__, usage="Usage: %prog [options] test1 test2 ...")
    parser.add_option("--project",
                      dest="project",
                      default=None,
                      help="Evergreen project to analyze [REQUIRED].")
    parser.add_option(
        "--reliableTestMinimumRun",
        dest="reliable_test_min_run",
        default=None,
        type="int",
        help="Minimum number of tests runs for test to be considered as reliable"
        " [REQUIRED].")
    parser.add_option(
        "--unreliableTestMinimumRun",
        dest="unreliable_test_min_run",
        default=None,
        type="int",
        help="Minimum number of test runs for a test to be considered unreliable"
        " [REQUIRED].")
    parser.add_option(
        "--testFailRates",
        dest="test_fail_rates",
        metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
        default=None,
        type="float",
        nargs=2,
        help="Test fail rates: acceptable fail rate and unacceptable fail rate"
        " Specify floating numbers between 0.0 and 1.0 [REQUIRED].")
    parser.add_option(
        "--taskFailRates",
        dest="task_fail_rates",
        metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
        default=None,
        type="float",
        nargs=2,
        help="Task fail rates: acceptable fail rate and unacceptable fail rate."
        " Specify floating numbers between 0.0 and 1.0."
        " Uses --test-fail-rates if unspecified.")
    parser.add_option(
        "--variantFailRates",
        dest="variant_fail_rates",
        metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
        default=None,
        type="float",
        nargs=2,
        help="Variant fail rates: acceptable fail rate and unacceptable fail rate."
        " Specify floating point numbers between 0.0 and 1.0."
        " Uses --taskFailRates if unspecified.")
    parser.add_option(
        "--distroFailRates",
        dest="distro_fail_rates",
        metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
        default=None,
        type="float",
        nargs=2,
        help="Distro fail rates: acceptable fail rate and unacceptable fail rate."
        " Specify floating point numbers between 0.0 and 1.0."
        " Uses --variantFailRates if unspecified.")
    parser.add_option(
        "--tasks",
        dest="tasks",
        default=None,
        help="Names of tasks to analyze for tagging unreliable tests."
        " If specified and no tests are specified, then only tests"
        " associated with the tasks will be analyzed."
        " If unspecified and no tests are specified, the list of tasks will be"
        " the non-excluded list of tasks from the file specified by"
        " '--evergreenYML'.")
    parser.add_option(
        "--variants",
        dest="variants",
        default="",
        help="Names of variants to analyze for tagging unreliable tests.")
    parser.add_option(
        "--distros",
        dest="distros",
        default="",
        help=
        "Names of distros to analyze for tagging unreliable tests [UNUSED].")
    parser.add_option("--evergreenYML",
                      dest="evergreen_yml",
                      default="etc/evergreen.yml",
                      help="Evergreen YML file used to get the list of tasks,"
                      " defaults to '%default'.")
    parser.add_option(
        "--lifecycleFile",
        dest="lifecycle_file",
        default="etc/test_lifecycle.yml",
        help="Evergreen lifecycle file to update, defaults to '%default'.")
    parser.add_option(
        "--reliableDays",
        dest="reliable_days",
        default=7,
        type="int",
        help=
        "Number of days to check for reliable tests, defaults to '%default'.")
    parser.add_option(
        "--unreliableDays",
        dest="unreliable_days",
        default=28,
        type="int",
        help=
        "Number of days to check for unreliable tests, defaults to '%default'."
    )
    parser.add_option("--batchGroupSize",
                      dest="batch_size",
                      default=100,
                      type="int",
                      help="Size of test batch group, defaults to '%default'.")

    (options, tests) = parser.parse_args()

    for option in required_options:
        if not getattr(options, option):
            parser.print_help()
            parser.error("Missing required option")

    evg_conf = evergreen.EvergreenProjectConfig(options.evergreen_yml)
    use_test_tasks_membership = False

    tasks = options.tasks.split(",") if options.tasks else []
    if not tasks:
        # If no tasks are specified, use all lifecycle tasks from the Evergreen project config.
        tasks = evg_conf.lifecycle_task_names
        use_test_tasks_membership = True

    variants = options.variants.split(",") if options.variants else []

    distros = options.distros.split(",") if options.distros else []

    check_fail_rates("Test", options.test_fail_rates[0],
                     options.test_fail_rates[1])
    # The coarser failure rates are optional and default to the next more specific level's values.
    if not options.task_fail_rates:
        options.task_fail_rates = options.test_fail_rates
    else:
        check_fail_rates("Task", options.task_fail_rates[0],
                         options.task_fail_rates[1])
    if not options.variant_fail_rates:
        options.variant_fail_rates = options.task_fail_rates
    else:
        check_fail_rates("Variant", options.variant_fail_rates[0],
                         options.variant_fail_rates[1])
    if not options.distro_fail_rates:
        options.distro_fail_rates = options.variant_fail_rates
    else:
        check_fail_rates("Distro", options.distro_fail_rates[0],
                         options.distro_fail_rates[1])

    check_days("Reliable days", options.reliable_days)
    check_days("Unreliable days", options.unreliable_days)

    lifecycle = tags.TagsConfig(options.lifecycle_file, cmp_func=compare_tags)

    test_tasks_membership = get_test_tasks_membership(evg_conf)
    # If no tests are specified then the list of tests is generated from the list of tasks.
    if not tests:
        tests = get_tests_from_tasks(tasks, test_tasks_membership)
        if not options.tasks:
            use_test_tasks_membership = True

    commit_first, commit_last = git_commit_range_since("{}.days".format(
        options.unreliable_days))
    commit_prior = git_commit_prior(commit_first)

    # For efficiency purposes, group the tests and process in batches of batch_size.
    test_groups = create_batch_groups(create_test_groups(tests),
                                      options.batch_size)

    for tests in test_groups:
        # Find all associated tasks for the test_group if tasks or tests were not specified.
        if use_test_tasks_membership:
            tasks_set = set()
            for test in tests:
                tasks_set = tasks_set.union(test_tasks_membership[test])
            tasks = list(tasks_set)
        if not tasks:
            print(
                "Warning - No tasks found for tests {}, skipping this group.".
                format(tests))
            continue
        report = tf.HistoryReport(period_type="revision",
                                  start=commit_prior,
                                  end=commit_last,
                                  group_period=options.reliable_days,
                                  project=options.project,
                                  tests=tests,
                                  tasks=tasks,
                                  variants=variants,
                                  distros=distros)
        view_report = report.generate_report()

        # We build up report_combo to check progressively more specific test failure rates.
        report_combo = []
        # TODO EVG-1665: Uncomment this line once this has been supported.
        # for combo in ["test", "task", "variant", "distro"]:
        for combo in ["test", "task", "variant"]:
            report_combo.append(combo)
            if combo == "distro":
                acceptable_fail_rate = options.distro_fail_rates[0]
                unacceptable_fail_rate = options.distro_fail_rates[1]
            elif combo == "variant":
                acceptable_fail_rate = options.variant_fail_rates[0]
                unacceptable_fail_rate = options.variant_fail_rates[1]
            elif combo == "task":
                acceptable_fail_rate = options.task_fail_rates[0]
                unacceptable_fail_rate = options.task_fail_rates[1]
            else:
                acceptable_fail_rate = options.test_fail_rates[0]
                unacceptable_fail_rate = options.test_fail_rates[1]

            # Unreliable tests are analyzed from the entire period.
            update_lifecycle(lifecycle,
                             view_report.view_summary(group_on=report_combo),
                             unreliable_test, True, unacceptable_fail_rate,
                             options.unreliable_test_min_run)

            # Reliable tests are analyzed from the most recent --reliableDays period only.
            (reliable_start_date,
             reliable_end_date) = view_report.last_period()
            update_lifecycle(
                lifecycle,
                view_report.view_summary(group_on=report_combo,
                                         start_date=reliable_start_date,
                                         end_date=reliable_end_date),
                reliable_test, False, acceptable_fail_rate,
                options.reliable_test_min_run)

    # Update the lifecycle_file only if there have been changes.
    if lifecycle.is_modified():
        write_yaml_file(options.lifecycle_file, lifecycle)
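
The cascade for the fail-rate options above (task inherits from test, variant from task, distro from variant) can be summarized with a small sketch; resolve_fail_rates is a hypothetical helper, not part of main():

def resolve_fail_rates(test, task=None, variant=None, distro=None):
    # Each coarser level falls back to the next more specific one when unset,
    # mirroring the chain of checks in main() above.
    task = task or test
    variant = variant or task
    distro = distro or variant
    return {"test": test, "task": task, "variant": variant, "distro": distro}


print(resolve_fail_rates((0.1, 0.3)))
# {'test': (0.1, 0.3), 'task': (0.1, 0.3), 'variant': (0.1, 0.3), 'distro': (0.1, 0.3)}
print(resolve_fail_rates((0.1, 0.3), variant=(0.2, 0.4)))
# task inherits the test rates; distro inherits the explicit variant rates.
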
Example no. 5
0
def generate_exclude_yaml(old_bin_version: str, output: str,
                          logger: logging.Logger) -> None:
    """
    Create a tag file associating multiversion tests to tags for exclusion.

    Compares the BACKPORTS_REQUIRED_FILE on the current branch with the same file on the
    last-lts and/or last-continuous branch to determine which tests should be denylisted.
    """

    output = os.path.abspath(output)
    location, _ = os.path.split(output)
    if not os.path.isdir(location):
        logger.info(f"Cannot write to {output}. Not generating tag file.")
        return

    backports_required_latest = read_yaml_file(
        os.path.join(ETC_DIR, BACKPORTS_REQUIRED_FILE))

    # Get the state of the backports_required_for_multiversion_tests.yml file for the old
    # binary we are running tests against. We do this by using the commit hash from the old
    # mongo shell executable.
    shell_version = {
        MultiversionOptions.LAST_LTS:
        multiversionconstants.LAST_LTS_MONGO_BINARY,
        MultiversionOptions.LAST_CONTINUOUS:
        multiversionconstants.LAST_CONTINUOUS_MONGO_BINARY,
    }[old_bin_version]

    old_version_commit_hash = get_backports_required_hash_for_shell_version(
        mongo_shell_path=shell_version)

    # Get the yaml contents from the old commit.
    logger.info(
        f"Downloading file from commit hash of old branch {old_version_commit_hash}"
    )
    backports_required_old = get_old_yaml(old_version_commit_hash)

    def diff(list1, list2):
        return [elem for elem in (list1 or []) if elem not in (list2 or [])]

    def get_suite_exclusions(version_key):

        _suites_latest = backports_required_latest[version_key]["suites"] or {}
        # Check if the changed syntax for etc/backports_required_for_multiversion_tests.yml has been
        # backported.
        # This variable and all branches where it's not set can be deleted after backporting the change.
        change_backported = version_key in backports_required_old.keys()
        if change_backported:
            _always_exclude = diff(
                backports_required_latest[version_key]["all"],
                backports_required_old[version_key]["all"])
            _suites_old: defaultdict = defaultdict(
                list, backports_required_old[version_key]["suites"])
        else:
            _always_exclude = diff(
                backports_required_latest[version_key]["all"],
                backports_required_old["all"])
            _suites_old: defaultdict = defaultdict(
                list, backports_required_old["suites"])

        return _suites_latest, _suites_old, _always_exclude

    suites_latest, suites_old, always_exclude = get_suite_exclusions(
        old_bin_version.replace("_", "-"))

    tags = _tags.TagsConfig()

    # Tag tests that are excluded from every suite.
    for elem in always_exclude:
        tags.add_tag("js_test", elem["test_file"], BACKPORT_REQUIRED_TAG)

    # Tag tests that are excluded on a suite-by-suite basis.
    for suite in suites_latest.keys():
        test_set = set()
        for elem in diff(suites_latest[suite], suites_old[suite]):
            test_set.add(elem["test_file"])
        for test in test_set:
            tags.add_tag("js_test", test, f"{suite}_{BACKPORT_REQUIRED_TAG}")

    logger.info(f"Writing exclude tags to {output}.")
    tags.write_file(
        filename=output,
        preamble="Tag file that specifies exclusions from multiversion suites."
    )
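
For context, a hypothetical sketch of the dict shape read_yaml_file returns for the backports file in this newer layout; only the test_file fields are consumed by the code above, and every name and path below is made up for illustration.

backports_required_latest = {
    "last-lts": {
        # Tests excluded from every multiversion suite.
        "all": [{"test_file": "jstests/core/always_excluded.js"}],
        # Tests excluded on a per-suite basis.
        "suites": {
            "replica_sets_jscore_passthrough": [
                {"test_file": "jstests/core/suite_specific.js"},
            ],
        },
    },
    "last-continuous": {"all": [], "suites": {}},
}
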
Example no. 6
0
    def test_invalid_path(self):
        invalid_path = "non_existing_file"
        with self.assertRaises(IOError):
            _tags.TagsConfig(invalid_path)
Example no. 7
0
    def setUp(self):
        self.conf = _tags.TagsConfig(TEST_FILE_PATH)