def test_push_does_not_exist(responses):
    """Looking up an unknown revision raises ``PushNotFound``.

    Short revs (<40 chars) trigger the hgmo lookup eagerly in the
    constructor; a full 40-char rev defers it until an attribute such as
    ``id`` is accessed.
    """
    # We hit hgmo when 'rev' is less than 40 characters.
    rev = "foobar"
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(
            branch="integration/autoland", rev="foobar"),
        json={"error": f"unknown revision '{rev}'"},
        status=404,
    )

    with pytest.raises(PushNotFound):
        Push(rev)

    # Otherwise we need to hit hgmo some other way.
    rev = "a" * 40
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(
            branch="integration/autoland", rev=rev),
        json={"error": f"unknown revision '{rev}'"},
        status=404,
    )
    p = Push(rev)
    with pytest.raises(PushNotFound):
        p.id
def test_push_child_raises(responses): rev = "a" * 40 # Try and mozilla-unified are not supported. for branch in ("try", "mozilla-unified"): push = Push(rev, branch=branch) with pytest.raises(ChildPushNotFound): push.child # A push with no children raises. push = Push(rev, branch="integration/autoland") push._id = 100 url = HgRev.JSON_PUSHES_TEMPLATE.format( branch=push.branch, push_id_start=push.id, push_id_end=push.id + 1, ) responses.add( responses.GET, url, json={ "lastpushid": push.id, "pushes": {} }, status=200, ) with pytest.raises(ChildPushNotFound): push.child
def go(repo_dir):
    """Collect regression statistics for backout and backed-out pushes.

    Scans commits -1000..-500 of the repository at ``repo_dir``, then for
    every backout push (and every backed-out push) queries mozci's
    regression data and dumps the findings to two JSON files:

      - ``backout_regressions.json``: label/group regressions detected on
        the backout pushes themselves.
      - ``backedout_regressions.json``: label/group regressions for
        backed-out pushes that lack likely label or group regressions.
    """
    with hglib.open(repo_dir) as hg:
        revs = repository.get_revs(hg, -1000, -500)
        commits = repository.hg_log(hg, revs)

    # Revisions of the backout commits themselves...
    backouts = list(
        set(commit.backedoutby for commit in commits if commit.ever_backedout))
    # ...and of the commits that were backed out.
    backedouts = list(
        set(commit.node for commit in commits if commit.ever_backedout))

    likely_label_count = 0
    possible_label_count = 0
    likely_group_count = 0
    possible_group_count = 0

    backout_regressions = {}
    for backout in tqdm(backouts):
        p = Push(backout)
        label_regressions = p.get_regressions("label")
        likely_label_count += len(p.get_likely_regressions("label"))
        possible_label_count += len(p.get_possible_regressions("label"))

        group_regressions = p.get_regressions("group")
        # Bug fix: these two counters previously accumulated "label"
        # regressions again instead of "group" ones (copy-paste error).
        likely_group_count += len(p.get_likely_regressions("group"))
        possible_group_count += len(p.get_possible_regressions("group"))

        if len(label_regressions) > 0 or len(group_regressions) > 0:
            backout_regressions[backout] = {
                "label": label_regressions,
                "group": group_regressions,
            }

    print(f"Likely labels for backouts: {likely_label_count}")
    print(f"Likely groups for backouts: {likely_group_count}")
    print(f"Possible labels for backouts: {possible_label_count}")
    print(f"Possible groups for backouts: {possible_group_count}")

    backedout_regressions = {}
    for backedout in tqdm(backedouts):
        p = Push(backedout)
        label_regressions = p.get_regressions("label")
        group_regressions = p.get_regressions("group")

        # Record backed-out pushes missing likely regressions in either
        # granularity.  NOTE(review): `or` keeps entries that lack likely
        # regressions in at least one of the two — confirm `and` was not
        # intended.
        if (len(p.get_likely_regressions("label")) == 0
                or len(p.get_likely_regressions("group")) == 0):
            backedout_regressions[backedout] = {
                "label": label_regressions,
                "group": group_regressions,
            }

    with open("backout_regressions.json", "w") as f:
        json.dump(backout_regressions, f)

    with open("backedout_regressions.json", "w") as f:
        json.dump(backedout_regressions, f)
def test_create_push(responses):
    """``create_push`` builds a sibling Push from a push id, on both
    autoland and mozilla-central."""
    def setup_responses(ctx):
        # Reset so each branch pass starts with a fresh set of mocks.
        responses.reset()
        responses.add(
            responses.GET,
            HgRev.JSON_PUSHES_TEMPLATE.format(**ctx),
            json={
                "pushes": {
                    "123": {
                        "changesets": ["123456"],
                        "date": 1213174092,
                        "user": "******",
                    },
                },
            },
            status=200,
        )
        responses.add(
            responses.GET,
            HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(branch=ctx["branch"],
                                                       rev="abcdef"),
            json={"changesets": [{
                "node": "abcdef"
            }]},
            status=200,
        )
        responses.add(
            responses.GET,
            HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(branch=ctx["branch"],
                                                       rev="123456"),
            json={"changesets": [{
                "node": "123456"
            }]},
            status=200,
        )

    ctx = {
        "branch": "integration/autoland",
        "push_id_start": "122",
        "push_id_end": "123",
    }
    setup_responses(ctx)
    p1 = Push("abcdef")
    p2 = p1.create_push(123)
    assert p2.rev == "123456"
    assert p2.id == 123
    assert p2.date == 1213174092
    # NOTE(review): substring check — presumably the Push branch is the
    # short form (e.g. "autoland" in "integration/autoland"); confirm that
    # equality wasn't intended.
    assert p2.branch in ctx["branch"]

    ctx["branch"] = "mozilla-central"
    setup_responses(ctx)
    p1 = Push("abcdef", branch=ctx["branch"])
    p2 = p1.create_push(123)
    assert p2.rev == "123456"
    assert p2.id == 123
    assert p2.date == 1213174092
    assert p2.branch in ctx["branch"]
def test_get_test_selection_data_from_bugbug(responses):
    """Test selection data falls back to the Bugbug HTTP service when the
    decision task has no cached schedules artifact."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    # Index lookup resolving the push's decision task id.
    task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(responses.GET, task_url, status=200, json={"taskId": "a" * 10})

    # Cached schedules artifact is missing (404), forcing the service call.
    cache_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/queue/v1/task/aaaaaaaaaa/artifacts/public/bugbug-push-schedules.json"
    responses.add(responses.GET, cache_url, status=404)

    url = f"{bugbug.BUGBUG_BASE_URL}/push/{branch}/{rev}/schedules"
    responses.add(responses.GET, url, status=200, json=SCHEDULES_EXTRACT)

    data = push.get_test_selection_data()
    assert data == SCHEDULES_EXTRACT

    # Exactly one call per endpoint, in fallback order.
    assert len(responses.calls) == 3
    assert [(call.request.method, call.request.url)
            for call in responses.calls] == [
                ("GET", task_url),
                ("GET", cache_url),
                ("GET", url),
            ]
def test_get_test_selection_data_from_bugbug_handle_exceeded_timeout(
        responses, monkeypatch):
    """A Bugbug service that keeps answering 202 ("still processing") makes
    ``get_test_selection_data`` raise ``BugbugTimeoutException`` once the
    retry budget is spent."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(responses.GET, task_url, status=200, json={"taskId": "a" * 10})

    cache_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/queue/v1/task/aaaaaaaaaa/artifacts/public/bugbug-push-schedules.json"
    responses.add(responses.GET, cache_url, status=404)

    # 202 means the service hasn't finished computing schedules yet.
    url = f"{bugbug.BUGBUG_BASE_URL}/push/{branch}/{rev}/schedules"
    responses.add(responses.GET, url, status=202)

    # Shrink timeout/interval so the test retries 3 times quickly.
    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_TIMEOUT", 3)
    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_INTERVAL", 1)

    with pytest.raises(bugbug.BugbugTimeoutException) as e:
        push.get_test_selection_data()
    assert str(
        e.value) == "Timed out waiting for result from Bugbug HTTP Service"

    assert len(responses.calls) == 5
    assert [(call.request.method, call.request.url)
            for call in responses.calls] == [
                ("GET", task_url),
                ("GET", cache_url),
                # We retry 3 times the call to the Bugbug HTTP service
                ("GET", url),
                ("GET", url),
                ("GET", url),
            ]
def test_push_bugs_multiple(responses):
    """A push's ``bugs`` is the union of bug numbers across all of its
    changesets, with duplicates collapsed."""
    rev = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

    # Three changesets sharing one bug ("1617050") plus two unique bugs.
    changesets = [
        {"bugs": [{"no": "1617050"}, {"no": "123"}]},
        {"bugs": [{"no": "1617050"}]},
        {"bugs": [{"no": "456"}]},
    ]
    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": changesets},
        status=200,
    )

    push = Push(rev)
    assert push.bugs == {"123", "456", "1617050"}
def test_push_parent_on_autoland(responses):
    """On autoland, the parent push is resolved via json-pushes using the
    previous push id."""
    ctx = {
        "branch": "integration/autoland",
        "push_id_start": "121",
        "push_id_end": "122",
    }
    responses.add(
        responses.GET,
        HgRev.JSON_PUSHES_TEMPLATE.format(**ctx),
        json={
            "pushes": {
                "122": {
                    "changesets": [{
                        "node": "b" * 40
                    }],
                    "date": 1213174092,
                    "user": "******",
                },
            },
        },
        status=200,
    )

    p1 = Push("a" * 40)
    # Pre-seed the push id so no extra lookup is needed; parent is id - 1.
    p1._id = 123
    parent = p1.parent
    assert parent.id == 122
    assert parent.rev == "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
def test_push_parent_on_try_fails_when_not_a_push_head(responses,
                                                       create_changesets):
    """On try, resolving the parent raises ``ParentPushNotFound`` when the
    base revision is not a push head on any known branch."""
    changesets = create_changesets(3)
    head = changesets[-1]["node"]
    ctx = {
        "branch": "try",
        "rev": head,
    }
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(**ctx),
        json={"changesets": changesets},
        status=200,
    )

    # We raise if rev is not found or a push head anywhere.
    ctx["rev"] = changesets[0]["parents"][0]
    for branch in (
            "mozilla-central",
            "mozilla-beta",
            "mozilla-release",
            "integration/autoland",
    ):
        ctx["branch"] = branch
        responses.add(
            responses.GET,
            HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(**ctx),
            json={"changesets": changesets},
            status=200,
        )

    push = Push(head, branch="try")
    with pytest.raises(ParentPushNotFound):
        push.parent
def test_classify_almost_bad_push(monkeypatch, test_selection_data,
                                  likely_regressions, are_cross_config):
    """Parametrized: a push that is close to bad still classifies as
    UNKNOWN, with every group landing in the ``unknown`` bucket."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        are_cross_config,
    )

    assert push.classify() == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
        ),
    )
def test_classify_almost_good_push(monkeypatch, test_selection_data,
                                   are_cross_config):
    """Parametrized: with no likely regressions but classification
    heuristics disabled, a nearly-good push still classifies as UNKNOWN."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    # Empty set: no likely regressions for this push.
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        set(),
        are_cross_config,
    )

    assert push.classify(
        unknown_from_regressions=False,
        consistent_failures_counts=None,
        consider_children_pushes_configs=False,
    ) == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
        ),
    )
def test_backfill_trigger_hook_error(responses, create_task):
    """A 500 from the hooks trigger endpoint propagates out of
    ``Task.backfill`` as ``TaskclusterRestFailure``."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    decision_task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(
        responses.GET, decision_task_url, status=200, json={"taskId": "a" * 10}
    )
    responses.add(
        responses.GET,
        get_artifact_url(push.decision_task.id, "public/actions.json"),
        status=200,
        json=ACTIONS_ARTIFACT_EXTRACT,
    )

    # Credentials are required before the hook can be triggered.
    config._config["taskcluster_firefox_ci"] = {
        "client_id": "a client id",
        "access_token": "an access token",
    }

    hookGroupId = ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookGroupId"]
    # hookId is embedded in the URL path, so '/' must be percent-encoded.
    hookId = ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookId"].replace("/", "%2F")
    responses.add(
        responses.POST,
        f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/hooks/v1/hooks/{hookGroupId}/{hookId}/trigger",
        status=500,
    )

    task = create_task(label="foobar")
    with pytest.raises(TaskclusterRestFailure):
        task.backfill(push)
def test_backfill_incomplete_secret(responses, secret_content, create_task):
    """Backfill asserts with a clear message when the Taskcluster Firefox CI
    secret (parametrized via ``secret_content``) is incomplete."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    decision_task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(
        responses.GET, decision_task_url, status=200, json={"taskId": "a" * 10}
    )
    responses.add(
        responses.GET,
        get_artifact_url(push.decision_task.id, "public/actions.json"),
        status=200,
        json=ACTIONS_ARTIFACT_EXTRACT,
    )

    # Update configuration
    config._config["taskcluster_firefox_ci"] = secret_content

    task = create_task(label="foobar")
    with pytest.raises(
            AssertionError,
            match="Missing Taskcluster Firefox CI credentials in mozci config secret",
    ):
        task.backfill(push)
def test_retrigger_should_not_retrigger(responses, create_task):
    """Retriggering a task that lacks the retrigger tag is a no-op: no HTTP
    mocks are registered, so any network call would fail the test."""
    branch = "autoland"
    revision = "a" * 40
    target_push = Push(revision, branch)

    plain_task = create_task(label="foobar")
    plain_task.retrigger(target_push)
def test_caching_of_push(cache):
    """Accessing ``push.tasks`` stores the task list in the cache under
    ``<branch>/<rev>/tasks`` and the cached value matches the live one."""
    # A recent push will have almost no tasks in AD, few days later it will have
    # all data come from AD and after 6 weeks it will have no data there.
    # Results data for a task will either come via AD or through the errorsummary artifact
    # via Taskcluster. Regardless of which source was used we store in the same data
    # in the cache.
    # Once this push is older than a year update the revision
    # Once this push is older than 6 weeks the test will run slower because
    # all test tasks results will come from Taskcluster
    REV = "08c29f9d87799463cdf99ab81f08f62339b49328"  # Push from Jul. 23, 2020.
    BRANCH = "mozilla-central"
    TASKS_KEY = "{}/{}/tasks".format(BRANCH, REV)

    # Making sure there's nothing left in the cache
    if cache.get(TASKS_KEY):
        cache.forget(TASKS_KEY)
    assert cache.get(TASKS_KEY) is None

    push = Push(REV, branch=BRANCH)
    # Q: Calling push.tasks a second time would hit the cache; Should we test that scenario?
    assert len(push.tasks) > 0
    cached_tasks = cache.get(TASKS_KEY)
    assert cached_tasks is not None

    # Expected task count for this fixed historical push.
    TOTAL_TEST_TASKS = 3517
    # Testing that the tasks associated to a push have been cached
    assert len(cached_tasks) == TOTAL_TEST_TASKS
    assert len(cached_tasks) == len(push.tasks)
    assert cached_tasks == push.tasks
def test_classify_good_push_only_intermittent_failures(monkeypatch):
    """A push whose failing groups are all non-cross-config and selected
    with low/no confidence classifies as GOOD, every group intermittent."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    # Low bugbug confidence for group1/group2; group3/group4 likely regress
    # but no group is a cross-config failure.
    test_selection_data = {"groups": {"group1": 0.7, "group2": 0.3}}
    likely_regressions = {"group3", "group4"}
    are_cross_config = [False for i in range(0, len(GROUP_SUMMARIES_DEFAULT))]
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        are_cross_config,
    )

    assert push.classify() == (
        PushStatus.GOOD,
        Regressions(
            real={},
            # All groups aren't cross config failures and were either selected by bugbug
            # with low confidence or not at all (no confidence)
            intermittent={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
            unknown={},
        ),
    )
def run(args):
    """Summarize scheduling effectiveness for a push.

    Returns a two-row table (header row + value row) covering how many
    tasks were scheduled, the scheduled duration, backout status, and how
    many label regressions were caught vs. missed by the schedule.
    """
    push = Push(args.rev)

    scheduled = push.scheduled_task_labels
    num_scheduled = len(scheduled)
    num_total = len(push.target_task_labels)
    percentage = round(float(num_scheduled) / num_total * 100, 1)

    possible = push.get_possible_regressions("label")
    likely = push.get_likely_regressions("label")
    all_regressions = possible | likely

    header = [
        'Tasks Scheduled',
        'Tasks Total',
        'Percentage',
        'Total Hours (scheduled)',
        'Backed Out',
        'Regressions (possible)',
        'Regressions (likely)',
        'Caught',
        'Missed',
    ]
    values = [
        num_scheduled,
        num_total,
        percentage,
        push.scheduled_duration,
        push.backedout,
        len(possible),
        len(likely),
        # Caught: regressing labels the schedule actually ran.
        len(all_regressions & scheduled),
        # Missed: regressing labels the schedule skipped.
        len(all_regressions - scheduled),
    ]
    return [header, values]
def test_push_tasks_with_cached_completed_tasks(monkeypatch, responses):
    """When the cache already holds a completed task list, ``push.tasks``
    returns it instead of rebuilding from Taskcluster."""
    rev = "abcdef"
    branch = "autoland"

    cached_tasks = [
        Task.create(id=1, label="test-task", result="passed", state="completed")
    ]
    # Every cache read returns the pre-built task list.
    monkeypatch.setattr(config.cache, "get", lambda x: cached_tasks)

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": [{
            "node": rev,
            "pushdate": [1638349140]
        }]},
        status=200,
    )
    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.autoland.revision.abcdef.taskgraph.decision",
        json={"taskId": 1},
        status=200,
    )

    push = Push(rev, branch)
    tasks = push.tasks
    assert len(tasks) == 1
def test_create_push(responses):
    """``create_push`` builds a sibling Push from a push id (legacy HGMO
    json API variant)."""
    responses.add(
        responses.GET,
        "https://hg.mozilla.org/integration/autoland/json-pushes?version=2&startID=122&endID=123",
        json={
            "pushes": {
                "123": {
                    "changesets": ["123456"],
                    "date": 1213174092,
                    "user": "******",
                },
            },
        },
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="abcdef"),
        json={"node": "abcdef"},
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="123456"),
        json={"node": "123456"},
        status=200,
    )

    p1 = Push("abcdef")
    p2 = p1.create_push(123)
    assert p2.rev == "123456"
    assert p2.id == 123
    assert p2.date == 1213174092
def run(args):
    """Diff the per-task manifest (group) sets between two revisions.

    For every task label present in both pushes, logs whether the sorted
    group lists match; when they differ, logs a colorized unified diff.
    Returns an empty table (this command produces log output only).
    """
    manifestsA = get_manifests_by_task(Push(args.revA, branch=args.branch))
    manifestsB = get_manifests_by_task(Push(args.revB, branch=args.branch))

    labels = sorted(set(list(manifestsA.keys()) + list(manifestsB.keys())))
    if args.task_filter:
        # Keep only labels matching the user-supplied regex.
        fltr = re.compile(args.task_filter)
        labels = filter(fltr.search, labels)

    for label in labels:
        logger.info(f"Processing {label}")
        if label not in manifestsA:
            logger.warning(f"{label} not run in rev1!")
            continue
        if label not in manifestsB:
            logger.warning(f"{label} not run in rev2!")
            continue

        groupsA = sorted(manifestsA[label])
        groupsB = sorted(manifestsB[label])
        if groupsA == groupsB:
            logger.info(f"{label} matches!")
            continue

        logger.warning(f"{label} doesn't match!")
        out = unified_diff(groupsA,
                           groupsB,
                           fromfile=f'Rev 1: {args.revA}',
                           tofile=f'Rev 2: {args.revB}',
                           n=8)
        diff = []
        for line in out:
            line = line.rstrip()
            # Wrap additions/removals in tags for loguru's ansi renderer.
            if line.startswith('+'):
                line = f"<green>{line}</green>"
            elif line.startswith('-'):
                line = f"<red>{line}</red>"
            diff.append(line)
        diff = '\n'.join(diff)
        logger.opt(ansi=True).info("Diff:\n" + f"{diff}")

    return []
def test_generate_all_shadow_scheduler_tasks(responses):
    """``generate_all_shadow_scheduler_tasks`` yields (name, task-set)
    pairs, ordered alphabetically by shadow-scheduler name."""
    rev = "a" * 40
    shadow_schedulers = (
        (
            "bar",
            ["task-1", "task-3", "task-4"],
        ),  # names will be generated alphabetically
        ("foo", ["task-2", "task-4"]),
    )

    push = Push(rev)
    responses.add(
        responses.GET,
        get_index_url(push.index + ".taskgraph.decision"),
        json={"taskId": 1},
        status=200,
    )

    # Synthetic task ids starting at 2 (1 is the decision task).
    id = count(2)
    responses.add(
        responses.GET,
        get_artifact_url(1, "public/task-graph.json"),
        json={
            next(id): {
                "label": f"source-test-shadow-scheduler-{s[0]}"
            }
            for s in shadow_schedulers
        },
        status=200,
    )

    # Restart the counter so index lookups hand out the same ids again.
    id = count(2)
    for ss in shadow_schedulers:
        s_id = next(id)
        responses.add(
            responses.GET,
            get_index_url(f"{push.index}.source.shadow-scheduler-{ss[0]}"),
            json={"taskId": s_id},
            status=200,
        )

        responses.add(
            responses.GET,
            get_artifact_url(s_id, "public/shadow-scheduler/optimized-tasks.json"),
            stream=True,
            json={next(id): {
                "label": task
            } for task in ss[1]},
            status=200,
        )

    # retrieve the data
    for i, (name, tasks) in enumerate(push.generate_all_shadow_scheduler_tasks()):
        print(i, name, tasks)
        assert name == shadow_schedulers[i][0]
        assert tasks == set(shadow_schedulers[i][1])
def handle(self):
    """Print a label/result table for every task of the requested push.

    Tasks are listed alphabetically by label; tasks without a result yet
    are shown as "running".
    """
    push = Push(self.argument("rev"), self.argument("branch"))

    rows = [
        [task.label, task.result or "running"]
        for task in sorted(push.tasks, key=lambda t: t.label)
    ]
    self.line(tabulate(rows, headers=["Label", "Result"]))
def test_classify(monkeypatch, classify_regressions_return_value,
                  expected_result):
    """``classify()`` surfaces whatever status ``classify_regressions``
    produced (parametrized fixture pair)."""
    branch = "autoland"
    rev = "a" * 40
    push = Push(rev, branch)

    def fake_classify_regressions(self, *args, **kwargs):
        # Short-circuit the real regression analysis.
        return classify_regressions_return_value

    monkeypatch.setattr(Push, "classify_regressions", fake_classify_regressions)
    assert push.classify()[0] == expected_result
def main(argv):
    """Aggregate wpt statuses per (test, subtest, config) for the "-fis-"
    configs of a push and print them as CSV.

    argv: [prog, branch, revs]. Returns 0 on success, 1 on usage error.
    """
    if len(argv) != 3:
        print(f"{sys.argv[0]} branch revs")
        return 1

    branch = argv[1]
    # NOTE(review): only the first comma-separated rev is used despite the
    # plural name — confirm whether multiple revs were meant to be handled.
    revs = argv[2].split(",")[0]

    # {
    #     (test, subtest): {
    #         config: {
    #             status: int
    #         }
    #     }
    # }
    results = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(int)))

    push = Push(revs, branch)
    for (config, group), summary in push.config_group_summaries.items():
        # Only Fission configs running web-platform tests are of interest.
        if not ("-fis-" in config and group.startswith("testing/web-platform")):
            continue
        print(config, group)
        for task in summary.tasks:
            paths = [a for a in task.artifacts if a.endswith("wptreport.json")]
            for path in paths:
                run = task.get_artifact(path)
                for result in run["results"]:
                    # Top-level test result uses an empty subtest name.
                    name = (result["test"], "")
                    status = result["status"]
                    results[name][config][status] += 1
                    for subtest in result["subtests"]:
                        name = (result["test"], subtest["name"])
                        status = subtest["status"]
                        results[name][config][status] += 1

    # figure out what other statuses exists
    print("test,subtest,config,OK,PASS,SKIP,FAIL")
    for (test, subtest), configs in results.items():
        for config, statuses in configs.items():
            line = [
                test,
                subtest,
                config,
                *[str(statuses[k]) for k in ["OK", "PASS", "SKIP", "FAIL"]],
            ]
            print(",".join(line))

    return 0
def test_classify_bad_push_some_real_failures(monkeypatch):
    """High-confidence, cross-config failing groups classify the push as
    BAD; other groups split into intermittent/unknown buckets, with
    matching retrigger/backfill suggestions."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    test_selection_data = {
        "groups": {
            "group1": 0.99,
            "group2": 0.95,
            "group3": 0.91
        }
    }
    likely_regressions = {"group1", "group2", "group3"}
    # Alternate cross-config flags: even indices True, odd indices False.
    are_cross_config = [
        False if i % 2 else True for i in range(0, len(GROUP_SUMMARIES_DEFAULT))
    ]
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        set(),
        are_cross_config,
    )

    assert push.classify(
        unknown_from_regressions=False, consider_children_pushes_configs=False
    ) == (
        PushStatus.BAD,
        Regressions(
            # group1 & group3 were both selected by bugbug with high confidence, likely to regress
            # and are cross config failures
            real={
                "group1": make_tasks("group1"),
                "group3": make_tasks("group3")
            },
            # group4 isn't a cross config failure and was not selected by bugbug (no confidence)
            intermittent={"group4": make_tasks("group4")},
            # group2 isn't a cross config failure but was selected with high confidence by bugbug
            # group5 is a cross config failure but was not selected by bugbug nor likely to regress
            unknown={
                "group2": make_tasks("group2"),
                "group5": make_tasks("group5")
            },
        ),
        ToRetriggerOrBackfill(
            real_retrigger={"group2": make_tasks("group2")},
            intermittent_retrigger={"group5": make_tasks("group5")},
            backfill={},
        ),
    )
def test_classify(monkeypatch, classify_regressions_return_value,
                  expected_result):
    """``classify()`` surfaces the status returned by
    ``classify_regressions`` (which also yields retrigger/backfill info)."""
    branch = "autoland"
    rev = "a" * 40
    push = Push(rev, branch)

    def fake_classify_regressions(self, *args, **kwargs):
        # Short-circuit the real analysis; the second element mirrors the
        # real API's empty retrigger/backfill payload.
        empty_actions = ToRetriggerOrBackfill(
            real_retrigger={},
            intermittent_retrigger={},
            backfill={},
        )
        return classify_regressions_return_value, empty_actions

    monkeypatch.setattr(Push, "classify_regressions", fake_classify_regressions)
    assert push.classify()[0] == expected_result
def classify_commands_pushes(branch: str, from_date: str, to_date: str,
                             rev: str) -> List[Push]:
    """Resolve CLI arguments into the list of pushes to classify.

    Exactly one selection mode must be used: either a single revision
    (``rev``) or a date range (``from_date`` required, ``to_date``
    defaulting to today). Dates may be ``yyyy-mm-dd`` or a human
    expression arrow can dehumanize (e.g. "1 days ago").

    Raises:
        Exception: when both/neither selection modes are given, or a date
            fails to parse in either accepted form.
    """
    # XOR: a rev and a date range are mutually exclusive.
    if not (bool(rev) ^ bool(from_date or to_date)):
        raise Exception(
            "You must either provide a single push revision with --rev or define at least --from-date option to classify a range of pushes (note: --to-date will default to current time if not given)."
        )

    if rev:
        pushes = [Push(rev, branch)]
    else:
        if not from_date:
            raise Exception(
                "You must provide at least --from-date to classify a range of pushes (note: --to-date will default to current time if not given)."
            )

        now = datetime.datetime.now()
        if not to_date:
            to_date = datetime.datetime.strftime(now, "%Y-%m-%d")

        arrow_now = arrow.get(now)
        # Each bound: try strict ISO first, then a human expression.
        try:
            datetime.datetime.strptime(from_date, "%Y-%m-%d")
        except ValueError:
            try:
                from_date = arrow_now.dehumanize(from_date).format(
                    "YYYY-MM-DD")
            except ValueError:
                raise Exception(
                    'Provided --from-date should be a date in yyyy-mm-dd format or a human expression like "1 days ago".'
                )

        try:
            datetime.datetime.strptime(to_date, "%Y-%m-%d")
        except ValueError:
            try:
                to_date = arrow_now.dehumanize(to_date).format("YYYY-MM-DD")
            except ValueError:
                raise Exception(
                    'Provided --to-date should be a date in yyyy-mm-dd format or a human expression like "1 days ago".'
                )

        pushes = make_push_objects(from_date=from_date,
                                   to_date=to_date,
                                   branch=branch)

    return pushes
def test_retrigger_should_retrigger(responses, create_task):
    """A task tagged ``retrigger: true`` triggers the retrigger hook with
    the expected payload and returns the new task id."""
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    decision_task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(
        responses.GET, decision_task_url, status=200, json={"taskId": "a" * 10}
    )
    responses.add(
        responses.GET,
        get_artifact_url(push.decision_task.id, "public/actions.json"),
        status=200,
        json=RETRIGGER_ACTIONS_ARTIFACT_EXTRACT,
    )

    # Credentials are required before the hook can be triggered.
    config._config["taskcluster_firefox_ci"] = {
        "client_id": "a client id",
        "access_token": "an access token",
    }

    task = create_task(label="foobar", tags={"retrigger": "true"})

    hookGroupId = RETRIGGER_ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookGroupId"]
    # hookId is embedded in the URL path, so '/' must be percent-encoded.
    hookId = RETRIGGER_ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookId"].replace(
        "/", "%2F"
    )
    # The trigger request must carry the action's payload plus the user
    # context (times, task group, task id) — enforced via the matcher.
    hookPayload = copy.deepcopy(
        RETRIGGER_ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookPayload"]
    )
    hookPayload["user"] = {
        "input": {"times": 3},
        "taskGroupId": push.decision_task.id,
        "taskId": task.id,
    }
    responses.add(
        responses.POST,
        f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/hooks/v1/hooks/{hookGroupId}/{hookId}/trigger",
        status=200,
        json={"status": {"taskId": "new-retrigger-task"}},
        match=[matchers.json_params_matcher(hookPayload)],
    )

    assert task.retrigger(push) == "new-retrigger-task"
def test_iterate_parents(responses):
    """``_iterate_parents`` walks pushes in descending push-id order,
    starting from the current push's id."""
    rev = "a" * 40
    branch = "integration/autoland"
    push = Push(rev, branch)
    push_id = 10
    depth = 5

    # automationrelevance provides the push id for the starting rev.
    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/{branch}/json-automationrelevance/{rev}",
        json={"changesets": [
            {
                "pushid": push_id
            },
        ]},
        status=200,
    )

    # One batched json-pushes request covers the requested depth; each
    # ancestor gets a distinct 40-char node ("b"*40, "c"*40, ...).
    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/{branch}/json-pushes?version=2&full=1&startID={push_id-2-depth}&endID={push_id-1}",
        json={
            "pushes": {
                push_id - i: {
                    "changesets": [{
                        "node": chr(ord("a") + i) * 40,
                        "desc": "A nice description about Bug 1234567",
                    }],
                    "date": 1,
                }
                for i in range(1, depth + 2)
            }
        },
        status=200,
    )

    for other in push._iterate_parents(depth):
        assert other.id == push_id
        push_id -= 1
def test_classify_almost_bad_push(monkeypatch, test_selection_data,
                                  likely_regressions, are_cross_config,
                                  to_retrigger):
    """Parametrized: an almost-bad push classifies as UNKNOWN with every
    group in the ``unknown`` bucket, alongside the parametrized
    retrigger/backfill expectations."""
    branch = "autoland"
    rev = "a" * 40
    push = Push(rev, branch)

    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        set(),
        are_cross_config,
    )

    # Expand the parametrized group names into concrete task lists and
    # merge them over empty defaults.
    expected_actions = {
        "real_retrigger": {},
        "intermittent_retrigger": {},
        "backfill": {},
    }
    for key, groups in to_retrigger.items():
        to_retrigger[key] = {group: make_tasks(group) for group in groups}
    expected_actions.update(to_retrigger)

    expected_unknown = {
        name: make_tasks(name)
        for name in ("group1", "group2", "group3", "group4", "group5")
    }

    assert push.classify(
        unknown_from_regressions=False,
        consistent_failures_counts=None,
        consider_children_pushes_configs=False,
    ) == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown=expected_unknown,
        ),
        ToRetriggerOrBackfill(**expected_actions),
    )