Example #1
    def test_round_trip(self):
        graph = TaskGraph(
            tasks={
                "a":
                Task(
                    kind="fancy",
                    label="a",
                    description="Task A",
                    attributes={},
                    dependencies={"prereq": "b"},  # must match edges, below
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def"},
                ),
                "b":
                Task(
                    kind="pre",
                    label="b",
                    attributes={},
                    dependencies={},
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def2"},
                ),
            },
            graph=Graph(nodes={"a", "b"}, edges={("a", "b", "prereq")}),
        )

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)
Example #2
    def test_create_tasks_fails_if_create_fails(self, create_task):
        "create_tasks fails if a single create_task call fails"
        tasks = {
            "tid-a":
            Task(kind="test",
                 label="a",
                 attributes={},
                 task={"payload": "hello world"}),
        }
        label_to_taskid = {"a": "tid-a"}
        graph = Graph(nodes={"tid-a"}, edges=set())
        taskgraph = TaskGraph(tasks, graph)

        def fail(*args):
            print("UHOH")
            raise RuntimeError("oh noes!")

        create_task.side_effect = fail

        with self.assertRaises(RuntimeError):
            create.create_tasks(
                GRAPH_CONFIG,
                taskgraph,
                label_to_taskid,
                {"level": "4"},
                decision_task_id="decisiontask",
            )
Example #3
    def test_create_tasks(self):
        tasks = {
            "tid-a":
            Task(kind="test",
                 label="a",
                 attributes={},
                 task={"payload": "hello world"}),
            "tid-b":
            Task(kind="test",
                 label="b",
                 attributes={},
                 task={"payload": "hello world"}),
        }
        label_to_taskid = {"a": "tid-a", "b": "tid-b"}
        graph = Graph(nodes={"tid-a", "tid-b"},
                      edges={("tid-a", "tid-b", "edge")})
        taskgraph = TaskGraph(tasks, graph)

        create.create_tasks(
            GRAPH_CONFIG,
            taskgraph,
            label_to_taskid,
            {"level": "4"},
            decision_task_id="decisiontask",
        )

        for tid, task in self.created_tasks.items():
            self.assertEqual(task["payload"], "hello world")
            self.assertEqual(task["schedulerId"], "domain-level-4")
            # make sure the dependencies exist, at least
            for depid in task.get("dependencies", []):
                if depid == "decisiontask":
                    # Don't look for decisiontask here
                    continue
                self.assertIn(depid, self.created_tasks)
Example #4
def get_filtered_taskgraph(taskgraph, tasksregex):
    """
    Filter all the tasks on the basis of a regular expression
    and return a new TaskGraph object
    """
    from gecko_taskgraph.graph import Graph
    from gecko_taskgraph.taskgraph import TaskGraph

    # return original taskgraph if no regular expression is passed
    if not tasksregex:
        return taskgraph
    named_links_dict = taskgraph.graph.named_links_dict()
    filteredtasks = {}
    filterededges = set()
    regexprogram = re.compile(tasksregex)

    for key in taskgraph.graph.visit_postorder():
        task = taskgraph.tasks[key]
        if regexprogram.match(task.label):
            filteredtasks[key] = task
            for depname, dep in named_links_dict[key].items():
                if regexprogram.match(dep):
                    filterededges.add((key, dep, depname))
    filtered_taskgraph = TaskGraph(
        filteredtasks, Graph(set(filteredtasks), filterededges)
    )
    return filtered_taskgraph
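
A minimal usage sketch of the filter above; the regex is illustrative and `taskgraph` is assumed to be an existing TaskGraph:

# Hypothetical call: keep only tasks whose labels start with "test-".
filtered = get_filtered_taskgraph(taskgraph, r"test-.*")
# Only matching tasks survive, plus edges whose endpoints both matched.
assert all(t.label.startswith("test-") for t in filtered.tasks.values())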
Example #5
def inner(tasks):
    label_to_taskid = {k: k + "-tid" for k in tasks}
    for label, task_id in label_to_taskid.items():
        tasks[label].task_id = task_id
    graph = Graph(nodes=set(tasks), edges=set())
    taskgraph = TaskGraph(tasks, graph)
    return taskgraph, label_to_taskid
Example #6
def make_graph(*tasks_and_edges, **kwargs):
    tasks = {t.label: t for t in tasks_and_edges if isinstance(t, Task)}
    edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
    tg = TaskGraph(tasks, graph.Graph(set(tasks), edges))

    if kwargs.get("deps", True):
        # set dependencies based on edges
        for l, r, name in tg.graph.edges:
            tg.tasks[l].dependencies[name] = r

    return tg
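
A short, hypothetical call to this helper; the Task keyword arguments follow the form used in the other examples:

# Two tasks plus one edge: ("t1", "b1", "artifact") means t1 -> b1.
b = Task(kind="build", label="b1", attributes={}, task={})
t = Task(kind="test", label="t1", attributes={}, task={})
tg = make_graph(b, t, ("t1", "b1", "artifact"))
# deps defaults to True, so the edge is mirrored into dependencies:
assert tg.tasks["t1"].dependencies == {"artifact": "b1"}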
Example #7
    def default_matches(self, attributes, parameters):
        method = target_tasks.get_method("default")
        graph = TaskGraph(
            tasks={
                "a": Task(kind="build",
                          label="a",
                          attributes=attributes,
                          task={}),
            },
            graph=Graph(nodes={"a"}, edges=set()),
        )
        return "a" in method(graph, parameters, {})
Example #8
def tg(request):
    if not hasattr(request.module, "TASKS"):
        pytest.fail(
            "'tg' fixture used from a module that didn't define the TASKS variable"
        )

    tasks = request.module.TASKS
    for task in tasks:
        task.setdefault("task", {})
        task["task"].setdefault("tags", {})

    tasks = {t["label"]: Task(**t) for t in tasks}
    return TaskGraph(tasks, Graph(tasks.keys(), set()))
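
A hypothetical test module built around this fixture; TASKS is the module-level variable the fixture requires:

TASKS = [
    {"kind": "build", "label": "a", "attributes": {}, "task": {}},
    {"kind": "test", "label": "b", "attributes": {}, "task": {}},
]

def test_all_labels_present(tg):
    # The fixture turns each dict into a Task keyed by its label.
    assert set(tg.graph.nodes) == {"a", "b"}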
Example #9
    def make_task_graph(self):
        tasks = {
            "a":
            Task(kind=None, label="a", attributes={}, task={}),
            "b":
            Task(kind=None, label="b", attributes={"at-at": "yep"}, task={}),
            "c":
            Task(kind=None,
                 label="c",
                 attributes={"run_on_projects": ["try"]},
                 task={}),
        }
        graph = Graph(nodes=set("abc"), edges=set())
        return TaskGraph(tasks, graph)
Example #10
    def test_create_task_without_dependencies(self):
        "a task with no dependencies depends on the decision task"
        tasks = {
            "tid-a":
            Task(kind="test",
                 label="a",
                 attributes={},
                 task={"payload": "hello world"}),
        }
        label_to_taskid = {"a": "tid-a"}
        graph = Graph(nodes={"tid-a"}, edges=set())
        taskgraph = TaskGraph(tasks, graph)

        create.create_tasks(
            GRAPH_CONFIG,
            taskgraph,
            label_to_taskid,
            {"level": "4"},
            decision_task_id="decisiontask",
        )

        for tid, task in self.created_tasks.items():
            self.assertEqual(task.get("dependencies"), ["decisiontask"])
Example #11
class TestTaskGraph(unittest.TestCase):

    maxDiff = None

    def test_taskgraph_to_json(self):
        tasks = {
            "a":
            Task(
                kind="test",
                label="a",
                description="Task A",
                attributes={"attr": "a-task"},
                task={"taskdef": True},
            ),
            "b":
            Task(
                kind="test",
                label="b",
                attributes={},
                task={"task": "def"},
                optimization={"skip-unless-has-relevant-tests": None},
                # note that this dep is ignored; it is superseded by the
                # dependency derived from the taskgraph's edges
                dependencies={"first": "a"},
            ),
        }
        graph = Graph(nodes=set("ab"), edges={("a", "b", "edgelabel")})
        taskgraph = TaskGraph(tasks, graph)

        res = taskgraph.to_json()

        self.assertEqual(
            res,
            {
                "a": {
                    "kind": "test",
                    "label": "a",
                    "description": "Task A",
                    "attributes": {
                        "attr": "a-task",
                        "kind": "test"
                    },
                    "task": {
                        "taskdef": True
                    },
                    "dependencies": {
                        "edgelabel": "b"
                    },
                    "soft_dependencies": [],
                    "if_dependencies": [],
                    "optimization": None,
                },
                "b": {
                    "kind": "test",
                    "label": "b",
                    "description": "",
                    "attributes": {
                        "kind": "test"
                    },
                    "task": {
                        "task": "def"
                    },
                    "dependencies": {},
                    "soft_dependencies": [],
                    "if_dependencies": [],
                    "optimization": {
                        "skip-unless-has-relevant-tests": None
                    },
                },
            },
        )

    def test_round_trip(self):
        graph = TaskGraph(
            tasks={
                "a":
                Task(
                    kind="fancy",
                    label="a",
                    description="Task A",
                    attributes={},
                    dependencies={"prereq": "b"},  # must match edges, below
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def"},
                ),
                "b":
                Task(
                    kind="pre",
                    label="b",
                    attributes={},
                    dependencies={},
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def2"},
                ),
            },
            graph=Graph(nodes={"a", "b"}, edges={("a", "b", "prereq")}),
        )

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)

    simple_graph = TaskGraph(
        tasks={
            "a":
            Task(
                kind="fancy",
                label="a",
                attributes={},
                dependencies={"prereq": "b"},  # must match edges, below
                optimization={"skip-unless-has-relevant-tests": None},
                task={"task": "def"},
            ),
            "b":
            Task(
                kind="pre",
                label="b",
                attributes={},
                dependencies={},
                optimization={"skip-unless-has-relevant-tests": None},
                task={"task": "def2"},
            ),
        },
        graph=Graph(nodes={"a", "b"}, edges={("a", "b", "prereq")}),
    )

    def test_contains(self):
        assert "a" in self.simple_graph
        assert "c" not in self.simple_graph
Example #12
def make_opt_graph(*tasks_and_edges):
    tasks = {t.task_id: t for t in tasks_and_edges if isinstance(t, Task)}
    edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
    return TaskGraph(tasks, graph.Graph(set(tasks), edges))
Example #13
def release_promotion_action(parameters, graph_config, input, task_group_id, task_id):
    release_promotion_flavor = input["release_promotion_flavor"]
    promotion_config = graph_config["release-promotion"]["flavors"][
        release_promotion_flavor
    ]
    release_history = {}
    product = promotion_config["product"]

    next_version = str(input.get("next_version") or "")
    if promotion_config.get("version-bump", False):
        # The input was forced through str() above, hence the "None" check
        if next_version in ["", "None"]:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor)
            )

    if promotion_config.get("partial-updates", False):
        partial_updates = input.get("partial_updates", {})
        if not partial_updates and release_level(parameters["project"]) == "production":
            raise Exception(
                "`partial_updates` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor)
            )
        balrog_prefix = product.title()
        os.environ["PARTIAL_UPDATES"] = json.dumps(partial_updates, sort_keys=True)
        release_history = populate_release_history(
            balrog_prefix, parameters["project"], partial_updates=partial_updates
        )

    target_tasks_method = promotion_config["target-tasks-method"].format(
        project=parameters["project"]
    )
    rebuild_kinds = input.get(
        "rebuild_kinds", promotion_config.get("rebuild-kinds", [])
    )
    do_not_optimize = input.get(
        "do_not_optimize", promotion_config.get("do-not-optimize", [])
    )

    # Build previous_graph_ids from ``previous_graph_ids``, ``revision``,
    # or the action parameters.
    previous_graph_ids = input.get("previous_graph_ids")
    if not previous_graph_ids:
        revision = input.get("revision")
        if revision:
            head_rev_param = "{}head_rev".format(
                graph_config["project-repo-param-prefix"]
            )
            push_parameters = {
                head_rev_param: revision,
                "project": parameters["project"],
            }
        else:
            push_parameters = parameters
        previous_graph_ids = [find_decision_task(push_parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters["existing_tasks"] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds
    )
    parameters["do_not_optimize"] = do_not_optimize
    parameters["target_tasks_method"] = target_tasks_method
    parameters["build_number"] = int(input["build_number"])
    parameters["next_version"] = next_version
    parameters["release_history"] = release_history
    if promotion_config.get("is-rc"):
        parameters["release_type"] += "-rc"
    parameters["release_eta"] = input.get("release_eta", "")
    parameters["release_product"] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters["optimize_target_tasks"] = True

    if release_promotion_flavor == "promote_firefox_partner_repack":
        release_enable_partner_repack = True
        release_enable_partner_attribution = False
        release_enable_emefree = False
    elif release_promotion_flavor == "promote_firefox_partner_attribution":
        release_enable_partner_repack = False
        release_enable_partner_attribution = True
        release_enable_emefree = False
    else:
        # for promotion or ship phases, we use the action input to turn the repacks/attribution off
        release_enable_partner_repack = input.get("release_enable_partner_repack", True)
        release_enable_partner_attribution = input.get(
            "release_enable_partner_attribution", True
        )
        release_enable_emefree = input.get("release_enable_emefree", True)

    partner_url_config = get_partner_url_config(parameters, graph_config)
    if (
        release_enable_partner_repack
        and not partner_url_config["release-partner-repack"]
    ):
        raise Exception("Can't enable partner repacks when no config url found")
    if (
        release_enable_partner_attribution
        and not partner_url_config["release-partner-attribution"]
    ):
        raise Exception("Can't enable partner attribution when no config url found")
    if release_enable_emefree and not partner_url_config["release-eme-free-repack"]:
        raise Exception("Can't enable EMEfree repacks when no config url found")
    parameters["release_enable_partner_repack"] = release_enable_partner_repack
    parameters[
        "release_enable_partner_attribution"
    ] = release_enable_partner_attribution
    parameters["release_enable_emefree"] = release_enable_emefree

    partner_config = input.get("release_partner_config")
    if not partner_config and any(
        [
            release_enable_partner_repack,
            release_enable_partner_attribution,
            release_enable_emefree,
        ]
    ):
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)
    if partner_config:
        parameters["release_partner_config"] = fix_partner_config(partner_config)
    parameters["release_partners"] = input.get("release_partners")
    if input.get("release_partner_build_number"):
        parameters["release_partner_build_number"] = input[
            "release_partner_build_number"
        ]

    if input["version"]:
        parameters["version"] = input["version"]

    parameters["required_signoffs"] = get_required_signoffs(input, parameters)
    parameters["signoff_urls"] = get_signoff_urls(input, parameters)

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({"root": graph_config.root_dir}, parameters=parameters)
Example #14
def generate_tasks(params=None, full=False, disable_target_task_filter=False):
    attr = "full_task_set" if full else "target_task_set"
    target_tasks_method = (
        "try_select_tasks"
        if not disable_target_task_filter
        else "try_select_tasks_uncommon"
    )
    params = parameters_loader(
        params,
        strict=False,
        overrides={
            "try_mode": "try_select",
            "target_tasks_method": target_tasks_method,
        },
    )
    root = os.path.join(build.topsrcdir, "taskcluster", "ci")
    gecko_taskgraph.fast = True
    generator = TaskGraphGenerator(root_dir=root, parameters=params)

    cache_dir = os.path.join(get_state_dir(srcdir=True), "cache", "taskgraph")
    key = cache_key(attr, generator.parameters, disable_target_task_filter)
    cache = os.path.join(cache_dir, key)

    invalidate(cache)
    if os.path.isfile(cache):
        with open(cache) as fh:
            return TaskGraph.from_json(json.load(fh))[1]

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    print("Task configuration changed, generating {}".format(attr.replace("_", " ")))

    cwd = os.getcwd()
    os.chdir(build.topsrcdir)

    def generate(attr):
        try:
            tg = getattr(generator, attr)
        except ParameterMismatch as e:
            print(PARAMETER_MISMATCH.format(e.args[0]))
            sys.exit(1)

        # write cache
        key = cache_key(attr, generator.parameters, disable_target_task_filter)
        with open(os.path.join(cache_dir, key), "w") as fh:
            json.dump(tg.to_json(), fh)
        return tg

    # Cache both full_task_set and target_task_set regardless of whether or not
    # --full was requested. Caching is cheap and can potentially save a lot of
    # time.
    tg_full = generate("full_task_set")
    tg_target = generate("target_task_set")

    # Discard the results from these; we only need the cache they write.
    if full:
        generate("full_task_graph")
    generate("target_task_graph")

    os.chdir(cwd)
    if full:
        return tg_full
    return tg_target
Example #15
def create_tasks(
    graph_config,
    to_run,
    full_task_graph,
    label_to_taskid,
    params,
    decision_task_id,
    suffix="",
    modifier=lambda t: t,
):
    """Create new tasks.  The task definition will have {'relative-datestamp':
    '..'} rendered just like in a decision task.  Action callbacks should use
    this function to create new tasks, which allows easy debugging with
    `mach taskgraph action-callback --test`.  This builds up all of the tasks
    required in order to run the tasks requested.

    Optionally, this function takes a `modifier` function that is passed each
    task before it is put into the new graph. It should return a valid task.
    Note that the modifier is passed _all_ tasks in the graph, not just the
    set in to_run; you may want to skip modifying tasks outside that set.

    If `suffix` is given, it is used to give unique names to the resulting
    artifacts.  If you call this function multiple times in the same action,
    pass a different suffix each time to avoid overwriting artifacts.

    If you wish to create the tasks in a new group, leave out decision_task_id.

    Returns an updated label_to_taskid containing the new tasks."""
    if suffix != "":
        suffix = f"-{suffix}"
    to_run = set(to_run)

    # Copy to avoid side effects later
    full_task_graph = copy.deepcopy(full_task_graph)
    label_to_taskid = label_to_taskid.copy()

    target_graph = full_task_graph.graph.transitive_closure(to_run)
    target_task_graph = TaskGraph(
        {l: modifier(full_task_graph[l])
         for l in target_graph.nodes}, target_graph)
    target_task_graph.for_each_task(update_parent)
    if decision_task_id and decision_task_id != os.environ.get("TASK_ID"):
        target_task_graph.for_each_task(update_dependencies)
    optimized_task_graph, label_to_taskid = optimize_task_graph(
        target_task_graph,
        to_run,
        params,
        to_run,
        decision_task_id,
        existing_tasks=label_to_taskid,
    )
    write_artifact(f"task-graph{suffix}.json", optimized_task_graph.to_json())
    write_artifact(f"label-to-taskid{suffix}.json", label_to_taskid)
    write_artifact(f"to-run{suffix}.json", list(to_run))
    create.create_tasks(
        graph_config,
        optimized_task_graph,
        label_to_taskid,
        params,
        decision_task_id,
    )
    return label_to_taskid
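
A hedged sketch of a typical call from an action callback; the label is made up, and the remaining arguments are assumed to come from fetch_graph_and_labels (see the next example):

# Hypothetical: re-run a single task from an action callback.
label_to_taskid = create_tasks(
    graph_config,
    ["test-linux64/opt-example-1"],  # illustrative label
    full_task_graph,
    label_to_taskid,
    params,
    decision_task_id,
    suffix="0",  # keeps artifacts distinct across repeated calls
)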
Example #16
def fetch_graph_and_labels(parameters, graph_config):
    decision_task_id = find_decision_task(parameters, graph_config)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    logger.info("Load taskgraph from JSON.")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    logger.info("Fetching additional tasks from action and cron tasks.")
    # fetch everything in parallel; this avoids serializing any delay in downloading
    # each artifact (such as waiting for the artifact to be mirrored locally)
    with futures.ThreadPoolExecutor(CONCURRENCY) as e:
        fetches = []

        # fetch any modifications made by action tasks and swap out new tasks
        # for old ones
        def fetch_action(task_id):
            logger.info(
                f"fetching label-to-taskid.json for action task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id,
                                               "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(
                    f"No label-to-taskid.json found for {task_id}: {e}")

        head_rev_param = "{}head_rev".format(
            graph_config["project-repo-param-prefix"])

        namespace = "{}.v2.{}.revision.{}.taskgraph.actions".format(
            graph_config["trust-domain"],
            parameters["project"],
            parameters[head_rev_param],
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_action, task_id))

        # Similarly for cron tasks.
        def fetch_cron(task_id):
            logger.info(
                f"fetching label-to-taskid.json for cron task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id,
                                               "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(
                    f"No label-to-taskid.json found for {task_id}: {e}")

        namespace = "{}.v2.{}.revision.{}.cron".format(
            graph_config["trust-domain"],
            parameters["project"],
            parameters[head_rev_param],
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_cron, task_id))

        # now wait for each fetch to complete, raising an exception if there
        # were any issues
        for f in futures.as_completed(fetches):
            f.result()

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #17
    def test_taskgraph_to_json(self):
        tasks = {
            "a":
            Task(
                kind="test",
                label="a",
                description="Task A",
                attributes={"attr": "a-task"},
                task={"taskdef": True},
            ),
            "b":
            Task(
                kind="test",
                label="b",
                attributes={},
                task={"task": "def"},
                optimization={"skip-unless-has-relevant-tests": None},
                # note that this dep is ignored; it is superseded by the
                # dependency derived from the taskgraph's edges
                dependencies={"first": "a"},
            ),
        }
        graph = Graph(nodes=set("ab"), edges={("a", "b", "edgelabel")})
        taskgraph = TaskGraph(tasks, graph)

        res = taskgraph.to_json()

        self.assertEqual(
            res,
            {
                "a": {
                    "kind": "test",
                    "label": "a",
                    "description": "Task A",
                    "attributes": {
                        "attr": "a-task",
                        "kind": "test"
                    },
                    "task": {
                        "taskdef": True
                    },
                    "dependencies": {
                        "edgelabel": "b"
                    },
                    "soft_dependencies": [],
                    "if_dependencies": [],
                    "optimization": None,
                },
                "b": {
                    "kind": "test",
                    "label": "b",
                    "description": "",
                    "attributes": {
                        "kind": "test"
                    },
                    "task": {
                        "task": "def"
                    },
                    "dependencies": {},
                    "soft_dependencies": [],
                    "if_dependencies": [],
                    "optimization": {
                        "skip-unless-has-relevant-tests": None
                    },
                },
            },
        )
Example #18
def get_subgraph(
    target_task_graph,
    removed_tasks,
    replaced_tasks,
    label_to_taskid,
    decision_task_id,
):
    """
    Return the subgraph of target_task_graph consisting only of
    non-optimized tasks and edges between them.

    To avoid losing track of taskIds for tasks optimized away, this method
    simultaneously substitutes real taskIds for task labels in the graph, and
    populates each task definition's `dependencies` key with the appropriate
    taskIds.  Task references are resolved in the process.
    """

    # check for any dependency edges from included to removed tasks
    bad_edges = [(l, r, n) for l, r, n in target_task_graph.graph.edges
                 if l not in removed_tasks and r in removed_tasks]
    if bad_edges:
        probs = ", ".join(f"{l} depends on {r} as {n} but it has been removed"
                          for l, r, n in bad_edges)
        raise Exception("Optimization error: " + probs)

    # fill in label_to_taskid for anything not removed or replaced
    assert replaced_tasks <= set(label_to_taskid)
    for label in sorted(target_task_graph.graph.nodes - removed_tasks -
                        set(label_to_taskid)):
        label_to_taskid[label] = slugid()

    # resolve labels to taskIds and populate task['dependencies']
    tasks_by_taskid = {}
    named_links_dict = target_task_graph.graph.named_links_dict()
    omit = removed_tasks | replaced_tasks
    for label, task in target_task_graph.tasks.items():
        if label in omit:
            continue
        task.task_id = label_to_taskid[label]
        named_task_dependencies = {
            name: label_to_taskid[label]
            for name, label in named_links_dict.get(label, {}).items()
        }

        # Add remaining soft dependencies
        if task.soft_dependencies:
            named_task_dependencies.update({
                label: label_to_taskid[label]
                for label in task.soft_dependencies
                if label in label_to_taskid and label not in omit
            })

        task.task = resolve_task_references(
            task.label,
            task.task,
            task_id=task.task_id,
            decision_task_id=decision_task_id,
            dependencies=named_task_dependencies,
        )
        deps = task.task.setdefault("dependencies", [])
        deps.extend(sorted(named_task_dependencies.values()))
        tasks_by_taskid[task.task_id] = task

    # resolve edges to taskIds
    edges_by_taskid = ((label_to_taskid.get(left), label_to_taskid.get(right),
                        name) for (left, right,
                                   name) in target_task_graph.graph.edges)
    # ...and drop edges that are no longer entirely in the task graph
    #   (note that this omits edges to replaced tasks, but they are still in task.dependencies)
    edges_by_taskid = {(left, right, name)
                       for (left, right, name) in edges_by_taskid
                       if left in tasks_by_taskid and right in tasks_by_taskid}

    return TaskGraph(tasks_by_taskid,
                     Graph(set(tasks_by_taskid), edges_by_taskid))
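
A minimal sketch of a degenerate call, assuming `target_task_graph` already exists; with nothing removed or replaced, every label gets a fresh slugid and the whole graph comes back keyed by taskId:

sub = get_subgraph(
    target_task_graph,
    removed_tasks=set(),
    replaced_tasks=set(),
    label_to_taskid={},
    decision_task_id="decision-tid",  # illustrative taskId
)
assert len(sub.tasks) == len(target_task_graph.tasks)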
Example #19
def geckoprofile_action(parameters, graph_config, input, task_group_id,
                        task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]
    pushes = []
    depth = 2
    end_id = int(parameters["pushlog_id"])

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters["head_repository"],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + list(r.json()["pushes"].keys())
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/full-task-graph.json",
            )
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/label-to-taskid.json",
            )
            push_params = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/parameters.yml")
            push_decision_task_id = find_decision_task(push_params,
                                                       graph_config)
        except HTTPError as e:
            logger.info(
                f"Skipping {push} due to missing index artifacts! Error: {e}")
            continue

        if label in full_task_graph.tasks.keys():

            def modifier(task):
                if task.label != label:
                    return task

                cmd = task.task["payload"]["command"]
                task.task["payload"]["command"] = add_args_to_perf_command(
                    cmd, ["--gecko-profile"])
                task.task["extra"]["treeherder"]["symbol"] += "-p"
                task.task["extra"]["treeherder"]["groupName"] += " (profiling)"
                return task

            create_tasks(
                graph_config,
                [label],
                full_task_graph,
                label_to_taskid,
                push_params,
                push_decision_task_id,
                push,
                modifier=modifier,
            )
            backfill_pushes.append(push)
        else:
            logger.info(f"Could not find {label} on {push}. Skipping.")
    combine_task_graph_files(backfill_pushes)
Example #20
RIDEALONG_BUILDS = {
    "linux": ["linux-ridealong"],
    "linux64": ["linux64-ridealong"],
}

GRAPH_CONFIG = {
    "try": {"ridealong-builds": RIDEALONG_BUILDS},
}

for r in RIDEALONG_BUILDS.values():
    tasks.update({k: v for k, v in [unittest_task(n + "-test", n) for n in r]})

unittest_tasks = {k: v for k, v in tasks.items() if "unittest_try_name" in v.attributes}
talos_tasks = {k: v for k, v in tasks.items() if "talos_try_name" in v.attributes}
graph_with_jobs = TaskGraph(tasks, Graph(set(tasks), set()))


class TestTryOptionSyntax(unittest.TestCase):
    def test_unknown_args(self):
        "unknown arguments are ignored"
        parameters = parse_message("try: --doubledash -z extra")
        tos = TryOptionSyntax(parameters, graph_with_jobs, GRAPH_CONFIG)
        # equivalent to "try:"
        self.assertEqual(tos.build_types, [])
        self.assertEqual(tos.jobs, [])

    def test_apostrophe_in_message(self):
        "apostrophe does not break parsing"
        parameters = parse_message("Increase spammy log's log level. try: -b do")
        tos = TryOptionSyntax(parameters, graph_with_jobs, GRAPH_CONFIG)