Example #1
def retrigger_decision_action(parameters, graph_config, input, task_group_id,
                              task_id):
    """For a single task, we try to just run exactly the same task once more.
    It's quite possible that we don't have the scopes to do so (especially for
    an action), but this is best-effort."""

    # make all of the timestamps relative; they will then be turned back into
    # absolute timestamps relative to the current time.
    task = taskcluster.get_task_definition(task_id)
    task = relativize_datestamps(task)
    create_task_from_def(task, parameters["level"])
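The interesting work happens in relativize_datestamps, which rewrites the absolute timestamps in the fetched definition so they can be re-resolved against the current time. A minimal sketch of the idea, assuming the usual created/deadline/expires fields (the real utility also walks nested payload fields):

from datetime import datetime

def relativize_datestamps_sketch(task_def):
    # Illustrative only: convert absolute ISO timestamps into
    # {"relative-datestamp": "N seconds"} markers, measured from the
    # task's original "created" time, so a later create_task call can
    # resolve them against the current time instead.
    created = datetime.fromisoformat(task_def["created"].replace("Z", "+00:00"))
    result = dict(task_def)
    for key in ("created", "deadline", "expires"):
        if isinstance(task_def.get(key), str):
            absolute = datetime.fromisoformat(task_def[key].replace("Z", "+00:00"))
            offset = int((absolute - created).total_seconds())
            result[key] = {"relative-datestamp": f"{offset} seconds"}
    return result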
Example #2
def purge_caches_action(parameters, graph_config, input, task_group_id,
                        task_id):
    task = taskcluster.get_task_definition(task_id)
    if task["payload"].get("cache"):
        for cache in task["payload"]["cache"]:
            purge_cache(task["provisionerId"],
                        task["workerType"],
                        cache,
                        use_proxy=True)
    else:
        logger.info("Task has no caches. Will not clear anything!")
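Note that for docker-worker tasks, payload.cache maps cache names to mount points, so iterating over it yields the names. The purge_cache wrapper itself is Gecko-specific; underneath, it presumably talks to the Taskcluster purge-cache service, roughly like this sketch (the root URL and client usage are assumptions):

import taskcluster

def purge_cache_sketch(provisioner_id, worker_type, cache_name):
    # The purge-cache service records that the named cache is stale for
    # this worker pool; workers discard it before claiming new work.
    purge = taskcluster.PurgeCache(
        {"rootUrl": "https://firefox-ci-tc.services.mozilla.com"})
    purge.purgeCache(provisioner_id, worker_type, {"cacheName": cache_name})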
Example #3
def rerun_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    parameters = dict(parameters)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    label = task["metadata"]["name"]
    if task_id not in label_to_taskid.values():
        logger.error(
            "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!"
            .format(label, task_id, decision_task_id))
        return

    _rerun_task(task_id, label)
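_rerun_task is not shown above; presumably it verifies the task is in a rerunnable state before hitting the queue's rerunTask endpoint. A hedged sketch (the RERUN_STATES tuple and the status check are assumptions):

import logging
import sys
import taskcluster

logger = logging.getLogger(__name__)
RERUN_STATES = ("completed", "failed")  # assumption: only finished tasks can be rerun

def _rerun_task_sketch(task_id, label):
    queue = taskcluster.Queue(
        {"rootUrl": "https://firefox-ci-tc.services.mozilla.com"})
    state = queue.status(task_id)["status"]["state"]
    if state not in RERUN_STATES:
        logger.warning(f"Not rerunning {label}: state {state!r} not in {RERUN_STATES}")
        sys.exit(1)
    queue.rerunTask(task_id)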
Example #4
def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    is_browsertime = "browsertime" in label
    if "vismet" in label:
        label = rename_browsertime_vismet_task(label)

    with_downstream = " "  # becomes " (with downstream) " when downstream tasks are included
    to_run = [label]

    if not input.get("force", None) and not _should_retrigger(
            full_task_graph, label):
        logger.info("Not retriggering task {}, task should not be retriggered "
                    "and force not specified.".format(label))
        sys.exit(1)

    if input.get("downstream") or is_browsertime:
        if is_browsertime:
            to_run = get_downstream_browsertime_tasks(to_run, full_task_graph,
                                                      label_to_taskid)
        else:
            to_run = get_tasks_with_downstream(to_run, full_task_graph,
                                               label_to_taskid)
        with_downstream = " (with downstream) "

    times = input.get("times", 1)
    for i in range(times):
        create_tasks(
            graph_config,
            to_run,
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            i,
        )

        logger.info(
            f"Scheduled {label}{with_downstream}(time {i + 1}/{times})")
    combine_task_graph_files(list(range(times)))
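The gate on retriggering consults the task's attributes in the full task graph. A minimal sketch of what _should_retrigger likely checks; the "retrigger" attribute name follows Gecko's convention, but treat the details as assumptions:

def _should_retrigger_sketch(full_task_graph, label):
    # Tasks opt in to retriggering via a "retrigger" attribute set at
    # task-generation time; everything else requires force=true.
    if label not in full_task_graph.tasks:
        return False
    return full_task_graph.tasks[label].attributes.get("retrigger", False)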
Example #5
def backfill_action(parameters, graph_config, input, task_group_id, task_id):
    """
    This action takes a task ID and schedules it on previous pushes (via a support action).

    To execute this action locally follow the documentation here:
    https://firefox-source-docs.mozilla.org/taskcluster/actions.html#testing-the-action-locally
    """
    task = get_task_definition(task_id)
    pushes = get_pushes_from_params_input(parameters, input)
    failed = False
    input_for_action = input_for_support_action(
        revision=parameters["head_rev"],
        task=task,
        times=input.get("times", 1),
        retrigger=input.get("retrigger", True),
    )

    for push_id in pushes:
        try:
            # The Gecko decision task can sometimes fail on a push and we need to handle
            # the exception that this call will produce
            push_decision_task_id = get_decision_task_id(
                parameters["project"], push_id)
        except Exception:
            logger.warning(f"Could not find decision task for push {push_id}")
            # The decision task may have failed, this is common enough that we
            # don't want to report an error for it.
            continue

        try:
            trigger_action(
                action_name="backfill-task",
                # This lets the action know on which push we want to add a new task
                decision_task_id=push_decision_task_id,
                input=input_for_action,
            )
        except Exception:
            logger.exception(f"Failed to trigger action for {push_id}")
            failed = True

    if failed:
        sys.exit(1)
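input_for_support_action simply packages what the backfill-task support action needs in order to recreate the task on an older push. A plausible sketch (the exact field names are assumptions):

def input_for_support_action_sketch(revision, task, times=1, retrigger=True):
    # The support action runs in the context of the older push, so it
    # only needs enough information to find and schedule the same label.
    return {
        "label": task["metadata"]["name"],
        "revision": revision,
        "times": times,
        "retrigger": retrigger,
    }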
Example #6
def _extract_applicable_action(actions_json, action_name, task_group_id,
                               task_id):
    """Extract action that applies to the given task or task group.

    A task (as defined by its tags) is said to match a tag-set if its
    tags are a super-set of the tag-set. A tag-set is a set of key-value pairs.

    An action (as defined by its context) is said to be relevant for
    a given task, if the task's tags match one of the tag-sets given
    in the context property of the action.

    The order of the actions is significant. When multiple actions apply to a
    task the first one takes precedence.

    For more details visit:
    https://docs.taskcluster.net/docs/manual/design/conventions/actions/spec
    """
    tags = get_task_definition(task_id).get("tags") if task_id else None

    for _action in actions_json["actions"]:
        if action_name != _action["name"]:
            continue

        context = _action.get("context", [])
        # Ensure the task is within the context of the action
        if task_id and tags and _tags_within_context(tags, context):
            return _action
        elif context == []:
            return _action

    available_actions = ", ".join(
        sorted({a["name"]
                for a in actions_json["actions"]}))
    raise LookupError(
        "{} action is not available for this task. Available: {}".format(
            action_name, available_actions))
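The super-set matching rule described in the docstring comes down to a small helper along these lines (a sketch consistent with how _tags_within_context is called above):

def _tags_within_context(tags, context=()):
    # The task's tags must be a super-set of at least one tag-set in the
    # context; the caller treats an empty context as matching any task.
    return any(
        all(tags.get(key) == value for key, value in tag_set.items())
        for tag_set in context
    )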
Example #7
def isolate_test_failures(parameters, graph_config, input, task_group_id, task_id):
    task = get_task_definition(task_id)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config
    )

    pre_task = full_task_graph.tasks[task["metadata"]["name"]]

    # fix up the task's dependencies, similar to how optimization would
    # have done it in the decision task
    dependencies = {
        name: label_to_taskid[label] for name, label in pre_task.dependencies.items()
    }

    task_definition = resolve_task_references(
        pre_task.label, pre_task.task, task_id, decision_task_id, dependencies
    )
    task_definition.setdefault("dependencies", []).extend(dependencies.values())

    failures = get_failures(task_id)
    logger.info("isolate_test_failures: %s" % failures)
    create_isolate_failure_tasks(
        task_definition, failures, parameters["level"], input["times"]
    )
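resolve_task_references substitutes the <dependency-name> markers that task definitions use in place of concrete taskIds. At its core the substitution looks roughly like this simplified sketch (the real utility recurses through the whole definition and also handles artifact references):

import re

def resolve_task_reference_sketch(value, task_id, decision_task_id, dependencies):
    # "<self>" and "<decision>" are special names; anything else must be
    # a named dependency of the task, or the KeyError propagates.
    taskids = dict(dependencies, self=task_id, decision=decision_task_id)
    return re.sub(r"<([^>]+)>", lambda m: taskids[m.group(1)], value)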
Example #8
def geckoprofile_action(parameters, graph_config, input, task_group_id,
                        task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]
    pushes = []
    depth = 2
    end_id = int(parameters["pushlog_id"])

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters["head_repository"],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + list(r.json()["pushes"].keys())
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/full-task-graph.json",
            )
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/label-to-taskid.json",
            )
            push_params = get_artifact_from_index(
                INDEX_TMPL.format(parameters["project"], push),
                "public/parameters.yml")
            push_decision_task_id = find_decision_task(push_params,
                                                       graph_config)
        except HTTPError as e:
            logger.info(
                f"Skipping {push} due to missing index artifacts! Error: {e}")
            continue

        if label in full_task_graph.tasks:

            def modifier(task):
                if task.label != label:
                    return task

                cmd = task.task["payload"]["command"]
                task.task["payload"]["command"] = add_args_to_perf_command(
                    cmd, ["--gecko-profile"])
                task.task["extra"]["treeherder"]["symbol"] += "-p"
                task.task["extra"]["treeherder"]["groupName"] += " (profiling)"
                return task

            create_tasks(
                graph_config,
                [label],
                full_task_graph,
                label_to_taskid,
                push_params,
                push_decision_task_id,
                push,
                modifier=modifier,
            )
            backfill_pushes.append(push)
        else:
            logger.info(f"Could not find {label} on {push}. Skipping.")
    combine_task_graph_files(backfill_pushes)
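add_args_to_perf_command splices extra flags such as --gecko-profile into the task's command line. A hedged sketch of the common case (the real helper also has to cope with commands nested inside shell wrappers):

def add_args_to_perf_command_sketch(command, extra_args):
    # command is the payload's "command" list; append each flag to the
    # harness invocation unless it is already present.
    command = list(command)
    for arg in extra_args:
        if arg not in command:
            command.append(arg)
    return command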
Example #9
def create_interactive_action(parameters, graph_config, input, task_group_id, task_id):
    # fetch the original task definition from the taskgraph, to avoid
    # creating interactive copies of unexpected tasks.  Note that this only applies
    # to docker-worker tasks, so we can assume the docker-worker payload format.
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config
    )
    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    def edit(task):
        if task.label != label:
            return task
        task_def = task.task

        # drop task routes (don't index this!)
        task_def["routes"] = []

        # only try this once
        task_def["retries"] = 0

        # short expirations, at least 3 hour maxRunTime
        task_def["deadline"] = {"relative-datestamp": "12 hours"}
        task_def["created"] = {"relative-datestamp": "0 hours"}
        task_def["expires"] = {"relative-datestamp": "1 day"}

        # filter scopes with the SCOPE_WHITELIST
        task.task["scopes"] = [
            s
            for s in task.task.get("scopes", [])
            if any(p.match(s) for p in SCOPE_WHITELIST)
        ]

        payload = task_def["payload"]

        # make sure the task runs for long enough
        payload["maxRunTime"] = max(3600 * 3, payload.get("maxRunTime", 0))

        # no caches or artifacts
        payload["cache"] = {}
        payload["artifacts"] = {}

        # enable interactive mode
        payload.setdefault("features", {})["interactive"] = True
        payload.setdefault("env", {})["TASKCLUSTER_INTERACTIVE"] = "true"

        return task

    # Create the task and any of its dependencies. This uses a new taskGroupId to avoid
    # polluting the existing taskGroup with interactive tasks.
    action_task_id = os.environ.get("TASK_ID")
    label_to_taskid = create_tasks(
        graph_config,
        [label],
        full_task_graph,
        label_to_taskid,
        parameters,
        decision_task_id=action_task_id,
        modifier=edit,
    )

    taskId = label_to_taskid[label]
    logger.info(f"Created interactive task {taskId}; sending notification")

    if input and "notify" in input:
        email = input["notify"]
        # no point sending to a noreply address!
        if email == "*****@*****.**":
            return

        info = {
            "url": taskcluster_urls.ui(get_root_url(False), f"tasks/{taskId}/connect"),
            "label": label,
            "revision": parameters["head_rev"],
            "repo": parameters["head_repository"],
        }
        send_email(
            email,
            subject=EMAIL_SUBJECT.format(**info),
            content=EMAIL_CONTENT.format(**info),
            link={
                "text": "Connect",
                "href": info["url"],
            },
            use_proxy=True,
        )
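SCOPE_WHITELIST, referenced in edit() above, is a list of compiled patterns that decide which of the original task's scopes survive on the interactive copy. Its shape is roughly the following; the two entries are illustrative assumptions, not the actual Gecko list:

import re

SCOPE_WHITELIST = [
    # worker caches the interactive shell still needs
    re.compile(r"^docker-worker:cache:.*"),
    # artifact downloads from dependency tasks
    re.compile(r"^queue:get-artifact:.*"),
]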