Example #1
def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']

    with_downstream = ' '
    to_run = [label]

    if input.get('downstream'):
        to_run = full_task_graph.graph.transitive_closure(set(to_run),
                                                          reverse=True).nodes
        to_run = to_run & set(label_to_taskid.keys())
        with_downstream = ' (with downstream) '

    times = input.get('times', 1)
    for i in range(times):
        create_tasks(
            graph_config,
            to_run,
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            i,
        )

        logger.info('Scheduled {}{}(time {}/{})'.format(
            label, with_downstream, i + 1, times))
    combine_task_graph_files(list(range(times)))
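The downstream selection above hinges on `transitive_closure(..., reverse=True)`, which walks dependency edges backwards to collect the task plus everything that transitively depends on it. A minimal self-contained sketch of that idea over a toy graph (the names here are illustrative stand-ins, not taskgraph's real API):

# Toy illustration of the reverse transitive closure used above.
# `deps` maps each task label to the labels it depends on.
deps = {
    'build': [],
    'test-unit': ['build'],
    'test-perf': ['build'],
}

def reverse_closure(targets, deps):
    """Return targets plus every label that transitively depends on them."""
    result = set(targets)
    changed = True
    while changed:
        changed = False
        for label, needed in deps.items():
            if label not in result and result.intersection(needed):
                result.add(label)
                changed = True
    return result

print(sorted(reverse_closure({'build'}, deps)))  # ['build', 'test-perf', 'test-unit']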
Example #2
def trigger_action_callback(task_group_id,
                            task_id,
                            input,
                            callback,
                            parameters,
                            root,
                            test=False):
    """
    Trigger action callback with the given inputs. If `test` is true, then run
    the action callback in testing mode, without actually creating tasks.
    """
    graph_config = load_graph_config(root)
    callbacks = _get_callbacks(graph_config)
    cb = callbacks.get(callback, None)
    if not cb:
        raise Exception('Unknown callback: {}. Known callbacks: {}'.format(
            callback, ', '.join(callbacks)))

    if test:
        create.testing = True
        taskcluster.testing = True

    if not test:
        sanity_check_task_scope(callback, parameters, graph_config)

    # fetch the target task, if taskId was given
    # FIXME: many actions don't need this, so move this fetch into the callbacks
    # that do need it
    if task_id:
        task = taskcluster.get_task_definition(task_id)
    else:
        task = None

    cb(Parameters(**parameters), graph_config, input, task_group_id, task_id,
       task)
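`_get_callbacks` is not shown in this excerpt. A plausible minimal sketch, assuming callbacks self-register into a dict keyed by name (the decorator below is an assumption for illustration, not taskgraph's actual registration API):

# Hypothetical callback registry; taskgraph's real registration goes through
# its actions framework, so treat these names as assumptions.
callbacks = {}

def register_callback(name):
    def wrap(cb):
        # Callbacks follow the signature used by the `cb(...)` call above.
        callbacks[name] = cb
        return cb
    return wrap

@register_callback('retrigger')
def retrigger(parameters, graph_config, input, task_group_id, task_id, task):
    ...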
Example #3
def isolate_test_failures(parameters, graph_config, input, task_group_id, task_id):
    task = get_task_definition(task_id)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config
    )

    pre_task = full_task_graph.tasks[task["metadata"]["name"]]

    # fix up the task's dependencies, similar to how optimization would
    # have done in the decision
    dependencies = {
        name: label_to_taskid[label]
        for name, label in six.iteritems(pre_task.dependencies)
    }

    task_definition = resolve_task_references(
        pre_task.label, pre_task.task, task_id, decision_task_id, dependencies
    )
    task_definition.setdefault("dependencies", []).extend(six.itervalues(dependencies))

    failures = get_failures(task_id)
    logger.info("isolate_test_failures: %s" % failures)
    create_isolate_failure_tasks(
        task_definition, failures, parameters["level"], input["times"]
    )
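`resolve_task_references` substitutes `<name>` placeholders inside `{"task-reference": ...}` values with the task IDs of the named dependencies; in this newer signature, `<self>` and `<decision>` map to the task's own ID and the decision task's ID. A simplified sketch of that substitution, ignoring artifact references and error handling:

import re

def resolve_task_references_sketch(label, task_def, task_id, decision_task_id, dependencies):
    """Simplified stand-in: replace <name> placeholders in task-reference values."""
    ids = dict(dependencies, self=task_id, decision=decision_task_id)

    def substitute(value):
        if isinstance(value, dict):
            if set(value.keys()) == {"task-reference"}:
                return re.sub(r"<([^>]+)>", lambda m: ids[m.group(1)], value["task-reference"])
            return {k: substitute(v) for k, v in value.items()}
        if isinstance(value, list):
            return [substitute(v) for v in value]
        return value

    return substitute(task_def)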
Example #4
def handle_custom_retrigger(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to how optimization would
    # have done in the decision
    dependencies = {name: label_to_taskid[label]
                    for name, label in pre_task.dependencies.items()}
    new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
    new_task_definition.setdefault('dependencies', []).extend(dependencies.values())

    # don't want to run mozharness tests, want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']

    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false',
            '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += ['--log-tbpl=-',
                            '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]

    # add any custom gecko preferences
    for (key, val) in input.get('preferences', {}).items():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    logging.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
Example #5
def geckoprofile_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']
    pushes = []
    depth = 2
    end_id = int(parameters['pushlog_id'])

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'], start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + list(r.json()['pushes'].keys())
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/full-task-graph.json')
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/label-to-taskid.json')
            push_params = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/parameters.yml')
            push_decision_task_id = find_decision_task(push_params, graph_config)
        except HTTPError as e:
            logger.info('Skipping {} due to missing index artifacts! Error: {}'.format(push, e))
            continue

        if label in full_task_graph.tasks.keys():
            def modifier(task):
                if task.label != label:
                    return task

                cmd = task.task['payload']['command']
                task.task['payload']['command'] = add_args_to_perf_command(
                        cmd, ['--gecko-profile'])
                task.task['extra']['treeherder']['symbol'] += '-p'
                return task

            create_tasks(graph_config, [label], full_task_graph, label_to_taskid,
                         push_params, push_decision_task_id, push, modifier=modifier)
            backfill_pushes.append(push)
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(label, push))
    combine_task_graph_files(backfill_pushes)
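This action assumes module-level `PUSHLOG_TMPL` and `INDEX_TMPL` constants. In mozilla-central they are defined along these lines (values modeled on the hg.mozilla.org JSON pushlog API and the gecko task index; verify against the tree before relying on them):

# Assumed module-level constants, modeled on mozilla-central; not verified here.
PUSHLOG_TMPL = '{}/json-pushes?version=2&startID={}&endID={}'
INDEX_TMPL = 'gecko.v2.{}.pushlog-id.{}.decision'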
Example #6
def retrigger_decision_action(parameters, graph_config, input, task_group_id, task_id):
    """For a single task, we try to just run exactly the same task once more.
    It's quite possible that we don't have the scopes to do so (especially for
    an action), but this is best-effort."""

    # make all of the timestamps relative; they will then be turned back into
    # absolute timestamps relative to the current time.
    task = taskcluster.get_task_definition(task_id)
    task = relativize_datestamps(task)
    create_task_from_def(slugid(), task, parameters['level'])
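`relativize_datestamps` converts the absolute ISO timestamps in a fetched task definition into the `{'relative-datestamp': ...}` form seen in Examples #20 and #22, so the new task can be re-anchored to the current time. A rough sketch of that transformation, computing each offset from the task's `created` time (a simplification, not the real helper):

import re
from datetime import datetime

ISO_RE = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')

def relativize_datestamps_sketch(task_def):
    """Replace ISO timestamps with offsets relative to the task's created time."""
    created = datetime.strptime(task_def['created'][:19], '%Y-%m-%dT%H:%M:%S')

    def relativize(value):
        if isinstance(value, str) and ISO_RE.match(value):
            delta = datetime.strptime(value[:19], '%Y-%m-%dT%H:%M:%S') - created
            return {'relative-datestamp': '{} seconds'.format(int(delta.total_seconds()))}
        if isinstance(value, dict):
            return {k: relativize(v) for k, v in value.items()}
        if isinstance(value, list):
            return [relativize(v) for v in value]
        return value

    return relativize(task_def)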
Example #7
def purge_caches_action(parameters, graph_config, input, task_group_id,
                        task_id):
    task = taskcluster.get_task_definition(task_id)
    if task['payload'].get('cache'):
        for cache in task['payload']['cache']:
            purge_cache(task['provisionerId'],
                        task['workerType'],
                        cache,
                        use_proxy=True)
    else:
        logger.info('Task has no caches. Will not clear anything!')
Example #8
def purge_caches_action(parameters, graph_config, input, task_group_id,
                        task_id):
    task = taskcluster.get_task_definition(task_id)
    if task["payload"].get("cache"):
        for cache in task["payload"]["cache"]:
            purge_cache(task["provisionerId"],
                        task["workerType"],
                        cache,
                        use_proxy=True)
    else:
        logger.info("Task has no caches. Will not clear anything!")
Example #9
def rerun_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    parameters = dict(parameters)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    label = task["metadata"]["name"]
    if task_id not in label_to_taskid.values():
        logger.error(
            "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!"
            .format(label, task_id, decision_task_id))

    _rerun_task(task_id, label)
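This version delegates to a `_rerun_task` helper that is not shown; Example #11 inlines the same logic, so a sketch consistent with it looks like this (`RERUN_STATES`, `status_task`, `rerun_task`, `logger`, and `sys` are assumed to come from the surrounding module, as in that example):

def _rerun_task(task_id, label):
    # Reconstructed from the inline body of Example #11.
    status = status_task(task_id)
    if status not in RERUN_STATES:
        logger.error("Refusing to rerun {}: state {} not in {}!".format(
            label, status, RERUN_STATES))
        sys.exit(1)
    rerun_task(task_id)
    logger.info("Reran {}".format(label))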
Example #10
    def test_action_callback(self, **options):
        import taskgraph.parameters
        from taskgraph.util.taskcluster import get_task_definition
        import taskgraph.actions
        import yaml

        def load_data(filename):
            with open(filename) as f:
                if filename.endswith('.yml'):
                    return yaml.safe_load(f)
                elif filename.endswith('.json'):
                    return json.load(f)
                else:
                    raise Exception("unknown filename {}".format(filename))

        try:
            self.setup_logging()
            task_id = options['task_id']
            if options['task']:
                task = load_data(options['task'])
            elif task_id:
                task = get_task_definition(task_id)
            else:
                task = None

            if options['input']:
                input = load_data(options['input'])
            else:
                input = None

            parameters = taskgraph.parameters.load_parameters_file(
                options['parameters'])
            parameters.check()

            root = options['root']

            return taskgraph.actions.trigger_action_callback(
                task_group_id=options['task_group_id'],
                task_id=task_id,
                task=task,
                input=input,
                callback=options['callback'],
                parameters=parameters,
                root=root,
                test=True)
        except Exception:
            traceback.print_exc()
            sys.exit(1)
Example #11
def rerun_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    parameters = dict(parameters)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    label = task['metadata']['name']
    if task_id not in label_to_taskid.values():
        logger.error(
            "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!"
            .format(label, task_id, decision_task_id))

    status = status_task(task_id)
    if status not in RERUN_STATES:
        logger.error("Refusing to rerun {}: state {} not in {}!".format(
            label, status, RERUN_STATES))
        sys.exit(1)
    rerun_task(task_id)
    logger.info('Reran {}'.format(label))
Example #12
    def test_action_callback(self, **options):
        import taskgraph.parameters
        from taskgraph.util.taskcluster import get_task_definition
        import taskgraph.actions
        import yaml

        def load_data(filename):
            with open(filename) as f:
                if filename.endswith('.yml'):
                    return yaml.safe_load(f)
                elif filename.endswith('.json'):
                    return json.load(f)
                else:
                    raise Exception("unknown filename {}".format(filename))

        try:
            self.setup_logging()
            task_id = options['task_id']
            if options['task']:
                task = load_data(options['task'])
            elif task_id:
                task = get_task_definition(task_id)
            else:
                task = None

            if options['input']:
                input = load_data(options['input'])
            else:
                input = None

            parameters = taskgraph.parameters.load_parameters_file(options['parameters'])
            parameters.check()

            return taskgraph.actions.trigger_action_callback(
                    task_group_id=options['task_group_id'],
                    task_id=task_id,
                    task=task,
                    input=input,
                    callback=options['callback'],
                    parameters=parameters,
                    test=True)
        except Exception:
            traceback.print_exc()
            sys.exit(1)
Example #13
def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    is_browsertime = "browsertime" in label
    if "vismet" in label:
        label = rename_browsertime_vismet_task(label)

    with_downstream = " "
    to_run = [label]

    if not input.get("force", None) and not _should_retrigger(
            full_task_graph, label):
        logger.info("Not retriggering task {}, task should not be retrigged "
                    "and force not specified.".format(label))
        sys.exit(1)

    if input.get("downstream") or is_browsertime:
        if is_browsertime:
            to_run = get_downstream_browsertime_tasks(to_run, full_task_graph,
                                                      label_to_taskid)
        else:
            to_run = get_tasks_with_downstream(to_run, full_task_graph,
                                               label_to_taskid)
        with_downstream = " (with downstream) "

    times = input.get("times", 1)
    for i in range(times):
        create_tasks(
            graph_config,
            to_run,
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            i,
        )

        logger.info("Scheduled {}{}(time {}/{})".format(
            label, with_downstream, i + 1, times))
    combine_task_graph_files(list(range(times)))
Example #14
def isolate_test_failures(parameters, graph_config, input, task_group_id, task_id):
    task = get_task_definition(task_id)
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to how optimization would
    # have done in the decision
    dependencies = {name: label_to_taskid[label]
                    for name, label in pre_task.dependencies.items()}

    task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
    task_definition.setdefault('dependencies', []).extend(dependencies.values())

    failures = get_failures(task_id)
    logger.info('isolate_test_failures: %s' % failures)
    for i in range(input['times']):
        create_isolate_failure_tasks(task_definition, failures, parameters['level'])
Example #15
def backfill_action(parameters, graph_config, input, task_group_id, task_id):
    """
    This action takes a task ID and schedules it on previous pushes (via support action).

    To execute this action locally follow the documentation here:
    https://firefox-source-docs.mozilla.org/taskcluster/actions.html#testing-the-action-locally
    """
    task = get_task_definition(task_id)
    pushes = get_pushes_from_params_input(parameters, input)
    failed = False
    input_for_action = input_for_support_action(
        revision=parameters["head_rev"],
        task=task,
        times=input.get("times", 1),
    )

    for push_id in pushes:
        try:
            # The Gecko decision task can sometimes fail on a push and we need to handle
            # the exception that this call will produce
            push_decision_task_id = get_decision_task_id(
                parameters["project"], push_id)
        except Exception:
            logger.warning(
                "Could not find decision task for push {}".format(push_id))
            # The decision task may have failed, this is common enough that we
            # don't want to report an error for it.
            continue

        try:
            trigger_action(
                action_name="backfill-task",
                # This lets the action know on which push we want to add a new task
                decision_task_id=push_decision_task_id,
                input=input_for_action,
            )
        except Exception:
            logger.exception("Failed to trigger action for {}".format(push_id))
            failed = True

    if failed:
        sys.exit(1)
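`input_for_support_action` is referenced but not defined in this excerpt. Judging from the call sites here and in Example #18 below, it packages the failing task, the revision, and the retrigger count into the input for the `backfill-task` support action; a hypothetical sketch (the field names are guesses, not the real schema):

def input_for_support_action(revision, task, times=1):
    # Hypothetical shape; the actual backfill-task input schema is not shown here.
    return {
        "label": task["metadata"]["name"],
        "revision": revision,
        "times": times,
    }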
Example #16
def _extract_applicable_action(actions_json, action_name, task_group_id,
                               task_id):
    """Extract action that applies to the given task or task group.

    A task (as defined by its tags) is said to match a tag-set if its
    tags are a super-set of the tag-set. A tag-set is a set of key-value pairs.

    An action (as defined by its context) is said to be relevant for
    a given task, if the task's tags match one of the tag-sets given
    in the context property of the action.

    The order of the actions is significant. When multiple actions apply to a
    task the first one takes precedence.

    For more details visit:
    https://docs.taskcluster.net/docs/manual/design/conventions/actions/spec
    """
    if task_id:
        tags = get_task_definition(task_id).get("tags")
    action = None

    for _action in actions_json["actions"]:
        if action_name != _action["name"]:
            continue

        context = _action.get("context", [])
        # Ensure the task is within the context of the action
        if task_id and tags and _tags_within_context(tags, context):
            action = _action
        elif context == []:
            action = _action

    if not action:
        available_actions = ", ".join(
            sorted({a["name"]
                    for a in actions_json["actions"]}))
        raise LookupError(
            "{} action is not available for this task. Available: {}".format(
                action_name, available_actions))

    return action
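The docstring above pins `_tags_within_context` down precisely: a task's tags match a tag-set when they are a super-set of it, and a context matches when any one of its tag-sets matches. A direct sketch of that predicate:

def _tags_within_context(tags, context):
    """True if the task's tags are a super-set of at least one tag-set in context."""
    return any(
        all(tags.get(key) == value for key, value in tag_set.items())
        for tag_set in context
    )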
Example #17
def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']

    with_downstream = ' '
    to_run = [label]

    if not input.get('force', None) and not full_task_graph[label].attributes.get('retrigger'):
        logger.info(
            "Not retriggering task {}, task should not be retrigged "
            "and force not specified.".format(
                label
            )
        )
        sys.exit(1)

    if input.get('downstream'):
        to_run = full_task_graph.graph.transitive_closure(set(to_run), reverse=True).nodes
        to_run = to_run & set(label_to_taskid.keys())
        with_downstream = ' (with downstream) '

    times = input.get('times', 1)
    for i in range(times):
        create_tasks(
            graph_config,
            to_run,
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            i,
        )

        logger.info('Scheduled {}{}(time {}/{})'.format(label, with_downstream, i+1, times))
    combine_task_graph_files(list(range(times)))
Example #18
def new_backfill_action(parameters, graph_config, input, task_group_id,
                        task_id):
    '''
    This action takes a task ID and schedules it on previous pushes (via support action).

    To execute this action locally follow the documentation here:
    https://firefox-source-docs.mozilla.org/taskcluster/actions.html#testing-the-action-locally
    '''
    task = get_task_definition(task_id)
    pushes = get_pushes_from_params_input(parameters, input)

    for push_id in pushes:
        try:
            trigger_action(
                action_name='backfill-task',
                # This lets the action know on which push we want to add a new task
                decision_task_id=get_decision_task_id(parameters['project'],
                                                      push_id),
                input=input_for_support_action(parameters['head_rev'], task),
            )
        except Exception as e:
            logger.warning('Failure to trigger action for {}'.format(push_id))
            logger.exception(e)
Example #19
def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    with_downstream = " "
    to_run = [label]

    if not input.get("force", None) and not _should_retrigger(
            full_task_graph, label):
        logger.info("Not retriggering task {}, task should not be retrigged "
                    "and force not specified.".format(label))
        sys.exit(1)

    if input.get("downstream"):
        to_run = full_task_graph.graph.transitive_closure(set(to_run),
                                                          reverse=True).nodes
        to_run = to_run & set(label_to_taskid.keys())
        with_downstream = " (with downstream) "

    times = input.get("times", 1)
    for i in range(times):
        create_tasks(
            graph_config,
            to_run,
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            i,
        )

        logger.info("Scheduled {}{}(time {}/{})".format(
            label, with_downstream, i + 1, times))
    combine_task_graph_files(list(range(times)))
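Examples #13 and #19 call a `_should_retrigger` helper that is not shown here; Example #17 performs the equivalent check inline against the task's attributes, so a faithful sketch is simply:

def _should_retrigger(full_task_graph, label):
    # Reconstructed from the inline check in Example #17.
    return full_task_graph[label].attributes.get("retrigger", False)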
Example #20
def create_interactive_action(parameters, graph_config, input, task_group_id,
                              task_id):
    # fetch the original task definition from the taskgraph, to avoid
    # creating interactive copies of unexpected tasks.  Note that this only applies
    # to docker-worker tasks, so we can assume the docker-worker payload format.
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']

    def edit(task):
        if task.label != label:
            return task
        task_def = task.task

        # drop task routes (don't index this!)
        task_def['routes'] = []

        # only try this once
        task_def['retries'] = 0

        # short expirations, at least 3 hour maxRunTime
        task_def['deadline'] = {'relative-datestamp': '12 hours'}
        task_def['created'] = {'relative-datestamp': '0 hours'}
        task_def['expires'] = {'relative-datestamp': '1 day'}

        # filter scopes with the SCOPE_WHITELIST
        task.task['scopes'] = [
            s for s in task.task.get('scopes', []) if any(
                p.match(s) for p in SCOPE_WHITELIST)
        ]

        payload = task_def['payload']

        # make sure the task runs for long enough..
        payload['maxRunTime'] = max(3600 * 3, payload.get('maxRunTime', 0))

        # no caches or artifacts
        payload['cache'] = {}
        payload['artifacts'] = {}

        # enable interactive mode
        payload.setdefault('features', {})['interactive'] = True
        payload.setdefault('env', {})['TASKCLUSTER_INTERACTIVE'] = 'true'

        return task

    # Create the task and any of its dependencies. This uses a new taskGroupId to avoid
    # polluting the existing taskGroup with interactive tasks.
    label_to_taskid = create_tasks(graph_config, [label],
                                   full_task_graph,
                                   label_to_taskid,
                                   parameters,
                                   modifier=edit)

    taskId = label_to_taskid[label]
    logger.info(
        'Created interactive task {}; sending notification'.format(taskId))

    if input and 'notify' in input:
        email = input['notify']
        # no point sending to a noreply address!
        if email == '*****@*****.**':
            return

        info = {
            'url': taskcluster_urls.ui(get_root_url(False),
                                       'tasks/{}/connect'.format(taskId)),
            'label': label,
            'revision': parameters['head_rev'],
            'repo': parameters['head_repository'],
        }
        send_email(email,
                   subject=EMAIL_SUBJECT.format(**info),
                   content=EMAIL_CONTENT.format(**info),
                   link={
                       'text': 'Connect',
                       'href': info['url'],
                   },
                   use_proxy=True)
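`EMAIL_SUBJECT` and `EMAIL_CONTENT` are module-level templates formatted with the `info` dict built above. Illustrative stand-ins that use the same keys (the in-tree wording differs):

# Illustrative templates only; any strings using the url/label/revision/repo keys work.
EMAIL_SUBJECT = 'Your interactive task for {label}'
EMAIL_CONTENT = (
    'Your interactive task for {label} on {repo} (revision {revision}) is ready.\n'
    'Connect here: {url}\n'
)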
Example #21
def backfill_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']
    pushes = []
    inclusive_tweak = 1 if input.get('inclusive') else 0
    depth = input.get('depth', 5) + inclusive_tweak
    end_id = int(parameters['pushlog_id']) - (1 - inclusive_tweak)

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + list(r.json()['pushes'].keys())
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/full-task-graph.json')
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/label-to-taskid.json')
            push_params = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/parameters.yml')
            push_decision_task_id = find_decision_task(push_params,
                                                       graph_config)
        except HTTPError as e:
            logger.info(
                'Skipping {} due to missing index artifacts! Error: {}'.format(
                    push, e))
            continue

        if label in full_task_graph.tasks.keys():

            def modifier(task):
                if task.label != label:
                    return task

                if input.get('testPath', ''):
                    name = task.task['metadata']['name']
                    is_wpttest = 'web-platform' in name
                    is_android = 'android' in name
                    gpu_required = (not is_wpttest) and (
                        'gpu' in name or 'webgl' in name or
                        ('reftest' in name and 'jsreftest' not in name))

                    # Create new cmd that runs a test-verify type job
                    preamble_length = 3
                    verify_args = [
                        '--e10s', '--verify', '--total-chunk=1',
                        '--this-chunk=1'
                    ]
                    if is_android:
                        # no --e10s; todo, what about future geckoView?
                        verify_args.remove('--e10s')

                    if gpu_required:
                        verify_args.append('--gpu-required')

                    if 'testPath' in input:
                        task.task['payload']['env'][
                            'MOZHARNESS_TEST_PATHS'] = json.dumps({
                                task.task['extra']['suite']['flavor']:
                                [input['testPath']]
                            })

                    cmd_parts = task.task['payload']['command']
                    keep_args = [
                        '--installer-url', '--download-symbols',
                        '--test-packages-url'
                    ]
                    cmd_parts = remove_args_from_command(
                        cmd_parts, preamble_length, keep_args)
                    cmd_parts = add_args_to_command(cmd_parts, verify_args)
                    task.task['payload']['command'] = cmd_parts

                    # morph the task label to a test-verify job
                    pc = task.task['metadata']['name'].split('/')
                    config = pc[-1].split('-')
                    subtype = ''
                    symbol = 'TV-bf'
                    if gpu_required:
                        subtype = '-gpu'
                        symbol = 'TVg-bf'
                    if is_wpttest:
                        subtype = '-wpt'
                        symbol = 'TVw-bf'
                    if not is_android:
                        subtype = "%s-e10s" % subtype
                    newlabel = "%s/%s-test-verify%s" % (pc[0], config[0],
                                                        subtype)
                    task.task['metadata']['name'] = newlabel
                    task.task['tags']['label'] = newlabel

                    task.task['extra']['index']['rank'] = 0
                    task.task['extra']['chunks']['current'] = 1
                    task.task['extra']['chunks']['total'] = 1

                    task.task['extra']['suite']['name'] = 'test-verify'
                    task.task['extra']['suite']['flavor'] = 'test-verify'

                    task.task['extra']['treeherder']['symbol'] = symbol
                    del task.task['extra']['treeherder']['groupSymbol']
                return task

            times = input.get('times', 1)
            for i in range(times):
                create_tasks(graph_config, [label],
                             full_task_graph,
                             label_to_taskid,
                             push_params,
                             push_decision_task_id,
                             push,
                             modifier=modifier)
            backfill_pushes.append(push)
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(
                label, push))
    combine_task_graph_files(backfill_pushes)
Example #22
def create_interactive_action(parameters, graph_config, input, task_group_id,
                              task_id):
    # fetch the original task definition from the taskgraph, to avoid
    # creating interactive copies of unexpected tasks.  Note that this only applies
    # to docker-worker tasks, so we can assume the docker-worker payload format.
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    def edit(task):
        if task.label != label:
            return task
        task_def = task.task

        # drop task routes (don't index this!)
        task_def["routes"] = []

        # only try this once
        task_def["retries"] = 0

        # short expirations, at least 3 hour maxRunTime
        task_def["deadline"] = {"relative-datestamp": "12 hours"}
        task_def["created"] = {"relative-datestamp": "0 hours"}
        task_def["expires"] = {"relative-datestamp": "1 day"}

        # filter scopes with the SCOPE_WHITELIST
        task.task["scopes"] = [
            s for s in task.task.get("scopes", []) if any(
                p.match(s) for p in SCOPE_WHITELIST)
        ]

        payload = task_def["payload"]

        # make sure the task runs for long enough..
        payload["maxRunTime"] = max(3600 * 3, payload.get("maxRunTime", 0))

        # no caches or artifacts
        payload["cache"] = {}
        payload["artifacts"] = {}

        # enable interactive mode
        payload.setdefault("features", {})["interactive"] = True
        payload.setdefault("env", {})["TASKCLUSTER_INTERACTIVE"] = "true"

        return task

    # Create the task and any of its dependencies. This uses a new taskGroupId to avoid
    # polluting the existing taskGroup with interactive tasks.
    action_task_id = os.environ.get("TASK_ID")
    label_to_taskid = create_tasks(
        graph_config,
        [label],
        full_task_graph,
        label_to_taskid,
        parameters,
        decision_task_id=action_task_id,
        modifier=edit,
    )

    taskId = label_to_taskid[label]
    logger.info(
        "Created interactive task {}; sending notification".format(taskId))

    if input and "notify" in input:
        email = input["notify"]
        # no point sending to a noreply address!
        if email == "*****@*****.**":
            return

        info = {
            "url": taskcluster_urls.ui(get_root_url(False),
                                       "tasks/{}/connect".format(taskId)),
            "label": label,
            "revision": parameters["head_rev"],
            "repo": parameters["head_repository"],
        }
        send_email(
            email,
            subject=EMAIL_SUBJECT.format(**info),
            content=EMAIL_CONTENT.format(**info),
            link={
                "text": "Connect",
                "href": info["url"],
            },
            use_proxy=True,
        )