 def test_multiple_subs(self):
     input = [{'task-reference': 'toolchain=<toolchain>, build=<build>'}]
     self.assertEqual(
         resolve_task_references('label', input, {
             'toolchain': 'abcd',
             'build': 'def'
         }), ['toolchain=abcd, build=def'])
 def test_invalid(self):
     "resolve_task_references raises a KeyError on reference to an invalid task"
     self.assertRaisesRegexp(
         KeyError,
         "task 'subject' has no dependency named 'no-such'",
         lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
     )
 def test_buried_replacement(self):
     input = {"key": [{"key2": [{'task-reference': 'taskid=<toolchain>'}]}]}
     self.assertEqual(
         resolve_task_references('label', input, {'toolchain': 'abcd'}),
         {u'key': [{
             u'key2': [u'taskid=abcd']
         }]})
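# For orientation: the tests above exercise the 'task-reference' form of
# substitution -- a dict whose only key is 'task-reference' is replaced by its
# string value, with each <name> placeholder resolved through the dependencies
# mapping, at any depth of the structure.  The sketch below is a simplified,
# hypothetical re-implementation of just that behaviour (the function name is
# made up); it deliberately omits the angle-bracket escaping exercised by
# test_escaped further down and the 'artifact-reference' form, both of which
# the real taskgraph implementation handles.
import re

_PLACEHOLDER = re.compile(r"<([^>]+)>")


def resolve_task_references_sketch(label, obj, dependencies):
    def substitute(value):
        def repl(match):
            key = match.group(1)
            try:
                return dependencies[key]
            except KeyError:
                raise KeyError(
                    "task '{}' has no dependency named '{}'".format(label, key))
        return _PLACEHOLDER.sub(repl, value)

    def recurse(val):
        if isinstance(val, dict):
            # only a dict whose sole key is 'task-reference' is substituted
            if list(val.keys()) == ['task-reference']:
                return substitute(val['task-reference'])
            return {k: recurse(v) for k, v in val.items()}
        if isinstance(val, list):
            return [recurse(v) for v in val]
        return val

    return recurse(obj)


# e.g. resolve_task_references_sketch(
#          'label', [{'task-reference': 'build=<build>'}], {'build': 'abcd'})
# returns ['build=abcd']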
Example #4
 def test_badly_formed(self):
     "resolve_task_references ignores badly-formatted artifact references"
     for inv in ['<edge1>', 'edge1/foo>', '<edge1>/foo', '<edge1>foo']:
         resolved = resolve_task_references("subject",
                                            {"artifact-reference": inv},
                                            "tid-self", "tid-decision", {})
         self.assertEqual(resolved, inv)
 def test_no_change(self):
     input = {
         "key": "value",
         "numeric": 10,
         "list": ["a", True, False, None]
     }
     self.assertEqual(resolve_task_references('label', input, {}), input)
Example #6
def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    for elem in input['tasks']:
        if elem in full_task_graph.tasks:
            task = full_task_graph.tasks[elem]

            # fix up the task's dependencies, similar to how optimization would
            # have done in the decision
            dependencies = {
                name: label_to_taskid[label]
                for name, label in task.dependencies.iteritems()
            }
            task_def = resolve_task_references(task.label, task.task,
                                               dependencies)
            task_def.setdefault('dependencies', []).extend(
                dependencies.itervalues())
            # actually create the new task
            create_task(slugid(), task_def, parameters['level'])
        else:
            raise Exception('{} was not found in the task-graph'.format(elem))
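# add_new_jobs_action reads a list of task labels from input['tasks'] and
# re-creates each one from the decision task's full-task-graph.  A
# hypothetical input payload (the labels are illustrative only):
example_add_new_jobs_input = {
    'tasks': [
        'build-linux64/opt',             # hypothetical label
        'test-linux64/opt-mochitest-1',  # hypothetical label
    ],
}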
 def test_appears_with_other_keys(self):
     input = [{'task-reference': '<toolchain>', 'another-key': True}]
     self.assertEqual(
         resolve_task_references('label', input, {'toolchain': 'abcd'}),
         [{
             'task-reference': '<toolchain>',
             'another-key': True
         }])
Example #8
 def do(self, input, output):
     taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
     with mock.patch.dict(
             os.environ,
             {'TASKCLUSTER_ROOT_URL': 'https://tc-tests.localhost'}):
         self.assertEqual(
             resolve_task_references('subject', input,
                                     taskid_for_edge_name), output)
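# TASKCLUSTER_ROOT_URL is patched here presumably because artifact-reference
# substitution expands references into full artifact URLs rooted at that URL;
# plain task-references only need the edge-name -> taskId mapping.  A
# representative call through this helper, using the edge1 -> tid1 mapping
# built above, might be:
#
#     self.do({'task-reference': 'build: <edge1>'}, 'build: tid1')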
Example #9
 def test_invalid(self):
     "resolve_task_references raises a KeyError on reference to an invalid task"
     self.assertRaisesRegexp(
         KeyError,
         "task 'subject' has no dependency named 'no-such'",
          lambda: resolve_task_references(
              "subject", {"artifact-reference": "<no-such/public/artifact>"},
              "tid-self", "tid-decision", {}),
     )
Example #10
 def test_self(self):
     "resolve_task_references raises KeyError on artifact references to `self`"
     self.assertRaisesRegexp(
         KeyError,
         "task 'subject' can't reference artifacts of self",
          lambda: resolve_task_references(
              "subject", {"artifact-reference": "<self/public/artifact>"},
              "tid-self", "tid-decision", {}),
     )
Example #11
def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to how optimization would
    # have done in the decision
    dependencies = {name: label_to_taskid[label]
                    for name, label in pre_task.dependencies.iteritems()}
    new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
    new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())

    # don't want to run mozharness tests, want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']

    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false',
            '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += ['--log-tbpl=-',
                            '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]

    # add any custom gecko preferences
    for (key, val) in input.get('preferences', {}).iteritems():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    logging.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
def mochitest_retrigger_action(parameters, input, task_group_id, task_id, task):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(parameters)

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to how optimization would
    # have done in the decision
    dependencies = {name: label_to_taskid[label]
                    for name, label in pre_task.dependencies.iteritems()}
    new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
    new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())

    # don't want to run mozharness tests, want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']

    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false',
            '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += ['--log-tbpl=-',
                            '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]

    # add any custom gecko preferences
    for (key, val) in input.get('preferences', {}).iteritems():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    logging.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
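# A hypothetical input payload for mochitest_retrigger_action, covering the
# keys the function reads above (all values are illustrative only):
example_retrigger_input = {
    'path': 'dom/base/test/test_example.html',   # hypothetical test path
    'logLevel': 'debug',
    'runUntilFail': True,
    'repeat': 10,
    'preferences': {'dom.example.pref': 'true'},  # hypothetical preference
    'environment': {'MOZ_LOG': 'example:5'},      # hypothetical env var
}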
 def do(self, input, output):
     taskid_for_edge_name = {"edge%d" % n: "tid%d" % n for n in range(1, 4)}
     with mock.patch.dict(
             os.environ,
             {"TASKCLUSTER_ROOT_URL": "https://tc-tests.localhost"}):
         self.assertEqual(
             resolve_task_references("subject", input, "tid-self",
                                     "tid-decision", taskid_for_edge_name),
             output,
         )
Example #14
 def do(self, input, output):
     taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
     self.assertEqual(
         resolve_task_references(
             "subject",
             input,
             "tid-self",
             "tid-decision",
             taskid_for_edge_name,
         ),
         output,
     )
Example #15
def backfill_action(parameters, input, task_group_id, task_id, task):
    label = task['metadata']['name']
    pushes = []
    depth = input.get('depth', 5)
    end_id = int(parameters['pushlog_id']) - 1

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + r.json()['pushes'].keys()
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]

    for push in pushes:
        full_task_graph = get_artifact_from_index(
            INDEX_TMPL.format(parameters['project'], push),
            'public/full-task-graph.json')
        _, full_task_graph = TaskGraph.from_json(full_task_graph)
        label_to_taskid = get_artifact_from_index(
            INDEX_TMPL.format(parameters['project'], push),
            'public/label-to-taskid.json')

        if label in full_task_graph.tasks.keys():
            task = full_task_graph.tasks[label]
            dependencies = {
                name: label_to_taskid[label]
                for name, label in task.dependencies.iteritems()
            }
            task_def = resolve_task_references(task.label, task.task,
                                               dependencies)
            task_def.setdefault('dependencies', []).extend(
                dependencies.itervalues())
            create_task(slugid(), task_def, parameters['level'])
        else:
            logging.info('Could not find {} on {}. Skipping.'.format(
                label, push))
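# backfill_action reads only an optional 'depth' (default 5) from its input;
# a hypothetical payload requesting ten pushes of backfill:
example_backfill_input = {'depth': 10}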
Example #16
def run_missing_tests(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != 'test':
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(task)

    for task in to_run:

        # fix up the task's dependencies, similar to how optimization would
        # have done in the decision
        dependencies = {
            name: label_to_taskid[label]
            for name, label in task.dependencies.iteritems()
        }
        task_def = resolve_task_references(task.label, task.task, dependencies)
        task_def.setdefault('dependencies', []).extend(
            dependencies.itervalues())
        create_task(slugid(), task_def, parameters['level'])

    logger.info(
        'Out of {} test tasks, {} already existed and the action created {}'.
        format(already_run + len(to_run), already_run, len(to_run)))
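# The selection above in miniature: of the targeted tasks, only those of the
# 'test' kind that did not make it into label-to-taskid (i.e. the ones
# optimized away) are re-created.  Toy data with hypothetical labels:
kinds = {'build-linux64/opt': 'build',
         'test-mochitest-1': 'test',
         'test-mochitest-2': 'test'}
target_tasks_toy = ['build-linux64/opt', 'test-mochitest-1', 'test-mochitest-2']
label_to_taskid_toy = {'build-linux64/opt': 'tid0', 'test-mochitest-1': 'tid1'}
to_run_toy = [l for l in target_tasks_toy
              if kinds[l] == 'test' and l not in label_to_taskid_toy]
assert to_run_toy == ['test-mochitest-2']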
 def test_escaped(self):
     input = [{'task-reference': '<<><toolchain>>'}]
     self.assertEqual(
         resolve_task_references('label', input, {'toolchain': 'abcd'}),
         ['<abcd>'])
 def do(self, input, output):
     taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
     self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
Example #19
def get_subgraph(
    target_task_graph,
    removed_tasks,
    replaced_tasks,
    label_to_taskid,
    decision_task_id,
):
    """
    Return the subgraph of target_task_graph consisting only of
    non-optimized tasks and edges between them.

    To avoid losing track of taskIds for tasks optimized away, this method
    simultaneously substitutes real taskIds for task labels in the graph, and
    populates each task definition's `dependencies` key with the appropriate
    taskIds.  Task references are resolved in the process.
    """

    # check for any dependency edges from included to removed tasks
    bad_edges = [(l, r, n) for l, r, n in target_task_graph.graph.edges
                 if l not in removed_tasks and r in removed_tasks]
    if bad_edges:
        probs = ', '.join(
            '{} depends on {} as {} but it has been removed'.format(l, r, n)
            for l, r, n in bad_edges)
        raise Exception("Optimization error: " + probs)

    # fill in label_to_taskid for anything not removed or replaced
    assert replaced_tasks <= set(label_to_taskid)
    for label in sorted(target_task_graph.graph.nodes - removed_tasks -
                        set(label_to_taskid)):
        label_to_taskid[label] = slugid().decode('ascii')

    # resolve labels to taskIds and populate task['dependencies']
    tasks_by_taskid = {}
    named_links_dict = target_task_graph.graph.named_links_dict()
    omit = removed_tasks | replaced_tasks
    for label, task in six.iteritems(target_task_graph.tasks):
        if label in omit:
            continue
        task.task_id = label_to_taskid[label]
        named_task_dependencies = {
            name: label_to_taskid[label]
            for name, label in named_links_dict.get(label, {}).items()
        }

        # Add remaining soft dependencies
        if task.soft_dependencies:
            named_task_dependencies.update({
                label: label_to_taskid[label]
                for label in task.soft_dependencies
                if label in label_to_taskid and label not in omit
            })

        task.task = resolve_task_references(
            task.label,
            task.task,
            task_id=task.task_id,
            decision_task_id=decision_task_id,
            dependencies=named_task_dependencies,
        )
        deps = task.task.setdefault('dependencies', [])
        deps.extend(sorted(named_task_dependencies.values()))
        tasks_by_taskid[task.task_id] = task

    # resolve edges to taskIds
    edges_by_taskid = (
        (label_to_taskid.get(left), label_to_taskid.get(right), name)
        for (left, right, name) in target_task_graph.graph.edges
    )
    # ..and drop edges that are no longer entirely in the task graph
    #   (note that this omits edges to replaced tasks, but they are still in task.dependencies)
    edges_by_taskid = set(
        (left, right, name) for (left, right, name) in edges_by_taskid
        if left in tasks_by_taskid and right in tasks_by_taskid)

    return TaskGraph(tasks_by_taskid,
                     Graph(set(tasks_by_taskid), edges_by_taskid))
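# The "bad edges" guard above in miniature: an edge whose source task is kept
# but whose target was removed means optimization dropped something that is
# still depended upon.  Toy edges with hypothetical labels:
toy_edges = {('test-mochitest-1', 'build-linux64/opt', 'build'),
             ('build-linux64/opt', 'toolchain-clang', 'toolchain')}
toy_removed = {'toolchain-clang'}
toy_bad = [(l, r, n) for l, r, n in toy_edges
           if l not in toy_removed and r in toy_removed]
assert toy_bad == [('build-linux64/opt', 'toolchain-clang', 'toolchain')]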
Example #20
 def do(self, input, output):
     taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
     self.assertEqual(
         resolve_task_references('subject', input, taskid_for_edge_name),
         output)
Example #21
 def test_invalid(self):
     "resolve_task_references ignores badly-formatted artifact references"
     for inv in ['<edge1>', 'edge1/foo>', '<edge1>/foo', '<edge1>foo']:
         resolved = resolve_task_references('subject',
                                            {'artifact-reference': inv}, {})
         self.assertEqual(resolved, inv)
Example #22
 def test_invalid(self):
     "resolve_task_references raises a KeyError on reference to an invalid task"
     self.assertRaisesRegexp(
         KeyError, "task 'subject' has no dependency named 'no-such'",
         lambda: resolve_task_references(
             'subject', {'task-reference': '<no-such>'}, {}))