Example #1
def run_missing_tests(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != 'test':
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(label)

    create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                 decision_task_id)

    logger.info(
        'Out of {} test tasks, {} already existed and the action created {}'.
        format(already_run + len(to_run), already_run, len(to_run)))
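
A note on the shape shared by most examples on this page: TaskGraph.from_json takes the parsed JSON of a task graph and returns a (tasks, graph) two-tuple, which is why the actions here unpack it and keep only the second element. A minimal sketch of that step, reusing find_decision_task and get_artifact from the example above:

decision_task_id = find_decision_task(parameters)
raw_graph = get_artifact(decision_task_id, "public/full-task-graph.json")

# from_json returns a (tasks, graph) tuple; the graph is what the
# actions traverse and schedule from.
tasks_by_label, full_task_graph = TaskGraph.from_json(raw_graph)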
Example #2
def generate_tasks(params=None, full=False, disable_target_task_filter=False):
    cache_dir = os.path.join(get_state_dir(srcdir=True), "cache", "taskgraph")
    attr = "full_task_set" if full else "target_task_set"
    cache = os.path.join(cache_dir, attr)

    invalidate(cache)
    if os.path.isfile(cache):
        with open(cache, "r") as fh:
            return TaskGraph.from_json(json.load(fh))[1]

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    print("Task configuration changed, generating {}".format(
        attr.replace("_", " ")))

    taskgraph.fast = True
    cwd = os.getcwd()
    os.chdir(build.topsrcdir)

    root = os.path.join(build.topsrcdir, "taskcluster", "ci")
    target_tasks_method = ("try_select_tasks" if not disable_target_task_filter
                           else "try_select_tasks_uncommon")
    params = parameters_loader(
        params,
        strict=False,
        overrides={
            "try_mode": "try_select",
            "target_tasks_method": target_tasks_method,
        },
    )

    # Cache both full_task_set and target_task_set regardless of whether or not
    # --full was requested. Caching is cheap and can potentially save a lot of
    # time.
    generator = TaskGraphGenerator(root_dir=root, parameters=params)

    def generate(attr):
        try:
            tg = getattr(generator, attr)
        except ParameterMismatch as e:
            print(PARAMETER_MISMATCH.format(e.args[0]))
            sys.exit(1)

        # write cache
        with open(os.path.join(cache_dir, attr), "w") as fh:
            json.dump(tg.to_json(), fh)
        return tg

    tg_full = generate("full_task_set")
    tg_target = generate("target_task_set")
    # discard the results from these; we only need the cache they write.
    if full:
        generate("full_task_graph")
    generate("target_task_graph")

    os.chdir(cwd)
    if full:
        return tg_full
    return tg_target
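
The cache written by generate() is just the to_json() form of each graph, so a later run can rebuild the graph with from_json, indexing [1] to drop the tasks half of the tuple. A compact sketch of that round trip, with cache standing in for the path computed above:

import json

# Write: serialize the generated graph into the cache file.
with open(cache, "w") as fh:
    json.dump(tg.to_json(), fh)

# Read: from_json returns (tasks, graph); [1] keeps only the graph.
with open(cache, "r") as fh:
    tg = TaskGraph.from_json(json.load(fh))[1]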
Example #3
    def test_round_trip(self):
        graph = TaskGraph(
            tasks={
                "a": Task(
                    kind="fancy",
                    label="a",
                    description="Task A",
                    attributes={},
                    dependencies={"prereq": "b"},  # must match edges, below
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def"},
                ),
                "b": Task(
                    kind="pre",
                    label="b",
                    attributes={},
                    dependencies={},
                    optimization={"skip-unless-has-relevant-tests": None},
                    task={"task": "def2"},
                ),
            },
            graph=Graph(nodes={"a", "b"}, edges={("a", "b", "prereq")}),
        )

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)
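
This test pins down the serialization contract the other examples rely on: to_json() and from_json() round-trip to an equal graph, and from_json also hands back the tasks. The same property outside a test class, as a sketch:

# Deserializing the serialized graph yields an equal TaskGraph; that the
# first tuple element mirrors the label-keyed tasks mapping is an
# assumption based on how the other examples unpack it.
tasks, restored = TaskGraph.from_json(graph.to_json())
assert restored == graph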
Example #4
def retrigger_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    label = task['metadata']['name']
    with_downstream = ' '
    to_run = [label]

    if input.get('downstream'):
        to_run = full_task_graph.graph.transitive_closure(set(to_run),
                                                          reverse=True).nodes
        to_run = to_run & set(label_to_taskid.keys())
        with_downstream = ' (with downstream) '

    times = input.get('times', 1)
    for i in xrange(times):
        create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                     decision_task_id)

        logger.info('Scheduled {}{}(time {}/{})'.format(
            label, with_downstream, i + 1, times))
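
When downstream retriggering is requested, the selection widens to a reverse transitive closure and is then intersected with the labels that actually ran. That selection step in isolation, reusing the names from the example:

# reverse=True walks dependents instead of dependencies, i.e. collects
# everything downstream of the retriggered label.
downstream = full_task_graph.graph.transitive_closure({label}, reverse=True).nodes

# Only labels present in label_to_taskid were scheduled originally and
# can therefore be re-created.
to_run = downstream & set(label_to_taskid)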
Example #5
def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    for elem in input['tasks']:
        if elem in full_task_graph.tasks:
            task = full_task_graph.tasks[elem]

            # fix up the task's dependencies, similar to how optimization would
            # have done in the decision
            dependencies = {
                name: label_to_taskid[label]
                for name, label in task.dependencies.iteritems()
            }
            task_def = resolve_task_references(task.label, task.task,
                                               dependencies)
            task_def.setdefault('dependencies',
                                []).extend(dependencies.itervalues())
            # actually create the new task
            create_task(slugid(), task_def, parameters['level'])
        else:
            raise Exception('{} was not found in the task-graph'.format(elem))
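
The dependency fix-up above mirrors what optimization does during the decision task: dependency labels are swapped for concrete task ids, references inside the task definition are resolved against them, and the ids are appended to the definition's dependencies list. Isolated as a sketch (the Python 2 dict iterators are kept to match the example):

# Map each dependency's label to the task id it was scheduled under.
dependencies = {
    name: label_to_taskid[label]
    for name, label in task.dependencies.iteritems()
}

# Substitute those ids into the task definition, then record them as
# formal dependencies of the task about to be created.
task_def = resolve_task_references(task.label, task.task, dependencies)
task_def.setdefault('dependencies', []).extend(dependencies.itervalues())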
Example #6
def fetch_graph_and_labels(parameters):
    decision_task_id = find_decision_task(parameters)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = 'gecko.v2.{}.pushlog-id.{}.actions'.format(
        parameters['project'], parameters['pushlog_id'])
    for action in list_tasks(namespace):
        try:
            run_label_to_id = get_artifact(action,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.info(
                'Skipping {} due to missing artifact! Error: {}'.format(
                    action, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #7
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds') or promotion_config.get(
        'rebuild-kinds', [])
    do_not_optimize = input.get('do_not_optimize') or promotion_config.get(
        'do-not-optimize', [])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True
    parameters['xpi_name'] = input['xpi_name']
    # TODO
    #  - require this is a specific revision
    #  - possibly also check that this is on a reviewed PR or merged into
    #    a trusted branch. this will require an oauth token
    parameters['xpi_revision'] = input.get('revision', 'master')
    parameters['shipping_phase'] = input['release_promotion_flavor']

    # We blow away `tasks_for` when we load the on-push decision task's
    # parameters.yml. Let's set this back to `action`.
    parameters['tasks_for'] = "action"

    if input.get('version'):
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
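
Merging the raw JSON graphs before a single TaskGraph.from_json call is what lets later release-promotion graphs override earlier ones: dict.update gives the rightmost graph precedence per task label, as the comment above notes. The merge step on its own:

# Merge raw graphs label-by-label; later graph ids win on conflicts.
combined_full_task_graph = {}
for graph_id in previous_graph_ids:
    combined_full_task_graph.update(
        get_artifact(graph_id, "public/full-task-graph.json"))

# Deserialize once, after merging.
_, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)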
Example #8
def geckoprofile_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']
    pushes = []
    depth = 2
    end_id = int(parameters['pushlog_id'])

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'], start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + list(r.json()['pushes'].keys())
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/full-task-graph.json')
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/label-to-taskid.json')
            push_params = get_artifact_from_index(
                    INDEX_TMPL.format(parameters['project'], push),
                    'public/parameters.yml')
            push_decision_task_id = find_decision_task(push_params, graph_config)
        except HTTPError as e:
            logger.info('Skipping {} due to missing index artifacts! Error: {}'.format(push, e))
            continue

        if label in full_task_graph.tasks.keys():
            def modifier(task):
                if task.label != label:
                    return task

                cmd = task.task['payload']['command']
                task.task['payload']['command'] = add_args_to_perf_command(
                        cmd, ['--gecko-profile'])
                task.task['extra']['treeherder']['symbol'] += '-p'
                return task

            create_tasks(graph_config, [label], full_task_graph, label_to_taskid,
                         push_params, push_decision_task_id, push, modifier=modifier)
            backfill_pushes.append(push)
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(label, push))
    combine_task_graph_files(backfill_pushes)
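
The modifier callback is the hook create_tasks exposes for editing task definitions before scheduling: judging by the examples it is handed each task and must return one, so matching on task.label first keeps the edit scoped to the intended task. A stripped-down modifier following the same contract:

def modifier(task):
    # Return all other tasks untouched.
    if task.label != label:
        return task

    # Edit the matching task's definition in place, then return it.
    cmd = task.task['payload']['command']
    task.task['payload']['command'] = add_args_to_perf_command(cmd, ['--gecko-profile'])
    task.task['extra']['treeherder']['symbol'] += '-p'
    return task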
Example #9
def generate_tasks(params=None, full=False):
    # TODO: Remove after January 1st, 2020.
    # Try to delete the old taskgraph cache directories.
    root = build.topsrcdir
    root_hash = hashlib.sha256(os.path.abspath(root)).hexdigest()
    old_cache_dirs = [
        os.path.join(get_state_dir(), 'cache', 'taskgraph'),
        os.path.join(get_state_dir(), 'cache', root_hash, 'taskgraph'),
    ]
    for cache_dir in old_cache_dirs:
        if os.path.isdir(cache_dir):
            shutil.rmtree(cache_dir)

    cache_dir = os.path.join(get_state_dir(srcdir=True), 'cache', 'taskgraph')
    attr = 'full_task_set' if full else 'target_task_set'
    cache = os.path.join(cache_dir, attr)

    invalidate(cache, root)
    if os.path.isfile(cache):
        with open(cache, 'r') as fh:
            return TaskGraph.from_json(json.load(fh))[1]

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    print("Task configuration changed, generating {}".format(attr.replace('_', ' ')))

    taskgraph.fast = True
    cwd = os.getcwd()
    os.chdir(root)

    root = os.path.join(root, 'taskcluster', 'ci')
    params = parameters_loader(params, strict=False, overrides={'try_mode': 'try_select'})

    # Cache both full_task_set and target_task_set regardless of whether or not
    # --full was requested. Caching is cheap and can potentially save a lot of
    # time.
    generator = TaskGraphGenerator(root_dir=root, parameters=params)

    def generate(attr):
        try:
            tg = getattr(generator, attr)
        except ParameterMismatch as e:
            print(PARAMETER_MISMATCH.format(e.args[0]))
            sys.exit(1)

        # write cache
        with open(os.path.join(cache_dir, attr), 'w') as fh:
            json.dump(tg.to_json(), fh)
        return tg

    tg_full = generate('full_task_set')
    tg_target = generate('target_task_set')

    os.chdir(cwd)
    if full:
        return tg_full
    return tg_target
Example #10
def fetch_graph_and_labels(parameters, graph_config):
    decision_task_id = find_decision_task(parameters, graph_config)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")

    # fetch everything in parallel; this avoids serializing any delay in downloading
    # each artifact (such as waiting for the artifact to be mirrored locally)
    with futures.ThreadPoolExecutor(CONCURRENCY) as e:
        fetches = []

        # fetch any modifications made by action tasks and swap out new tasks
        # for old ones
        def fetch_action(task_id):
            logger.info(f"fetching label-to-taskid.json for action task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")

        namespace = "{}.v2.{}.pushlog-id.{}.actions".format(
            graph_config["trust-domain"],
            parameters["project"],
            parameters["pushlog_id"],
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_action, task_id))

        # Similarly for cron tasks.
        def fetch_cron(task_id):
            logger.info(f"fetching label-to-taskid.json for cron task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")

        namespace = "{}.v2.{}.revision.{}.cron".format(
            graph_config["trust-domain"], parameters["project"], parameters["head_rev"]
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_cron, task_id))

        # now wait for each fetch to complete, raising an exception if there
        # were any issues
        for f in futures.as_completed(fetches):
            f.result()

    return (decision_task_id, full_task_graph, label_to_taskid)
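
Compared with the serial loops in Examples #6 and #17, this version fans the label-to-taskid downloads out over a thread pool, treats a 404 as "this task published no modifications", and re-raises anything else when the futures are collected. The submit-and-collect shape, reduced (error handling elided; CONCURRENCY, list_tasks and get_artifact are the names used above):

from concurrent import futures

def fetch(task_id):
    # Merge this task's label-to-taskid mapping into the combined map.
    label_to_taskid.update(get_artifact(task_id, "public/label-to-taskid.json"))

with futures.ThreadPoolExecutor(CONCURRENCY) as executor:
    fetches = [executor.submit(fetch, task_id) for task_id in list_tasks(namespace)]
    for f in futures.as_completed(fetches):
        f.result()  # surfaces any exception raised in a worker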
Example #11
def generate_tasks(params=None, full=False):
    cache_dir = os.path.join(get_state_dir(srcdir=True), 'cache', 'taskgraph')
    attr = 'full_task_set' if full else 'target_task_set'
    cache = os.path.join(cache_dir, attr)

    invalidate(cache)
    if os.path.isfile(cache):
        with open(cache, 'r') as fh:
            return TaskGraph.from_json(json.load(fh))[1]

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    print("Task configuration changed, generating {}".format(attr.replace('_', ' ')))

    taskgraph.fast = True
    cwd = os.getcwd()
    os.chdir(build.topsrcdir)

    root = os.path.join(build.topsrcdir, 'taskcluster', 'ci')
    params = parameters_loader(params, strict=False, overrides={'try_mode': 'try_select'})

    # Cache both full_task_set and target_task_set regardless of whether or not
    # --full was requested. Caching is cheap and can potentially save a lot of
    # time.
    generator = TaskGraphGenerator(root_dir=root, parameters=params)

    def generate(attr):
        try:
            tg = getattr(generator, attr)
        except ParameterMismatch as e:
            print(PARAMETER_MISMATCH.format(e.args[0]))
            sys.exit(1)

        # write cache
        with open(os.path.join(cache_dir, attr), 'w') as fh:
            json.dump(tg.to_json(), fh)
        return tg

    tg_full = generate('full_task_set')
    tg_target = generate('target_task_set')
    # discard the results from these; we only need the cache they write.
    if full:
        generate('full_task_graph')
    generate('target_task_graph')

    os.chdir(cwd)
    if full:
        return tg_full
    return tg_target
Example #12
def backfill_action(parameters, graph_config, input, task_group_id, task_id,
                    task):
    label = task['metadata']['name']
    pushes = []
    depth = input.get('depth', 5)
    end_id = int(parameters['pushlog_id']) - 1

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + r.json()['pushes'].keys()
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/full-task-graph.json')
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/label-to-taskid.json')
            push_params = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/parameters.yml')
            push_decision_task_id = find_decision_task(push_params,
                                                       graph_config)
        except HTTPError as e:
            logger.info(
                'Skipping {} due to missing index artifacts! Error: {}'.format(
                    push, e))
            continue

        if label in full_task_graph.tasks.keys():
            create_tasks([label], full_task_graph, label_to_taskid,
                         push_params, push_decision_task_id, push)
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(
                label, push))
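
The while loop pages backwards through the pushlog in depth-sized windows until at least depth pushes have been collected (or history runs out), then keeps only the most recent depth of them. Just the windowing arithmetic, with fetch_push_ids as a hypothetical stand-in for the pushlog GET above:

pushes = []
end_id = int(parameters['pushlog_id']) - 1

while True:
    start_id = max(end_id - depth, 0)
    pushes = pushes + fetch_push_ids(start_id, end_id)  # hypothetical wrapper
    if len(pushes) >= depth:
        break
    # Slide the window further into the past.
    end_id = start_id - 1
    start_id -= depth
    if start_id < 0:
        break

pushes = sorted(pushes)[-depth:]  # the most recent `depth` pushes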
Example #13
def generate_tasks(params, full, root):
    params = params or "project=mozilla-central"

    # Try to delete the old taskgraph cache directory.
    old_cache_dir = os.path.join(get_state_dir(), 'cache', 'taskgraph')
    if os.path.isdir(old_cache_dir):
        shutil.rmtree(old_cache_dir)

    root_hash = hashlib.sha256(os.path.abspath(root)).hexdigest()
    cache_dir = os.path.join(get_state_dir(), 'cache', root_hash, 'taskgraph')

    # Cleanup old cache files
    for path in glob.glob(os.path.join(cache_dir, '*_set')):
        os.remove(path)

    attr = 'full_task_graph' if full else 'target_task_graph'
    cache = os.path.join(cache_dir, attr)

    invalidate(cache, root)
    if os.path.isfile(cache):
        with open(cache, 'r') as fh:
            return TaskGraph.from_json(json.load(fh))[1]

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    print("Task configuration changed, generating {}".format(
        attr.replace('_', ' ')))

    taskgraph.fast = True
    cwd = os.getcwd()
    os.chdir(build.topsrcdir)

    root = os.path.join(root, 'taskcluster', 'ci')
    params = parameters_loader(params,
                               strict=False,
                               overrides={'try_mode': 'try_select'})
    try:
        tg = getattr(TaskGraphGenerator(root_dir=root, parameters=params),
                     attr)
    except ParameterMismatch as e:
        print(PARAMETER_MISMATCH.format(e.args[0]))
        sys.exit(1)

    os.chdir(cwd)

    with open(cache, 'w') as fh:
        json.dump(tg.to_json(), fh)
    return tg
Example #14
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() on the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS))
    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = input.get(
        'target_tasks_method', promotion_config['target_tasks_method'].format(
            project=parameters['project']))
    previous_graph_kinds = input.get('previous_graph_kinds',
                                     promotion_config['previous_graph_kinds'])
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config['do_not_optimize'])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters and full task graph from the first decision task.
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    full_task_graph = get_artifact(previous_graph_ids[0],
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        full_task_graph, previous_graph_ids, previous_graph_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = str(input['build_number'])
    parameters['next_version'] = next_version

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
Example #15
def backfill_action(parameters, input, task_group_id, task_id, task):
    label = task['metadata']['name']
    pushes = []
    depth = input.get('depth', 5)
    end_id = int(parameters['pushlog_id']) - 1

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + r.json()['pushes'].keys()
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]

    for push in pushes:
        full_task_graph = get_artifact_from_index(
            INDEX_TMPL.format(parameters['project'], push),
            'public/full-task-graph.json')
        _, full_task_graph = TaskGraph.from_json(full_task_graph)
        label_to_taskid = get_artifact_from_index(
            INDEX_TMPL.format(parameters['project'], push),
            'public/label-to-taskid.json')

        if label in full_task_graph.tasks.keys():
            task = full_task_graph.tasks[label]
            dependencies = {
                name: label_to_taskid[label]
                for name, label in task.dependencies.iteritems()
            }
            task_def = resolve_task_references(task.label, task.task,
                                               dependencies)
            task_def.setdefault('dependencies',
                                []).extend(dependencies.itervalues())
            create_task(slugid(), task_def, parameters['level'])
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(
                label, push))
Example #16
def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    to_run = []
    for elem in input['tasks']:
        if elem in full_task_graph.tasks:
            to_run.append(elem)
        else:
            raise Exception('{} was not found in the task-graph'.format(elem))

    create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                 decision_task_id)
Example #17
def fetch_graph_and_labels(parameters, graph_config):
    decision_task_id = find_decision_task(parameters, graph_config)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = '{}.v2.{}.pushlog-id.{}.actions'.format(
        graph_config['trust-domain'], parameters['project'],
        parameters['pushlog_id'])
    for task_id in list_tasks(namespace):
        logger.info(
            'fetching label-to-taskid.json for action task {}'.format(task_id))
        try:
            run_label_to_id = get_artifact(task_id,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.debug('No label-to-taskid.json found for {}: {}'.format(
                task_id, e))
            continue

    # Similarly for cron tasks.
    namespace = '{}.v2.{}.revision.{}.cron'.format(
        graph_config['trust-domain'], parameters['project'],
        parameters['head_rev'])
    for task_id in list_tasks(namespace):
        logger.info(
            'fetching label-to-taskid.json for cron task {}'.format(task_id))
        try:
            run_label_to_id = get_artifact(task_id,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.debug('No label-to-taskid.json found for {}: {}'.format(
                task_id, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #18
    def test_round_trip(self):
        graph = TaskGraph(tasks={
            'a': Task(
                kind='fancy',
                label='a',
                attributes={},
                dependencies={'prereq': 'b'},  # must match edges, below
                optimization={'seta': None},
                task={'task': 'def'}),
            'b': Task(
                kind='pre',
                label='b',
                attributes={},
                dependencies={},
                optimization={'seta': None},
                task={'task': 'def2'}),
        }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)
Example #19
def add_all_talos(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    times = input.get('times', 1)
    for i in xrange(times):
        to_run = [
            label for label, entry in full_task_graph.tasks.iteritems()
            if 'talos_try_name' in entry.attributes
        ]

        create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                     decision_task_id)
        logger.info('Scheduled {} talos tasks (time {}/{})'.format(
            len(to_run), i + 1, times))
Example #20
    def test_round_trip(self):
        graph = TaskGraph(tasks={
            'a': Task(
                kind='fancy',
                label='a',
                description='Task A',
                attributes={},
                dependencies={'prereq': 'b'},  # must match edges, below
                optimization={'skip-unless-has-relevant-tests': None},
                task={'task': 'def'}),
            'b': Task(
                kind='pre',
                label='b',
                attributes={},
                dependencies={},
                optimization={'skip-unless-has-relevant-tests': None},
                task={'task': 'def2'}),
        }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)
Example #21
def run_missing_tests(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != 'test':
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(task)

    for task in to_run:

        # fix up the task's dependencies, similar to how optimization would
        # have done in the decision
        dependencies = {
            name: label_to_taskid[label]
            for name, label in task.dependencies.iteritems()
        }
        task_def = resolve_task_references(task.label, task.task, dependencies)
        task_def.setdefault('dependencies',
                            []).extend(dependencies.itervalues())
        create_task(slugid(), task_def, parameters['level'])

    logger.info(
        'Out of {} test tasks, {} already existed and the action created {}'.
        format(already_run + len(to_run), already_run, len(to_run)))
Example #22
def fetch_graph_and_labels(parameters):
    decision_task_id = find_decision_task(parameters)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = 'gecko.v2.{}.pushlog-id.{}.actions'.format(
        parameters['project'],
        parameters['pushlog_id'])
    for action in list_tasks(namespace):
        try:
            run_label_to_id = get_artifact(action, "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.info('Skipping {} due to missing artifact! Error: {}'.format(action, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #23
    def test_round_trip(self):
        graph = TaskGraph(
            tasks={
                'a':
                Task(
                    kind='fancy',
                    label='a',
                    attributes={},
                    dependencies={'prereq': 'b'},  # must match edges, below
                    optimizations=[['seta']],
                    task={'task': 'def'}),
                'b':
                Task(kind='pre',
                     label='b',
                     attributes={},
                     dependencies={},
                     optimizations=[['seta']],
                     task={'task': 'def2'}),
            },
            graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))

        tasks, new_graph = TaskGraph.from_json(graph.to_json())
        self.assertEqual(graph, new_graph)
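
Note the older Task signature in this example: optimizations takes a list of optimization specs (here [['seta']]), where Examples #3, #18 and #20 above pass a single optimization dict instead.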
Example #24
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input["release_promotion_flavor"]
    promotion_config = graph_config["release-promotion"]["flavors"][
        release_promotion_flavor]
    release_history = {}
    product = promotion_config["product"]

    next_version = str(input.get("next_version") or "")
    if promotion_config.get("version-bump", False):
        # We force str() on the input, hence the 'None'
        if next_version in ["", "None"]:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor))

    if promotion_config.get("partial-updates", False):
        partial_updates = input.get("partial_updates", {})
        if not partial_updates and release_level(
                parameters["project"]) == "production":
            raise Exception(
                "`partial_updates` property needs to be provided for `{}`"
                "target.".format(release_promotion_flavor))
        balrog_prefix = product.title()
        os.environ["PARTIAL_UPDATES"] = json.dumps(partial_updates,
                                                   sort_keys=True)
        release_history = populate_release_history(
            balrog_prefix,
            parameters["project"],
            partial_updates=partial_updates)

    target_tasks_method = promotion_config["target-tasks-method"].format(
        project=parameters["project"])
    rebuild_kinds = input.get("rebuild_kinds",
                              promotion_config.get("rebuild-kinds", []))
    do_not_optimize = input.get("do_not_optimize",
                                promotion_config.get("do-not-optimize", []))

    # Build previous_graph_ids from ``previous_graph_ids``, ``revision``,
    # or the action parameters.
    previous_graph_ids = input.get("previous_graph_ids")
    if not previous_graph_ids:
        revision = input.get("revision")
        if revision:
            head_rev_param = "{}head_rev".format(
                graph_config["project-repo-param-prefix"])
            push_parameters = {
                head_rev_param: revision,
                "project": parameters["project"],
            }
        else:
            push_parameters = parameters
        previous_graph_ids = [
            find_decision_task(push_parameters, graph_config)
        ]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters["existing_tasks"] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters["do_not_optimize"] = do_not_optimize
    parameters["target_tasks_method"] = target_tasks_method
    parameters["build_number"] = int(input["build_number"])
    parameters["next_version"] = next_version
    parameters["release_history"] = release_history
    if promotion_config.get("is-rc"):
        parameters["release_type"] += "-rc"
    parameters["release_eta"] = input.get("release_eta", "")
    parameters["release_product"] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters["optimize_target_tasks"] = True

    if release_promotion_flavor == "promote_firefox_partner_repack":
        release_enable_partner_repack = True
        release_enable_partner_attribution = False
        release_enable_emefree = False
    elif release_promotion_flavor == "promote_firefox_partner_attribution":
        release_enable_partner_repack = False
        release_enable_partner_attribution = True
        release_enable_emefree = False
    else:
        # for promotion or ship phases, we use the action input to turn the repacks/attribution off
        release_enable_partner_repack = input.get(
            "release_enable_partner_repack", True)
        release_enable_partner_attribution = input.get(
            "release_enable_partner_attribution", True)
        release_enable_emefree = input.get("release_enable_emefree", True)

    partner_url_config = get_partner_url_config(parameters, graph_config)
    if (release_enable_partner_repack
            and not partner_url_config["release-partner-repack"]):
        raise Exception(
            "Can't enable partner repacks when no config url found")
    if (release_enable_partner_attribution
            and not partner_url_config["release-partner-attribution"]):
        raise Exception(
            "Can't enable partner attribution when no config url found")
    if release_enable_emefree and not partner_url_config[
            "release-eme-free-repack"]:
        raise Exception(
            "Can't enable EMEfree repacks when no config url found")
    parameters["release_enable_partner_repack"] = release_enable_partner_repack
    parameters[
        "release_enable_partner_attribution"] = release_enable_partner_attribution
    parameters["release_enable_emefree"] = release_enable_emefree

    partner_config = input.get("release_partner_config")
    if not partner_config and any([
            release_enable_partner_repack,
            release_enable_partner_attribution,
            release_enable_emefree,
    ]):
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)
    if partner_config:
        parameters["release_partner_config"] = fix_partner_config(
            partner_config)
    parameters["release_partners"] = input.get("release_partners")
    if input.get("release_partner_build_number"):
        parameters["release_partner_build_number"] = input[
            "release_partner_build_number"]

    if input["version"]:
        parameters["version"] = input["version"]

    parameters["required_signoffs"] = get_required_signoffs(input, parameters)
    parameters["signoff_urls"] = get_signoff_urls(input, parameters)

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({"root": graph_config.root_dir}, parameters=parameters)
Example #25
def backfill_action(parameters, graph_config, input, task_group_id, task_id):
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']
    pushes = []
    inclusive_tweak = 1 if input.get('inclusive') else 0
    depth = input.get('depth', 5) + inclusive_tweak
    end_id = int(parameters['pushlog_id']) - (1 - inclusive_tweak)

    while True:
        start_id = max(end_id - depth, 0)
        pushlog_url = PUSHLOG_TMPL.format(parameters['head_repository'],
                                          start_id, end_id)
        r = requests.get(pushlog_url)
        r.raise_for_status()
        pushes = pushes + r.json()['pushes'].keys()
        if len(pushes) >= depth:
            break

        end_id = start_id - 1
        start_id -= depth
        if start_id < 0:
            break

    pushes = sorted(pushes)[-depth:]
    backfill_pushes = []

    for push in pushes:
        try:
            full_task_graph = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/full-task-graph.json')
            _, full_task_graph = TaskGraph.from_json(full_task_graph)
            label_to_taskid = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/label-to-taskid.json')
            push_params = get_artifact_from_index(
                INDEX_TMPL.format(parameters['project'], push),
                'public/parameters.yml')
            push_decision_task_id = find_decision_task(push_params,
                                                       graph_config)
        except HTTPError as e:
            logger.info(
                'Skipping {} due to missing index artifacts! Error: {}'.format(
                    push, e))
            continue

        if label in full_task_graph.tasks.keys():

            def modifier(task):
                if task.label != label:
                    return task

                if input.get('testPath', ''):
                    is_wpttest = 'web-platform' in task.task['metadata'][
                        'name']
                    is_android = 'android' in task.task['metadata']['name']
                    gpu_required = False
                    if (not is_wpttest) and \
                       ('gpu' in task.task['metadata']['name'] or
                        'webgl' in task.task['metadata']['name'] or
                        ('reftest' in task.task['metadata']['name'] and
                         'jsreftest' not in task.task['metadata']['name'])):
                        gpu_required = True

                    # Create new cmd that runs a test-verify type job
                    preamble_length = 3
                    verify_args = [
                        '--e10s', '--verify', '--total-chunk=1',
                        '--this-chunk=1'
                    ]
                    if is_android:
                        # no --e10s; todo, what about future geckoView?
                        verify_args.remove('--e10s')

                    if gpu_required:
                        verify_args.append('--gpu-required')

                    if 'testPath' in input:
                        task.task['payload']['env'][
                            'MOZHARNESS_TEST_PATHS'] = json.dumps({
                                task.task['extra']['suite']['flavor']:
                                [input['testPath']]
                            })

                    cmd_parts = task.task['payload']['command']
                    keep_args = [
                        '--installer-url', '--download-symbols',
                        '--test-packages-url'
                    ]
                    cmd_parts = remove_args_from_command(
                        cmd_parts, preamble_length, keep_args)
                    cmd_parts = add_args_to_command(cmd_parts, verify_args)
                    task.task['payload']['command'] = cmd_parts

                    # morph the task label to a test-verify job
                    pc = task.task['metadata']['name'].split('/')
                    config = pc[-1].split('-')
                    subtype = ''
                    symbol = 'TV-bf'
                    if gpu_required:
                        subtype = '-gpu'
                        symbol = 'TVg-bf'
                    if is_wpttest:
                        subtype = '-wpt'
                        symbol = 'TVw-bf'
                    if not is_android:
                        subtype = "%s-e10s" % subtype
                    newlabel = "%s/%s-test-verify%s" % (pc[0], config[0],
                                                        subtype)
                    task.task['metadata']['name'] = newlabel
                    task.task['tags']['label'] = newlabel

                    task.task['extra']['index']['rank'] = 0
                    task.task['extra']['chunks']['current'] = 1
                    task.task['extra']['chunks']['total'] = 1

                    task.task['extra']['suite']['name'] = 'test-verify'
                    task.task['extra']['suite']['flavor'] = 'test-verify'

                    task.task['extra']['treeherder']['symbol'] = symbol
                    del task.task['extra']['treeherder']['groupSymbol']
                return task

            times = input.get('times', 1)
            for i in xrange(times):
                create_tasks(graph_config, [label],
                             full_task_graph,
                             label_to_taskid,
                             push_params,
                             push_decision_task_id,
                             push,
                             modifier=modifier)
            backfill_pushes.append(push)
        else:
            logger.info('Could not find {} on {}. Skipping.'.format(
                label, push))
    combine_task_graph_files(backfill_pushes)
Example #26
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds') or promotion_config.get(
        'rebuild-kinds', [])
    do_not_optimize = input.get('do_not_optimize') or promotion_config.get(
        'do-not-optimize', [])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True
    parameters['shipping_phase'] = input['release_promotion_flavor']

    version_in_file = read_version_file()
    parameters['version'] = input['version'] if input.get(
        'version') else version_in_file
    version_string = parameters['version']
    if version_string != version_in_file:
        raise ValueError(
            "Version given in tag ({}) does not match the one in version.txt ({})"
            .format(version_string, version_in_file))
    parameters['head_tag'] = 'v{}'.format(version_string)

    parameters['next_version'] = input['next_version']

    parameters['release_type'] = "release"

    parameters['pull_request_number'] = None
    parameters['tasks_for'] = 'action'

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
Example #27
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() on the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS))

    if product in ('firefox', 'devedition'):
        if release_promotion_flavor in PARTIAL_UPDATES_FLAVORS:
            partial_updates = json.dumps(input.get('partial_updates', {}))
            if partial_updates == "{}":
                raise Exception(
                    "`partial_updates` property needs to be provided for %s "
                    "targets." % ', '.join(PARTIAL_UPDATES_FLAVORS))
            balrog_prefix = product.title()
            os.environ['PARTIAL_UPDATES'] = partial_updates
            release_history = populate_release_history(
                balrog_prefix,
                parameters['project'],
                partial_updates=input['partial_updates'])

    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = promotion_config['target_tasks_method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild_kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do_not_optimize', []))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['release_type'] = promotion_config.get('release_type', '')
    parameters['release_eta'] = input.get('release_eta', '')
    if input['version']:
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
Example #28
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if promotion_config.get('version-bump', False):
        # We force str() on the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor))

    if promotion_config.get('partial-updates', False):
        partial_updates = json.dumps(input.get('partial_updates', {}))
        if partial_updates == "{}":
            raise Exception(
                "`partial_updates` property needs to be provided for `{}`"
                "target.".format(release_promotion_flavor))
        balrog_prefix = product.title()
        os.environ['PARTIAL_UPDATES'] = partial_updates
        release_history = populate_release_history(
            balrog_prefix,
            parameters['project'],
            partial_updates=input['partial_updates'])

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild-kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do-not-optimize', []))
    release_enable_partners = input.get(
        'release_enable_partners', parameters['project'] in PARTNER_BRANCHES
        and product in ('firefox', ))
    release_enable_emefree = input.get(
        'release_enable_emefree', parameters['project'] in EMEFREE_BRANCHES
        and product in ('firefox', ))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, graph_config, revision)
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['release_type'] = promotion_config.get('release-type', '')
    parameters['release_eta'] = input.get('release_eta', '')
    parameters['release_enable_partners'] = release_enable_partners
    parameters['release_partners'] = input.get('release_partners')
    parameters['release_enable_emefree'] = release_enable_emefree
    parameters['release_product'] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True

    partner_config = input.get('release_partner_config')
    if not partner_config and (release_enable_emefree
                               or release_enable_partners):
        partner_url_config = get_partner_url_config(
            parameters,
            graph_config,
            enable_emefree=release_enable_emefree,
            enable_partners=release_enable_partners)
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)

    if input.get('release_partner_build_number'):
        parameters['release_partner_build_number'] = input[
            'release_partner_build_number']

    if partner_config:
        parameters['release_partner_config'] = fix_partner_config(
            partner_config)

    if input['version']:
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
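The `'None'` check above only makes sense together with the forced `str()`: a missing input that reaches `str()` as `None` becomes the literal string `'None'`. A standalone sketch of the guard, with a hypothetical helper name and flavor:

def require_next_version(raw_next_version, flavor):
    # `or ''` maps a missing (None) input to '', while a bare
    # str(None) elsewhere would yield the literal 'None' --
    # both sentinels mean "not provided".
    next_version = str(raw_next_version or '')
    if next_version in ('', 'None'):
        raise Exception(
            "`next_version` property needs to be provided for `{}` "
            "target.".format(flavor))
    return next_version

# require_next_version(None, 'promote_firefox')      -> raises
# require_next_version('92.0.1', 'promote_firefox')  -> '92.0.1'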
Example #29
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    release_history = {}
    desktop_release_type = None

    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() on the input, hence the 'None' check
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS)
            )

    if release_promotion_flavor in DESKTOP_RELEASE_TYPE_FLAVORS:
        desktop_release_type = input.get('desktop_release_type', None)
        if desktop_release_type not in VALID_DESKTOP_RELEASE_TYPES:
            raise Exception("`desktop_release_type` must be one of: %s" %
                            ", ".join(VALID_DESKTOP_RELEASE_TYPES))

        if release_promotion_flavor in PARTIAL_UPDATES_FLAVORS:
            partial_updates = json.dumps(input.get('partial_updates', {}))
            if partial_updates == "{}":
                raise Exception(
                    "`partial_updates` property needs to be provided for %s "
                    "targets." % ', '.join(PARTIAL_UPDATES_FLAVORS)
                )
            balrog_prefix = 'Firefox'
            if desktop_release_type == 'devedition':
                balrog_prefix = 'Devedition'
            os.environ['PARTIAL_UPDATES'] = partial_updates
            release_history = populate_release_history(
                balrog_prefix, parameters['project'],
                partial_updates=input['partial_updates']
            )

        if release_promotion_flavor in UPTAKE_MONITORING_PLATFORMS_FLAVORS:
            uptake_monitoring_platforms = json.dumps(input.get('uptake_monitoring_platforms', []))
            if partial_updates == "[]":
                raise Exception(
                    "`uptake_monitoring_platforms` property needs to be provided for %s "
                    "targets." % ', '.join(UPTAKE_MONITORING_PLATFORMS_FLAVORS)
                )
            os.environ['UPTAKE_MONITORING_PLATFORMS'] = uptake_monitoring_platforms

    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = input.get(
        'target_tasks_method',
        promotion_config['target_tasks_method'].format(project=parameters['project'])
    )
    rebuild_kinds = input.get(
        'rebuild_kinds', promotion_config.get('rebuild_kinds', [])
    )
    do_not_optimize = input.get(
        'do_not_optimize', promotion_config.get('do_not_optimize', [])
    )

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds
    )
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['desktop_release_type'] = desktop_release_type
    parameters['release_eta'] = input.get('release_eta', '')

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
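Both guards in this example serialize the input first and compare against the empty JSON literal. A small sketch of that pattern, generalized into a helper (the helper name is illustrative, not part of the real module):

import json

def require_non_empty_json(value, name, flavors):
    # Serializing first yields a single sentinel ("{}" or "[]") for
    # "nothing provided", regardless of the container type.
    serialized = json.dumps(value)
    if serialized in ("{}", "[]"):
        raise Exception(
            "`%s` property needs to be provided for %s targets."
            % (name, ', '.join(flavors)))
    return serialized

# require_non_empty_json({}, 'partial_updates', ['promote_firefox'])        -> raises
# require_non_empty_json(['linux'], 'uptake_monitoring_platforms', [...])   -> '["linux"]'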
Example #30
def release_promotion_action(parameters, graph_config, input, task_group_id, task_id):
    release_promotion_flavor = input["release_promotion_flavor"]
    promotion_config = graph_config["release-promotion"]["flavors"][
        release_promotion_flavor
    ]

    target_tasks_method = promotion_config["target-tasks-method"].format(
        project=parameters["project"]
    )
    rebuild_kinds = input.get("rebuild_kinds") or promotion_config.get(
        "rebuild-kinds", []
    )
    do_not_optimize = input.get("do_not_optimize") or promotion_config.get(
        "do-not-optimize", []
    )

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get("previous_graph_ids")
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters["existing_tasks"] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds
    )
    parameters["do_not_optimize"] = do_not_optimize
    parameters["target_tasks_method"] = target_tasks_method
    parameters["build_number"] = int(input["build_number"])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters["optimize_target_tasks"] = True
    parameters["shipping_phase"] = input["release_promotion_flavor"]

    version_in_file = read_version_file()
    parameters["version"] = (
        input["version"] if input.get("version") else version_in_file
    )
    version_string = parameters["version"]
    if version_string != version_in_file:
        raise ValueError(
            "Version given in tag ({}) does not match the one in version.txt ({})".format(
                version_string, version_in_file
            )
        )
    parameters["head_tag"] = "v{}".format(version_string)

    parameters["next_version"] = input["next_version"]

    version = FenixVersion.parse(version_string)
    if version.is_beta:
        release_type = "beta"
    elif version.is_release or version.is_release_candidate:
        release_type = "release"
    else:
        raise ValueError("Unsupported version type: {}".format(version.version_type))
    parameters["release_type"] = release_type
    parameters["tasks_for"] = "action"

    parameters["pull_request_number"] = None

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({"root": graph_config.root_dir}, parameters=parameters)
Example #31
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if promotion_config.get('version-bump', False):
        # We force str() on the input, hence the 'None' check
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor))

    if promotion_config.get('partial-updates', False):
        partial_updates = input.get('partial_updates', {})
        if not partial_updates and release_level(
                parameters['project']) == 'production':
            raise Exception(
                "`partial_updates` property needs to be provided for `{}`"
                "target.".format(release_promotion_flavor))
        balrog_prefix = product.title()
        os.environ['PARTIAL_UPDATES'] = json.dumps(partial_updates)
        release_history = populate_release_history(
            balrog_prefix,
            parameters['project'],
            partial_updates=partial_updates)

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild-kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do-not-optimize', []))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        if not parameters['pushlog_id']:
            repo_param = '{}head_repository'.format(
                graph_config['project-repo-param-prefix'])
            push_info = find_hg_revision_push_info(
                repository=parameters[repo_param], revision=revision)
            parameters['pushlog_id'] = push_info['pushid']
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    if promotion_config.get('is-rc'):
        parameters['release_type'] += '-rc'
    parameters['release_eta'] = input.get('release_eta', '')
    parameters['release_product'] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True

    # Partner/EMEfree are enabled by default when get_partner_url_config() returns a non-null url
    # The action input may override by sending False. It's an error to send True with no url found
    partner_url_config = get_partner_url_config(parameters, graph_config)
    release_enable_partners = partner_url_config[
        'release-partner-repack'] is not None
    release_enable_emefree = partner_url_config[
        'release-eme-free-repack'] is not None
    if input.get('release_enable_partners') is False:
        release_enable_partners = False
    elif input.get(
            'release_enable_partners') is True and not release_enable_partners:
        raise Exception(
            "Can't enable partner repacks when no config url found")
    if input.get('release_enable_emefree') is False:
        release_enable_emefree = False
    elif input.get(
            'release_enable_emefree') is True and not release_enable_emefree:
        raise Exception("Can't enable EMEfree when no config url found")
    parameters['release_enable_partners'] = release_enable_partners
    parameters['release_enable_emefree'] = release_enable_emefree

    partner_config = input.get('release_partner_config')
    if not partner_config and (release_enable_emefree
                               or release_enable_partners):
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)
    if partner_config:
        parameters['release_partner_config'] = fix_partner_config(
            partner_config)
    parameters['release_partners'] = input.get('release_partners')
    if input.get('release_partner_build_number'):
        parameters['release_partner_build_number'] = input[
            'release_partner_build_number']

    if input['version']:
        parameters['version'] = input['version']

    parameters['required_signoffs'] = get_required_signoffs(input, parameters)
    parameters['signoff_urls'] = get_signoff_urls(input, parameters)

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
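The partner/EMEfree gating above follows a three-way convention: an absent input keeps the URL-derived default, an explicit False disables, and an explicit True is only legal when a config URL exists. A compact sketch of that rule (the function name is illustrative):

def resolve_enable_flag(input_value, url_configured, feature):
    # Default follows whether a config URL was found; the action input
    # may veto the default, but cannot force-enable without a URL.
    if input_value is False:
        return False
    if input_value is True and not url_configured:
        raise Exception(
            "Can't enable {} when no config url found".format(feature))
    return url_configured

# resolve_enable_flag(None, True, 'partner repacks')   -> True
# resolve_enable_flag(False, True, 'partner repacks')  -> False
# resolve_enable_flag(True, False, 'partner repacks')  -> raises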
Example #32
def mochitest_retrigger_action(parameters, input, task_group_id, task_id,
                               task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to what optimization
    # would have done in the decision task
    dependencies = {
        name: label_to_taskid[label]
        for name, label in pre_task.dependencies.items()
    }
    new_task_definition = resolve_task_references(pre_task.label,
                                                  pre_task.task, dependencies)
    new_task_definition.setdefault('dependencies',
                                   []).extend(dependencies.values())

    # don't want to run mozharness tests, want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']

    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false', '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += [
        '--log-tbpl=-',
        '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))
    ]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input['repeat'])]

    # add any custom gecko preferences
    for key, val in input.get('preferences', {}).items():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    logging.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
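The retrigger above ships the entire custom invocation to the worker as a single environment variable. A condensed sketch of the command assembly, with hypothetical action-input values:

# Sketch: how CUSTOM_MACH_COMMAND is assembled from hypothetical input.
action_input = {'logLevel': 'debug', 'runUntilFail': True, 'repeat': 10,
                'preferences': {'dom.serviceWorkers.testing.enabled': 'true'},
                'path': 'dom/workers/test'}

command = ['mochitest', '--log-tbpl=-',
           '--log-tbpl-level={}'.format(action_input.get('logLevel', 'debug'))]
if action_input.get('runUntilFail'):
    command += ['--run-until-failure']
if action_input.get('repeat'):
    command += ['--repeat', str(action_input['repeat'])]
for key, val in action_input.get('preferences', {}).items():
    command += ['--setpref', '{}={}'.format(key, val)]
command += [action_input['path']]

env_value = ' '.join(command)  # what lands in payload.env CUSTOM_MACH_COMMAND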