Example #1
def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    for elem in input['tasks']:
        if elem in full_task_graph.tasks:
            task = full_task_graph.tasks[elem]

            # fix up the task's dependencies, similar to how optimization would
            # have done in the decision
            dependencies = {
                name: label_to_taskid[label]
                for name, label in task.dependencies.iteritems()
            }
            task_def = resolve_task_references(task.label, task.task,
                                               dependencies)
            task_def.setdefault('dependencies',
                                []).extend(dependencies.itervalues())
            # actually create the new task
            create_task(slugid(), task_def, parameters['level'])
        else:
            raise Exception('{} was not found in the task-graph'.format(elem))
Example #2
def retrigger_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    label = task['metadata']['name']
    with_downstream = ' '
    to_run = [label]

    if input.get('downstream'):
        to_run = full_task_graph.graph.transitive_closure(set(to_run),
                                                          reverse=True).nodes
        to_run = to_run & set(label_to_taskid.keys())
        with_downstream = ' (with downstream) '

    times = input.get('times', 1)
    for i in xrange(times):
        create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                     decision_task_id)

        logger.info('Scheduled {}{}(time {}/{})'.format(
            label, with_downstream, i + 1, times))
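For context on the full_task_graph.graph.transitive_closure(..., reverse=True) call above: with reverse=True the closure collects the given labels plus every task that directly or transitively depends on them, which is what "with downstream" means here. A minimal illustration of that idea over a plain dependency dict (this is not taskgraph's Graph class; the names are invented):

# Hypothetical dependency map: task -> set of tasks it depends on.
deps = {
    "test-1": {"build"},
    "test-2": {"build"},
    "signing": {"build"},
    "beetmover": {"signing"},
}

def downstream_closure(labels, deps):
    """Return `labels` plus everything that transitively depends on them."""
    result = set(labels)
    changed = True
    while changed:
        changed = False
        for task, task_deps in deps.items():
            if task not in result and task_deps & result:
                result.add(task)
                changed = True
    return result

print(sorted(downstream_closure({"build"}, deps)))
# ['beetmover', 'build', 'signing', 'test-1', 'test-2']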
Example #3
File: util.py  Project: abcwow/gecko-dev
def fetch_graph_and_labels(parameters):
    decision_task_id = find_decision_task(parameters)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = 'gecko.v2.{}.pushlog-id.{}.actions'.format(
        parameters['project'], parameters['pushlog_id'])
    for action in list_tasks(namespace):
        try:
            run_label_to_id = get_artifact(action,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.info(
                'Skipping {} due to missing artifact! Error: {}'.format(
                    action, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #4
def run_missing_tests(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != 'test':
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(label)

    create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                 decision_task_id)

    logger.info(
        'Out of {} test tasks, {} already existed and the action created {}'.
        format(already_run + len(to_run), already_run, len(to_run)))
Example #5
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds') or promotion_config.get(
        'rebuild-kinds', [])
    do_not_optimize = input.get('do_not_optimize') or promotion_config.get(
        'do-not-optimize', [])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True
    parameters['xpi_name'] = input['xpi_name']
    # TODO
    #  - require this is a specific revision
    #  - possibly also check that this is on a reviewed PR or merged into
    #    a trusted branch. this will require an oauth token
    parameters['xpi_revision'] = input.get('revision', 'master')
    parameters['shipping_phase'] = input['release_promotion_flavor']

    # We blow away `tasks_for` when we load the on-push decision task's
    # parameters.yml. Let's set this back to `action`.
    parameters['tasks_for'] = "action"

    if input.get('version'):
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
Example #6
def fetch_graph_and_labels(parameters, graph_config):
    decision_task_id = find_decision_task(parameters, graph_config)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")

    # fetch everything in parallel; this avoids serializing any delay in downloading
    # each artifact (such as waiting for the artifact to be mirrored locally)
    with futures.ThreadPoolExecutor(CONCURRENCY) as e:
        fetches = []

        # fetch any modifications made by action tasks and swap out new tasks
        # for old ones
        def fetch_action(task_id):
            logger.info(f"fetching label-to-taskid.json for action task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")

        namespace = "{}.v2.{}.pushlog-id.{}.actions".format(
            graph_config["trust-domain"],
            parameters["project"],
            parameters["pushlog_id"],
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_action, task_id))

        # Similarly for cron tasks.
        def fetch_cron(task_id):
            logger.info(f"fetching label-to-taskid.json for cron task {task_id}")
            try:
                run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
                label_to_taskid.update(run_label_to_id)
            except HTTPError as e:
                if e.response.status_code != 404:
                    raise
                logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")

        namespace = "{}.v2.{}.revision.{}.cron".format(
            graph_config["trust-domain"], parameters["project"], parameters["head_rev"]
        )
        for task_id in list_tasks(namespace):
            fetches.append(e.submit(fetch_cron, task_id))

        # now wait for each fetch to complete, raising an exception if there
        # were any issues
        for f in futures.as_completed(fetches):
            f.result()

    return (decision_task_id, full_task_graph, label_to_taskid)
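The ThreadPoolExecutor pattern used above (submit every fetch, then drain futures.as_completed so that any worker exception is re-raised) is generic. Here is a minimal, self-contained sketch of the same structure with a stubbed fetch function; the names are illustrative and not taskgraph APIs:

from concurrent import futures

CONCURRENCY = 8

def fetch_stub(task_id):
    # Stand-in for get_artifact(): pretend to download label-to-taskid.json.
    return {"label-for-{}".format(task_id): task_id}

label_to_taskid = {}
task_ids = ["aaa", "bbb", "ccc"]

with futures.ThreadPoolExecutor(CONCURRENCY) as executor:
    fetches = [executor.submit(fetch_stub, tid) for tid in task_ids]
    # Wait for every fetch; f.result() re-raises any exception from the worker.
    for f in futures.as_completed(fetches):
        label_to_taskid.update(f.result())

print(label_to_taskid)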
Example #7
def find_existing_tasks(previous_graph_ids):
    existing_tasks = {}
    for previous_graph_id in previous_graph_ids:
        label_to_taskid = get_artifact(previous_graph_id,
                                       "public/label-to-taskid.json")
        existing_tasks.update(label_to_taskid)
    return existing_tasks
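Because existing_tasks.update(...) is applied in list order, a label that appears in more than one previous graph ends up mapped to the taskId from the last (rightmost) graph in previous_graph_ids. A tiny sketch of that precedence with made-up data:

graphs = [
    {"build-linux64": "TASK_A", "test-xpcshell": "TASK_B"},  # older graph
    {"build-linux64": "TASK_C"},                             # newer graph wins on conflict
]

existing_tasks = {}
for label_to_taskid in graphs:
    existing_tasks.update(label_to_taskid)

print(existing_tasks)
# {'build-linux64': 'TASK_C', 'test-xpcshell': 'TASK_B'}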
Example #8
def is_backstop(params,
                push_interval=BACKSTOP_PUSH_INTERVAL,
                time_interval=BACKSTOP_TIME_INTERVAL):
    """Determines whether the given parameters represent a backstop push.

    Args:
        push_interval (int): Number of pushes
        time_interval (int): Minutes between forced schedules.
                             Use 0 to disable.
    Returns:
        bool: True if this is a backstop, otherwise False.
    """
    # In case this is being faked on try.
    if params.get("backstop", False):
        return True

    project = params["project"]
    pushid = int(params["pushlog_id"])
    pushdate = int(params["pushdate"])

    if project == "try":
        return False
    elif project != "autoland":
        return True

    # On every Nth push, want to run all tasks.
    if pushid % push_interval == 0:
        return True

    if time_interval <= 0:
        return False

    # We also want to ensure we run all tasks at least once per N minutes.
    index = BACKSTOP_INDEX.format(project=project)

    try:
        last_backstop_id = find_task_id(index)
    except KeyError:
        # Index wasn't found, implying there hasn't been a backstop push yet.
        return True

    if status_task(last_backstop_id) in ("failed", "exception"):
        # If the last backstop failed its decision task, make this a backstop.
        return True

    try:
        last_pushdate = get_artifact(last_backstop_id,
                                     "public/parameters.yml")["pushdate"]
    except HTTPError as e:
        # If the last backstop decision task exists in the index, but
        # parameters.yml isn't available yet, it means the decision task is
        # still running. If that's the case, we can be pretty sure the time
        # component will not cause a backstop, so just return False.
        if e.response.status_code == 404:
            return False
        raise

    if (pushdate - last_pushdate) / 60 >= time_interval:
        return True
    return False
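In the time check above, pushdate and the stored last_pushdate are Unix timestamps in seconds, so dividing their difference by 60 converts it to minutes before comparing against time_interval. A quick sketch with invented numbers (the interval shown is illustrative, not necessarily the real BACKSTOP_TIME_INTERVAL):

time_interval = 240                   # minutes between forced backstops (illustrative value)

last_pushdate = 1600000000            # seconds since epoch, from the last backstop's parameters.yml
pushdate = last_pushdate + 250 * 60   # a push arriving 250 minutes later

minutes_elapsed = (pushdate - last_pushdate) / 60
print(minutes_elapsed >= time_interval)  # True -> this push becomes a backstop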
Example #9
def trigger_action(action_name, decision_task_id, task_id=None, input={}):
    if not decision_task_id:
        raise ValueError(
            "No decision task. We can't find the actions artifact.")
    actions_json = get_artifact(decision_task_id, "public/actions.json")
    if actions_json["version"] != 1:
        raise RuntimeError("Wrong version of actions.json, unable to continue")

    # These values substitute $eval in the template
    context = {
        "input": input,
        "taskId": task_id,
        "taskGroupId": decision_task_id,
    }
    # https://docs.taskcluster.net/docs/manual/design/conventions/actions/spec#variables
    context.update(actions_json["variables"])
    action = _extract_applicable_action(actions_json, action_name,
                                        decision_task_id, task_id)
    kind = action["kind"]
    if kind == "hook":
        hook_payload = jsone.render(action["hookPayload"], context)
        trigger_hook(action["hookGroupId"], action["hookId"], hook_payload)
    else:
        raise NotImplementedError(
            "Unable to submit actions with {} kind.".format(kind))
Example #10
def run_missing_tests(parameters, graph_config, input, task_group_id, task_id):
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config
    )
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != "test":
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(label)

    create_tasks(
        graph_config,
        to_run,
        full_task_graph,
        label_to_taskid,
        parameters,
        decision_task_id,
    )

    logger.info(
        "Out of {} test tasks, {} already existed and the action created {}".format(
            already_run + len(to_run), already_run, len(to_run)
        )
    )
Example #11
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS))
    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = input.get(
        'target_tasks_method', promotion_config['target_tasks_method'].format(
            project=parameters['project']))
    previous_graph_kinds = input.get('previous_graph_kinds',
                                     promotion_config['previous_graph_kinds'])
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config['do_not_optimize'])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters and full task graph from the first decision task.
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    full_task_graph = get_artifact(previous_graph_ids[0],
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        full_task_graph, previous_graph_ids, previous_graph_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = str(input['build_number'])
    parameters['next_version'] = next_version

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
Example #12
def add_new_jobs_action(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    to_run = []
    for elem in input['tasks']:
        if elem in full_task_graph.tasks:
            to_run.append(elem)
        else:
            raise Exception('{} was not found in the task-graph'.format(elem))

    create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                 decision_task_id)
Example #13
def fetch_cron(task_id):
    logger.info(f"fetching label-to-taskid.json for cron task {task_id}")
    try:
        run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
        label_to_taskid.update(run_label_to_id)
    except HTTPError as e:
        if e.response.status_code != 404:
            raise
        logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")
Example #14
def fetch_action(task_id):
    logger.info('fetching label-to-taskid.json for action task {}'.format(task_id))
    try:
        run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
        label_to_taskid.update(run_label_to_id)
    except HTTPError as e:
        if e.response.status_code != 404:
            raise
        logger.debug('No label-to-taskid.json found for {}: {}'.format(task_id, e))
Example #15
def fetch_graph_and_labels(parameters, graph_config):
    decision_task_id = find_decision_task(parameters, graph_config)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = '{}.v2.{}.pushlog-id.{}.actions'.format(
        graph_config['trust-domain'], parameters['project'],
        parameters['pushlog_id'])
    for task_id in list_tasks(namespace):
        logger.info(
            'fetching label-to-taskid.json for action task {}'.format(task_id))
        try:
            run_label_to_id = get_artifact(task_id,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.debug('No label-to-taskid.json found for {}: {}'.format(
                task_id, e))
            continue

    # Similarly for cron tasks.
    namespace = '{}.v2.{}.revision.{}.cron'.format(
        graph_config['trust-domain'], parameters['project'],
        parameters['head_rev'])
    for task_id in list_tasks(namespace):
        logger.info(
            'fetching label-to-taskid.json for cron task {}'.format(task_id))
        try:
            run_label_to_id = get_artifact(task_id,
                                           "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.debug('No label-to-taskid.json found for {}: {}'.format(
                task_id, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #16
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_test = re.compile(r'"test": "([^"]+)"')
    re_bad_test = re.compile(r'(Last test finished|'
                             r'Main app process exited normally|'
                             r'[(]SimpleTest/TestRunner.js[)]|'
                             r'remoteautomation.py|'
                             r'unknown test url|'
                             r'https?://localhost:\d+/\d+/\d+/.*[.]html)')
    re_extract_tests = [
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)'),
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ ]+)'),
        re.compile(r'xpcshell-[^ ]+\.ini:(.*)'),
    ]

    def munge_test_path(test_path):
        if re_bad_test.search(test_path):
            return None
        for r in re_extract_tests:
            m = r.match(test_path)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' in artifact and artifact['name'].endswith('errorsummary.log'):
            stream = get_artifact(task_id, artifact['name'])
            if stream:
                # Read all of the content from the stream and split
                # the lines out since on macosx and windows, the first
                # line is empty.
                for line in stream.read().split('\n'):
                    line = line.strip()
                    match = re_test.search(line)
                    if match:
                        test_path = munge_test_path(match.group(1))
                        if test_path:
                            tests.add(test_path)
                            test_dir = os.path.dirname(test_path)
                            if test_dir:
                                dirs.add(test_dir)
    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
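To see what munge_test_path actually extracts, here is a small standalone sketch that runs the same kind of regular expressions against an invented errorsummary.log line (only the pattern matters; the path is made up):

import os
import re

re_test = re.compile(r'"test": "([^"]+)"')
re_reftest = re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)')

line = ('{"status": "FAIL", "test": '
        '"file:///builds/worker/tests/reftest/tests/layout/reftests/bugs/123-1.html"}')

test_path = re_reftest.match(re_test.search(line).group(1)).group(1)
print(test_path)                   # layout/reftests/bugs/123-1.html
print(os.path.dirname(test_path))  # layout/reftests/bugs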
Example #17
def add_all_talos(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    times = input.get('times', 1)
    for i in xrange(times):
        to_run = [
            label for label, entry in full_task_graph.tasks.iteritems()
            if 'talos_try_name' in entry.attributes
        ]

        create_tasks(to_run, full_task_graph, label_to_taskid, parameters,
                     decision_task_id)
        logger.info('Scheduled {} talos tasks (time {}/{})'.format(
            len(to_run), i + 1, times))
Example #18
def run_missing_tests(parameters, input, task_group_id, task_id, task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    # The idea here is to schedule all tasks of the `test` kind that were
    # targeted but did not appear in the final task-graph -- those were the
    # optimized tasks.
    to_run = []
    already_run = 0
    for label in target_tasks:
        task = full_task_graph.tasks[label]
        if task.kind != 'test':
            continue  # not a test
        if label in label_to_taskid:
            already_run += 1
            continue
        to_run.append(task)

    for task in to_run:

        # fix up the task's dependencies, similar to how optimization would
        # have done in the decision
        dependencies = {
            name: label_to_taskid[label]
            for name, label in task.dependencies.iteritems()
        }
        task_def = resolve_task_references(task.label, task.task, dependencies)
        task_def.setdefault('dependencies',
                            []).extend(dependencies.itervalues())
        create_task(slugid(), task_def, parameters['level'])

    logger.info(
        'Out of {} test tasks, {} already existed and the action created {}'.
        format(already_run + len(to_run), already_run, len(to_run)))
Example #19
def find_existing_tasks_from_previous_kinds(full_task_graph, previous_graph_ids,
                                            rebuild_kinds):
    """Given a list of previous decision/action taskIds and kinds to ignore
    from the previous graphs, return a dictionary of labels-to-taskids to use
    as ``existing_tasks`` in the optimization step."""
    existing_tasks = {}
    for previous_graph_id in previous_graph_ids:
        label_to_taskid = get_artifact(previous_graph_id, "public/label-to-taskid.json")
        kind_labels = set(t.label for t in full_task_graph.tasks.itervalues()
                          if t.attributes['kind'] not in rebuild_kinds)
        for label in set(label_to_taskid.keys()).intersection(kind_labels):
            existing_tasks[label] = label_to_taskid[label]
    return existing_tasks
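A small sketch of the filtering above with toy data, using a namedtuple as a stand-in for taskgraph's task objects (only label and attributes matter here):

from collections import namedtuple

Task = namedtuple("Task", ["label", "attributes"])

full_tasks = {
    "build-linux64": Task("build-linux64", {"kind": "build"}),
    "signing-linux64": Task("signing-linux64", {"kind": "signing"}),
}
label_to_taskid = {"build-linux64": "TASK_A", "signing-linux64": "TASK_B"}
rebuild_kinds = ["signing"]

# Keep only labels whose kind is not scheduled for a rebuild.
kind_labels = set(t.label for t in full_tasks.values()
                  if t.attributes["kind"] not in rebuild_kinds)
existing_tasks = {label: label_to_taskid[label]
                  for label in set(label_to_taskid) & kind_labels}
print(existing_tasks)  # {'build-linux64': 'TASK_A'}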
Example #20
def fetch_graph_and_labels(parameters):
    decision_task_id = find_decision_task(parameters)

    # First grab the graph and labels generated during the initial decision task
    full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")

    # Now fetch any modifications made by action tasks and swap out new tasks
    # for old ones
    namespace = 'gecko.v2.{}.pushlog-id.{}.actions'.format(
        parameters['project'],
        parameters['pushlog_id'])
    for action in list_tasks(namespace):
        try:
            run_label_to_id = get_artifact(action, "public/label-to-taskid.json")
            label_to_taskid.update(run_label_to_id)
        except HTTPError as e:
            logger.info('Skipping {} due to missing artifact! Error: {}'.format(action, e))
            continue

    return (decision_task_id, full_task_graph, label_to_taskid)
Example #21
def find_existing_tasks_from_previous_kinds(full_task_graph, previous_graph_ids,
                                            rebuild_kinds):
    """Given a list of previous decision/action taskIds and kinds to ignore
    from the previous graphs, return a dictionary of labels-to-taskids to use
    as ``existing_tasks`` in the optimization step."""
    existing_tasks = {}
    for previous_graph_id in previous_graph_ids:
        label_to_taskid = get_artifact(previous_graph_id, "public/label-to-taskid.json")
        kind_labels = set(t.label for t in full_task_graph.tasks.itervalues()
                          if t.attributes['kind'] not in rebuild_kinds)
        for label in set(label_to_taskid.keys()).intersection(kind_labels):
            existing_tasks[label] = label_to_taskid[label]
    return existing_tasks
Example #22
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds') or promotion_config.get(
        'rebuild-kinds', [])
    do_not_optimize = input.get('do_not_optimize') or promotion_config.get(
        'do-not-optimize', [])

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True
    parameters['shipping_phase'] = input['release_promotion_flavor']

    version_in_file = read_version_file()
    parameters['version'] = input['version'] if input.get(
        'version') else read_version_file()
    version_string = parameters['version']
    if version_string != version_in_file:
        raise ValueError(
            "Version given in tag ({}) does not match the one in version.txt ({})"
            .format(version_string, version_in_file))
    parameters['head_tag'] = 'v{}'.format(version_string)

    parameters['next_version'] = input['next_version']

    parameters['release_type'] = "release"

    parameters['pull_request_number'] = None
    parameters['tasks_for'] = 'action'

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
Example #23
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    release_history = {}
    desktop_release_type = None

    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS)
            )

    if release_promotion_flavor in DESKTOP_RELEASE_TYPE_FLAVORS:
        desktop_release_type = input.get('desktop_release_type', None)
        if desktop_release_type not in VALID_DESKTOP_RELEASE_TYPES:
            raise Exception("`desktop_release_type` must be one of: %s" %
                            ", ".join(VALID_DESKTOP_RELEASE_TYPES))

        if release_promotion_flavor in PARTIAL_UPDATES_FLAVORS:
            partial_updates = json.dumps(input.get('partial_updates', {}))
            if partial_updates == "{}":
                raise Exception(
                    "`partial_updates` property needs to be provided for %s "
                    "targets." % ', '.join(PARTIAL_UPDATES_FLAVORS)
                )
            balrog_prefix = 'Firefox'
            if desktop_release_type == 'devedition':
                balrog_prefix = 'Devedition'
            os.environ['PARTIAL_UPDATES'] = partial_updates
            release_history = populate_release_history(
                balrog_prefix, parameters['project'],
                partial_updates=input['partial_updates']
            )

        if release_promotion_flavor in UPTAKE_MONITORING_PLATFORMS_FLAVORS:
            uptake_monitoring_platforms = json.dumps(input.get('uptake_monitoring_platforms', []))
            if partial_updates == "[]":
                raise Exception(
                    "`uptake_monitoring_platforms` property needs to be provided for %s "
                    "targets." % ', '.join(UPTAKE_MONITORING_PLATFORMS_FLAVORS)
                )
            os.environ['UPTAKE_MONITORING_PLATFORMS'] = uptake_monitoring_platforms

    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = input.get(
        'target_tasks_method',
        promotion_config['target_tasks_method'].format(project=parameters['project'])
    )
    rebuild_kinds = input.get(
        'rebuild_kinds', promotion_config.get('rebuild_kinds', [])
    )
    do_not_optimize = input.get(
        'do_not_optimize', promotion_config.get('do_not_optimize', [])
    )

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds
    )
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['desktop_release_type'] = desktop_release_type
    parameters['release_eta'] = input.get('release_eta', '')

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
Example #24
def release_promotion_action(parameters, input, task_group_id, task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if release_promotion_flavor in VERSION_BUMP_FLAVORS:
        # We force str() the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for %s "
                "targets." % ', '.join(VERSION_BUMP_FLAVORS))

    if product in ('firefox', 'devedition'):
        if release_promotion_flavor in PARTIAL_UPDATES_FLAVORS:
            partial_updates = json.dumps(input.get('partial_updates', {}))
            if partial_updates == "{}":
                raise Exception(
                    "`partial_updates` property needs to be provided for %s "
                    "targets." % ', '.join(PARTIAL_UPDATES_FLAVORS))
            balrog_prefix = product.title()
            os.environ['PARTIAL_UPDATES'] = partial_updates
            release_history = populate_release_history(
                balrog_prefix,
                parameters['project'],
                partial_updates=input['partial_updates'])

    promotion_config = RELEASE_PROMOTION_CONFIG[release_promotion_flavor]

    target_tasks_method = promotion_config['target_tasks_method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild_kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do_not_optimize', []))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, revision)
        previous_graph_ids = [find_decision_task(parameters)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['release_type'] = promotion_config.get('release_type', '')
    parameters['release_eta'] = input.get('release_eta', '')
    if input['version']:
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({}, parameters=parameters)
Example #25
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id, task):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if promotion_config.get('version-bump', False):
        # We force str() the input, hence the 'None'
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor))

    if promotion_config.get('partial-updates', False):
        partial_updates = json.dumps(input.get('partial_updates', {}))
        if partial_updates == "{}":
            raise Exception(
                "`partial_updates` property needs to be provided for `{}`"
                "target.".format(release_promotion_flavor))
        balrog_prefix = product.title()
        os.environ['PARTIAL_UPDATES'] = partial_updates
        release_history = populate_release_history(
            balrog_prefix,
            parameters['project'],
            partial_updates=input['partial_updates'])

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild-kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do-not-optimize', []))
    release_enable_partners = input.get(
        'release_enable_partners', parameters['project'] in PARTNER_BRANCHES
        and product in ('firefox', ))
    release_enable_emefree = input.get(
        'release_enable_emefree', parameters['project'] in EMEFREE_BRANCHES
        and product in ('firefox', ))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        parameters['pushlog_id'] = parameters['pushlog_id'] or \
            find_hg_revision_pushlog_id(parameters, graph_config, revision)
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    parameters['release_type'] = promotion_config.get('release-type', '')
    parameters['release_eta'] = input.get('release_eta', '')
    parameters['release_enable_partners'] = release_enable_partners
    parameters['release_partners'] = input.get('release_partners')
    parameters['release_enable_emefree'] = release_enable_emefree
    parameters['release_product'] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True

    partner_config = input.get('release_partner_config')
    if not partner_config and (release_enable_emefree
                               or release_enable_partners):
        partner_url_config = get_partner_url_config(
            parameters,
            graph_config,
            enable_emefree=release_enable_emefree,
            enable_partners=release_enable_partners)
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)

    if input.get('release_partner_build_number'):
        parameters['release_partner_build_number'] = input[
            'release_partner_build_number']

    if partner_config:
        parameters['release_partner_config'] = fix_partner_config(
            partner_config)

    if input['version']:
        parameters['version'] = input['version']

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
Example #26
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_bad_tests = [
        re.compile(r'Last test finished'),
        re.compile(r'LeakSanitizer'),
        re.compile(r'Main app process exited normally'),
        re.compile(r'ShutdownLeaks'),
        re.compile(r'[(]SimpleTest/TestRunner.js[)]'),
        re.compile(r'automation.py'),
        re.compile(r'https?://localhost:\d+/\d+/\d+/.*[.]html'),
        re.compile(r'jsreftest'),
        re.compile(r'leakcheck'),
        re.compile(r'mozrunner-startup'),
        re.compile(r'pid: '),
        re.compile(r'remoteautomation.py'),
        re.compile(r'unknown test url'),
    ]
    re_extract_tests = [
        re.compile(
            r'"test": "(?:[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ "]+)'
        ),
        re.compile(
            r'"test": "(?:[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ "]+)'
        ),
        re.compile(r'xpcshell-?[^ "]*\.ini:([^ "]+)'),
        re.compile(r'/tests/([^ "]+) - finished .*'),
        re.compile(r'"test": "([^ "]+)"'),
        re.compile(
            r'"message": "Error running command run_test with arguments '
            '[(]<wptrunner[.]wpttest[.]TestharnessTest ([^>]+)>'),
        re.compile(r'"message": "TEST-[^ ]+ [|] ([^ "]+)[^|]*[|]')
    ]

    def munge_test_path(line):
        test_path = None
        for r in re_bad_tests:
            if r.search(line):
                return None
        for r in re_extract_tests:
            m = r.search(line)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' not in artifact or not artifact['name'].endswith(
                'errorsummary.log'):
            continue

        stream = get_artifact(task_id, artifact['name'])
        if not stream:
            continue

        # The number of tasks created is determined by the
        # `times` value and the number of distinct tests and
        # directories as: times * (1 + len(tests) + len(dirs)).
        # Since the maximum value of `times` specifiable in the
        # Treeherder UI is 100, the number of tasks created can
        # reach a very large value depending on the number of
        # unique tests.  During testing, it was found that 10
        # distinct tests were sufficient to cause the action task
        # to exceed the maxRunTime of 1800 seconds resulting in it
        # being aborted.  We limit the number of distinct tests
        # and thereby the number of distinct test directories to a
        # maximum of 5 to keep the action task from timing out.

        for line in stream.read().split('\n'):
            test_path = munge_test_path(line.strip())

            if test_path:
                tests.add(test_path)
                test_dir = os.path.dirname(test_path)
                if test_dir:
                    dirs.add(test_dir)

            if len(tests) > 4:
                break

    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
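To make the comment about task counts concrete, here is the arithmetic it describes (times * (1 + len(tests) + len(dirs))) with the Treeherder maximum and the cap applied above:

times = 100       # maximum value selectable in the Treeherder UI
tests = dirs = 5  # capped above to keep the action task under its maxRunTime
print(times * (1 + tests + dirs))  # 1100 tasks in the worst case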
Example #27
def run(
    task_type,
    release_type,
    try_config=None,
    push=True,
    message="{msg}",
    closed_tree=False,
):
    if task_type == "list":
        print_available_task_types()
        sys.exit(0)

    if release_type == "nightly":
        previous_graph = get_nightly_graph()
    else:
        release = get_releases(RELEASE_TO_BRANCH[release_type])[-1]
        previous_graph = get_release_graph(release)
    existing_tasks = find_existing_tasks([previous_graph])

    previous_parameters = Parameters(
        strict=False, **get_artifact(previous_graph, "public/parameters.yml")
    )

    # Copy L10n configuration from the commit the release we are using was
    # based on. This *should* ensure that the chunking of L10n tasks is the
    # same between graphs.
    files_to_change = {
        path: get_hg_file(previous_parameters, path)
        for path in [
            "browser/locales/l10n-changesets.json",
            "browser/locales/shipped-locales",
        ]
    }

    try_config = try_config or {}
    task_config = {
        "version": 2,
        "parameters": {
            "existing_tasks": existing_tasks,
            "try_task_config": try_config,
            "try_mode": "try_task_config",
        },
    }
    for param in (
        "app_version",
        "build_number",
        "next_version",
        "release_history",
        "release_product",
        "release_type",
        "version",
    ):
        task_config["parameters"][param] = previous_parameters[param]

    try_config["tasks"] = TASK_TYPES[task_type]
    for label in try_config["tasks"]:
        if label in existing_tasks:
            del existing_tasks[label]

    msg = "scriptworker tests: {}".format(task_type)
    return push_to_try(
        "scriptworker",
        message.format(msg=msg),
        push=push,
        closed_tree=closed_tree,
        try_task_config=task_config,
        files_to_change=files_to_change,
    )
Example #28
def release_promotion_action(parameters, graph_config, input, task_group_id, task_id):
    release_promotion_flavor = input["release_promotion_flavor"]
    promotion_config = graph_config["release-promotion"]["flavors"][
        release_promotion_flavor
    ]

    target_tasks_method = promotion_config["target-tasks-method"].format(
        project=parameters["project"]
    )
    rebuild_kinds = input.get("rebuild_kinds") or promotion_config.get(
        "rebuild-kinds", []
    )
    do_not_optimize = input.get("do_not_optimize") or promotion_config.get(
        "do-not-optimize", []
    )

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids`` or ``revision``.
    previous_graph_ids = input.get("previous_graph_ids")
    if not previous_graph_ids:
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters["existing_tasks"] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds
    )
    parameters["do_not_optimize"] = do_not_optimize
    parameters["target_tasks_method"] = target_tasks_method
    parameters["build_number"] = int(input["build_number"])
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters["optimize_target_tasks"] = True
    parameters["shipping_phase"] = input["release_promotion_flavor"]

    version_in_file = read_version_file()
    parameters["version"] = (
        input["version"] if input.get("version") else read_version_file()
    )
    version_string = parameters["version"]
    if version_string != version_in_file:
        raise ValueError(
            "Version given in tag ({}) does not match the one in version.txt ({})".format(
                version_string, version_in_file
            )
        )
    parameters["head_tag"] = "v{}".format(version_string)

    parameters["next_version"] = input["next_version"]

    version = FenixVersion.parse(version_string)
    if version.is_beta:
        release_type = "beta"
    elif version.is_release:
        release_type = "release"
    elif version.is_release_candidate:
        release_type = "release"
    else:
        raise ValueError("Unsupported version type: {}".format(version.version_type))
    parameters["release_type"] = release_type
    parameters["tasks_for"] = "action"

    parameters["pull_request_number"] = None

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({"root": graph_config.root_dir}, parameters=parameters)
Example #29
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """
    re_test = re.compile(r'"test": "([^"]+)"')
    re_bad_test = re.compile(r'(Last test finished|'
                             r'Main app process exited normally|'
                             r'[(]SimpleTest/TestRunner.js[)]|'
                             r'remoteautomation.py|'
                             r'unknown test url|'
                             r'https?://localhost:\d+/\d+/\d+/.*[.]html)')
    re_extract_tests = [
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ ]+)'),
        re.compile(r'(?:^[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ ]+)'),
        re.compile(r'xpcshell-[^ ]+\.ini:(.*)'),
    ]

    def munge_test_path(test_path):
        if re_bad_test.search(test_path):
            return None
        for r in re_extract_tests:
            m = r.match(test_path)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if 'name' in artifact and artifact['name'].endswith(
                'errorsummary.log'):
            stream = get_artifact(task_id, artifact['name'])
            if stream:
                # Read all of the content from the stream and split
                # the lines out since on macosx and windows, the first
                # line is empty.
                for line in stream.read().split('\n'):
                    if len(tests) > 4:
                        # The number of tasks created is determined by
                        # the `times` value and the number of distinct
                        # tests and directories as:
                        # times * (1 + len(tests) + len(dirs)).
                        # Since the maximum value of `times`
                        # specifiable in the Treeherder UI is 100, the
                        # number of tasks created can reach a very
                        # large value depending on the number of
                        # unique tests.  During testing, it was found
                        # that 10 distinct tests were sufficient to
                        # cause the action task to exceed the
                        # maxRunTime of 1800 seconds resulting in it
                        # being aborted.  We limit the number of
                        # distinct tests and thereby the number of
                        # distinct test directories to a maximum of 5
                        # to keep the action task from timing out.
                        break
                    line = line.strip()
                    match = re_test.search(line)
                    if match:
                        test_path = munge_test_path(match.group(1))
                        if test_path:
                            tests.add(test_path)
                            test_dir = os.path.dirname(test_path)
                            if test_dir:
                                dirs.add(test_dir)
            break
    return {'dirs': sorted(dirs), 'tests': sorted(tests)}
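

# A minimal usage sketch (not part of the original module): the docstring above
# says the extracted paths are meant to feed MOZHARNESS_TEST_PATHS.  One
# plausible way to wire that up is shown here; `task_definition` and the
# `suite` key are assumptions for illustration only, and `json` is assumed to
# be imported at module level as in the other examples.
def apply_failures_to_task(task_definition, failures, suite='mochitest'):
    env = task_definition.setdefault('payload', {}).setdefault('env', {})
    # Restrict the retriggered run to the failing directories and tests.
    env['MOZHARNESS_TEST_PATHS'] = json.dumps(
        {suite: failures['dirs'] + failures['tests']})
    return task_definition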
示例#30
0
def release_promotion_action(parameters, graph_config, input, task_group_id,
                             task_id):
    release_promotion_flavor = input['release_promotion_flavor']
    promotion_config = graph_config['release-promotion']['flavors'][
        release_promotion_flavor]
    release_history = {}
    product = promotion_config['product']

    next_version = str(input.get('next_version') or '')
    if promotion_config.get('version-bump', False):
        # The input was forced through str() above, hence the 'None' string check
        if next_version in ['', 'None']:
            raise Exception(
                "`next_version` property needs to be provided for `{}` "
                "target.".format(release_promotion_flavor))

    if promotion_config.get('partial-updates', False):
        partial_updates = input.get('partial_updates', {})
        if not partial_updates and release_level(
                parameters['project']) == 'production':
            raise Exception(
                "`partial_updates` property needs to be provided for `{}`"
                "target.".format(release_promotion_flavor))
        balrog_prefix = product.title()
        os.environ['PARTIAL_UPDATES'] = json.dumps(partial_updates)
        release_history = populate_release_history(
            balrog_prefix,
            parameters['project'],
            partial_updates=partial_updates)

    target_tasks_method = promotion_config['target-tasks-method'].format(
        project=parameters['project'])
    rebuild_kinds = input.get('rebuild_kinds',
                              promotion_config.get('rebuild-kinds', []))
    do_not_optimize = input.get('do_not_optimize',
                                promotion_config.get('do-not-optimize', []))

    # make parameters read-write
    parameters = dict(parameters)
    # Build previous_graph_ids from ``previous_graph_ids``, ``pushlog_id``,
    # or ``revision``.
    previous_graph_ids = input.get('previous_graph_ids')
    if not previous_graph_ids:
        revision = input.get('revision')
        if not parameters['pushlog_id']:
            repo_param = '{}head_repository'.format(
                graph_config['project-repo-param-prefix'])
            push_info = find_hg_revision_push_info(
                repository=parameters[repo_param], revision=revision)
            parameters['pushlog_id'] = push_info['pushid']
        previous_graph_ids = [find_decision_task(parameters, graph_config)]

    # Download parameters from the first decision task
    parameters = get_artifact(previous_graph_ids[0], "public/parameters.yml")
    # Download and combine full task graphs from each of the previous_graph_ids.
    # Sometimes previous relpro action tasks will add tasks, like partials,
    # that didn't exist in the first full_task_graph, so combining them is
    # important. The rightmost graph should take precedence in the case of
    # conflicts.
    combined_full_task_graph = {}
    for graph_id in previous_graph_ids:
        full_task_graph = get_artifact(graph_id, "public/full-task-graph.json")
        combined_full_task_graph.update(full_task_graph)
    _, combined_full_task_graph = TaskGraph.from_json(combined_full_task_graph)
    parameters['existing_tasks'] = find_existing_tasks_from_previous_kinds(
        combined_full_task_graph, previous_graph_ids, rebuild_kinds)
    parameters['do_not_optimize'] = do_not_optimize
    parameters['target_tasks_method'] = target_tasks_method
    parameters['build_number'] = int(input['build_number'])
    parameters['next_version'] = next_version
    parameters['release_history'] = release_history
    if promotion_config.get('is-rc'):
        parameters['release_type'] += '-rc'
    parameters['release_eta'] = input.get('release_eta', '')
    parameters['release_product'] = product
    # When doing staging releases on try, we still want to re-use tasks from
    # previous graphs.
    parameters['optimize_target_tasks'] = True

    # Partner/EMEfree repacks are enabled by default when get_partner_url_config()
    # returns a non-null url.  The action input may override this by sending False;
    # it's an error to send True when no url is found.
    partner_url_config = get_partner_url_config(parameters, graph_config)
    release_enable_partners = partner_url_config[
        'release-partner-repack'] is not None
    release_enable_emefree = partner_url_config[
        'release-eme-free-repack'] is not None
    if input.get('release_enable_partners') is False:
        release_enable_partners = False
    elif input.get(
            'release_enable_partners') is True and not release_enable_partners:
        raise Exception(
            "Can't enable partner repacks when no config url found")
    if input.get('release_enable_emefree') is False:
        release_enable_emefree = False
    elif input.get(
            'release_enable_emefree') is True and not release_enable_emefree:
        raise Exception("Can't enable EMEfree when no config url found")
    parameters['release_enable_partners'] = release_enable_partners
    parameters['release_enable_emefree'] = release_enable_emefree

    partner_config = input.get('release_partner_config')
    if not partner_config and (release_enable_emefree
                               or release_enable_partners):
        github_token = get_token(parameters)
        partner_config = get_partner_config(partner_url_config, github_token)
    if partner_config:
        parameters['release_partner_config'] = fix_partner_config(
            partner_config)
    parameters['release_partners'] = input.get('release_partners')
    if input.get('release_partner_build_number'):
        parameters['release_partner_build_number'] = input[
            'release_partner_build_number']

    if input['version']:
        parameters['version'] = input['version']

    parameters['required_signoffs'] = get_required_signoffs(input, parameters)
    parameters['signoff_urls'] = get_signoff_urls(input, parameters)

    # make parameters read-only
    parameters = Parameters(**parameters)

    taskgraph_decision({'root': graph_config.root_dir}, parameters=parameters)
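

# Illustrative sketch (not from the original source): the shape of the action
# input this function reads.  Field names are taken from the input[...] and
# input.get(...) accesses above; every value below is a made-up placeholder.
EXAMPLE_RELEASE_PROMOTION_INPUT = {
    'release_promotion_flavor': 'promote_firefox',  # must name a configured flavor
    'build_number': 1,
    'version': '',                     # optional override of parameters['version']
    'next_version': '',                # required when the flavor does a version bump
    'partial_updates': {},             # required on production when the flavor uses partials
    'release_eta': '',
    'previous_graph_ids': [],          # optional; falls back to pushlog_id/revision lookup
    'revision': '',                    # used only when previous_graph_ids is absent
    'do_not_optimize': [],             # optional; defaults come from the flavor config
    'rebuild_kinds': [],               # optional; defaults come from the flavor config
    'release_partners': None,
    'release_partner_config': None,    # optional; otherwise fetched via get_partner_config()
    'release_partner_build_number': None,
    'release_enable_partners': None,   # tri-state: None keeps the url-based default
    'release_enable_emefree': None,    # tri-state: None keeps the url-based default
}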
示例#31
0
def mochitest_retrigger_action(parameters, input, task_group_id, task_id,
                               task):
    decision_task_id = find_decision_task(parameters)

    full_task_graph = get_artifact(decision_task_id,
                                   "public/full-task-graph.json")
    _, full_task_graph = TaskGraph.from_json(full_task_graph)
    label_to_taskid = get_artifact(decision_task_id,
                                   "public/label-to-taskid.json")

    pre_task = full_task_graph.tasks[task['metadata']['name']]

    # fix up the task's dependencies, similar to what optimization would
    # have done in the decision task
    dependencies = {
        name: label_to_taskid[label]
        for name, label in pre_task.dependencies.iteritems()
    }
    new_task_definition = resolve_task_references(pre_task.label,
                                                  pre_task.task, dependencies)
    new_task_definition.setdefault('dependencies',
                                   []).extend(dependencies.itervalues())

    # we don't want to run the mozharness tests; we want a custom mach command instead
    new_task_definition['payload']['command'] += ['--no-run-tests']

    custom_mach_command = [task['tags']['test-type']]

    # mochitests may specify a flavor
    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
        custom_mach_command += [
            '--keep-open=false', '-f',
            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
        ]

    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
        'ENABLE_E10S', 'true'))
    if not enable_e10s:
        custom_mach_command += ['--disable-e10s']

    custom_mach_command += [
        '--log-tbpl=-',
        '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))
    ]
    if input.get('runUntilFail'):
        custom_mach_command += ['--run-until-failure']
    if input.get('repeat'):
        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]

    # add any custom gecko preferences
    for (key, val) in input.get('preferences', {}).iteritems():
        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]

    custom_mach_command += [input['path']]
    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
        custom_mach_command)

    # update environment
    new_task_definition['payload']['env'].update(input.get('environment', {}))

    # tweak the treeherder symbol
    new_task_definition['extra']['treeherder']['symbol'] += '-custom'

    logging.info("New task definition: %s", new_task_definition)

    # actually create the new task
    new_task_id = slugid()
    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
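
# For illustration (all values made up, assuming task['tags']['test-type'] is
# 'mochitest'): with a plain mochitest task (no MOCHITEST_FLAVOR in the
# environment, e10s left enabled) and
#   input = {'logLevel': 'info', 'runUntilFail': True, 'repeat': 20,
#            'preferences': {'dom.example.pref': 'true'},
#            'path': 'dom/base/test/test_example.html'}
# the CUSTOM_MACH_COMMAND built above would come out roughly as:
#   mochitest --log-tbpl=- --log-tbpl-level=info --run-until-failure
#   --repeat 20 --setpref dom.example.pref=true dom/base/test/test_example.html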
示例#32
0
def get_parameters(decision_task_id):
    return get_artifact(decision_task_id, "public/parameters.yml")
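
# Usage sketch: the decision task id would come from find_decision_task(), as
# in the other examples.  The artifact comes back as a mapping
# (release_promotion_action above indexes the same artifact directly), so a
# caller can do, e.g., get_parameters(decision_task_id)['project'].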
示例#33
0
def get_failures(task_id):
    """Returns a dict containing properties containing a list of
    directories containing test failures and a separate list of
    individual test failures from the errorsummary.log artifact for
    the task.

    Calls the helper function munge_test_path to attempt to find an
    appropriate test path to pass to the task in
    MOZHARNESS_TEST_PATHS.  If no appropriate test path can be
    determined, nothing is returned.
    """

    def re_compile_list(*lst):
        # Ideally we'd just use rb"" literals and avoid the encode, but
        # this file needs to be importable in python2 for now.
        return [re.compile(s.encode("utf-8")) for s in lst]

    re_bad_tests = re_compile_list(
        r"Last test finished",
        r"LeakSanitizer",
        r"Main app process exited normally",
        r"ShutdownLeaks",
        r"[(]SimpleTest/TestRunner.js[)]",
        r"automation.py",
        r"https?://localhost:\d+/\d+/\d+/.*[.]html",
        r"jsreftest",
        r"leakcheck",
        r"mozrunner-startup",
        r"pid: ",
        r"RemoteProcessMonitor",
        r"unknown test url",
    )
    re_extract_tests = re_compile_list(
        r'"test": "(?:[^:]+:)?(?:https?|file):[^ ]+/reftest/tests/([^ "]+)',
        r'"test": "(?:[^:]+:)?(?:https?|file):[^:]+:[0-9]+/tests/([^ "]+)',
        r'xpcshell-?[^ "]*\.ini:([^ "]+)',
        r'/tests/([^ "]+) - finished .*',
        r'"test": "([^ "]+)"',
        r'"message": "Error running command run_test with arguments '
        r"[(]<wptrunner[.]wpttest[.]TestharnessTest ([^>]+)>",
        r'"message": "TEST-[^ ]+ [|] ([^ "]+)[^|]*[|]',
    )

    def munge_test_path(line):
        test_path = None
        for r in re_bad_tests:
            if r.search(line):
                return None
        for r in re_extract_tests:
            m = r.search(line)
            if m:
                test_path = m.group(1)
                break
        return test_path

    dirs = set()
    tests = set()
    artifacts = list_artifacts(task_id)
    for artifact in artifacts:
        if "name" not in artifact or not artifact["name"].endswith("errorsummary.log"):
            continue

        stream = get_artifact(task_id, artifact["name"])
        if not stream:
            continue

        # The number of tasks created is determined by the
        # `times` value and the number of distinct tests and
        # directories as: times * (1 + len(tests) + len(dirs)).
        # Since the maximum value of `times` specifiable in the
        # Treeherder UI is 100, the number of tasks created can
        # reach a very large value depending on the number of
        # unique tests.  During testing, it was found that 10
        # distinct tests were sufficient to cause the action task
        # to exceed the maxRunTime of 1800 seconds resulting in it
        # being aborted.  We limit the number of distinct tests
        # and thereby the number of distinct test directories to a
        # maximum of 5 to keep the action task from timing out.

        # We handle the stream as raw bytes because it may contain invalid
        # UTF-8 characters in portions other than those containing the error
        # messages we're looking for.
        for line in stream.read().split(b"\n"):
            test_path = munge_test_path(line.strip())

            if test_path:
                tests.add(test_path.decode("utf-8"))
                test_dir = os.path.dirname(test_path)
                if test_dir:
                    dirs.add(test_dir.decode("utf-8"))

            if len(tests) > 4:
                break

    return {"dirs": sorted(dirs), "tests": sorted(tests)}