Example 1
    def __init__(self, credentials=None, web_auth=False, dry_run=False):
        '''Initialize the authentication method.'''
        self.dry_run = dry_run

        if dry_run:
            self.queue = taskcluster_client.Queue()

        elif credentials_available():
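            # With no explicit credentials, Queue() is expected to pick up the
            # TASKCLUSTER_CLIENT_ID/TASKCLUSTER_ACCESS_TOKEN env variables
            # that credentials_available() checks for.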
            self.queue = taskcluster_client.Queue()

        elif credentials:
            self.queue = taskcluster_client.Queue({'credentials': credentials})

        elif web_auth:
            # Your browser will open a new tab asking you to authenticate
            # through TaskCluster and then grant access to this script.
            self.queue = taskcluster_client.Queue(
                {'credentials': authenticate()})

        else:
            raise TaskClusterError(
                "Since you're not running in dry run mode, you need to provide "
                "an authentication method:\n"
                " 1) call authenticate() to get credentials and pass them as credentials.\n"
                " 2) set TASKCLUSTER_{CLIENT_ID,ACCESS_TOKEN} as env variables.\n"
                " 3) use web_auth=True to authenticate through your web browser."
            )
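
A minimal usage sketch for the constructor above (TaskClusterManager is a hypothetical host class name, not from the original):

    # Sketch only: pick whichever authentication path fits your setup.
    tc = TaskClusterManager(dry_run=True)     # no credentials needed
    tc = TaskClusterManager(web_auth=True)    # browser-based authentication
    tc = TaskClusterManager(credentials={
        'clientId': '...',                    # placeholder values
        'accessToken': '...',
    })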
Example 2
    def get_docker_task_id(self, properties):
        """Retrieve docker image task Id used by tests on TC.

        To get the task id of the 'desktop-test' docker image as used by desktop tests
        for a given build the Index has to be queried first. When the task for the build
        has been found, check all its dependent tasks for the first appearance of the
        'desktop-test' worker type. Within its payload the task id of the docker task
        can be extracted.

        Bug 1284236 - Not all Taskcluster builds report correctly to the Index.
        To ensure we get a TC build force to Linux64 debug for now.

        :param properties: Properties of the build and necessary resources.
        """
        build_index = 'gecko.v2.{branch}.revision.{rev}.firefox.{platform}-debug'.format(
            branch=properties['branch'],
            rev=properties['revision'],
            platform=properties['platform'],
        )

        try:
            logger.debug(
                'Querying Taskcluster for "desktop-test" docker image for "{}"...'
                .format(properties['branch']))
            build_task_id = taskcluster.Index().findTask(build_index)['taskId']
        except taskcluster.exceptions.TaskclusterFailure:
            raise errors.NotFoundException(
                'Required build not found for TC index', build_index)

        task_id = None
        continuation_token = None
        while not task_id:
            options = {'limit': 5}
            if continuation_token:
                options.update({'continuationToken': continuation_token})

            resp = taskcluster.Queue().listDependentTasks(build_task_id,
                                                          query=options)
            for task in resp['tasks']:
                if task['task'].get('extra',
                                    {}).get('suite',
                                            {}).get('name') == 'firefox-ui':
                    task_id = task['status']['taskId']
                    break

            continuation_token = resp.get('continuationToken')

            if not task_id and not continuation_token:
                raise errors.NotFoundException(
                    'No tests found which use docker image', 'desktop-test')

        task_definition = taskcluster.Queue().task(task_id)

        return task_definition['payload']['image']['taskId']
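
The manual continuationToken loop above is the standard pagination pattern; recent Python clients can also drive it for you (hedged sketch, assuming paginationHandler is supported for this call as it is for listTaskGroup in Example 15):

    # Sketch: the client invokes `handler` once per page of results.
    def handler(page):
        for task in page['tasks']:
            print(task['status']['taskId'], task['task']['metadata']['name'])

    taskcluster.Queue().listDependentTasks(build_task_id,
                                           paginationHandler=handler)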
Example 3
 def get_queue_service(self):
     """
     Configured Queue Service
     """
     return taskcluster.Queue(
         self.build_options('queue/v1')
     )
Example 4
    def __init__(self):
        self.now = datetime.datetime.utcnow()
        self.found_or_created_indexed_tasks = {}

        options = {"rootUrl": os.environ["TASKCLUSTER_PROXY_URL"]}
        self.queue_service = taskcluster.Queue(options)
        self.index_service = taskcluster.Index(options)
Example 5
    def __init__(self,
                 branch,
                 rank,
                 client_id,
                 access_token,
                 log_obj,
                 task_id=None):
        self.rank = rank
        self.log_obj = log_obj

        # Try builds use a different set of credentials which have access to the
        # buildbot-try scope.
        if branch == 'try':
            self.buildbot = 'buildbot-try'
        else:
            self.buildbot = 'buildbot'

        # We can't import taskcluster at the top of the script because it is
        # part of the virtualenv, so import it now. The virtualenv needs to be
        # activated before this point by the mozharness script, or else we won't
        # be able to find this module.
        import taskcluster
        taskcluster.config['credentials']['clientId'] = client_id
        taskcluster.config['credentials']['accessToken'] = access_token
        self.taskcluster_queue = taskcluster.Queue()
        self.task_id = task_id or taskcluster.slugId()
        self.put_file = taskcluster.utils.putFile
Example 6
def download_artifact(artifact_path, task_id, artifact_name):
    if os.path.exists(artifact_path):
        return artifact_path

    # Build artifact public url
    # Use un-authenticated Taskcluster client to avoid taskcluster-proxy rewrite issue
    # https://github.com/taskcluster/taskcluster-proxy/issues/44
    queue = taskcluster.Queue(
        {"rootUrl": "https://firefox-ci-tc.services.mozilla.com"})
    url = queue.buildUrl("getLatestArtifact", task_id, artifact_name)
    logger.debug("Downloading artifact", url=url)

    @tenacity.retry(
        reraise=True,
        wait=tenacity.wait_exponential(multiplier=1, min=16, max=64),
        stop=tenacity.stop_after_attempt(5),
    )
    def perform_download():
        r = requests.get(url, stream=True)
        r.raise_for_status()

        with open(artifact_path, "wb") as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)

        if artifact_path.endswith(".zip") and not is_zipfile(artifact_path):
            raise BadZipFile("File is not a zip file")

    perform_download()
    return artifact_path
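
A hedged usage sketch (the task ID and artifact name are placeholders, not values from the original):

    # Sketch only: download once, reuse the cached copy afterwards.
    path = download_artifact(
        "/tmp/target.zip",              # local destination path
        "REPLACE_WITH_TASK_ID",         # placeholder task id
        "public/build/target.zip",      # placeholder artifact name
    )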
Example 7
def update_task(pulse_data):
    import taskcluster
    from .models import Task

    status = pulse_data["status"]
    run_id = pulse_data["runId"]
    run_obj = next(run for run in status["runs"] if run["runId"] == run_id)
    pool = _get_or_create_pool(status["workerType"])

    defaults = {
        "decision_id": status["taskGroupId"],
        "expires": status["expires"],
        "pool": pool,
        "resolved": run_obj.get("resolved"),
        "started": run_obj.get("started"),
        "state": run_obj["state"],
    }
    task_obj, created = Task.objects.update_or_create(
        task_id=status["taskId"],
        run_id=run_id,
        defaults=defaults,
    )
    if created:
        # `created` field isn't available via pulse, so get it from Taskcluster
        queue_svc = taskcluster.Queue({"rootUrl": settings.TC_ROOT_URL})
        task = queue_svc.task(status["taskId"])
        Task.objects.filter(id=task_obj.id).update(created=task["created"])
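
A hedged sketch of the minimal pulse payload shape that update_task() reads (all values are placeholders, and a configured Django app is assumed):

    pulse_data = {
        "runId": 0,
        "status": {
            "taskId": "REPLACE_WITH_TASK_ID",
            "taskGroupId": "REPLACE_WITH_GROUP_ID",
            "workerType": "b-linux",                  # placeholder pool name
            "expires": "2030-01-01T00:00:00.000Z",
            "runs": [{"runId": 0, "state": "completed",
                      "started": "2030-01-01T00:00:00.000Z",
                      "resolved": "2030-01-01T00:10:00.000Z"}],
        },
    }
    update_task(pulse_data)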
Example 8
def nightly(apks, commit, date_string, is_staging):
    queue = taskcluster.Queue({'baseUrl': 'http://taskcluster/queue/v1'})
    date = arrow.get(date_string)

    task_graph = {}

    build_task_id, build_task = generate_build_task(apks)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]['task'] = queue.task(build_task_id)

    sign_task_id, sign_task = generate_signing_task(build_task_id, apks, date,
                                                    is_staging)
    lib.tasks.schedule_task(queue, sign_task_id, sign_task)

    task_graph[sign_task_id] = {}
    task_graph[sign_task_id]['task'] = queue.task(sign_task_id)

    push_task_id, push_task = generate_push_task(sign_task_id, apks, commit,
                                                 is_staging)
    lib.tasks.schedule_task(queue, push_task_id, push_task)

    task_graph[push_task_id] = {}
    task_graph[push_task_id]['task'] = queue.task(push_task_id)

    print(json.dumps(task_graph, indent=4, separators=(',', ': ')))

    with open('task-graph.json', 'w') as f:
        json.dump(task_graph, f)

    populate_chain_of_trust_required_but_unused_files()
Example 9
def main():
    parser = argparse.ArgumentParser(
        description=
        'Spawn tasks to fetch missing symbols from Microsoft symbol server')

    args = parser.parse_args()
    decision_task_id = os.environ.get('TASK_ID')
    if decision_task_id:
        task_group_id = decision_task_id
        options = {'baseUrl': 'http://taskcluster/queue/v1/'}
    else:
        task_group_id = taskcluster.utils.slugId()
        options = {'credentials': read_tc_auth()}
    now = datetime.datetime.utcnow()
    keys = {
        'task_group_id': task_group_id,
        'task_created': format_timedelta(now),
        'task_deadline': format_timedelta(now, hours=8),
        'artifacts_expires': format_timedelta(now, days=1),
        'date_index': now.strftime('%Y%m%d%H%M%S'),
    }
    try:
        queue = taskcluster.Queue(options)
        fetch_task_id = spawn_task(queue, keys, decision_task_id,
                                   "fetch-task.json")
        keys['fetch_task_id'] = fetch_task_id
        spawn_task(queue, keys, decision_task_id, "upload-task.json")
        print('https://tools.taskcluster.net/task-group-inspector/#/' +
              task_group_id)
    except taskcluster.exceptions.TaskclusterAuthFailure as e:
        print('TaskclusterAuthFailure: {}'.format(e.body), file=sys.stderr)
        raise
Example 10
def raptor(is_staging):
    build_tasks = {}
    signing_tasks = {}
    other_tasks = {}

    mozharness_task_id = fetch_mozharness_task_id()
    gecko_revision = taskcluster.Queue().task(
        mozharness_task_id)['payload']['env']['GECKO_HEAD_REV']

    variant = get_variant('raptor')
    assemble_task_id = taskcluster.slugId()
    build_tasks[assemble_task_id] = BUILDER.craft_assemble_task(variant)
    signing_task_id = taskcluster.slugId()
    signing_tasks[signing_task_id] = BUILDER.craft_raptor_signing_task(
        assemble_task_id, variant, is_staging)

    all_raptor_craft_functions = [
        BUILDER.craft_raptor_tp6m_task(for_suite=i) for i in range(1, 11)
    ] + [
        BUILDER.craft_raptor_speedometer_task,
        BUILDER.craft_raptor_speedometer_power_task,
    ]

    for craft_function in all_raptor_craft_functions:
        args = (signing_task_id, mozharness_task_id, variant, gecko_revision)
        other_tasks[taskcluster.slugId()] = craft_function(
            'armeabi-v7a', *args)
        other_tasks[taskcluster.slugId()] = craft_function('arm64-v8a', *args)
        other_tasks[taskcluster.slugId()] = craft_function(
            'armeabi-v7a', *args, force_run_on_64_bit_device=True)

    return (build_tasks, signing_tasks, other_tasks)
Example 11
def release(version):
    queue = taskcluster.Queue({'baseUrl': 'http://taskcluster/queue/v1'})

    task_graph = {}

    build_task_id, build_task = generate_build_task(version)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]["task"] = queue.task(build_task_id)

    artifacts_info = load_artifacts_manifest()

    for artifact, info in artifacts_info.items():
        beetmover_task_id, beetmover_task = generate_beetmover_task(
            build_task_id, version, artifact, info)
        lib.tasks.schedule_task(queue, beetmover_task_id, beetmover_task)

        task_graph[beetmover_task_id] = {}
        task_graph[beetmover_task_id]["task"] = queue.task(beetmover_task_id)

    print(json.dumps(task_graph, indent=4, separators=(',', ': ')))

    task_graph_path = "task-graph.json"
    with open(task_graph_path, 'w') as f:
        json.dump(task_graph, f)

    populate_chain_of_trust_required_but_unused_files()
Example 12
def raptor(builder, is_staging):
    mozharness_task_id = fetch_mozharness_task_id()
    gecko_revision = taskcluster.Queue({
      'rootUrl': os.environ.get('TASKCLUSTER_PROXY_URL', 'https://taskcluster.net'),
    }).task(mozharness_task_id)['payload']['env']['GECKO_HEAD_REV']

    variant = get_variant('forPerformanceTest', 'geckoNightly')
    build_task = builder.craft_assemble_raptor_task(variant)
    signing_task = builder.craft_raptor_signing_task(build_task['label'], variant, is_staging)

    tasks = [build_task, signing_task]

    for abi in ('armeabi-v7a', 'arm64-v8a'):
        variant_apk = variant.get_apk(abi)
        all_raptor_craft_functions = [
            builder.craft_raptor_tp6m_cold_task(for_suite=i)
                for i in range(1, 28)
            ] + [
                builder.craft_raptor_youtube_playback_task,
            ]
        for craft_function in all_raptor_craft_functions:
            raptor_task = craft_function(
                signing_task['label'], mozharness_task_id, variant_apk, gecko_revision, is_staging
            )
            tasks.append(raptor_task)

    return tasks
Example 13
def raptor(is_staging):
    build_tasks = {}
    signing_tasks = {}
    other_tasks = {}

    geckoview_nightly_version = get_geckoview_versions()
    mozharness_task_id = gecko_revision_for_version(geckoview_nightly_version)
    gecko_revision = taskcluster.Queue().task(mozharness_task_id)['payload']['env']['GECKO_HEAD_REV']

    for variant in [Variant.from_values(abi, False, 'raptor') for abi in ('arm', 'aarch64')]:
        assemble_task_id = taskcluster.slugId()
        build_tasks[assemble_task_id] = BUILDER.craft_assemble_task(variant)
        signing_task_id = taskcluster.slugId()
        signing_tasks[signing_task_id] = BUILDER.craft_raptor_signing_task(
            assemble_task_id, variant, is_staging)

        all_raptor_craft_functions = [
            BUILDER.craft_raptor_tp6m_task(for_suite=i)
            for i in range(1, 11)
        ] + [
            BUILDER.craft_raptor_speedometer_task,
            BUILDER.craft_raptor_speedometer_power_task,
        ]

        for craft_function in all_raptor_craft_functions:
            args = (signing_task_id, mozharness_task_id, variant, gecko_revision)
            other_tasks[taskcluster.slugId()] = craft_function(*args)
            # we also want the arm APK to be tested on 64-bit-devices
            if variant.abi == 'arm':
                other_tasks[taskcluster.slugId()] = craft_function(*args, force_run_on_64_bit_device=True)

    return (build_tasks, signing_tasks, other_tasks)
Example 14
def release(version):
    queue = taskcluster.Queue({'baseUrl': 'http://taskcluster/queue/v1'})

    task_graph = {}
    artifacts_info = [
        info for info in lib.module_definitions.from_gradle()
        if info['shouldPublish']
    ]
    if len(artifacts_info) == 0:
        print("Could not get module names from gradle")
        sys.exit(2)

    build_task_id, build_task = generate_build_task(version, artifacts_info)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]["task"] = queue.task(build_task_id)

    for info in artifacts_info:
        beetmover_task_id, beetmover_task = generate_beetmover_task(
            build_task_id, version, info['artifact'], info['name'])
        lib.tasks.schedule_task(queue, beetmover_task_id, beetmover_task)

        task_graph[beetmover_task_id] = {}
        task_graph[beetmover_task_id]["task"] = queue.task(beetmover_task_id)

    print(json.dumps(task_graph, indent=4, separators=(',', ': ')))

    task_graph_path = "task-graph.json"
    with open(task_graph_path, 'w') as f:
        json.dump(task_graph, f)

    lib.util.populate_chain_of_trust_required_but_unused_files()
Example 15
def main(*task_group_ids):
    for task_group_id in task_group_ids:
        print("https://community-tc.services.mozilla.com/tasks/groups/" +
              task_group_id)
        timings = {}

        def handler(result):
            for task in result["tasks"]:
                name = task["task"]["metadata"]["name"]
                for run in task["status"]["runs"]:
                    resolved = run.get("resolved")
                    if not resolved:
                        print("Not resolved yet:", name)
                        continue
                    key = task["task"]["workerType"]
                    if "WPT" in name:
                        key += " WPT"
                    # fromisoformat doesn’t like the "Z" timezone, [:-1] to remove it
                    timings.setdefault(key, []).append(
                        datetime.datetime.fromisoformat(resolved[:-1]) -
                        datetime.datetime.fromisoformat(run["started"][:-1]))

        queue = taskcluster.Queue(taskcluster.optionsFromEnvironment())
        queue.listTaskGroup(task_group_id, paginationHandler=handler)

        r = lambda d: datetime.timedelta(seconds=round(d.total_seconds()))
        for worker_type, t in sorted(timings.items()):
            print("count {}, total {}, max: {}\t{}\t{}".format(
                len(t),
                r(sum(t[1:], start=t[0])),
                r(max(t)),
                worker_type,
                ' '.join(str(r(s)) for s in t),
            ))
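
A hedged entry-point sketch, assuming the script receives task group IDs as command-line arguments:

    # Sketch: forward CLI arguments to main().
    if __name__ == "__main__":
        import sys
        main(*sys.argv[1:])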
Example 16
def push_artifacts(target, args):
    '''
    Push all artifacts from dependent tasks
    '''
    assert args.task_id is not None, 'Missing task id'

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), 'Missing Docker authentication'

    # Setup skopeo
    skopeo = Skopeo(
        config.docker['registry'],
        config.docker['username'],
        config.docker['password'],
    )

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load current task description to list its dependencies
    logger.info('Loading task status {}'.format(args.task_id))
    task = queue.task(args.task_id)
    nb_deps = len(task['dependencies'])
    assert nb_deps > 0, 'No task dependencies'

    # Load dependencies artifacts
    artifacts = load_artifacts(task, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, skopeo, task_id, artifact_name)

    logger.info('All found artifacts were pushed.')
Example 17
def schedule_phase(name, phase):
    session = flask.g.db.session
    try:
        phase = session.query(Phase) \
            .filter(Release.id == Phase.release_id) \
            .filter(Release.name == name) \
            .filter(Phase.name == phase).one()
    except NoResultFound:
        flask.abort(404)

    if phase.submitted:
        flask.abort(409, 'Already submitted!')

    queue = taskcluster.Queue({
        'credentials': {
            'clientId': os.environ.get('TASKCLUSTER_CLIENT_ID'),
            'accessToken': os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
        },
        'maxRetries': 12
    })
    queue.createTask(phase.task_id, phase.rendered)
    phase.submitted = True
    phase.completed_by = flask.g.userinfo['email']
    phase.completed = datetime.datetime.utcnow()
    if all([ph.submitted for ph in phase.release.phases]):
        phase.release.status = 'shipped'
    session.commit()
    return phase.json
Example 18
 def __init__(self, options):
     cert = options["credentials"].get("certificate")
     if cert and not isinstance(cert, basestring):
         options["credentials"]["certificate"] = json.dumps(cert)
     self.queue = taskcluster.Queue(options)
     self.scheduler = taskcluster.Scheduler(options)
     log.debug("Dict of options: %s", options)
Example 19
def check_task_statuses(task_ids, github_checks_outputter):
    """Verifies whether a set of Taskcluster tasks completed successfully or not.

    Returns 0 if all tasks passed completed successfully, 1 otherwise."""

    queue = taskcluster.Queue({'rootUrl': os.environ['TASKCLUSTER_ROOT_URL']})
    failed_tasks = []
    for task in task_ids:
        status = queue.status(task)
        state = status['status']['state']
        if state == 'failed' or state == 'exception':
            logger.error('Task {0} failed with state "{1}"'.format(
                task, state))
            failed_tasks.append(status)
        elif state != 'completed':
            logger.error('Task {0} had unexpected state "{1}"'.format(
                task, state))
            failed_tasks.append(status)

    if failed_tasks and github_checks_outputter:
        github_checks_outputter.output('Failed tasks:')
        for task in failed_tasks:
            # We need to make an additional call to get the task name.
            task_id = task['status']['taskId']
            task_name = queue.task(task_id)['metadata']['name']
            github_checks_outputter.output(
                '* `{}` failed with status `{}`'.format(
                    task_name, task['status']['state']))
    else:
        logger.info('All tasks completed successfully')
        if github_checks_outputter:
            github_checks_outputter.output('All tasks completed successfully')
    return 1 if failed_tasks else 0
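
A hedged sketch of wiring this check into a CI entry point (passing None skips the GitHub Checks output):

    # Sketch: exit nonzero when any task failed, so CI marks the job red.
    import sys
    sys.exit(check_task_statuses(sys.argv[1:], None))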
Example 20
    def _preprocess_message(self, body, message=None):
        """Download the update manifest by processing the received funsize message."""
        # Filter out messages which do not apply to our expected routing key regex
        if message:
            self.logger.debug('CC routing keys: %s' % message.headers['CC'])
            if not any([
                    self.cc_key_regex.search(key)
                    for key in message.headers['CC']
            ]):
                raise ValueError(
                    'Routing keys do not match. Skipping message.')

        # In case of --push-update-message we already have the wanted manifest
        if 'workerId' not in body:
            return body

        # Retrieve build properties to be used as the manifest
        queue = taskcluster.Queue()
        task_definition = queue.task(body['status']['taskId'])

        manifest = task_definition.get('extra', {}).get('build_props')

        # Fake specific properties so we are backward compatible
        manifest['tree'] = 'release-%s' % manifest['branch']
        manifest['product'] = 'firefox'

        try:
            d = datetime.strptime(body['status']['runs'][-1]['scheduled'],
                                  '%Y-%m-%dT%H:%M:%S.%fZ')
            manifest['buildid'] = d.strftime('%Y%m%d%H%M')
        except (KeyError, IndexError, ValueError):
            pass

        return manifest
Example 21
def _recreate_task(task_id):
    one_year = 365
    queue = taskcluster_client.Queue()
    task = queue.task(task_id)

    LOG.debug("Original task: (Limit 1024 char)")
    LOG.debug(str(json.dumps(task))[:1024])

    # Start updating the task
    task['taskId'] = taskcluster_client.slugId()

    artifacts = task['payload'].get('artifacts', {})
    for artifact, definition in artifacts.iteritems():
        definition['expires'] = taskcluster_client.fromNow('%s days' %
                                                           one_year)

    # https://bugzilla.mozilla.org/show_bug.cgi?id=1190660
    # TC workers create public logs which are 365 days; if the task expiration
    # date is the same or less than that we won't have logs for the task
    task['expires'] = taskcluster_client.fromNow('%s days' % (one_year + 1))
    now = datetime.datetime.utcnow()
    tomorrow = now + datetime.timedelta(hours=24)
    task['created'] = taskcluster_client.stringDate(now)
    task['deadline'] = taskcluster_client.stringDate(tomorrow)

    LOG.debug("Contents of new task: (Limit 1024 char)")
    LOG.debug(str(task)[:1024])

    return task
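
A hedged sketch of submitting the recreated task; note that Queue.createTask() takes the task ID separately from the definition:

    # Sketch only: the slug stored under task['taskId'] must be popped out
    # before the definition is handed to createTask().
    new_task = _recreate_task("REPLACE_WITH_TASK_ID")
    new_task_id = new_task.pop('taskId')
    taskcluster_client.Queue().createTask(new_task_id, new_task)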
Example 22
def release(apks, channel, commit, tag):
    queue = taskcluster.Queue({'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})

    task_graph = {}

    build_task_id, build_task = generate_build_task(apks, tag)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]["task"] = queue.task(build_task_id)

    sign_task_id, sign_task = generate_signing_task(build_task_id, apks, tag)
    lib.tasks.schedule_task(queue, sign_task_id, sign_task)

    task_graph[sign_task_id] = {}
    task_graph[sign_task_id]["task"] = queue.task(sign_task_id)

    push_task_id, push_task = generate_push_task(sign_task_id, apks, channel, commit)
    lib.tasks.schedule_task(queue, push_task_id, push_task)

    task_graph[push_task_id] = {}
    task_graph[push_task_id]["task"] = queue.task(push_task_id)

    print json.dumps(task_graph, indent=4, separators=(',', ': '))

    task_graph_path = "task-graph.json"
    with open(task_graph_path, 'w') as f:
        json.dump(task_graph, f)

    populate_chain_of_trust_required_but_unused_files()
Example 23
def raptor(is_staging):
    build_tasks = {}
    signing_tasks = {}
    other_tasks = {}

    geckoview_nightly_version = get_geckoview_versions()['nightly']
    mozharness_task_id = fetch_mozharness_task_id(geckoview_nightly_version)
    gecko_revision = taskcluster.Queue().task(
        mozharness_task_id)['payload']['env']['GECKO_HEAD_REV']

    for variant in [
            Variant.from_values(abi, False, 'forPerformanceTest')
            for abi in ('aarch64', 'arm')
    ]:
        assemble_task_id = taskcluster.slugId()
        build_tasks[assemble_task_id] = BUILDER.craft_assemble_raptor_task(
            variant)
        signing_task_id = taskcluster.slugId()
        signing_tasks[signing_task_id] = BUILDER.craft_raptor_signing_task(
            assemble_task_id, variant, is_staging)

        all_raptor_craft_functions = [
            BUILDER.craft_raptor_tp6m_cold_task(for_suite=i)
            for i in range(1, 15)
        ]
        for craft_function in all_raptor_craft_functions:
            args = (signing_task_id, mozharness_task_id, variant,
                    gecko_revision)
            other_tasks[taskcluster.slugId()] = craft_function(*args)

    return (build_tasks, signing_tasks, other_tasks)
Example 24
def fetch_actions_json(task_id):
    queue = taskcluster.Queue()
    actions_url = queue.buildUrl("getLatestArtifact", task_id,
                                 'public/actions.json')
    q = requests.get(actions_url)
    q.raise_for_status()
    return q.json()
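
A hedged usage sketch (the decision task ID is a placeholder; actions.json advertises the actions a decision task supports):

    # Sketch: list the action names published by a decision task.
    actions = fetch_actions_json("REPLACE_WITH_DECISION_TASK_ID")
    print([action["name"] for action in actions.get("actions", [])])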
Example 25
def main():
    parser = argparse.ArgumentParser(description='Build and upload binaries')
    parser.add_argument('taskcluster_auth',
                        help='Path to a file containing Taskcluster client '
                        'ID and authentication token as a JSON file in '
                        'the form {"clientId": "...", "accessToken": "..."}')
    parser.add_argument('--tooltool-auth',
                        help='Path to a file containing a tooltool '
                        'authentication token valid for uploading files')
    parser.add_argument(
        '--local-gecko-clone',
        help='Path to a local Gecko clone whose tooltool '
        'manifests will be updated with the newly-built binaries')
    parser.add_argument('--rust-branch',
                        default='stable',
                        help='Revision of the rust repository to use')
    parser.add_argument('--task', help='Use an existing task')

    args = parser.parse_args()
    tc_auth = read_tc_auth(args.taskcluster_auth)
    queue = taskcluster.Queue({'credentials': tc_auth})
    if args.task:
        task_id, initial_wait = args.task, 0
    else:
        task_id, initial_wait = spawn_task(queue, args), 25
    run_id = wait_for_task(queue, task_id, initial_wait)
    for artifact in fetch_artifacts(queue, task_id, run_id):
        manifest = None
        if args.tooltool_auth:
            manifest = upload_to_tooltool(args.tooltool_auth, task_id,
                                          artifact)
        if args.local_gecko_clone and manifest:
            update_manifest(artifact, manifest, args.local_gecko_clone)
Example 26
def release(track, commit, tag):
    queue = taskcluster.Queue({ 'baseUrl': 'http://taskcluster/queue/v1' })

    task_graph = {}

    build_task_id, build_task = generate_build_task(tag)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]["task"] = queue.task(build_task_id)

    sign_task_id, sign_task = generate_signing_task(build_task_id)
    lib.tasks.schedule_task(queue, sign_task_id, sign_task)

    task_graph[sign_task_id] = {}
    task_graph[sign_task_id]["task"] = queue.task(sign_task_id)

    push_task_id, push_task = generate_push_task(sign_task_id, track, commit)
    lib.tasks.schedule_task(queue, push_task_id, push_task)

    task_graph[push_task_id] = {}
    task_graph[push_task_id]["task"] = queue.task(push_task_id)

    print json.dumps(task_graph, indent=4, separators=(',', ': '))

    task_graph_path = "task-graph.json"
    with open(task_graph_path, 'w') as token_file:
        token_file.write(json.dumps(task_graph))
Example 27
def raptor(is_staging):
    build_tasks = {}
    signing_tasks = {}
    other_tasks = {}

    mozharness_task_id = fetch_mozharness_task_id()
    gecko_revision = taskcluster.Queue().task(mozharness_task_id)['payload']['env']['GECKO_HEAD_REV']

    variant = get_variant('forPerformanceTest', 'geckoNightly')
    assemble_task_id = taskcluster.slugId()
    build_tasks[assemble_task_id] = BUILDER.craft_assemble_raptor_task(variant)
    signing_task_id = taskcluster.slugId()
    signing_tasks[signing_task_id] = BUILDER.craft_raptor_signing_task(assemble_task_id, variant, is_staging)

    for abi in ('armeabi-v7a', 'arm64-v8a'):
        variant_apk = variant.get_apk(abi)
        all_raptor_craft_functions = [
            BUILDER.craft_raptor_tp6m_cold_task(for_suite=i)
                for i in range(1, 28)
            ] + [
                BUILDER.craft_raptor_youtube_playback_task,
            ]
        for craft_function in all_raptor_craft_functions:
            args = (signing_task_id, mozharness_task_id, variant_apk, gecko_revision, is_staging)
            other_tasks[taskcluster.slugId()] = craft_function(*args)

    return (build_tasks, signing_tasks, other_tasks)
Example 28
def tc_branches():
    decision_namespace = 'gecko.v2.%s.latest.firefox.decision'

    index = taskcluster.Index()
    queue = taskcluster.Queue()

    result = index.listNamespaces('gecko.v2', dict(limit=1000))

    branches = {
        i['name']: dict(name=i['name'], workerTypes=dict())
        for i in result.get('namespaces', [])
    }

    for branchName, branch in branches.items():

        # decision task might not exist
        try:
            decision_task = index.findTask(decision_namespace % branchName)
            decision_graph = queue.getLatestArtifact(
                decision_task['taskId'], 'public/graph.json')
        except taskcluster.exceptions.TaskclusterRestFailure:
            continue

        for task in decision_graph.get('tasks', []):
            task = task['task']
            task_cache = task.get('payload', dict()).get('cache', dict())

            provisionerId = task.get('provisionerId')
            if provisionerId:
                branch['provisionerId'] = provisionerId

            workerType = task.get('workerType')
            if workerType:
                branch['workerTypes'].setdefault(
                    workerType, dict(name=workerType, caches=[]))

                if len(task_cache) > 0:
                    branch['workerTypes'][workerType]['caches'] = list(set(
                        branch['workerTypes'][workerType]['caches'] +
                        task_cache.keys()
                    ))

    caches_to_skip = current_app.config.get('TASKCLUSTER_CACHES_TO_SKIP', [])

    return [
        rest.TCBranch(
            name=branchName,
            provisionerId=branch.get('provisionerId'),
            workerTypes={
                workerType: rest.TCWorkerType(
                        name=workerType,
                        caches=[
                            cache
                            for cache in branch['workerTypes'][workerType]['caches']
                            if cache not in caches_to_skip
                        ],
                    )
                for workerType in branch['workerTypes']
            })
        for branchName, branch in branches.items()]
Example 29
def release(apks, track, commit, tag):
    queue = taskcluster.Queue({ 'baseUrl': 'http://taskcluster/queue/v1' })

    task_graph = {}

    build_task_id, build_task = generate_build_task(apks, tag)
    lib.tasks.schedule_task(queue, build_task_id, build_task)

    task_graph[build_task_id] = {}
    task_graph[build_task_id]["task"] = queue.task(build_task_id)

    sign_task_id, sign_task = generate_signing_task(build_task_id, apks, tag)
    lib.tasks.schedule_task(queue, sign_task_id, sign_task)

    task_graph[sign_task_id] = {}
    task_graph[sign_task_id]["task"] = queue.task(sign_task_id)

    push_task_id, push_task = generate_push_task(sign_task_id, apks, track, commit)
    lib.tasks.schedule_task(queue, push_task_id, push_task)

    task_graph[push_task_id] = {}
    task_graph[push_task_id]["task"] = queue.task(push_task_id)

    print json.dumps(task_graph, indent=4, separators=(',', ': '))

    task_graph_path = "task-graph.json"
    with open(task_graph_path, 'w') as f:
        json.dump(task_graph, f)

    populate_chain_of_trust_required_but_unused_files()
Example 30
    def __init__(self,
                 root_url,
                 client_id=None,
                 access_token=None,
                 unit_testing_this=False):
        # TODO: remove when the backfill tool's soft launch is complete ->
        if not unit_testing_this:
            raise RuntimeError(
                f"Must not instantiate a real {self.__class__.__name__} instance "
                f"before the backfill tool's soft launch is complete")
        # <- up to here
        options = {'rootUrl': root_url}
        credentials = {}

        if client_id:
            credentials['clientId'] = client_id
        if access_token:
            credentials['accessToken'] = access_token

        # Taskcluster APIs
        self.hooks = taskcluster.Hooks({**options, 'credentials': credentials})

        # Following the least-privilege principle, as the services
        # below don't really need authorization credentials.
        self.queue = taskcluster.Queue(options)
        self.auth = taskcluster.Auth(options)
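
A hedged construction sketch driven by environment variables (TaskclusterModel is a hypothetical name for the class defined above):

    import os

    # Sketch only: same least-privilege split, credentials used by Hooks only.
    model = TaskclusterModel(
        root_url=os.environ["TASKCLUSTER_ROOT_URL"],
        client_id=os.environ.get("TASKCLUSTER_CLIENT_ID"),
        access_token=os.environ.get("TASKCLUSTER_ACCESS_TOKEN"),
        unit_testing_this=True,  # required until the soft launch completes
    )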