# Assumes `import taskcluster` at module level and a DOCKER_IMAGE_ROUTE_HASH_PATTERN
# route template defined elsewhere in the project.
def get_existing_task_in_index(trust_level, folder_name, folder_hash):
    index = taskcluster.Index()

    route = DOCKER_IMAGE_ROUTE_HASH_PATTERN.format(trust_level=trust_level,
                                                   image_name=folder_name,
                                                   hash=folder_hash)

    # str.lstrip strips a *character set*, not a prefix, so lstrip('index.')
    # would also eat any leading 'i'/'n'/'d'/'e'/'x' characters; remove the
    # literal 'index.' prefix instead.
    if route.startswith('index.'):
        route = route[len('index.'):]

    try:
        return index.findTask(route)['taskId']
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        if e.status_code == 404:
            raise ValueError('no task indexed for route %s' % route)
        else:
            raise
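
A minimal usage sketch for the helper above (the trust level, image name, and hash are placeholder values; a real caller would schedule a new image build in the except branch):

try:
    # Placeholder arguments; real values are derived from the image folder.
    task_id = get_existing_task_in_index('1', 'custom-image', 'deadbeef')
    print('reusing cached docker image task %s' % task_id)
except ValueError:
    print('image not indexed yet; a new build task would be scheduled here')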
Example 2
def index_current_task(
    index_path,
    rank=0,
    expires=None,
    data=None,
    root_url=PRODUCTION_TASKCLUSTER_ROOT_URL,
):
    # Use None instead of a mutable {} default, which would be shared
    # across calls.
    if data is None:
        data = {}
    if expires is None:
        expires = datetime.datetime.now() + datetime.timedelta(days=365)

    index_service = taskcluster.Index(get_taskcluster_options())
    index_service.insertTask(
        index_path,
        {
            "data": data,
            "expires": expires,
            "rank": rank,
            "taskId": os.environ["TASK_ID"],
        },
    )
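
A hedged example call for the helper above (the index path is a placeholder namespace, and TASK_ID is assumed to be set by the worker, as the helper requires):

import datetime

# Index the currently running task under a placeholder namespace for 90 days.
index_current_task(
    'project.example.cache.latest',
    rank=0,
    expires=datetime.datetime.now() + datetime.timedelta(days=90),
    data={'note': 'illustrative payload only'},
)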
Example 3
    def __init__(self,
                 *,
                 index_prefix="garbage.servo-decisionlib",
                 task_name_template="%s",
                 worker_type="github-worker",
                 docker_image_cache_expiry="1 year",
                 routes_for_all_subtasks=None,
                 scopes_for_all_subtasks=None):
        self.task_name_template = task_name_template
        self.index_prefix = index_prefix
        self.worker_type = worker_type
        self.docker_image_cache_expiry = docker_image_cache_expiry
        self.routes_for_all_subtasks = routes_for_all_subtasks or []
        self.scopes_for_all_subtasks = scopes_for_all_subtasks or []

        # https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/features#feature-taskclusterproxy
        self.queue_service = taskcluster.Queue(
            options={"baseUrl": "http://taskcluster/queue/v1/"})
        self.index_service = taskcluster.Index(
            options={"baseUrl": "http://taskcluster/index/v1/"})

        self.now = datetime.datetime.utcnow()
        self.found_or_created_indices = {}
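
The per-service baseUrl options above are the legacy taskcluster-proxy style; a sketch of the equivalent rootUrl-based setup used by later snippets on this page (assumes the task runs with the taskcluster-proxy feature enabled, so TASKCLUSTER_PROXY_URL is set):

import os
import taskcluster

# One rootUrl replaces the per-service baseUrl endpoints.
proxy_options = {'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']}
queue_service = taskcluster.Queue(proxy_options)
index_service = taskcluster.Index(proxy_options)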
Example 4
import os
import platform
import sys
import tarfile
import zipfile

import requests
import taskcluster

try:
    from urllib.request import urlretrieve
except ImportError:
    from urllib import urlretrieve

index = taskcluster.Index()
queue = taskcluster.Queue()

taskId = index.findTask('gecko.v2.mozilla-central.' +
                        'latest.firefox.linux64-ccov-opt')['taskId']

# Download artifacts
for name in [
        'target.tar.bz2', 'target.code-coverage-gcno.zip', 'chrome-map.json'
]:
    url = queue.buildUrl('getLatestArtifact', taskId,
                         'public/build/{}'.format(name))
    urlretrieve(url, os.path.join('tools', name))

# Extract the coverage build artifact
with tarfile.open('tools/target.tar.bz2', 'r:bz2') as tar:
    tar.extractall(path='tools')
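
extractall() will follow `../` members outside the destination directory; a defensive sketch of how a caller might guard the extraction (this helper is mine, not part of the original script):

import os
import tarfile

def safe_extract(archive_path, dest='tools', mode='r:bz2'):
    # Refuse archive members that would resolve outside `dest`.
    dest_root = os.path.realpath(dest)
    with tarfile.open(archive_path, mode) as tar:
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest, member.name))
            if target != dest_root and not target.startswith(dest_root + os.sep):
                raise ValueError('blocked path traversal: %s' % member.name)
        tar.extractall(path=dest)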
Example 5
File: cli.py Project: alex/services
def taskcluster_cache(
    namespace='gecko.v2',
    decision_task_namespace='latest.firefox.decision',
):
    """ TODO: add description
    """

    logger.info('Querying taskcluster for list of branches in "%s" '
                'namespace.' % namespace)

    # taskcluster APIs we need to query
    index = taskcluster.Index()
    queue = taskcluster.Queue()

    branches = dict()
    for branch in index.listNamespaces(namespace, dict(limit=1000))\
                       .get('namespaces', []):

        branch_name = branch.get('name')
        if not branch_name:
            logger.error('Name for branch "%s" not found.' % branch)
            continue
        # compose the decision task index path
        branch_decision_task_name = '%s.%s.%s' % (namespace, branch_name,
                                                  decision_task_namespace)

        # fetch decision task from index service
        try:
            branch_decision_task = index.findTask(branch_decision_task_name)
        except taskcluster.exceptions.TaskclusterRestFailure:
            logger.info('Decision task "%s" not found.' %
                        branch_decision_task_name)
            continue

        branches[branch_name] = dict()

        logger.info('Decision task "%s" found.' % branch_decision_task_name)

        # look for all tasks that were scheduled by this decision task;
        # the graph is stored as an artifact in 'public/task-graph.json' or
        # 'public/graph.json'
        branch_tasks = None
        try:
            graph = queue.getLatestArtifact(
                branch_decision_task['taskId'],
                'public/task-graph.json',
            )
            branch_tasks = list(graph.values())
            logger.debug('Tasks for branch "%s" decision task "%s" found in '
                         '"public/task-graph.json" artifact.' % (
                             branch_name,
                             branch_decision_task_name,
                         ))
        except taskcluster.exceptions.TaskclusterRestFailure:
            try:
                graph = queue.getLatestArtifact(
                    branch_decision_task['taskId'],
                    'public/graph.json',
                )
                branch_tasks = graph.get('tasks', [])
                logger.debug(
                    'Tasks for branch "%s" decision task "%s" found in '
                    '"public/graph.json" artifact.' % (
                        branch_name,
                        branch_decision_task_name,
                    ))
            except taskcluster.exceptions.TaskclusterRestFailure:
                logger.error(
                    'Tasks for branch "%s" and its decision task "%s" '
                    'couldn\'t be found.' % (
                        branch_name,
                        branch_decision_task_name,
                    ))
                # we don't throw error but continue and report it in logs
                continue

        # loop through all the tasks and collect caches per worker type
        for branch_task in branch_tasks:

            task = branch_task.get('task')
            if not task:
                logger.error(
                    'Task for branch_task "%s" of branch "%s" couldn\'t be '
                    'found.' % (
                        branch_task,
                        branch_name,
                    ))
                # we don't throw error but continue and report it in logs
                continue

            provisioner_id = task.get('provisionerId')
            if not provisioner_id:
                logger.error(
                    'provisionerId for task "%s" of branch "%s" couldn\'t be '
                    'found.' % (
                        task,
                        branch_name,
                    ))
                # we don't throw error but continue and report it in logs
                continue

            worker_type = task.get('workerType')
            if not worker_type:
                logger.error(
                    'workerType for task "%s" of branch "%s" couldn\'t be '
                    'found.' % (
                        task,
                        branch_name,
                    ))
                # we don't throw error but continue and report it in logs
                continue

            task_payload = task.get('payload')
            if not task_payload:
                logger.error(
                    'payload for task "%s" of branch "%s" couldn\'t be '
                    'found.' % (
                        task,
                        branch_name,
                    ))
                # we don't throw error but continue and report it in logs
                continue

            task_cache_names = list(task_payload.get('cache', dict()).keys())

            branch_caches_id = '%s/%s' % (provisioner_id, worker_type)
            branch_caches = branches[branch_name].get(branch_caches_id)
            if branch_caches:
                branch_caches['caches'] = list(
                    set(branch_caches['caches'] + task_cache_names))
            else:
                branch_caches = dict(
                    provisioner_id=provisioner_id,
                    workerType=worker_type,
                    caches=task_cache_names,
                )

            branches[branch_name][branch_caches_id] = branch_caches

    return branches
Example 6
def __init__(self, cache_dir, log=None, skip_cache=False):
    CacheManager.__init__(self, cache_dir, 'artifact_url', MAX_CACHED_TASKS,
                          log=log, skip_cache=skip_cache)
    self._index = taskcluster.Index()
    self._queue = taskcluster.Queue()
Example 7

# Assumes the module-level imports shown in Example 4 (os, platform, sys,
# tarfile, zipfile, requests, taskcluster, urlretrieve) plus `import subprocess`
# and the project's codecoverage / get_github_release_url /
# get_taskcluster_options helpers.
def download_artifacts(revision=None):
    # Create the 'tools/' directory if it doesn't exist
    if not os.path.exists('tools'):
        os.makedirs('tools')

    options = get_taskcluster_options()
    index = taskcluster.Index(options)
    queue = taskcluster.Queue(options)

    if revision is None:
        taskId = index.findTask('gecko.v2.mozilla-central.' +
                                'latest.firefox.linux64-ccov-debug')['taskId']
        r = requests.get(
            'https://queue.taskcluster.net/v1/task/{}'.format(taskId))
        task_data = r.json()
        revision = task_data['payload']['env']['GECKO_HEAD_REV']
    else:
        r = requests.get(
            'https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.revision.{}.firefox.linux64-debug'
            .format(revision))
        task_data = r.json()
        taskId = task_data['taskId']

    # Download artifacts
    for name in [
            'target.tar.bz2', 'target.code-coverage-gcno.zip',
            'chrome-map.json', 'target.common.tests.tar.gz'
    ]:
        url = queue.buildUrl('getLatestArtifact', taskId,
                             'public/build/{}'.format(name))
        print('Downloading {}...'.format(url))
        urlretrieve(url, os.path.join('tools', name))

    # Geckodriver base url for the latest version
    download_url, tag_name = get_github_release_url('mozilla/geckodriver')
    geckodriver_url = download_url + tag_name + '/geckodriver-' + tag_name + '-'

    # Grcov latest version base url
    download_url, tag_name = get_github_release_url('marco-c/grcov')
    grcov_url = download_url + tag_name

    # OS information for correct geckodriver version
    bitness = platform.architecture()[0]

    # Complete urls according to platforms
    if sys.platform.startswith('linux'):
        grcov_url += '/grcov-linux-x86_64.tar.bz2'
        if bitness == '64bit':
            version = 'linux64.tar.gz'
        else:
            version = 'linux32.tar.gz'
    elif sys.platform.startswith('darwin'):
        grcov_url += '/grcov-osx-x86_64.tar.bz2'
        version = 'macos.tar.gz'
    elif sys.platform.startswith('cygwin') or sys.platform.startswith('win32'):
        grcov_url += '/grcov-win-x86_64.tar.bz2'
        if bitness == '64bit':
            version = 'win64.zip'
        else:
            version = 'win32.zip'
    else:
        # Guard against `version` being unbound further down.
        raise RuntimeError('unsupported platform: %s' % sys.platform)

    # Download geckodriver
    geckodriver_archive = os.path.join('tools', version)
    geckodriver_url += version
    print('Downloading {}...'.format(geckodriver_url))
    urlretrieve(geckodriver_url, geckodriver_archive)

    # Download grcov
    grcov_archive = os.path.join('tools', 'grcov.tar.bz2')
    print('Downloading {}...'.format(grcov_url))
    urlretrieve(grcov_url, grcov_archive)

    # Extract and delete archives for artifacts
    for filename in [
            'tools/target.code-coverage-gcno.zip', 'tools/target.tar.bz2',
            geckodriver_archive, grcov_archive,
            'tools/target.common.tests.tar.gz'
    ]:
        if filename.endswith('zip'):
            with zipfile.ZipFile(filename, 'r') as zip_ref:
                zip_ref.extractall(path='tools')
        elif filename.endswith('tar.gz') or filename.endswith('tar.bz2'):
            if filename.endswith('tar.gz'):
                mode = 'r:gz'
            else:
                mode = 'r:bz2'
            with tarfile.open(filename, mode) as tar:
                tar.extractall(path='tools')
        os.remove(filename)

    # Download Firefox coverage report
    print('Downloading coverage artifacts...')
    codecoverage.download_coverage_artifacts(taskId, None, None,
                                             'ccov-artifacts')
    print('Generating report...')
    codecoverage.generate_report('tools/grcov', 'coveralls+',
                                 'tests_report.json', 'ccov-artifacts')

    # Download genhtml
    print('Downloading genhtml...')
    codecoverage.download_genhtml()

    # Clone if the repository doesn't exist yet. Otherwise, update.
    print('Cloning/Updating mozilla-central repository...')
    if os.path.isdir('mozilla-central'):
        os.chdir('mozilla-central')
        subprocess.call([
            'hg', 'pull', '--rev', revision,
            'https://hg.mozilla.org/mozilla-central/'
        ])
        subprocess.call(['hg', 'update', '--rev', revision])
    else:
        subprocess.call([
            'hg', 'clone', 'https://hg.mozilla.org/mozilla-central/', '--rev',
            revision
        ])
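
The helper get_github_release_url is not shown on this page; a hedged sketch of what it might look like against the GitHub releases API, with the name and return shape inferred from the call sites above:

import requests

def get_github_release_url(repo):
    # Return (download_url_prefix, tag_name) for the latest release of `repo`.
    r = requests.get(
        'https://api.github.com/repos/{}/releases/latest'.format(repo))
    tag_name = r.json()['tag_name']
    download_url = 'https://github.com/{}/releases/download/'.format(repo)
    return download_url, tag_name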
Example 8
def fetch_mozharness_task_id(geckoview_beta_version):
    raptor_index = 'gecko.v2.mozilla-beta.geckoview-version.{}.mobile.android-x86_64-beta-opt'.format(
        geckoview_beta_version)
    return taskcluster.Index().findTask(raptor_index)['taskId']
Example 9
def fetch_mozharness_task_id():
    # We now want to use the latest available raptor
    raptor_index = 'gecko.v2.mozilla-central.nightly.latest.mobile.android-x86_64-opt'
    return taskcluster.Index().findTask(raptor_index)['taskId']
Example 10
def __init__(self, fetch_config):
    InfoFetcher.__init__(self, fetch_config)
    options = fetch_config.tk_options()
    self.index = taskcluster.Index(options)
    self.queue = taskcluster.Queue(options)
    self.jpushes = JsonPushes(branch=fetch_config.inbound_branch)
Example 11
def gecko_revision_for_version(geckoview_nightly_version):
    # Note: despite the name, this returns the taskId of the indexed build task.
    raptor_index = 'gecko.v2.mozilla-central.geckoview-version.{}.mobile.android-x86_64-opt'.format(
        geckoview_nightly_version)
    return taskcluster.Index().findTask(raptor_index)['taskId']
Example 12
    def find_build_info(self, push):
        """
        Find build info for an integration build, given a Push, a changeset or a
        date/datetime.

        If `push` is not an instance of Push (e.g. it is a date, datetime, or
        a string representing the changeset), a query to json pushes will be
        made.

        Return a :class:`IntegrationBuildInfo` instance.
        """
        if not isinstance(push, Push):
            try:
                push = self.jpushes.push(push)
            except MozRegressionError as exc:
                raise BuildInfoNotFound(str(exc))

        changeset = push.changeset

        try:
            # taskcluster builds have two possible root urls: we switched
            # from taskcluster.net -> firefox-ci-tc.services.mozilla.com
            # around November 9, 2019. To make things faster, we'll iterate
            # through them based on the one that most likely applies to this
            # push
            possible_tc_root_urls = [TC_ROOT_URL, OLD_TC_ROOT_URL]
            if push.utc_date < TC_ROOT_URL_MIGRATION_FLAG_DATE:
                possible_tc_root_urls.reverse()

            task_id = None
            status = None
            for tc_root_url in possible_tc_root_urls:
                LOG.debug("using taskcluster root url %s" % tc_root_url)
                options = self.fetch_config.tk_options(tc_root_url)
                tc_index = taskcluster.Index(options)
                tc_queue = taskcluster.Queue(options)
                tk_routes = self.fetch_config.tk_routes(push)
                stored_failure = None
                for tk_route in tk_routes:
                    LOG.debug("using taskcluster route %r" % tk_route)
                    try:
                        task_id = tc_index.findTask(tk_route)["taskId"]
                    except TaskclusterFailure as ex:
                        LOG.debug("nothing found via route %r" % tk_route)
                        stored_failure = ex
                        continue
                    if task_id:
                        status = tc_queue.status(task_id)["status"]
                        break
                if status:
                    break
            if not task_id:
                raise stored_failure
        except TaskclusterFailure:
            raise BuildInfoNotFound("Unable to find build info using the"
                                    " taskcluster route %r" %
                                    self.fetch_config.tk_route(push))

        # find a completed run for that task
        run_id, build_date = None, None
        for run in reversed(status["runs"]):
            if run["state"] == "completed":
                run_id = run["runId"]
                build_date = datetime.strptime(run["resolved"],
                                               "%Y-%m-%dT%H:%M:%S.%fZ")
                break

        if run_id is None:
            raise BuildInfoNotFound(
                "Unable to find completed runs for task %s" % task_id)
        artifacts = tc_queue.listArtifacts(task_id, run_id)["artifacts"]

        # look over the artifacts of that run
        build_url = None
        for a in artifacts:
            name = os.path.basename(a["name"])
            if self.build_regex.search(name):
                meth = tc_queue.buildUrl
                if self.fetch_config.tk_needs_auth():
                    meth = tc_queue.buildSignedUrl
                build_url = meth("getArtifact", task_id, run_id, a["name"])
                break
        if build_url is None:
            raise BuildInfoNotFound("unable to find a build url for the"
                                    " changeset %r" % changeset)
        return IntegrationBuildInfo(
            self.fetch_config,
            build_url=build_url,
            build_date=build_date,
            changeset=changeset,
            repo_url=self.jpushes.repo_url,
            task_id=task_id,
        )
Example 13
def tc_branches():
    decision_namespace = 'gecko.v2.{branchName}.latest.firefox.decision'

    index = taskcluster.Index()
    queue = taskcluster.Queue()

    result = index.listNamespaces('gecko.v2', {'limit': 1000})

    branches = {
        i['name']: {
            'name': i['name'],
            'workerTypes': {}
        }
        for i in result.get('namespaces', [])
    }

    for branchName, branch in branches.items():

        # decision task might not exist
        try:
            decision_task = index.findTask(
                decision_namespace.format(branchName=branchName))
            decision_graph = queue.getLatestArtifact(decision_task['taskId'],
                                                     'public/graph.json')
        except taskcluster.exceptions.TaskclusterRestFailure:
            continue

        for task in decision_graph.get('tasks', []):
            task = task['task']
            task_cache = task.get('payload', {}).get('cache', {})

            provisionerId = task.get('provisionerId')
            if provisionerId:
                branch['provisionerId'] = provisionerId

            workerType = task.get('workerType')
            if workerType:
                branch['workerTypes'].setdefault(
                    workerType, dict(name=workerType, caches=[]))

                if len(task_cache) > 0:
                    # dict.keys() must be converted to a list before being
                    # concatenated with a list in Python 3.
                    branch['workerTypes'][workerType]['caches'] = list(
                        set(branch['workerTypes'][workerType]['caches'] +
                            list(task_cache.keys())))

    caches_to_skip = current_app.config.get('TASKCLUSTER_CACHES_TO_SKIP', [])

    return [{
        'name': branchName,
        'provisionerId': branch.get('provisionerId') or '',
        'workerTypes': {
            # Use the loop variable as the key; the original string literal
            # 'workerType' would make every iteration overwrite one entry.
            workerType: {
                'name': workerType,
                'caches': [
                    cache
                    for cache in branch['workerTypes'][workerType]['caches']
                    if cache not in caches_to_skip
                ],
            }
            for workerType in branch['workerTypes']
        }
    } for branchName, branch in branches.items()]
Example 14
def fetch_mozharness_task_id():
    # We now want to use the latest available raptor
    raptor_index = 'gecko.v2.mozilla-central.nightly.latest.mobile.android-x86_64-opt'
    return taskcluster.Index({
        'rootUrl': os.environ.get('TASKCLUSTER_PROXY_URL', 'https://taskcluster.net'),
    }).findTask(raptor_index)['taskId']
Example 15
def find_decision_task_id(project, revision):
    decision_task_route = 'gecko.v2.{project}.revision.{revision}.firefox.decision'.format(
        project=project, revision=revision)
    index = taskcluster.Index()
    return index.findTask(decision_task_route)['taskId']
Example 16
def _ensure_taskcluster_connection(self, taskcluster_url):
    index = taskcluster.Index(options={'baseUrl': taskcluster_url})
    # This will raise a subclass of TaskclusterFailure if things go wrong.
    index.ping()
Example 17
def find_task_id_from_route(route):
    index = taskcluster.Index()
    return index.findTask(route)["taskId"]
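
A short usage sketch (the route below is one of the index paths seen elsewhere on this page, not guaranteed to resolve; assumes `import taskcluster` at module level):

task_id = find_task_id_from_route(
    'gecko.v2.mozilla-central.latest.firefox.decision')
# Fetch the resolved task's definition from the queue.
task = taskcluster.Queue().task(task_id)
print(task['metadata']['name'])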
Example 18
import os
import urllib.request

import taskcluster
import yaml
from cib import createTask, diskImageManifestHasChanged, machineImageManifestHasChanged, machineImageExists
from azure.identity import ClientSecretCredential
from azure.mgmt.compute import ComputeManagementClient


def extract_pools(config_path):
    # Close the config file instead of leaking the handle, and return a list
    # rather than a lazy map object.
    with open(config_path, 'r') as config_file:
        config = yaml.safe_load(config_file)
    return ['{}/{}'.format(p['domain'], p['variant'])
            for p in config['manager']['pool']]


taskclusterOptions = { 'rootUrl': os.environ['TASKCLUSTER_PROXY_URL'] }

auth = taskcluster.Auth(taskclusterOptions)
queue = taskcluster.Queue(taskclusterOptions)
index = taskcluster.Index(taskcluster.optionsFromEnvironment())
secrets = taskcluster.Secrets(taskclusterOptions)

secret = secrets.get('project/relops/image-builder/dev')['secret']

azureDeployment = 'azure_gamma'  # if 'stage.taskcluster.nonprod' in os.environ['TASKCLUSTER_ROOT_URL'] else 'azure_alpha'
platformClient = {
    'azure': ComputeManagementClient(
        ClientSecretCredential(
            tenant_id=secret[azureDeployment]['tenant_id'],
            client_id=secret[azureDeployment]['app_id'],
            client_secret=secret[azureDeployment]['password']),
        secret[azureDeployment]['subscription_id'])
}

commitSha = os.getenv('GITHUB_HEAD_SHA')
Example 19
#!/usr/bin/python

import taskcluster
import os

index = taskcluster.Index({'baseUrl': 'http://taskcluster/index'})
queue = taskcluster.Queue()

# TESTING
ns = index.listNamespaces('project.nss-nspr.canary-harvester-test', {})
print('namespaces: ' + str(ns))

now = taskcluster.fromNow("0 seconds").strftime('%Y-%m-%d-%H-%M-%S')
taskId = os.environ["TASK_ID"]
namespace = "index.project.nss-nspr.canary-harvester-test." + now
task = queue.task(taskId)
print(str(task))
payload = {
    "taskId": taskId,
    "rank": 0,
    "data": {
        "desc": "canary harvester test"
    },
    "expires": task["expires"]
}
#index.insertTask(namespace, payload)
Example 20

import os
import slugid
import taskcluster
import yaml
from cib import createTask, diskImageManifestHasChanged, machineImageManifestHasChanged, machineImageExists
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient

taskclusterOptions = {'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']}

auth = taskcluster.Auth(taskclusterOptions)
queue = taskcluster.Queue(taskclusterOptions)
index = taskcluster.Index(taskclusterOptions)
secrets = taskcluster.Secrets(taskclusterOptions)

secret = secrets.get('project/relops/image-builder/dev')['secret']

platformClient = {
    'azure':
    ComputeManagementClient(
        ServicePrincipalCredentials(client_id=secret['azure']['id'],
                                    secret=secret['azure']['key'],
                                    tenant=secret['azure']['account']),
        secret['azure']['subscription'])
}

commitSha = os.getenv('GITHUB_HEAD_SHA')
taskGroupId = os.getenv('TASK_ID')
print('debug: auth.currentScopes')
print(auth.currentScopes())
azurePurgeTaskId = slugid.nice()
Example 21
def find_decision_task_id(project, revision):
    decision_task_route = '{trust_domain}.v2.{project}.revision.{revision}.taskgraph.decision'.format(
        trust_domain=get_trust_domain(project), project=project, revision=revision)
    index = taskcluster.Index()
    return index.findTask(decision_task_route)['taskId']
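
Many of the snippets above treat an HTTP 404 from findTask as "not indexed yet"; a generic helper consolidating that pattern (the name is mine, not from any of the projects above; assumes `import taskcluster`):

def find_task_id_or_none(index, route):
    # Return the indexed taskId, or None when the route has no task (HTTP 404).
    try:
        return index.findTask(route)['taskId']
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        if e.status_code == 404:
            return None
        raise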