Example #1: cache_task
def cache_task(config, tasks):
    if taskgraph.fast:
        for task in tasks:
            yield task
        return

    digests = {}
    for task in config.kind_dependencies_tasks.values():
        if 'cached_task' in task.attributes:
            digests[task.label] = format_task_digest(task.attributes['cached_task'])

    for task in order_tasks(config, tasks):
        cache = task.pop('cache', None)
        if cache is None:
            yield task
            continue

        dependency_digests = []
        for p in task.get('dependencies', {}).values():
            if p in digests:
                dependency_digests.append(digests[p])
            else:
                raise Exception('Cached task {} has uncached parent task: {}'.format(
                    task['label'], p))
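        # Sort the parent digests so the combined digest does not depend on
        # dependency iteration order.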
        digest_data = cache['digest-data'] + sorted(dependency_digests)
        add_optimization(config, task, cache_type=cache['type'],
                         cache_name=cache['name'], digest_data=digest_data)
        digests[task['label']] = format_task_digest(task['attributes']['cached_task'])

        yield task
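
Every example on this page eventually hands its cache parameters to add_optimization, whose definition is not shown here. The following is a minimal sketch of what the call sites imply, assuming the helper turns cache_type, cache_name and a digest into an index route plus an index-search optimization; the TARGET_CACHE_INDEX layout, the optimization shape and the cached_task attribute below are inferences from the surrounding code, not a verified implementation.

import hashlib
import json


# Hypothetical index layout, inferred from the keyword arguments used above.
TARGET_CACHE_INDEX = 'gecko.cache.level-{level}.{type}.{name}.hash.{digest}'


def add_optimization(config, taskdesc, cache_type, cache_name,
                     digest=None, digest_data=None):
    # Sketch: register a cache index route and an index-search optimization.
    if (digest is None) == (digest_data is None):
        raise Exception('exactly one of digest and digest_data must be provided')
    if digest is None:
        # Collapse the digest inputs into a single stable hash.
        digest = hashlib.sha256(json.dumps(digest_data).encode('utf-8')).hexdigest()
    route = TARGET_CACHE_INDEX.format(
        level=config.params['level'],
        type=cache_type,
        name=cache_name,
        digest=digest,
    )
    # Dependent runs can look the cached artifact up through this route...
    taskdesc.setdefault('routes', []).append('index.' + route)
    # ...and the optimizer can replace the task when the index entry exists.
    taskdesc['optimization'] = {'index-search': [route]}
    # cache_task() above reads this attribute to chain parent digests.
    taskdesc.setdefault('attributes', {})['cached_task'] = {
        'type': cache_type,
        'name': cache_name,
        'digest': digest,
    }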
Example #2: windows_toolchain
def windows_toolchain(config, job, taskdesc):
    run = job['run']

    worker = taskdesc['worker']

    worker['artifacts'] = [{
        'path': r'public\build',
        'type': 'directory',
    }]
    worker['chain-of-trust'] = True

    docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)

    env = worker['env']
    env.update({
        'MOZ_BUILD_DATE': config.params['moz_build_date'],
        'MOZ_SCM_LEVEL': config.params['level'],
        'MOZ_AUTOMATION': '1',
    })

    hg = r'c:\Program Files\Mercurial\hg.exe'
    hg_command = ['"{}"'.format(hg)]
    hg_command.append('robustcheckout')
    hg_command.extend(['--sharebase', 'y:\\hg-shared'])
    hg_command.append('--purge')
    hg_command.extend(['--upstream', 'https://hg.mozilla.org/mozilla-unified'])
    hg_command.extend(['--revision', '%GECKO_HEAD_REV%'])
    hg_command.append('%GECKO_HEAD_REPOSITORY%')
    hg_command.append('.\\build\\src')

    # Python scripts would need `mach` to make in-tree libraries available;
    # that isn't supported on Windows yet.
    if run['script'].endswith('.py'):
        raise NotImplementedError("Python scripts don't work on Windows")

    args = run.get('arguments', '')
    if args:
        args = ' ' + shell_quote(*args)

    bash = r'c:\mozilla-build\msys\bin\bash'
    worker['command'] = [
        ' '.join(hg_command),
        # Run the toolchain script from the checkout under msys bash.
        r'{} build/src/taskcluster/scripts/misc/{}{}'.format(
            bash, run['script'], args)
    ]

    attributes = taskdesc.setdefault('attributes', {})
    attributes['toolchain-artifact'] = run['toolchain-artifact']
    if 'toolchain-alias' in run:
        attributes['toolchain-alias'] = run['toolchain-alias']

    if not taskgraph.fast:
        name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
        add_optimization(
            config,
            taskdesc,
            cache_type=CACHE_TYPE,
            cache_name=name,
            digest_data=get_digest_data(config, run, taskdesc),
        )
Example #3: windows_toolchain
def windows_toolchain(config, job, taskdesc):
    run = job['run']

    worker = taskdesc['worker']

    worker['artifacts'] = [{
        'path': r'public\build',
        'type': 'directory',
    }]
    worker['chain-of-trust'] = True

    docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)

    env = worker['env']
    env.update({
        'MOZ_BUILD_DATE': config.params['moz_build_date'],
        'MOZ_SCM_LEVEL': config.params['level'],
        'MOZ_AUTOMATION': '1',
    })

    hg = r'c:\Program Files\Mercurial\hg.exe'
    hg_command = ['"{}"'.format(hg)]
    hg_command.append('robustcheckout')
    hg_command.extend(['--sharebase', 'y:\\hg-shared'])
    hg_command.append('--purge')
    hg_command.extend(['--upstream', 'https://hg.mozilla.org/mozilla-unified'])
    hg_command.extend(['--revision', '%GECKO_HEAD_REV%'])
    hg_command.append('%GECKO_HEAD_REPOSITORY%')
    hg_command.append('.\\build\\src')

    # Python scripts would need `mach` to make in-tree libraries available;
    # that isn't supported on Windows yet.
    if run['script'].endswith('.py'):
        raise NotImplementedError("Python scripts don't work on Windows")

    args = run.get('arguments', '')
    if args:
        args = ' ' + shell_quote(*args)

    bash = r'c:\mozilla-build\msys\bin\bash'
    worker['command'] = [
        ' '.join(hg_command),
        # Run the toolchain script from the checkout under msys bash.
        r'{} build/src/taskcluster/scripts/misc/{}{}'.format(
            bash, run['script'], args)
    ]

    attributes = taskdesc.setdefault('attributes', {})
    attributes['toolchain-artifact'] = run['toolchain-artifact']
    if 'toolchain-alias' in run:
        attributes['toolchain-alias'] = run['toolchain-alias']

    if not taskgraph.fast:
        name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
        add_optimization(
            config, taskdesc,
            cache_type=CACHE_TYPE,
            cache_name=name,
            digest_data=get_digest_data(config, run, taskdesc),
        )
Example #4: windows_toolchain
def windows_toolchain(config, job, taskdesc):
    run = job['run']

    worker = taskdesc['worker']

    worker['artifacts'] = [{
        'path': r'public\build',
        'type': 'directory',
    }]
    worker['chain-of-trust'] = True

    support_vcs_checkout(config, job, taskdesc)

    env = worker['env']
    env.update({
        'MOZ_BUILD_DATE': config.params['moz_build_date'],
        'MOZ_SCM_LEVEL': config.params['level'],
        'MOZ_AUTOMATION': '1',
    })

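    # generic_worker_hg_commands evidently returns a list of command strings;
    # only the first (the robustcheckout invocation) is needed here, replacing
    # the hand-built hg_command of the earlier variants.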
    hg_command = generic_worker_hg_commands(
        'https://hg.mozilla.org/mozilla-unified',
        env['GECKO_HEAD_REPOSITORY'],
        env['GECKO_HEAD_REV'],
        r'.\build\src')[0]

    # Python scripts would need `mach` to make in-tree libraries available;
    # that isn't supported on Windows yet.
    if run['script'].endswith('.py'):
        raise NotImplementedError("Python scripts don't work on Windows")

    args = run.get('arguments', '')
    if args:
        args = ' ' + shell_quote(*args)

    bash = r'c:\mozilla-build\msys\bin\bash'
    worker['command'] = [
        hg_command,
        # Run the toolchain script from the checkout under msys bash.
        r'{} build/src/taskcluster/scripts/misc/{}{}'.format(
            bash, run['script'], args)
    ]

    attributes = taskdesc.setdefault('attributes', {})
    attributes['toolchain-artifact'] = run['toolchain-artifact']
    if 'toolchain-alias' in run:
        attributes['toolchain-alias'] = run['toolchain-alias']

    if not taskgraph.fast:
        name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
        add_optimization(
            config, taskdesc,
            cache_type=CACHE_TYPE,
            cache_name=name,
            digest_data=get_digest_data(config, run, taskdesc),
        )
Example #5: cache_task
def cache_task(config, tasks):
    if taskgraph.fast:
        for task in tasks:
            yield task
        return

    digests = {}
    for task in config.kind_dependencies_tasks.values():
        if ("cached_task" in task.attributes
                and task.attributes["cached_task"] is not False):
            digests[task.label] = format_task_digest(
                task.attributes["cached_task"])

    for task in order_tasks(config, tasks):
        cache = task.pop("cache", None)
        if cache is None:
            yield task
            continue

        dependency_digests = []
        for p in task.get("dependencies", {}).values():
            if p in digests:
                dependency_digests.append(digests[p])
            else:
                raise Exception(
                    "Cached task {} has uncached parent task: {}".format(
                        task["label"], p))
        digest_data = cache["digest-data"] + sorted(dependency_digests)
        add_optimization(
            config,
            task,
            cache_type=cache["type"],
            cache_name=cache["name"],
            digest_data=digest_data,
        )
        digests[task["label"]] = format_task_digest(
            task["attributes"]["cached_task"])

        yield task
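
cache_task relies on order_tasks to yield cached tasks with parents before children, so that a parent's digest is always registered in digests before a dependent needs it. order_tasks is not shown on this page; below is a minimal sketch of such an ordering, assuming only that each task dict names its dependencies by label, as the loops above do.

def order_tasks(config, tasks):
    # Sketch: yield tasks so that in-graph dependencies come before dependents.
    # config is unused here but kept to match the call site above.
    pending = {task['label']: task for task in tasks}
    while pending:
        progressed = False
        for label in sorted(pending):
            task = pending[label]
            deps = set(task.get('dependencies', {}).values())
            # Ready once no dependency is still waiting in this batch;
            # dependencies on other kinds come from kind_dependencies_tasks.
            if deps & (set(pending) - {label}):
                continue
            del pending[label]
            progressed = True
            yield task
        if not progressed:
            raise Exception('dependency cycle among cached tasks')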
Example #6: fill_template
def fill_template(config, tasks):
    available_packages = {}
    for task in config.kind_dependencies_tasks:
        if task.kind != 'packages':
            continue
        name = task.label.replace('packages-', '')
        for route in task.task.get('routes', []):
            if route.startswith('index.') and '.hash.' in route:
                # Only keep the hash part of the route.
                h = route.rsplit('.', 1)[1]
                assert DIGEST_RE.match(h)
                available_packages[name] = h
                break

    context_hashes = {}

    for task in order_image_tasks(config, tasks):
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        definition = task.pop('definition', image_name)
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if p not in available_packages:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        # Generating the context hash relies on arguments being set, so we
        # set this now, although it's not the final value (it's a
        # task-reference value, see further below). We add the package routes
        # containing a hash to get the overall docker image hash, so changes
        # to packages will be reflected in the docker image hash.
        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)
        if parent:
            args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(
                parent, context_hashes[parent])

        context_path = os.path.join('taskcluster', 'docker', definition)
        context_hash = generate_context_hash(GECKO, context_path, image_name,
                                             args)
        digest_data = [context_hash]
        context_hashes[image_name] = context_hash

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        # We use the in-tree image_builder image to build docker images, but
        # that can't be used to build the image_builder image itself,
        # obviously. So we fall back to the last snapshot of the image that
        # was uploaded to docker hub.
        if image_name == 'image_builder':
            worker['docker-image'] = 'taskcluster/image_builder@sha256:' + \
                '24ce54a1602453bc93515aecd9d4ad25a22115fbc4b209ddb5541377e9a37315'
            # Keep in sync with the Dockerfile used to generate the
            # docker image whose digest is referenced above.
            worker['volumes'] = [
                '/builds/worker/checkouts',
                '/builds/worker/workspace',
            ]
            cache_name = 'imagebuilder-v1'
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
            # Force images built against the in-tree image builder to
            # have a different digest by adding a fixed string to the
            # hashed data.
            digest_data.append('image_builder')

        worker['caches'] = [{
            'type': 'persistent',
            'name': 'level-{}-{}'.format(config.params['level'], cache_name),
            'mount-point': '/builds/worker/checkouts',
        }]

        for k, v in args.items():
            if k == 'DOCKER_IMAGE_PACKAGES':
                worker['env'][k] = {'task-reference': v}
            else:
                worker['env'][k] = v

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)
                digest_data.append(available_packages[p])

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps[parent] = 'build-docker-image-{}'.format(parent)
            worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
                'task-reference': '<{}>'.format(parent)
            }

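        # A single entry means the context hash alone serves as the digest;
        # anything more (package hashes, the image_builder marker) is combined
        # by add_optimization from digest_data.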
        if len(digest_data) > 1:
            kwargs = {'digest_data': digest_data}
        else:
            kwargs = {'digest': digest_data[0]}
        add_optimization(config,
                         taskdesc,
                         cache_type="docker-images.v1",
                         cache_name=image_name,
                         **kwargs)

        yield taskdesc
Example #7: docker_worker_toolchain
def docker_worker_toolchain(config, job, taskdesc):
    run = job['run']

    worker = taskdesc['worker']
    worker['chain-of-trust'] = True

    # Allow the job to specify where artifacts come from, but add
    # public/build if it's not there already.
    artifacts = worker.setdefault('artifacts', [])
    if not any(
            artifact.get('name') == 'public/build' for artifact in artifacts):
        docker_worker_add_public_artifacts(config, job, taskdesc)

    docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)
    support_vcs_checkout(config, job, taskdesc, sparse=True)

    env = worker['env']
    env.update({
        'MOZ_BUILD_DATE': config.params['moz_build_date'],
        'MOZ_SCM_LEVEL': config.params['level'],
        'TOOLS_DISABLE': 'true',
        'MOZ_AUTOMATION': '1',
    })

    if run['tooltool-downloads']:
        internal = run['tooltool-downloads'] == 'internal'
        docker_worker_add_tooltool(config, job, taskdesc, internal=internal)

    # Use `mach` to invoke python scripts so in-tree libraries are available.
    if run['script'].endswith('.py'):
        wrapper = 'workspace/build/src/mach python '
    else:
        wrapper = ''

    args = run.get('arguments', '')
    if args:
        args = ' ' + shell_quote(*args)

    sparse_profile = []
    if run.get('sparse-profile'):
        sparse_profile = [
            '--sparse-profile',
            'build/sparse-profiles/{}'.format(run['sparse-profile'])
        ]

    worker['command'] = [
        '/builds/worker/bin/run-task',
        '--vcs-checkout=/builds/worker/workspace/build/src',
    ] + sparse_profile + [
        '--', 'bash', '-c', 'cd /builds/worker && '
        '{}workspace/build/src/taskcluster/scripts/misc/{}{}'.format(
            wrapper, run['script'], args)
    ]

    attributes = taskdesc.setdefault('attributes', {})
    attributes['toolchain-artifact'] = run['toolchain-artifact']
    if 'toolchain-alias' in run:
        attributes['toolchain-alias'] = run['toolchain-alias']

    if not taskgraph.fast:
        name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
        add_optimization(
            config,
            taskdesc,
            cache_type=CACHE_TYPE,
            cache_name=name,
            digest_data=get_digest_data(config, run, taskdesc),
        )
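
The toolchain variants above delegate their digest to get_digest_data, which is also not shown here. Below is a rough sketch under the assumption that the cache should be invalidated whenever the script or its arguments change; GECKO and _hash_file are stand-ins defined inline for illustration, and the script location is taken from the commands built above.

import hashlib
import os

# Stand-in for the source-checkout root constant the other examples call GECKO.
GECKO = os.environ.get('GECKO_PATH', '.')


def _hash_file(path):
    # Hypothetical helper: hash a file's contents.
    with open(path, 'rb') as fh:
        return hashlib.sha256(fh.read()).hexdigest()


def get_digest_data(config, run, taskdesc):
    # Sketch: digest inputs whose change should produce a new cache entry.
    # The script itself is the primary input; it lives under
    # taskcluster/scripts/misc/ in the commands above.
    data = [_hash_file(os.path.join(GECKO, 'taskcluster', 'scripts', 'misc',
                                    run['script']))]
    # Arguments change what gets built, so they become part of the digest too.
    data.extend(run.get('arguments', []))
    return data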
Example #8: docker_worker_debian_package
def docker_worker_debian_package(config, job, taskdesc):
    run = job['run']

    name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)

    docker_repo = 'debian'
    arch = run.get('arch', 'amd64')
    if arch != 'amd64':
        docker_repo = '{}/{}'.format(arch, docker_repo)

    worker = taskdesc['worker']
    worker['artifacts'] = []
    worker['docker-image'] = '{repo}:{dist}-{date}'.format(
        repo=docker_repo, dist=run['dist'], date=run['snapshot'][:8])

    add_artifacts(config, job, taskdesc, path='/tmp/artifacts')

    env = worker.setdefault('env', {})
    env['DEBFULLNAME'] = 'Mozilla build team'
    env['DEBEMAIL'] = '*****@*****.**'

    if 'dsc' in run:
        src = run['dsc']
        unpack = 'dpkg-source -x {src_file} {package}'
        package_re = DSC_PACKAGE_RE
    elif 'tarball' in run:
        src = run['tarball']
        unpack = ('mkdir {package} && '
                  'tar -C {package} -axf {src_file} --strip-components=1')
        package_re = SOURCE_PACKAGE_RE
    else:
        raise RuntimeError('Unreachable')
    src_url = src['url']
    src_file = os.path.basename(src_url)
    src_sha256 = src['sha256']
    package = package_re.match(src_file).group(0)
    unpack = unpack.format(src_file=src_file, package=package)

    base_deps = [
        'apt-utils',
        'build-essential',
        'devscripts',
        'fakeroot',
    ]

    resolver = run.get('resolver', 'apt-get')
    if resolver == 'apt-get':
        resolver = 'apt-get -yyq --no-install-recommends'
    elif resolver == 'aptitude':
        resolver = ('aptitude -y --without-recommends -o '
                    'Aptitude::ProblemResolver::Hints::KeepBuildDeps='
                    '"reject {}-build-deps :UNINST"').format(package)
        base_deps.append('aptitude')
    else:
        raise RuntimeError('Unreachable')

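    # Extra shell commands to run after unpacking the source and before
    # installing build dependencies (patching, pre-build tweaks, changelog
    # adjustments).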
    adjust = ''
    if 'patch' in run:
        # We can't depend on docker images, so we don't have robustcheckout or
        # run-task to get a checkout. So download the one file we'd otherwise
        # need from a checkout.
        env['PATCH_URL'] = '{head_repo}/raw-file/{head_rev}/build/debian-packages/{patch}'.format(
            head_repo=config.params['head_repository'],
            head_rev=config.params['head_rev'],
            patch=run['patch'],
        )
        adjust += 'curl -sL $PATCH_URL | patch -p1 && '
    if 'pre-build-command' in run:
        adjust += run['pre-build-command'] + ' && '
    if 'tarball' in run:
        adjust += 'mv ../{src_file} ../{package}_{ver}.orig.tar.gz && '.format(
            src_file=src_file,
            package=package,
            ver='$(dpkg-parsechangelog | awk \'$1=="Version:"{print $2}\' | cut -f 1 -d -)',
        )
    if 'patch' not in run and 'pre-build-command' not in run:
        adjust += ('debchange -l ".{prefix}moz" --distribution "{dist}"'
                   ' "Mozilla backport for {dist}." < /dev/null && ').format(
                       prefix=name.split('-', 1)[0],
                       dist=run['dist'],
                   )

    # We can't depend on docker images (since docker images depend on packages),
    # so we inline the whole script here.
    worker['command'] = [
        'sh',
        '-x',
        '-c',
        # Fill /etc/apt/sources.list with the relevant snapshot repository.
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist} main" > /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist}-updates main" >> /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist}-backports main" >> /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian-security'
        '/{snapshot}/ {dist}/updates main" >> /etc/apt/sources.list && '
        'apt-get update -o Acquire::Check-Valid-Until=false -q && '
        # Add sources for packages coming from other package tasks.
        'apt-get install -yyq apt-transport-https ca-certificates && '
        'for task in $PACKAGES; do '
        '  echo "deb [trusted=yes] https://queue.taskcluster.net/v1/task'
        '/$task/artifacts/public/build/ debian/" '
        '>> /etc/apt/sources.list; '
        'done && '
        # Install the base utilities required to build debian packages.
        'apt-get update -o Acquire::Check-Valid-Until=false -q && '
        'apt-get install -yyq {base_deps} && '
        'cd /tmp && '
        # Get, validate and extract the package source.
        'dget -d -u {src_url} && '
        'echo "{src_sha256}  {src_file}" | sha256sum -c && '
        '{unpack} && '
        'cd {package} && '
        # Optionally apply patch and/or pre-build command.
        '{adjust}'
        # Install the necessary build dependencies.
        'mk-build-deps -i -r debian/control -t \'{resolver}\' && '
        # Build the package
        'DEB_BUILD_OPTIONS="parallel=$(nproc) nocheck" dpkg-buildpackage && '
        # Copy the artifacts
        'mkdir -p {artifacts}/debian && '
        'dcmd cp ../{package}_*.changes {artifacts}/debian/ && '
        'cd {artifacts} && '
        # Make the artifacts directory usable as an APT repository.
        'apt-ftparchive sources debian | gzip -c9 > debian/Sources.gz && '
        'apt-ftparchive packages debian | gzip -c9 > debian/Packages.gz'
        .format(
            package=package,
            snapshot=run['snapshot'],
            dist=run['dist'],
            src_url=src_url,
            src_file=src_file,
            src_sha256=src_sha256,
            unpack=unpack,
            adjust=adjust,
            artifacts='/tmp/artifacts',
            base_deps=' '.join(base_deps),
            resolver=resolver,
        )
    ]

    # Use the command generated above as the base for the index hash.
    # We rely on it not varying depending on the head_repository or head_rev.
    data = list(worker['command'])
    if 'patch' in run:
        data.append(
            hash_path(
                os.path.join(GECKO, 'build', 'debian-packages', run['patch'])))

    if docker_repo != 'debian':
        data.append(docker_repo)

    if run.get('packages'):
        env = worker.setdefault('env', {})
        env['PACKAGES'] = {
            'task-reference': ' '.join('<{}>'.format(p)
                                       for p in run['packages'])
        }
        deps = taskdesc.setdefault('dependencies', {})
        for p in run['packages']:
            deps[p] = 'packages-{}'.format(p)
            data.append(p)

    add_optimization(config,
                     taskdesc,
                     cache_type='packages.v1',
                     cache_name=name,
                     digest_data=data)
Example #9: fill_template
def fill_template(config, tasks):
    available_packages = {}
    for task in config.kind_dependencies_tasks:
        if task.kind != 'packages':
            continue
        name = task.label.replace('packages-', '')
        for route in task.task.get('routes', []):
            if route.startswith('index.') and '.hash.' in route:
                # Only keep the hash part of the route.
                h = route.rsplit('.', 1)[1]
                assert DIGEST_RE.match(h)
                available_packages[name] = h
                break

    context_hashes = {}

    for task in order_image_tasks(config, tasks):
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        definition = task.pop('definition', image_name)
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if p not in available_packages:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        # Generating the context hash relies on arguments being set, so we
        # set this now, although it's not the final value (it's a
        # task-reference value, see further below). We add the package routes
        # containing a hash to get the overall docker image hash, so changes
        # to packages will be reflected in the docker image hash.
        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)
        if parent:
            args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(parent, context_hashes[parent])

        context_path = os.path.join('taskcluster', 'docker', definition)
        context_hash = generate_context_hash(
            GECKO, context_path, image_name, args)
        digest_data = [context_hash]
        context_hashes[image_name] = context_hash

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }

        worker = taskdesc['worker']

        # We use the in-tree image_builder image to build docker images, but
        # that can't be used to build the image_builder image itself,
        # obviously. So we fall back to the last snapshot of the image that
        # was uploaded to docker hub.
        if image_name == 'image_builder':
            worker['docker-image'] = 'taskcluster/image_builder@sha256:' + \
                '24ce54a1602453bc93515aecd9d4ad25a22115fbc4b209ddb5541377e9a37315'
            # Keep in sync with the Dockerfile used to generate the
            # docker image whose digest is referenced above.
            worker['volumes'] = [
                '/builds/worker/checkouts',
                '/builds/worker/workspace',
            ]
            cache_name = 'imagebuilder-v1'
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
            # Force images built against the in-tree image builder to
            # have a different digest by adding a fixed string to the
            # hashed data.
            digest_data.append('image_builder')

        worker['caches'] = [{
            'type': 'persistent',
            'name': 'level-{}-{}'.format(config.params['level'], cache_name),
            'mount-point': '/builds/worker/checkouts',
        }]

        for k, v in args.items():
            if k == 'DOCKER_IMAGE_PACKAGES':
                worker['env'][k] = {'task-reference': v}
            else:
                worker['env'][k] = v

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)
                digest_data.append(available_packages[p])

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps[parent] = 'build-docker-image-{}'.format(parent)
            worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
                'task-reference': '<{}>'.format(parent)
            }

        if len(digest_data) > 1:
            kwargs = {'digest_data': digest_data}
        else:
            kwargs = {'digest': digest_data[0]}
        add_optimization(
            config, taskdesc,
            cache_type="docker-images.v1",
            cache_name=image_name,
            **kwargs
        )

        yield taskdesc
Example #10: docker_worker_debian_package
def docker_worker_debian_package(config, job, taskdesc):
    run = job['run']

    name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)

    worker = taskdesc['worker']
    worker['artifacts'] = []
    worker['docker-image'] = 'debian:{dist}-{date}'.format(
        dist=run['dist'],
        date=run['snapshot'][:8])

    add_public_artifacts(config, job, taskdesc, path='/tmp/artifacts')

    env = worker.setdefault('env', {})
    env['DEBFULLNAME'] = 'Mozilla build team'
    env['DEBEMAIL'] = '*****@*****.**'

    if 'dsc' in run:
        src = run['dsc']
        unpack = 'dpkg-source -x {src_file} {package}'
        package_re = DSC_PACKAGE_RE
    elif 'tarball' in run:
        src = run['tarball']
        unpack = ('mkdir {package} && '
                  'tar -C {package} -axf {src_file} --strip-components=1')
        package_re = SOURCE_PACKAGE_RE
    else:
        raise RuntimeError('Unreachable')
    src_url = src['url']
    src_file = os.path.basename(src_url)
    src_sha256 = src['sha256']
    package = package_re.match(src_file).group(0)
    unpack = unpack.format(src_file=src_file, package=package)

    base_deps = [
        'apt-utils',
        'build-essential',
        'devscripts',
        'fakeroot',
    ]

    resolver = run.get('resolver', 'apt-get')
    if resolver == 'apt-get':
        resolver = 'apt-get -yyq --no-install-recommends'
    elif resolver == 'aptitude':
        resolver = ('aptitude -y --without-recommends -o '
                    'Aptitude::ProblemResolver::Hints::KeepBuildDeps='
                    '"reject {}-build-deps :UNINST"').format(package)
        base_deps.append('aptitude')
    else:
        raise RuntimeError('Unreachable')

    adjust = ''
    if 'patch' in run:
        # We can't depend on docker images, so we don't have robustcheckout or
        # run-task to get a checkout. So download the one file we'd otherwise
        # need from a checkout.
        env['PATCH_URL'] = '{head_repo}/raw-file/{head_rev}/build/debian-packages/{patch}'.format(
            head_repo=config.params['head_repository'],
            head_rev=config.params['head_rev'],
            patch=run['patch'],
        )
        adjust += 'curl -sL $PATCH_URL | patch -p1 && '
    if 'pre-build-command' in run:
        adjust += run['pre-build-command'] + ' && '
    if 'tarball' in run:
        adjust += 'mv ../{src_file} ../{package}_{ver}.orig.tar.gz && '.format(
            src_file=src_file,
            package=package,
            ver='$(dpkg-parsechangelog | awk \'$1=="Version:"{print $2}\' | cut -f 1 -d -)',
        )
    if 'patch' not in run and 'pre-build-command' not in run:
        adjust += ('debchange -l ".{prefix}moz" --distribution "{dist}"'
                   ' "Mozilla backport for {dist}." < /dev/null && ').format(
            prefix=name.split('-', 1)[0],
            dist=run['dist'],
        )

    # We can't depend on docker images (since docker images depend on packages),
    # so we inline the whole script here.
    worker['command'] = [
        'sh',
        '-x',
        '-c',
        # Fill /etc/apt/sources.list with the relevant snapshot repository.
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist} main" > /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist}-updates main" >> /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian'
        '/{snapshot}/ {dist}-backports main" >> /etc/apt/sources.list && '
        'echo "deb http://snapshot.debian.org/archive/debian-security'
        '/{snapshot}/ {dist}/updates main" >> /etc/apt/sources.list && '
        'apt-get update -o Acquire::Check-Valid-Until=false -q && '
        # Add sources for packages coming from other package tasks.
        'apt-get install -yyq apt-transport-https ca-certificates && '
        'for task in $PACKAGES; do '
        '  echo "deb [trusted=yes] https://queue.taskcluster.net/v1/task'
        '/$task/runs/0/artifacts/public/build/ debian/" '
        '>> /etc/apt/sources.list; '
        'done && '
        # Install the base utilities required to build debian packages.
        'apt-get update -o Acquire::Check-Valid-Until=false -q && '
        'apt-get install -yyq {base_deps} && '
        'cd /tmp && '
        # Get, validate and extract the package source.
        'dget -d -u {src_url} && '
        'echo "{src_sha256}  {src_file}" | sha256sum -c && '
        '{unpack} && '
        'cd {package} && '
        # Optionally apply patch and/or pre-build command.
        '{adjust}'
        # Install the necessary build dependencies.
        'mk-build-deps -i -r debian/control -t \'{resolver}\' && '
        # Build the package
        'DEB_BUILD_OPTIONS="parallel=$(nproc) nocheck" dpkg-buildpackage && '
        # Copy the artifacts
        'mkdir -p {artifacts}/debian && '
        'dcmd cp ../{package}_*.changes {artifacts}/debian/ && '
        'cd {artifacts} && '
        # Make the artifacts directory usable as an APT repository.
        'apt-ftparchive sources debian | gzip -c9 > debian/Sources.gz && '
        'apt-ftparchive packages debian | gzip -c9 > debian/Packages.gz'
        .format(
            package=package,
            snapshot=run['snapshot'],
            dist=run['dist'],
            src_url=src_url,
            src_file=src_file,
            src_sha256=src_sha256,
            unpack=unpack,
            adjust=adjust,
            artifacts='/tmp/artifacts',
            base_deps=' '.join(base_deps),
            resolver=resolver,
        )
    ]

    # Use the command generated above as the base for the index hash.
    # We rely on it not varying depending on the head_repository or head_rev.
    data = list(worker['command'])
    if 'patch' in run:
        data.append(hash_path(os.path.join(GECKO, 'build', 'debian-packages', run['patch'])))

    if run.get('packages'):
        env = worker.setdefault('env', {})
        env['PACKAGES'] = {
            'task-reference': ' '.join('<{}>'.format(p)
                                       for p in run['packages'])
        }
        deps = taskdesc.setdefault('dependencies', {})
        for p in run['packages']:
            deps[p] = 'packages-{}'.format(p)
            data.append(p)

    add_optimization(config, taskdesc, cache_type='packages.v1',
                     cache_name=name, digest_data=data)
Example #11: docker_worker_toolchain
def docker_worker_toolchain(config, job, taskdesc):
    run = job['run']

    worker = taskdesc['worker']
    worker['chain-of-trust'] = True

    # Allow the job to specify where artifacts come from, but add
    # public/build if it's not there already.
    artifacts = worker.setdefault('artifacts', [])
    if not any(artifact.get('name') == 'public/build' for artifact in artifacts):
        docker_worker_add_public_artifacts(config, job, taskdesc)

    docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)
    support_vcs_checkout(config, job, taskdesc, sparse=True)

    env = worker['env']
    env.update({
        'MOZ_BUILD_DATE': config.params['moz_build_date'],
        'MOZ_SCM_LEVEL': config.params['level'],
        'TOOLS_DISABLE': 'true',
        'MOZ_AUTOMATION': '1',
    })

    if run['tooltool-downloads']:
        internal = run['tooltool-downloads'] == 'internal'
        docker_worker_add_tooltool(config, job, taskdesc, internal=internal)

    # Use `mach` to invoke python scripts so in-tree libraries are available.
    if run['script'].endswith('.py'):
        wrapper = 'workspace/build/src/mach python '
    else:
        wrapper = ''

    args = run.get('arguments', '')
    if args:
        args = ' ' + shell_quote(*args)

    sparse_profile = []
    if run.get('sparse-profile'):
        sparse_profile = ['--sparse-profile',
                          'build/sparse-profiles/{}'.format(run['sparse-profile'])]

    worker['command'] = [
        '/builds/worker/bin/run-task',
        '--vcs-checkout=/builds/worker/workspace/build/src',
    ] + sparse_profile + [
        '--',
        'bash',
        '-c',
        'cd /builds/worker && '
        '{}workspace/build/src/taskcluster/scripts/misc/{}{}'.format(
            wrapper, run['script'], args)
    ]

    attributes = taskdesc.setdefault('attributes', {})
    attributes['toolchain-artifact'] = run['toolchain-artifact']
    if 'toolchain-alias' in run:
        attributes['toolchain-alias'] = run['toolchain-alias']

    if not taskgraph.fast:
        name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
        add_optimization(
            config, taskdesc,
            cache_type=CACHE_TYPE,
            cache_name=name,
            digest_data=get_digest_data(config, run, taskdesc),
        )
Example #12: process_thirdparty_build
def process_thirdparty_build(config, jobs):
    """
    Set up a thirdparty library build, caching the built artifacts.
    """
    for job in jobs:
        name = job['name']
        thirdparty = job['thirdparty']

        artifact_name = thirdparty['artifact']

        script = os.path.join(COMM_SCRIPTS, thirdparty['script'])
        args = thirdparty.get('args', [])

        command = [script] + args

        task = make_base_task(config, name, job, script, command)
        merge_to(job['index'], task['index'])
        merge_to(job['treeherder'], task['treeherder'])
        merge_to(job['worker'], task['worker'])

        if 'run' in job:
            merge_to(job['run'], task['run'])
        if 'when' in job:
            merge_to(job['when'], task['when'])
        if 'toolchain' in job:
            task['fetches']['toolchain'] = job['toolchain']

        when = task.pop('when')
        if 'when' in job:
            merge_to(job['when'], when)

        # The files-changed optimization is not actually used because it
        # conflicts with the indexing optimization, but the same list of files
        # is used to look up the revision with the most recent changes in
        # order to calculate a hash for the index.
        files_changed = when['files-changed']

        task['worker'].setdefault('artifacts', []).append({
            'name': 'public/build',
            'path': '/builds/worker/artifacts',
            'type': 'directory',
        })

        if not taskgraph.fast:
            project = config.params['project']

            # Get the most recent revision with changes. files-changed paths
            # are relative to GECKO, so strip 'comm/' off first.
            files_changed = frozenset(map(lambda p: strip_comm_prefix(p), files_changed))
            last_changed_rev = get_last_modified_revision(COMM, files_changed)
            logger.info("Using artifact from rev {}.".format(last_changed_rev))

            cache_name = task['label'].replace('{}-'.format(config.kind), '', 1)

            # This adds the level to the index path automatically.
            add_optimization(
                config,
                task,
                cache_type=CACHE_TYPE,
                cache_name=cache_name,
                # Digest is based on the repo name and revision
                digest_data=command + [project, last_changed_rev, artifact_name],
            )

        yield task