Code example #1
def load_image_by_name(image_name, tag=None):
    context_path = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    context_hash = docker.generate_context_hash(GECKO, context_path, image_name)

    index_path = DOCKER_INDEX.format('level-3', image_name, context_hash)
    task_id = find_task_id(index_path)

    return load_image_by_task_id(task_id, tag)
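
A note on the lookup step: `find_task_id` resolves the index path into a concrete task ID. A minimal sketch of that resolution against the Taskcluster index HTTP API follows; the root URL and the `index/v1/task/{namespace}` endpoint shape are assumptions drawn from the public Taskcluster API, not code from these repositories.

import json
import urllib.request

# Hypothetical deployment root URL; real callers take this from configuration.
ROOT_URL = 'https://firefox-ci-tc.services.mozilla.com'

def find_task_id_sketch(index_path):
    # The index service maps a namespace such as
    # 'docker.images.v1.<project>.<image_name>.hash.<context_hash>'
    # to the most recently indexed task under that namespace.
    url = '{}/api/index/v1/task/{}'.format(ROOT_URL, index_path)
    with urllib.request.urlopen(url) as response:
        return json.load(response)['taskId']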
Code example #2
File: docker.py | Project: lazyparser/gecko-dev
def load_image_by_name(image_name, tag=None):
    context_path = os.path.join(GECKO, 'testing', 'docker', image_name)
    context_hash = docker.generate_context_hash(GECKO, context_path, image_name)

    image_index_url = INDEX_URL.format('level-3', image_name, context_hash)
    print("Fetching", image_index_url)
    task = json.load(urllib2.urlopen(image_index_url))

    return load_image_by_task_id(task['taskId'], tag)
Code example #3
File: docker.py | Project: danhuang1202/gecko-central
def load_image_by_name(image_name):
    context_path = os.path.join(GECKO, 'testing', 'docker', image_name)
    context_hash = docker.generate_context_hash(context_path)

    image_index_url = INDEX_URL.format('mozilla-central', image_name,
                                       context_hash)
    print("Fetching", image_index_url)
    task = json.load(urllib2.urlopen(image_index_url))

    return load_image_by_task_id(task['taskId'])
Code example #4
File: docker.py | Project: interwebLT/browser-f
def load_image_by_name(image_name, tag=None):
    context_path = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    context_hash = docker.generate_context_hash(GECKO, context_path,
                                                image_name)

    image_index_url = INDEX_URL.format('level-3', image_name, context_hash)
    print("Fetching", image_index_url)
    task = json.load(urllib2.urlopen(image_index_url))

    return load_image_by_task_id(task['taskId'], tag)
Code example #5
    def load_tasks(cls, kind, path, config, params, loaded_tasks):
        parameters = {
            'pushlog_id': params.get('pushlog_id', 0),
            'pushdate': params['moz_build_date'],
            'pushtime': params['moz_build_date'][8:],
            'year': params['moz_build_date'][0:4],
            'month': params['moz_build_date'][4:6],
            'day': params['moz_build_date'][6:8],
            'project': params['project'],
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'owner': params['owner'],
            'level': params['level'],
            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
                      .format(repo=params['head_repository'], rev=params['head_rev']),
            'index_image_prefix': INDEX_PREFIX,
            'artifact_path': 'public/image.tar.zst',
        }

        tasks = []
        templates = Templates(path)
        for image_name, image_symbol in config['images'].iteritems():
            context_path = os.path.join('taskcluster', 'docker', image_name)
            context_hash = generate_context_hash(GECKO, context_path, image_name)

            image_parameters = dict(parameters)
            image_parameters['image_name'] = image_name
            image_parameters['context_hash'] = context_hash

            image_task = templates.load('image.yml', image_parameters)
            attributes = {'image_name': image_name}

            # unique symbol for different docker image
            if 'extra' in image_task['task']:
                image_task['task']['extra']['treeherder']['symbol'] = image_symbol

            # As an optimization, if the context hash exists for a high level, that
            # image task ID will be used.  The reasoning is that everything eventually
            # ends up on level 3; if most tasks share a common image for a given
            # context hash, a worker within Taskcluster does not need to contain the
            # same image per branch.
            index_paths = ['{}.level-{}.{}.hash.{}'.format(
                                INDEX_PREFIX, level, image_name, context_hash)
                           for level in range(int(params['level']), 4)]

            tasks.append(cls(kind, 'build-docker-image-' + image_name,
                             task=image_task['task'], attributes=attributes,
                             index_paths=index_paths))

        return tasks
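
The `index_paths` list above enumerates one candidate namespace per trust level, so a consumer can probe them and reuse the first indexed task it finds; the ordering of the list therefore encodes which level is preferred. A rough sketch of that probe loop, assuming a `find_task_id` helper that raises `KeyError` for an empty namespace (the concrete error type is an assumption):

def find_cached_image_task(index_paths, find_task_id):
    # Probe each candidate namespace in order; the first hit wins, which
    # is why callers order the list by preference (e.g. the highest
    # trust level first).
    for path in index_paths:
        try:
            return find_task_id(path)
        except KeyError:
            continue
    # No level has an image indexed for this context hash yet.
    return None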
Code example #6
File: docker.py | Project: abcwow/gecko-dev
def load_image_by_name(image_name, tag=None):
    context_path = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    context_hash = docker.generate_context_hash(GECKO, context_path, image_name)

    index_path = cached_index_path(
        trust_domain='gecko',
        level=3,
        cache_type='docker-images.v1',
        cache_name=image_name,
        digest=context_hash,
    )
    task_id = find_task_id(index_path)

    return load_image_by_task_id(task_id, tag)
Code example #7
File: docker.py | Project: luke-chang/gecko-1
def load_image_by_name(image_name, tag=None):
    context_path = os.path.join(GECKO, 'taskcluster', 'docker', image_name)
    context_hash = docker.generate_context_hash(GECKO, context_path, image_name)

    index_path = cached_index_path(
        trust_domain='gecko',
        level=3,
        cache_type='docker-images.v1',
        cache_name=image_name,
        digest=context_hash,
    )
    task_id = find_task_id(index_path)

    return load_image_by_task_id(task_id, tag)
Code example #8
 def test_generate_context_hash(self):
     tmpdir = tempfile.mkdtemp()
     try:
         os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
         p = os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile')
         with open(p, 'w') as f:
             f.write("FROM node\nADD a-file\n")
         os.chmod(p, MODE_STANDARD)
         p = os.path.join(tmpdir, 'docker', 'my-image', 'a-file')
         with open(p, 'w') as f:
             f.write("data\n")
         os.chmod(p, MODE_STANDARD)
         self.assertEqual(
             docker.generate_context_hash(
                 tmpdir, os.path.join(tmpdir, 'docker/my-image'),
                 'my-image'),
             'e61e675ce05e8c11424437db3f1004079374c1a5fe6ad6800346cebe137b0797'
         )
     finally:
         shutil.rmtree(tmpdir)
Code example #9
 def test_generate_context_hash(self):
     tmpdir = tempfile.mkdtemp()
     try:
         os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
         p = os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile')
         with open(p, 'w') as f:
             f.write("FROM node\nADD a-file\n")
         os.chmod(p, MODE_STANDARD)
         p = os.path.join(tmpdir, 'docker', 'my-image', 'a-file')
         with open(p, 'w') as f:
             f.write("data\n")
         os.chmod(p, MODE_STANDARD)
         self.assertEqual(
             docker.generate_context_hash(tmpdir,
                                          os.path.join(tmpdir, 'docker/my-image'),
                                          'my-image'),
             'e1649b3427bd7a0387f4508d25057c2e89228748517aad6c70e3df54f47bd13a'
         )
     finally:
         shutil.rmtree(tmpdir)
Code example #10
 def test_generate_context_hash(self):
     tmpdir = tempfile.mkdtemp()
     old_GECKO = docker.GECKO
     docker.GECKO = tmpdir
     try:
         os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
         p = os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile')
         with open(p, 'w') as f:
             f.write("FROM node\nADD a-file\n")
         os.chmod(p, MODE_STANDARD)
         p = os.path.join(tmpdir, 'docker', 'my-image', 'a-file')
         with open(p, 'w') as f:
             f.write("data\n")
         os.chmod(p, MODE_STANDARD)
         self.assertEqual(
             docker.generate_context_hash(docker.GECKO,
                                          os.path.join(docker.GECKO, 'docker/my-image'),
                                          'my-image'),
             'e61e675ce05e8c11424437db3f1004079374c1a5fe6ad6800346cebe137b0797'
         )
     finally:
         docker.GECKO = old_GECKO
         shutil.rmtree(tmpdir)
Code example #11
 def test_generate_context_hash(self):
     tmpdir = tempfile.mkdtemp()
     try:
         os.makedirs(os.path.join(tmpdir, "docker", "my-image"))
         p = os.path.join(tmpdir, "docker", "my-image", "Dockerfile")
         with open(p, "w") as f:
             f.write("FROM node\nADD a-file\n")
         os.chmod(p, MODE_STANDARD)
         p = os.path.join(tmpdir, "docker", "my-image", "a-file")
         with open(p, "w") as f:
             f.write("data\n")
         os.chmod(p, MODE_STANDARD)
         self.assertEqual(
             docker.generate_context_hash(
                 tmpdir,
                 os.path.join(tmpdir, "docker/my-image"),
                 "my-image",
                 {},
             ),
             "680532a33c845e3b4f8ea8a7bd697da579b647f28c29f7a0a71e51e6cca33983",
         )
     finally:
         shutil.rmtree(tmpdir)
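
These tests pin exact digests and `chmod` every file to `MODE_STANDARD` before hashing, which only makes sense if file permissions are folded into the digest along with paths and contents; it also explains why the expected digest changes across versions (the variant in code example #11 additionally hashes an empty build-args mapping). The following is a minimal sketch of such a deterministic context hash, offered under those assumptions rather than as the actual implementation:

import hashlib
import os

def context_hash_sketch(topdir, context_dir, extra=b''):
    # Hash the relative path, permission bits and contents of every file
    # under the context directory, in sorted order so the digest does not
    # depend on filesystem walk order.
    h = hashlib.sha256()
    paths = []
    for root, _dirs, files in os.walk(context_dir):
        paths.extend(os.path.join(root, name) for name in files)
    for path in sorted(paths):
        rel = os.path.relpath(path, topdir).encode()
        mode = oct(os.stat(path).st_mode & 0o777).encode()
        with open(path, 'rb') as f:
            data = f.read()
        h.update(rel + b'\0' + mode + b'\0' + data)
    h.update(extra)  # e.g. serialized build args in the newer variants
    return h.hexdigest()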
Code example #12
def fill_template(config, tasks):
    available_packages = set()
    for task in config.kind_dependencies_tasks:
        if task.kind != 'packages':
            continue
        name = task.label.replace('packages-', '')
        available_packages.add(name)

    context_hashes = {}

    for task in order_image_tasks(config, tasks):
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        definition = task.pop('definition', image_name)
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if p not in available_packages:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        # Generating the context hash relies on arguments being set, so we
        # set this now, although it's not the final value (it's a
        # task-reference value, see further below). We add the package routes
        # containing a hash to get the overall docker image hash, so changes
        # to packages will be reflected in the docker image hash.
        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)
        if parent:
            args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(
                parent, context_hashes[parent])

        args['TASKCLUSTER_ROOT_URL'] = get_root_url(False)

        if not taskgraph.fast:
            context_path = os.path.join('taskcluster', 'docker', definition)
            context_hash = generate_context_hash(GECKO, context_path,
                                                 image_name, args)
        else:
            context_hash = '0' * 40
        digest_data = [context_hash]
        context_hashes[image_name] = context_hash

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': [
                'secrets:get:project/taskcluster/gecko/hgfingerprint',
                'secrets:get:project/taskcluster/gecko/hgmointernal',
            ],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'images',
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        # We use the in-tree image_builder image to build docker images, but
        # that can't be used to build the image_builder image itself,
        # obviously. So we fall back to an image on docker hub, identified
        # by hash.  After the image-builder image is updated, it's best to push
        # and update this hash as well, to keep image-builder builds up to date.
        if image_name == 'image_builder':
            hash = 'sha256:c6622fd3e5794842ad83d129850330b26e6ba671e39c58ee288a616a3a1c4c73'
            worker['docker-image'] = 'taskcluster/image_builder@' + hash
            # Keep in sync with the Dockerfile used to generate the
            # docker image whose digest is referenced above.
            worker['volumes'] = [
                '/builds/worker/checkouts',
                '/builds/worker/workspace',
            ]
            cache_name = 'imagebuilder-v1'
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
            # Force images built against the in-tree image builder to
            # have a different digest by adding a fixed string to the
            # hashed data.
            # Append to this data whenever the image builder's output behavior
            # is changed, in order to force all downstream images to be rebuilt and
            # cached distinctly.
            digest_data.append('image_builder')
            # Updated for squashing images in Bug 1527394
            digest_data.append('squashing layers')

        worker['caches'] = [{
            'type': 'persistent',
            'name': cache_name,
            'mount-point': '/builds/worker/checkouts',
        }]

        for k, v in args.items():
            if k == 'DOCKER_IMAGE_PACKAGES':
                worker['env'][k] = {'task-reference': v}
            else:
                worker['env'][k] = v

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps['parent'] = 'build-docker-image-{}'.format(parent)
            worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
                'task-reference': '<parent>',
            }
        if 'index' in task:
            taskdesc['index'] = task['index']

        if task.get('cache', True) and not taskgraph.fast:
            taskdesc['cache'] = {
                'type': 'docker-images.v2',
                'name': image_name,
                'digest-data': digest_data,
            }

        yield taskdesc
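
The `digest_data` list accumulated above (the context hash plus markers such as 'image_builder' and 'squashing layers') is eventually reduced to a single cache digest, which is why appending a new entry forces every downstream image to rebuild. One plausible reduction is sketched below; the real transform defines its own scheme, so treat this purely as an illustration:

import hashlib

def digest_from_data(digest_data):
    # Any change to any entry (context hash, builder marker, package
    # hashes, ...) changes the final digest and therefore the cache key.
    h = hashlib.sha256()
    for item in digest_data:
        h.update(item.encode('utf-8') + b'\n')
    return h.hexdigest()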
Code example #13
File: docker_image.py | Project: brendandahl/positron
    def load_tasks(cls, kind, path, config, params, loaded_tasks):
        # TODO: make this match the pushdate (get it from a parameter rather than vcs)
        pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())

        parameters = {
            'pushlog_id': params.get('pushlog_id', 0),
            'pushdate': pushdate,
            'pushtime': pushdate[8:],
            'year': pushdate[0:4],
            'month': pushdate[4:6],
            'day': pushdate[6:8],
            'project': params['project'],
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'owner': params['owner'],
            'level': params['level'],
            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
                      .format(repo=params['head_repository'], rev=params['head_rev']),
        }

        tasks = []
        templates = Templates(path)
        for image_name in config['images']:
            context_path = os.path.join('testing', 'docker', image_name)
            context_hash = generate_context_hash(context_path)

            image_parameters = dict(parameters)
            image_parameters['context_hash'] = context_hash
            image_parameters['context_path'] = context_path
            image_parameters['artifact_path'] = 'public/image.tar'
            image_parameters['image_name'] = image_name

            image_artifact_path = \
                "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
            if os.environ.get('TASK_ID'):
                destination = os.path.join(
                    os.environ['HOME'],
                    "artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name))
                image_parameters['context_url'] = ARTIFACT_URL.format(
                    os.environ['TASK_ID'], image_artifact_path)
                cls.create_context_tar(context_path, destination, image_name)
            else:
                # skip context generation since this isn't a decision task
                # TODO: generate context tarballs using subdirectory clones in
                # the image-building task so we don't have to worry about this.
                image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path

            image_task = templates.load('image.yml', image_parameters)

            attributes = {'image_name': image_name}

            # As an optimization, if the context hash exists for mozilla-central, that
            # image task ID will be used.  The reasoning is that everything eventually
            # ends up on mozilla-central; if most tasks share a common image for a
            # given context hash, a worker within Taskcluster does not need to contain
            # the same image per branch.
            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(
                                project, image_name, context_hash)
                           for project in ['mozilla-central', params['project']]]

            tasks.append(cls(kind, 'build-docker-image-' + image_name,
                             task=image_task['task'], attributes=attributes,
                             index_paths=index_paths))

        return tasks
Code example #14
File: docker_image.py | Project: crackself/iceweasel
def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop("name")
        job_symbol = task.pop("symbol")
        args = task.pop("args", {})
        packages = task.pop("packages", [])
        parent = task.pop("parent", None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception("Missing package job for {}-{}: {}".format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR,
                                            "{}.tar.gz".format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = "0" * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = "Build the docker image {} for use by dependent tasks".format(
            image_name)

        args["DOCKER_IMAGE_PACKAGES"] = " ".join("<{}>".format(p)
                                                 for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = "3" if int(config.params["level"]) == 1 else "10"

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            "label": "{}-{}".format(config.kind, image_name),
            "description": description,
            "attributes": {
                "image_name": image_name,
                "artifact_prefix": "public",
            },
            "expires-after": "1 year",
            "scopes": [],
            "treeherder": {
                "symbol": job_symbol,
                "platform": "taskcluster-images/opt",
                "kind": "other",
                "tier": 1,
            },
            "run-on-projects": [],
            "worker-type": "images",
            "worker": {
                "implementation": "docker-worker",
                "os": "linux",
                "artifacts": [{
                    "type": "file",
                    "path": "/workspace/image.tar.zst",
                    "name": "public/image.tar.zst",
                }],
                "env": {
                    "CONTEXT_TASK_ID": {"task-reference": "<decision>"},
                    "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format(image_name),
                    "HASH": context_hash,
                    "PROJECT": config.params["project"],
                    "IMAGE_NAME": image_name,
                    "DOCKER_IMAGE_ZSTD_LEVEL": zstd_level,
                    "DOCKER_BUILD_ARGS": {
                        "task-reference": six.ensure_text(json.dumps(args))
                    },
                    "GECKO_BASE_REPOSITORY": config.params["base_repository"],
                    "GECKO_HEAD_REPOSITORY": config.params["head_repository"],
                    "GECKO_HEAD_REV": config.params["head_rev"],
                },
                "chain-of-trust": True,
                "max-run-time": 7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ["funsize-update-generator"]:
            taskdesc["worker"]["retry-exit-status"] = [-1]

        worker = taskdesc["worker"]

        if image_name == "image_builder":
            worker["docker-image"] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker["docker-image"] = {"in-tree": "image_builder"}
            deps = taskdesc.setdefault("dependencies", {})
            deps["docker-image"] = "{}-image_builder".format(config.kind)

        if packages:
            deps = taskdesc.setdefault("dependencies", {})
            for p in sorted(packages):
                deps[p] = "packages-{}".format(p)

        if parent:
            deps = taskdesc.setdefault("dependencies", {})
            deps["parent"] = "{}-{}".format(config.kind, parent)
            worker["env"]["PARENT_TASK_ID"] = {
                "task-reference": "<parent>",
            }
        if "index" in task:
            taskdesc["index"] = task["index"]

        if task.get("cache", True) and not taskgraph.fast:
            taskdesc["cache"] = {
                "type": "docker-images.v2",
                "name": image_name,
                "digest-data": digest_data,
            }

        yield taskdesc
Code example #15
def fill_template(config, tasks):
    available_packages = set()
    for task in config.kind_dependencies_tasks:
        if task.kind != "packages":
            continue
        name = task.label.replace("packages-", "")
        available_packages.add(name)

    context_hashes = {}

    tasks = list(tasks)

    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop("name")
        job_symbol = task.pop("symbol", None)
        args = task.pop("args", {})
        definition = task.pop("definition", image_name)
        packages = task.pop("packages", [])
        parent = task.pop("parent", None)

        for p in packages:
            if p not in available_packages:
                raise Exception(
                    "Missing package job for {}-{}: {}".format(
                        config.kind, image_name, p
                    )
                )

        if not taskgraph.fast:
            context_path = os.path.join("taskcluster", "docker", definition)
            topsrcdir = os.path.dirname(config.graph_config.taskcluster_yml)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR, f"{image_name}.tar.gz")
                logger.info(f"Writing {context_file} for docker image {image_name}")
                context_hash = create_context_tar(
                    topsrcdir,
                    context_path,
                    context_file,
                    args,
                )
            else:
                context_hash = generate_context_hash(topsrcdir, context_path, args)
        else:
            if config.write_artifacts:
                raise Exception("Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = "0" * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]
        context_hashes[image_name] = context_hash

        description = "Build the docker image {} for use by dependent tasks".format(
            image_name
        )

        args["DOCKER_IMAGE_PACKAGES"] = " ".join(f"<{p}>" for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = "3" if int(config.params["level"]) == 1 else "10"

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            "label": "build-docker-image-" + image_name,
            "description": description,
            "attributes": {
                "image_name": image_name,
                "artifact_prefix": "public",
            },
            "expires-after": "28 days" if config.params.is_try() else "1 year",
            "scopes": [],
            "run-on-projects": [],
            "worker-type": "images",
            "worker": {
                "implementation": "docker-worker",
                "os": "linux",
                "artifacts": [
                    {
                        "type": "file",
                        "path": "/workspace/image.tar.zst",
                        "name": "public/image.tar.zst",
                    }
                ],
                "env": {
                    "CONTEXT_TASK_ID": {"task-reference": "<decision>"},
                    "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format(
                        image_name
                    ),
                    "HASH": context_hash,
                    "PROJECT": config.params["project"],
                    "IMAGE_NAME": image_name,
                    "DOCKER_IMAGE_ZSTD_LEVEL": zstd_level,
                    "DOCKER_BUILD_ARGS": {
                        "task-reference": json.dumps(args),
                    },
                    "VCS_BASE_REPOSITORY": config.params["base_repository"],
                    "VCS_HEAD_REPOSITORY": config.params["head_repository"],
                    "VCS_HEAD_REV": config.params["head_rev"],
                    "VCS_REPOSITORY_TYPE": config.params["repository_type"],
                },
                "chain-of-trust": True,
                "max-run-time": 7200,
            },
        }
        if "index" in task:
            taskdesc["index"] = task["index"]
        if job_symbol:
            taskdesc["treeherder"] = {
                "symbol": job_symbol,
                "platform": "taskcluster-images/opt",
                "kind": "other",
                "tier": 1,
            }

        worker = taskdesc["worker"]

        worker["docker-image"] = IMAGE_BUILDER_IMAGE
        digest_data.append(f"image-builder-image:{IMAGE_BUILDER_IMAGE}")

        if packages:
            deps = taskdesc.setdefault("dependencies", {})
            for p in sorted(packages):
                deps[p] = f"packages-{p}"

        if parent:
            deps = taskdesc.setdefault("dependencies", {})
            deps["parent"] = f"build-docker-image-{parent}"
            worker["env"]["PARENT_TASK_ID"] = {
                "task-reference": "<parent>",
            }

        if task.get("cache", True) and not taskgraph.fast:
            taskdesc["cache"] = {
                "type": "docker-images.v2",
                "name": image_name,
                "digest-data": digest_data,
            }

        yield taskdesc
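
Values wrapped as {'task-reference': ...}, such as `<decision>` and `<parent>` above, are not handed to the worker verbatim: a later phase of task-graph generation substitutes each `<edge-name>` token with the task ID of the corresponding dependency. A toy version of that substitution, assuming the dependencies arrive as a label-to-task-ID mapping:

import re

def resolve_task_references_sketch(text, dependency_task_ids):
    # Replace each '<edge-name>' token with the task ID that the named
    # dependency edge points at.
    return re.sub(r'<([^>]+)>',
                  lambda m: dependency_task_ids[m.group(1)],
                  text)

# For example, resolve_task_references_sketch('<parent>', {'parent': 'abc123'})
# returns 'abc123'.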
Code example #16
def fill_template(config, tasks):
    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')

        context_path = os.path.join('taskcluster', 'docker', image_name)
        context_hash = generate_context_hash(GECKO, context_path, image_name)

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        routes = []
        for tpl in ROUTE_TEMPLATES:
            routes.append(tpl.format(
                index_prefix=INDEX_PREFIX,
                level=config.params['level'],
                image_name=image_name,
                project=config.params['project'],
                head_rev=config.params['head_rev'],
                pushlog_id=config.params.get('pushlog_id', 0),
                pushtime=config.params['moz_build_date'][8:],
                year=config.params['moz_build_date'][0:4],
                month=config.params['moz_build_date'][4:6],
                day=config.params['moz_build_date'][6:8],
                context_hash=context_hash,
            ))

        # As an optimization, if the context hash exists for a high level, that
        # image task ID will be used.  The reasoning is that everything eventually
        # ends up on level 3; if most tasks share a common image for a given
        # context hash, a worker within Taskcluster does not need to contain the
        # same image per branch.
        optimizations = [['index-search', '{}.level-{}.{}.hash.{}'.format(
            INDEX_PREFIX, level, image_name, context_hash)]
            for level in reversed(range(int(config.params['level']), 4))]

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '1 year',
            'routes': routes,
            'optimizations': optimizations,
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-images',
            # can't use {in-tree: ..} here, otherwise we might try to build
            # this image..
            'worker': {
                'implementation': 'docker-worker',
                'docker-image': docker_image('image_builder'),
                'caches': [{
                    'type': 'persistent',
                    'name': 'level-{}-imagebuilder-v1'.format(config.params['level']),
                    'mount-point': '/home/worker/checkouts',
                }],
                'artifacts': [{
                    'type': 'file',
                    'path': '/home/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 3600,
            },
        }

        yield taskdesc
Code example #17
File: docker_image.py | Project: patrickdark/gecko-dev
def fill_template(config, tasks):
    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')

        context_path = os.path.join('taskcluster', 'docker', image_name)
        context_hash = generate_context_hash(GECKO, context_path, image_name)

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        routes = []
        for tpl in ROUTE_TEMPLATES:
            routes.append(
                tpl.format(
                    index_prefix=INDEX_PREFIX,
                    level=config.params['level'],
                    image_name=image_name,
                    project=config.params['project'],
                    head_rev=config.params['head_rev'],
                    pushlog_id=config.params.get('pushlog_id', 0),
                    pushtime=config.params['moz_build_date'][8:],
                    year=config.params['moz_build_date'][0:4],
                    month=config.params['moz_build_date'][4:6],
                    day=config.params['moz_build_date'][6:8],
                    context_hash=context_hash,
                ))

        # As an optimization, if the context hash exists for a high level, that
        # image task ID will be used.  The reasoning is that everything eventually
        # ends up on level 3; if most tasks share a common image for a given
        # context hash, a worker within Taskcluster does not need to contain the
        # same image per branch.
        optimizations = [[
            'index-search',
            '{}.level-{}.{}.hash.{}'.format(INDEX_PREFIX, level, image_name,
                                            context_hash)
        ] for level in reversed(range(int(config.params['level']), 4))]

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '1 year',
            'routes': routes,
            'optimizations': optimizations,
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            # can't use {in-tree: ..} here, otherwise we might try to build
            # this image..
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'docker-image': docker_image('image_builder'),
                'caches': [{
                    'type': 'persistent',
                    'name': 'level-{}-imagebuilder-v1'.format(config.params['level']),
                    'mount-point': '/builds/worker/checkouts',
                }],
                'volumes': [
                    # Keep in sync with Dockerfile and TASKCLUSTER_VOLUMES
                    '/builds/worker/checkouts',
                    '/builds/worker/workspace',
                ],
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                    'TASKCLUSTER_VOLUMES':
                        '/builds/worker/checkouts;/builds/worker/workspace',
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }

        yield taskdesc
Code example #18
def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR,
                                            '{}.tar.gz'.format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = '0' * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': '{}-{}'.format(config.kind, image_name),
            'description': description,
            'attributes': {
                'image_name': image_name,
                'artifact_prefix': 'public',
            },
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': [],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'images',
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/workspace/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'CONTEXT_TASK_ID': {'task-reference': "<decision>"},
                    'CONTEXT_PATH': "public/docker-contexts/{}.tar.gz".format(image_name),
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'DOCKER_BUILD_ARGS': {
                        'task-reference': six.ensure_text(json.dumps(args))
                    },
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'max-run-time': 7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        if image_name == 'image_builder':
            worker['docker-image'] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            deps = taskdesc.setdefault('dependencies', {})
            deps['docker-image'] = '{}-image_builder'.format(config.kind)

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps['parent'] = '{}-{}'.format(config.kind, parent)
            worker['env']['PARENT_TASK_ID'] = {
                'task-reference': '<parent>',
            }
        if 'index' in task:
            taskdesc['index'] = task['index']

        if task.get('cache', True) and not taskgraph.fast:
            taskdesc['cache'] = {
                'type': 'docker-images.v2',
                'name': image_name,
                'digest-data': digest_data,
            }

        yield taskdesc
Code example #19
    def load_tasks(cls, kind, path, config, params, loaded_tasks):
        # TODO: make this match the pushdate (get it from a parameter rather than vcs)
        pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())

        parameters = {
            'pushlog_id': params.get('pushlog_id', 0),
            'pushdate': pushdate,
            'pushtime': pushdate[8:],
            'year': pushdate[0:4],
            'month': pushdate[4:6],
            'day': pushdate[6:8],
            'project': params['project'],
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'owner': params['owner'],
            'level': params['level'],
            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
                      .format(repo=params['head_repository'], rev=params['head_rev']),
        }

        tasks = []
        templates = Templates(path)
        for image_name in config['images']:
            context_path = os.path.join('testing', 'docker', image_name)
            context_hash = generate_context_hash(context_path)

            image_parameters = dict(parameters)
            image_parameters['context_hash'] = context_hash
            image_parameters['context_path'] = context_path
            image_parameters['artifact_path'] = 'public/image.tar'
            image_parameters['image_name'] = image_name

            image_artifact_path = \
                "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
            if os.environ.get('TASK_ID'):
                destination = os.path.join(
                    os.environ['HOME'],
                    "artifacts/decision_task/image_contexts/{}/context.tar.gz"
                    .format(image_name))
                image_parameters['context_url'] = ARTIFACT_URL.format(
                    os.environ['TASK_ID'], image_artifact_path)
                cls.create_context_tar(context_path, destination, image_name)
            else:
                # skip context generation since this isn't a decision task
                # TODO: generate context tarballs using subdirectory clones in
                # the image-building task so we don't have to worry about this.
                image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path

            image_task = templates.load('image.yml', image_parameters)

            attributes = {'image_name': image_name}

            # As an optimization, if the context hash exists for mozilla-central, that
            # image task ID will be used.  The reasoning is that everything eventually
            # ends up on mozilla-central; if most tasks share a common image for a
            # given context hash, a worker within Taskcluster does not need to contain
            # the same image per branch.
            index_paths = [
                'docker.images.v1.{}.{}.hash.{}'.format(
                    project, image_name, context_hash)
                for project in ['mozilla-central', params['project']]
            ]

            tasks.append(
                cls(kind,
                    'build-docker-image-' + image_name,
                    task=image_task['task'],
                    attributes=attributes,
                    index_paths=index_paths))

        return tasks
Code example #20
def fill_template(config, tasks):
    available_packages = {}
    for task in config.kind_dependencies_tasks:
        if task.kind != 'packages':
            continue
        name = task.label.replace('packages-', '')
        for route in task.task.get('routes', []):
            if route.startswith('index.') and '.hash.' in route:
                # Only keep the hash part of the route.
                h = route.rsplit('.', 1)[1]
                assert DIGEST_RE.match(h)
                available_packages[name] = h
                break

    context_hashes = {}

    for task in order_image_tasks(config, tasks):
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        definition = task.pop('definition', image_name)
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if p not in available_packages:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        # Generating the context hash relies on arguments being set, so we
        # set this now, although it's not the final value (it's a
        # task-reference value, see further below). We add the package routes
        # containing a hash to get the overall docker image hash, so changes
        # to packages will be reflected in the docker image hash.
        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)
        if parent:
            args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(
                parent, context_hashes[parent])

        context_path = os.path.join('taskcluster', 'docker', definition)
        context_hash = generate_context_hash(GECKO, context_path, image_name,
                                             args)
        digest_data = [context_hash]
        context_hashes[image_name] = context_hash

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        # We use the in-tree image_builder image to build docker images, but
        # that can't be used to build the image_builder image itself,
        # obviously. So we fall back to the last snapshot of the image that
        # was uploaded to docker hub.
        if image_name == 'image_builder':
            worker['docker-image'] = 'taskcluster/image_builder@sha256:' + \
                '24ce54a1602453bc93515aecd9d4ad25a22115fbc4b209ddb5541377e9a37315'
            # Keep in sync with the Dockerfile used to generate the
            # docker image whose digest is referenced above.
            worker['volumes'] = [
                '/builds/worker/checkouts',
                '/builds/worker/workspace',
            ]
            cache_name = 'imagebuilder-v1'
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
            # Force images built against the in-tree image builder to
            # have a different digest by adding a fixed string to the
            # hashed data.
            digest_data.append('image_builder')

        worker['caches'] = [{
            'type': 'persistent',
            'name': 'level-{}-{}'.format(config.params['level'], cache_name),
            'mount-point': '/builds/worker/checkouts',
        }]

        for k, v in args.items():
            if k == 'DOCKER_IMAGE_PACKAGES':
                worker['env'][k] = {'task-reference': v}
            else:
                worker['env'][k] = v

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)
                digest_data.append(available_packages[p])

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps[parent] = 'build-docker-image-{}'.format(parent)
            worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
                'task-reference': '<{}>'.format(parent)
            }

        if len(digest_data) > 1:
            kwargs = {'digest_data': digest_data}
        else:
            kwargs = {'digest': digest_data[0]}
        add_optimization(
            config, taskdesc,
            cache_type="docker-images.v1",
            cache_name=image_name,
            **kwargs
        )

        yield taskdesc
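
The final branch above feeds add_optimization either a single pre-computed digest (just the context hash) or the list of pieces to be combined (context hash, package hashes, and the 'image_builder' marker). A minimal, self-contained sketch of that combining step, assuming a sha256-based scheme rather than the exact in-tree add_optimization implementation:

import hashlib

def compute_digest(digest_data):
    # A single entry is used verbatim as the cache digest; multiple entries
    # are hashed together so a change to any one of them changes the digest.
    if len(digest_data) == 1:
        return digest_data[0]
    h = hashlib.sha256()
    for piece in digest_data:
        h.update(piece.encode('utf-8'))
    return h.hexdigest()

print(compute_digest(['0123abcd']))                    # -> '0123abcd'
print(compute_digest(['0123abcd', 'image_builder']))   # -> combined sha256 hex

Either way, any change to the docker context, a package dependency, or the builder marker yields a new digest and therefore a cache miss.
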
Code example #21
0
File: docker_image.py Project: luke-chang/gecko-1
def fill_template(config, tasks):
    available_packages = {}
    for task in config.kind_dependencies_tasks:
        if task.kind != 'packages':
            continue
        name = task.label.replace('packages-', '')
        for route in task.task.get('routes', []):
            if route.startswith('index.') and '.hash.' in route:
                # Only keep the hash part of the route.
                h = route.rsplit('.', 1)[1]
                assert DIGEST_RE.match(h)
                available_packages[name] = h
                break
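        # The matched routes are assumed to have the shape
        # 'index.<namespace>.hash.<digest>'; only the trailing digest
        # component is kept and validated against DIGEST_RE.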

    context_hashes = {}

    for task in order_image_tasks(config, tasks):
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        definition = task.pop('definition', image_name)
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if p not in available_packages:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        # Generating the context hash relies on arguments being set, so we
        # set this now, although it's not the final value (it's a
        # task-reference value, see further below). We add the package routes
        # containing a hash to get the overall docker image hash, so changes
        # to packages will be reflected in the docker image hash.
        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)
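        # order_image_tasks yields parent images before their children, so
        # the parent's context hash has already been recorded in
        # context_hashes by the time a child image is processed.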
        if parent:
            args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(parent, context_hashes[parent])

        context_path = os.path.join('taskcluster', 'docker', definition)
        context_hash = generate_context_hash(
            GECKO, context_path, image_name, args)
        digest_data = [context_hash]
        context_hashes[image_name] = context_hash

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }

        worker = taskdesc['worker']

        # We use the in-tree image_builder image to build docker images, but
        # that can't be used to build the image_builder image itself,
        # obviously. So we fall back to the last snapshot of the image that
        # was uploaded to docker hub.
        if image_name == 'image_builder':
            worker['docker-image'] = 'taskcluster/image_builder@sha256:' + \
                '24ce54a1602453bc93515aecd9d4ad25a22115fbc4b209ddb5541377e9a37315'
            # Keep in sync with the Dockerfile used to generate the
            # docker image whose digest is referenced above.
            worker['volumes'] = [
                '/builds/worker/checkouts',
                '/builds/worker/workspace',
            ]
            cache_name = 'imagebuilder-v1'
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
            # Force images built against the in-tree image builder to
            # have a different digest by adding a fixed string to the
            # hashed data.
            digest_data.append('image_builder')

        worker['caches'] = [{
            'type': 'persistent',
            'name': 'level-{}-{}'.format(config.params['level'], cache_name),
            'mount-point': '/builds/worker/checkouts',
        }]

        for k, v in args.items():
            if k == 'DOCKER_IMAGE_PACKAGES':
                worker['env'][k] = {'task-reference': v}
            else:
                worker['env'][k] = v

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)
                digest_data.append(available_packages[p])

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps[parent] = 'build-docker-image-{}'.format(parent)
            worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
                'task-reference': '<{}>'.format(parent)
            }

        if len(digest_data) > 1:
            kwargs = {'digest_data': digest_data}
        else:
            kwargs = {'digest': digest_data[0]}
        add_optimization(
            config, taskdesc,
            cache_type="docker-images.v1",
            cache_name=image_name,
            **kwargs
        )

        yield taskdesc
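
Both snippets wrap the DOCKER_IMAGE_PACKAGES and DOCKER_IMAGE_PARENT_TASK values in {'task-reference': ...} payloads: when the task is finally created, each '<label>' placeholder is replaced with the taskId of the matching dependency. A simplified stand-in for that substitution, assuming a plain label-to-taskId mapping rather than the real taskgraph helper:

import re

def resolve_task_references(value, dependencies):
    # Replace each '<label>' placeholder with the taskId of that dependency.
    return re.sub(r'<([^>]+)>', lambda m: dependencies[m.group(1)], value)

# Hypothetical taskIds, for illustration only.
deps = {'deb9-mercurial': 'Qf3aXUdeT0-ZrJbGS1QlWg'}
print(resolve_task_references('<deb9-mercurial>', deps))
# -> 'Qf3aXUdeT0-ZrJbGS1QlWg'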