def build_image(name, tag, args=None):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = tag or docker.docker_image(name, by_tag=True)

    buf = BytesIO()
    docker.stream_context_tar(GECKO, image_dir, buf, '', args)
    docker.post_to_docker(buf.getvalue(), '/build', nocache=1, t=tag)

    print('Successfully built %s and tagged with %s' % (name, tag))

    if tag.endswith(':latest'):
        print('*' * 50)
        print('WARNING: no VERSION file found in image directory.')
        print('Image is not suitable for deploying/pushing.')
        print('Create an image suitable for deploying/pushing by creating')
        print('a VERSION file in the image directory.')
        print('*' * 50)

def build_image(name, tag, args=None):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError("must provide a Docker image name")

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception("image directory does not exist: %s" % image_dir)

    tag = tag or docker.docker_image(name, by_tag=True)

    buf = BytesIO()
    docker.stream_context_tar(".", image_dir, buf, "", args)
    docker.post_to_docker(buf.getvalue(), "/build", nocache=1, t=tag)

    print(f"Successfully built {name} and tagged with {tag}")

    if tag.endswith(":latest"):
        print("*" * 50)
        print("WARNING: no VERSION file found in image directory.")
        print("Image is not suitable for deploying/pushing.")
        print("Create an image suitable for deploying/pushing by creating")
        print("a VERSION file in the image directory.")
        print("*" * 50)
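
# --- Usage sketch (hypothetical): build one in-tree image by name. ---
# Assumes this module's `docker` helpers are importable and that
# "debian12-base" names a directory under the in-tree images path; both are
# illustrative, not taken from the code above.
def _example_build():
    # tag=None falls back to the tag derived by docker.docker_image();
    # args become Dockerfile build arguments.
    build_image("debian12-base", tag=None, args={"ARCH": "amd64"})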

def build_context(name, outputFile, args=None):
    """Build a context.tar for image with specified name."""
    if not name:
        raise ValueError("must provide a Docker image name")
    if not outputFile:
        raise ValueError("must provide an outputFile")

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception("image directory does not exist: %s" % image_dir)

    docker.create_context_tar(".", image_dir, outputFile, args)

def build_context(name, outputFile, args=None):
    """Build a context.tar for image with specified name."""
    if not name:
        raise ValueError('must provide a Docker image name')
    if not outputFile:
        raise ValueError('must provide an outputFile')

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    docker.create_context_tar(GECKO, image_dir, outputFile, "", args)
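
# --- Usage sketch (hypothetical): materialize a build context on disk. ---
# Useful for inspecting exactly what would be sent to the Docker daemon, or
# for hashing the context without building. The output path is illustrative.
def _example_context():
    build_context("debian12-base", "/tmp/debian12-base-context.tar")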

def build_image(name, args=None):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = docker.docker_image(name, by_tag=True)

    docker_bin = which.which('docker')

    # Verify that Docker is working.
    try:
        subprocess.check_output([docker_bin, '--version'])
    except subprocess.CalledProcessError:
        raise Exception('Docker server is unresponsive. Run `docker ps` and '
                        'check that Docker is running')

    # We obtain a context archive and build from that. Going through the
    # archive creation is important: it normalizes things like file owners
    # and mtimes to increase the chances that image generation is
    # deterministic.
    fd, context_path = tempfile.mkstemp()
    os.close(fd)
    try:
        docker.create_context_tar(GECKO, image_dir, context_path, name, args)
        docker.build_from_context(docker_bin, context_path, name, tag)
    finally:
        os.unlink(context_path)

    print('Successfully built %s and tagged with %s' % (name, tag))

    if tag.endswith(':latest'):
        print('*' * 50)
        print('WARNING: no VERSION file found in image directory.')
        print('Image is not suitable for deploying/pushing.')
        print('Create an image suitable for deploying/pushing by creating')
        print('a VERSION file in the image directory.')
        print('*' * 50)
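
# The comment in build_image above explains why the build goes through a
# context archive: tar creation normalizes file owners and mtimes so image
# generation is more deterministic. A minimal sketch of that normalization
# (an illustration, not the real create_context_tar):
import tarfile

def _normalize(tarinfo):
    # Fixed ownership and timestamp make the archive byte-for-byte
    # reproducible for identical inputs.
    tarinfo.uid = tarinfo.gid = 0
    tarinfo.uname = tarinfo.gname = ""
    tarinfo.mtime = 0
    return tarinfo

def write_normalized_tar(src_dir, out_path):
    # Python 3.7+ adds directory entries in sorted order, which also helps
    # keep the archive deterministic.
    with tarfile.open(out_path, "w") as tf:
        tf.add(src_dir, arcname=".", filter=_normalize)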

def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop("name")
        job_symbol = task.pop("symbol")
        args = task.pop("args", {})
        packages = task.pop("packages", [])
        parent = task.pop("parent", None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception("Missing package job for {}-{}: {}".format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(
                    CONTEXTS_DIR, "{}.tar.gz".format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = "0" * 40

        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = "Build the docker image {} for use by dependent tasks".format(
            image_name)

        args["DOCKER_IMAGE_PACKAGES"] = " ".join(
            "<{}>".format(p) for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = "3" if int(config.params["level"]) == 1 else "10"

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            "label": "{}-{}".format(config.kind, image_name),
            "description": description,
            "attributes": {
                "image_name": image_name,
                "artifact_prefix": "public",
            },
            "expires-after": "1 year",
            "scopes": [],
            "treeherder": {
                "symbol": job_symbol,
                "platform": "taskcluster-images/opt",
                "kind": "other",
                "tier": 1,
            },
            "run-on-projects": [],
            "worker-type": "images",
            "worker": {
                "implementation": "docker-worker",
                "os": "linux",
                "artifacts": [{
                    "type": "file",
                    "path": "/workspace/image.tar.zst",
                    "name": "public/image.tar.zst",
                }],
                "env": {
                    "CONTEXT_TASK_ID": {"task-reference": "<decision>"},
                    "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format(
                        image_name),
                    "HASH": context_hash,
                    "PROJECT": config.params["project"],
                    "IMAGE_NAME": image_name,
                    "DOCKER_IMAGE_ZSTD_LEVEL": zstd_level,
                    "DOCKER_BUILD_ARGS": {
                        "task-reference": six.ensure_text(json.dumps(args))
                    },
                    "GECKO_BASE_REPOSITORY": config.params["base_repository"],
                    "GECKO_HEAD_REPOSITORY": config.params["head_repository"],
                    "GECKO_HEAD_REV": config.params["head_rev"],
                },
                "chain-of-trust": True,
                "max-run-time": 7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }

        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ["funsize-update-generator"]:
            taskdesc["worker"]["retry-exit-status"] = [-1]

        worker = taskdesc["worker"]

        if image_name == "image_builder":
            worker["docker-image"] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker["docker-image"] = {"in-tree": "image_builder"}
            deps = taskdesc.setdefault("dependencies", {})
            deps["docker-image"] = "{}-image_builder".format(config.kind)

        if packages:
            deps = taskdesc.setdefault("dependencies", {})
            for p in sorted(packages):
                deps[p] = "packages-{}".format(p)

        if parent:
            deps = taskdesc.setdefault("dependencies", {})
            deps["parent"] = "{}-{}".format(config.kind, parent)
            worker["env"]["PARENT_TASK_ID"] = {
                "task-reference": "<parent>",
            }

        if "index" in task:
            taskdesc["index"] = task["index"]

        if task.get("cache", True) and not taskgraph.fast:
            taskdesc["cache"] = {
                "type": "docker-images.v2",
                "name": image_name,
                "digest-data": digest_data,
            }

        yield taskdesc
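
# --- Illustrative input for fill_template (field values are hypothetical,
# mirroring the keys popped at the top of the loop above): ---
example_task = {
    "name": "debian12-base",    # becomes IMAGE_NAME and the cache name
    "symbol": "I(deb12)",       # treeherder symbol
    "args": {"ARCH": "amd64"},  # forwarded via DOCKER_BUILD_ARGS
    "packages": [],             # each entry adds a packages-<name> dependency
    "parent": None,             # optional in-tree base image
}
# taskdescs = list(fill_template(config, [example_task]))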

def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(
                    CONTEXTS_DIR, '{}.tar.gz'.format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = '0' * 40

        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        args['DOCKER_IMAGE_PACKAGES'] = ' '.join(
            '<{}>'.format(p) for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': '{}-{}'.format(config.kind, image_name),
            'description': description,
            'attributes': {
                'image_name': image_name,
                'artifact_prefix': 'public',
            },
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': [],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'images',
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/workspace/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'CONTEXT_TASK_ID': {'task-reference': "<decision>"},
                    'CONTEXT_PATH': "public/docker-contexts/{}.tar.gz".format(
                        image_name),
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'DOCKER_BUILD_ARGS': {
                        'task-reference': six.ensure_text(json.dumps(args))
                    },
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'max-run-time': 7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }

        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        if image_name == 'image_builder':
            worker['docker-image'] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            deps = taskdesc.setdefault('dependencies', {})
            deps['docker-image'] = '{}-image_builder'.format(config.kind)

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps['parent'] = '{}-{}'.format(config.kind, parent)
            worker['env']['PARENT_TASK_ID'] = {
                'task-reference': '<parent>',
            }

        if 'index' in task:
            taskdesc['index'] = task['index']

        if task.get('cache', True) and not taskgraph.fast:
            taskdesc['cache'] = {
                'type': 'docker-images.v2',
                'name': image_name,
                'digest-data': digest_data,
            }

        yield taskdesc
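
# How digest-data is consumed lives outside this file; as an assumption-level
# sketch, the cache machinery can be thought of as hashing the ordered
# digest-data list (context hash, serialized args, image-builder pin) into a
# single cache key. This is an illustration, not taskgraph's implementation.
import hashlib

def _cache_digest(digest_data):
    h = hashlib.sha256()
    for item in digest_data:
        h.update(item.encode("utf-8"))
    return h.hexdigest()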