def test_docker_image_default_registry_by_tag(self):
    """With only a top-level REGISTRY file, the tag-form image name uses
    that default registry plus the image's VERSION as the tag."""
    mocked_files = {
        "{}/REGISTRY".format(docker.IMAGE_DIR): "mozilla",
        "{}/myimage/VERSION".format(docker.IMAGE_DIR): "1.2.3",
        "{}/myimage/HASH".format(docker.IMAGE_DIR): "sha256:434...",
    }
    with MockedOpen(mocked_files):
        self.assertEqual(
            docker.docker_image('myimage', by_tag=True),
            "mozilla/myimage:1.2.3",
        )
def build_image(name, tag, args=None):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError("must provide a Docker image name")

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception("image directory does not exist: %s" % image_dir)

    # Fall back to the tag derived from the image's VERSION file.
    tag = tag or docker.docker_image(name, by_tag=True)

    # Stream a normalized context archive straight to the docker daemon.
    context_buffer = BytesIO()
    docker.stream_context_tar(".", image_dir, context_buffer, "", args)
    docker.post_to_docker(context_buffer.getvalue(), "/build", nocache=1, t=tag)

    print(f"Successfully built {name} and tagged with {tag}")

    # A ":latest" tag indicates no VERSION file was found for the image.
    if tag.endswith(":latest"):
        banner = "*" * 50
        for warning_line in (
            banner,
            "WARNING: no VERSION file found in image directory.",
            "Image is not suitable for deploying/pushing.",
            "Create an image suitable for deploying/pushing by creating",
            "a VERSION file in the image directory.",
            banner,
        ):
            print(warning_line)
def build_image(name, tag, args=None):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    # Fall back to the tag derived from the image's VERSION file.
    tag = tag or docker.docker_image(name, by_tag=True)

    # Stream a normalized context archive straight to the docker daemon.
    context_buffer = BytesIO()
    docker.stream_context_tar(GECKO, image_dir, context_buffer, '', args)
    docker.post_to_docker(context_buffer.getvalue(), '/build', nocache=1, t=tag)

    print('Successfully built %s and tagged with %s' % (name, tag))

    # A ":latest" tag indicates no VERSION file was found for the image.
    if tag.endswith(':latest'):
        banner = '*' * 50
        for warning_line in (
            banner,
            'WARNING: no VERSION file found in image directory.',
            'Image is not suitable for deploying/pushing.',
            'Create an image suitable for deploying/pushing by creating',
            'a VERSION file in the image directory.',
            banner,
        ):
            print(warning_line)
def test_docker_image_explicit_registry(self):
    """A per-image REGISTRY file selects the registry; the default
    (digest) form appends the image's HASH."""
    mocked_files = {
        "{}/myimage/REGISTRY".format(docker.IMAGE_DIR): "cool-images",
        "{}/myimage/VERSION".format(docker.IMAGE_DIR): "1.2.3",
        "{}/myimage/HASH".format(docker.IMAGE_DIR): "sha256:434...",
    }
    with MockedOpen(mocked_files):
        self.assertEqual(
            docker.docker_image('myimage'),
            "cool-images/myimage@sha256:434...",
        )
def test_docker_image_explicit_registry_by_tag(self):
    """A per-image REGISTRY file selects the registry; the tag form uses
    the image's VERSION as the tag."""
    mocked_files = {
        "{}/myimage/REGISTRY".format(docker.IMAGE_DIR): "myreg",
        "{}/myimage/VERSION".format(docker.IMAGE_DIR): "1.2.3",
        "{}/myimage/HASH".format(docker.IMAGE_DIR): "sha256:434...",
    }
    with MockedOpen(mocked_files):
        self.assertEqual(
            docker.docker_image("myimage", by_tag=True),
            "myreg/myimage:1.2.3",
        )
def build_image(name):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = os.path.join(IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = docker.docker_image(name, default_version='latest')
    docker_bin = which.which('docker')

    # Verify that Docker is working.
    try:
        subprocess.check_output([docker_bin, '--version'])
    except subprocess.CalledProcessError:
        raise Exception('Docker server is unresponsive. Run `docker ps` and '
                        'check that Docker is running')

    # We obtain a context archive and build from that. Going through the
    # archive creation is important: it normalizes things like file owners
    # and mtimes to increase the chances that image generation is
    # deterministic.
    tmp_fd, ctx_path = tempfile.mkstemp()
    os.close(tmp_fd)
    try:
        docker.create_context_tar(GECKO, image_dir, ctx_path, name)
        docker.build_from_context(docker_bin, ctx_path, name, tag)
    finally:
        os.unlink(ctx_path)

    print('Successfully built %s and tagged with %s' % (name, tag))

    # A ":latest" tag indicates no VERSION file was found for the image.
    if tag.endswith(':latest'):
        banner = '*' * 50
        for warning_line in (
            banner,
            'WARNING: no VERSION file found in image directory.',
            'Image is not suitable for deploying/pushing.',
            'Create an image suitable for deploying/pushing by creating',
            'a VERSION file in the image directory.',
            banner,
        ):
            print(warning_line)
def build_image(name):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = os.path.join(docker.IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = docker.docker_image(name, by_tag=True)
    docker_bin = which.which('docker')

    # Verify that Docker is working.
    try:
        subprocess.check_output([docker_bin, '--version'])
    except subprocess.CalledProcessError:
        raise Exception('Docker server is unresponsive. Run `docker ps` and '
                        'check that Docker is running')

    # We obtain a context archive and build from that. Going through the
    # archive creation is important: it normalizes things like file owners
    # and mtimes to increase the chances that image generation is
    # deterministic.
    tmp_fd, ctx_path = tempfile.mkstemp()
    os.close(tmp_fd)
    try:
        docker.create_context_tar(GECKO, image_dir, ctx_path, name)
        docker.build_from_context(docker_bin, ctx_path, name, tag)
    finally:
        os.unlink(ctx_path)

    print('Successfully built %s and tagged with %s' % (name, tag))

    # A ":latest" tag indicates no VERSION file was found for the image.
    if tag.endswith(':latest'):
        banner = '*' * 50
        for warning_line in (
            banner,
            'WARNING: no VERSION file found in image directory.',
            'Image is not suitable for deploying/pushing.',
            'Create an image suitable for deploying/pushing by creating',
            'a VERSION file in the image directory.',
            banner,
        ):
            print(warning_line)
def fill_template(config, tasks):
    """Yield a full task description for each docker-image task.

    NOTE(review): reads module-level names defined elsewhere in this file
    (GECKO, ROUTE_TEMPLATES, INDEX_PREFIX, generate_context_hash,
    docker_image) — verify against the surrounding module.
    """
    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')

        # Hash of the image's docker context directory; used in index routes
        # and optimization lookups so identical contexts reuse prior builds.
        context_path = os.path.join('taskcluster', 'docker', image_name)
        context_hash = generate_context_hash(GECKO, context_path, image_name)

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Expand every route template with the current push/build parameters.
        routes = []
        for tpl in ROUTE_TEMPLATES:
            routes.append(tpl.format(
                index_prefix=INDEX_PREFIX,
                level=config.params['level'],
                image_name=image_name,
                project=config.params['project'],
                head_rev=config.params['head_rev'],
                pushlog_id=config.params.get('pushlog_id', 0),
                # moz_build_date is YYYYMMDDhhmmss; slices below split it up.
                pushtime=config.params['moz_build_date'][8:],
                year=config.params['moz_build_date'][0:4],
                month=config.params['moz_build_date'][4:6],
                day=config.params['moz_build_date'][6:8],
                context_hash=context_hash,
            ))

        # As an optimization, if the context hash exists for a high level, that image
        # task ID will be used. The reasoning behind this is that eventually everything ends
        # up on level 3 at some point if most tasks use this as a common image
        # for a given context hash, a worker within Taskcluster does not need to contain
        # the same image per branch.
        optimizations = [['index-search', '{}.level-{}.{}.hash.{}'.format(
            INDEX_PREFIX, level, image_name, context_hash)]
            for level in reversed(range(int(config.params['level']), 4))]

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {'image_name': image_name},
            'expires-after': '1 year',
            'routes': routes,
            'optimizations': optimizations,
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-images',
            # can't use {in-tree: ..} here, otherwise we might try to build
            # this image..
            'worker': {
                'implementation': 'docker-worker',
                'docker-image': docker_image('image_builder'),
                'caches': [{
                    'type': 'persistent',
                    'name': 'level-{}-imagebuilder-v1'.format(config.params['level']),
                    'mount-point': '/home/worker/checkouts',
                }],
                'artifacts': [{
                    'type': 'file',
                    'path': '/home/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 3600,
            },
        }

        yield taskdesc
def build_callback_action_task(parameters):
    """Build the task definition that executes an action callback.

    Returns None when the action is not available for *parameters*.

    NOTE(review): reads several names from an enclosing scope (available,
    cb, title, description, source_path, symbol) — presumably this is a
    closure defined per registered action; confirm against the
    surrounding file.
    """
    if not available(parameters):
        return None

    # Derive a scope like assume:repo:hg.mozilla.org/<repo>:* from the
    # head repository URL; reject anything that is not an hg.mozilla.org URL.
    match = re.match(r'https://(hg.mozilla.org)/(.*?)/?$',
                     parameters['head_repository'])
    if not match:
        raise Exception('Unrecognized head_repository')
    repo_scope = 'assume:repo:{}/{}:*'.format(
        match.group(1), match.group(2))

    return {
        # $fromNow / $eval / $dumps are JSON-e operators evaluated later.
        'created': {'$fromNow': ''},
        'deadline': {'$fromNow': '12 hours'},
        'expires': {'$fromNow': '14 days'},
        'metadata': {
            'owner': '*****@*****.**',
            'source': '{}raw-file/{}/{}'.format(
                parameters['head_repository'], parameters['head_rev'],
                source_path,
            ),
            'name': 'Action: {}'.format(title),
            'description': 'Task executing callback for action.\n\n---\n' + description,
        },
        'workerType': 'gecko-decision',
        'provisionerId': 'aws-provisioner-v1',
        'scopes': [
            repo_scope,
        ],
        'tags': {
            'createdForUser': parameters['owner'],
            'kind': 'action-callback',
        },
        'routes': [
            'tc-treeherder.v2.{}.{}.{}'.format(
                parameters['project'], parameters['head_rev'],
                parameters['pushlog_id']),
            'tc-treeherder-stage.v2.{}.{}.{}'.format(
                parameters['project'], parameters['head_rev'],
                parameters['pushlog_id']),
        ],
        'payload': {
            'env': {
                'GECKO_BASE_REPOSITORY': 'https://hg.mozilla.org/mozilla-unified',
                'GECKO_HEAD_REPOSITORY': parameters['head_repository'],
                'GECKO_HEAD_REF': parameters['head_ref'],
                'GECKO_HEAD_REV': parameters['head_rev'],
                'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
                'ACTION_TASK_GROUP_ID': {'$eval': 'taskGroupId'},
                'ACTION_TASK_ID': {'$dumps': {'$eval': 'taskId'}},
                'ACTION_TASK': {'$dumps': {'$eval': 'task'}},
                'ACTION_INPUT': {'$dumps': {'$eval': 'input'}},
                'ACTION_CALLBACK': cb.__name__,
                'ACTION_PARAMETERS': {'$dumps': {'$eval': 'parameters'}},
            },
            'cache': {
                'level-{}-checkouts'.format(parameters['level']):
                    '/home/worker/checkouts',
            },
            'features': {
                'taskclusterProxy': True,
                'chainOfTrust': True,
            },
            'image': docker_image('decision'),
            'maxRunTime': 1800,
            'command': [
                '/home/worker/bin/run-task',
                '--vcs-checkout=/home/worker/checkouts/gecko',
                '--', 'bash', '-cx',
                """\
cd /home/worker/checkouts/gecko &&
ln -s /home/worker/artifacts artifacts &&
./mach --log-no-times taskgraph action-callback""",
            ],
        },
        'extra': {
            'treeherder': {
                'groupName': 'action-callback',
                'groupSymbol': 'AC',
                'symbol': symbol,
            },
        },
    }
def fill_template(config, tasks):
    """Yield a full task description for each docker-image task.

    NOTE(review): reads module-level names defined elsewhere in this file
    (GECKO, ROUTE_TEMPLATES, INDEX_PREFIX, generate_context_hash,
    docker_image) — verify against the surrounding module.
    """
    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')

        # Hash of the image's docker context directory; used in index routes
        # and optimization lookups so identical contexts reuse prior builds.
        context_path = os.path.join('taskcluster', 'docker', image_name)
        context_hash = generate_context_hash(GECKO, context_path, image_name)

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        # Expand every route template with the current push/build parameters.
        routes = []
        for tpl in ROUTE_TEMPLATES:
            routes.append(
                tpl.format(
                    index_prefix=INDEX_PREFIX,
                    level=config.params['level'],
                    image_name=image_name,
                    project=config.params['project'],
                    head_rev=config.params['head_rev'],
                    pushlog_id=config.params.get('pushlog_id', 0),
                    # moz_build_date is YYYYMMDDhhmmss; slices split it up.
                    pushtime=config.params['moz_build_date'][8:],
                    year=config.params['moz_build_date'][0:4],
                    month=config.params['moz_build_date'][4:6],
                    day=config.params['moz_build_date'][6:8],
                    context_hash=context_hash,
                ))

        # As an optimization, if the context hash exists for a high level, that image
        # task ID will be used. The reasoning behind this is that eventually everything ends
        # up on level 3 at some point if most tasks use this as a common image
        # for a given context hash, a worker within Taskcluster does not need to contain
        # the same image per branch.
        optimizations = [[
            'index-search',
            '{}.level-{}.{}.hash.{}'.format(INDEX_PREFIX, level, image_name,
                                            context_hash)
        ] for level in reversed(range(int(config.params['level']), 4))]

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': 'build-docker-image-' + image_name,
            'description': description,
            'attributes': {
                'image_name': image_name
            },
            'expires-after': '1 year',
            'routes': routes,
            'optimizations': optimizations,
            'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'aws-provisioner-v1/gecko-{}-images'.format(
                config.params['level']),
            # can't use {in-tree: ..} here, otherwise we might try to build
            # this image..
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'docker-image': docker_image('image_builder'),
                'caches': [{
                    'type': 'persistent',
                    'name': 'level-{}-imagebuilder-v1'.format(config.params['level']),
                    'mount-point': '/builds/worker/checkouts',
                }],
                'volumes': [
                    # Keep in sync with Dockerfile and TASKCLUSTER_VOLUMES
                    '/builds/worker/checkouts',
                    '/builds/worker/workspace',
                ],
                'artifacts': [{
                    'type': 'file',
                    'path': '/builds/worker/workspace/artifacts/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                    'TASKCLUSTER_VOLUMES':
                        '/builds/worker/checkouts;/builds/worker/workspace',
                },
                'chain-of-trust': True,
                'docker-in-docker': True,
                'taskcluster-proxy': True,
                'max-run-time': 7200,
            },
        }

        yield taskdesc
def build_callback_action_task(parameters):
    """Build the task definition that executes an action callback.

    Returns None when the action is not available for *parameters*.

    NOTE(review): reads several names from an enclosing scope (available,
    cb, title, description, source_path, symbol) — presumably this is a
    closure defined per registered action; confirm against the
    surrounding file.
    """
    if not available(parameters):
        return None

    # Derive a scope like assume:repo:hg.mozilla.org/<repo>:* from the
    # head repository URL; reject anything that is not an hg.mozilla.org URL.
    match = re.match(r'https://(hg.mozilla.org)/(.*?)/?$',
                     parameters['head_repository'])
    if not match:
        raise Exception('Unrecognized head_repository')
    repo_scope = 'assume:repo:{}/{}:*'.format(
        match.group(1), match.group(2))

    return {
        # $fromNow / $eval / $dumps are JSON-e operators evaluated later.
        'created': {'$fromNow': ''},
        'deadline': {'$fromNow': '12 hours'},
        'expires': {'$fromNow': '14 days'},
        'metadata': {
            'owner': '*****@*****.**',
            'source': '{}raw-file/{}/{}'.format(
                parameters['head_repository'], parameters['head_rev'],
                source_path,
            ),
            'name': 'Action: {}'.format(title),
            'description': 'Task executing callback for action.\n\n---\n' + description,
        },
        'workerType': 'gecko-decision',
        'provisionerId': 'aws-provisioner-v1',
        'scopes': [
            repo_scope,
        ],
        'tags': {
            'createdForUser': parameters['owner'],
            'kind': 'action-callback',
        },
        'routes': [
            'tc-treeherder.v2.{}.{}.{}'.format(
                parameters['project'], parameters['head_rev'],
                parameters['pushlog_id']),
            'tc-treeherder-stage.v2.{}.{}.{}'.format(
                parameters['project'], parameters['head_rev'],
                parameters['pushlog_id']),
        ],
        'payload': {
            'env': {
                'GECKO_BASE_REPOSITORY': 'https://hg.mozilla.org/mozilla-unified',
                'GECKO_HEAD_REPOSITORY': parameters['head_repository'],
                'GECKO_HEAD_REF': parameters['head_ref'],
                'GECKO_HEAD_REV': parameters['head_rev'],
                'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
                'ACTION_TASK_GROUP_ID': {'$eval': 'taskGroupId'},
                'ACTION_TASK_ID': {'$dumps': {'$eval': 'taskId'}},
                'ACTION_TASK': {'$dumps': {'$eval': 'task'}},
                'ACTION_INPUT': {'$dumps': {'$eval': 'input'}},
                'ACTION_CALLBACK': cb.__name__,
                'ACTION_PARAMETERS': {'$dumps': {'$eval': 'parameters'}},
            },
            'cache': {
                'level-{}-checkouts'.format(parameters['level']):
                    '/home/worker/checkouts',
            },
            'features': {
                'taskclusterProxy': True,
                'chainOfTrust': True,
            },
            'image': docker_image('decision'),
            'maxRunTime': 1800,
            'command': [
                '/home/worker/bin/run-task',
                '--vcs-checkout=/home/worker/checkouts/gecko',
                '--', 'bash', '-cx',
                """\
cd /home/worker/checkouts/gecko &&
ln -s /home/worker/artifacts artifacts &&
./mach --log-no-times taskgraph action-callback""",
            ],
        },
        'extra': {
            'treeherder': {
                'groupName': 'action-callback',
                'groupSymbol': 'AC',
                'symbol': symbol,
            },
        },
    }