def append(self):
    """Append the build context as a new layer on the base image and push it.

    If running in a notebook, the notebook is first exported to a tar.gz
    context; otherwise the current directory is tarred up. The appended
    image is pushed to self.full_image_name(), defaulting the tag to the
    new image's digest when no tag was given.

    Fix: Logger.warn is deprecated; use Logger.warning instead.
    """
    if notebook_helper.is_in_notebook():
        notebook_helper.export_notebook_to_tar_gz(
            self.notebook_file,
            TEMP_TAR_GZ_FILENAME,
            converted_filename=self.get_python_entrypoint())
    else:
        utils.generate_context_tarball(".", TEMP_TAR_GZ_FILENAME)
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    src = docker_name.Tag(self.base_image)
    creds = docker_creds.DefaultKeychain.Resolve(src)
    with v2_2_image.FromRegistry(src, creds, transport) as src_image:
        with open(TEMP_TAR_GZ_FILENAME, 'rb') as f:
            # 'append' here is the containerregistry append module, not
            # this method (the method name does not shadow the global).
            new_img = append.Layer(src_image, f.read())
    if self.image_tag is None:
        # Default the tag to the hex part of the new image's digest.
        self.image_tag = new_img.digest().split(":")[1]
    dst = docker_name.Tag(self.full_image_name())
    creds = docker_creds.DefaultKeychain.Resolve(dst)
    with docker_session.Push(dst, creds, transport, threads=_THREADS,
                             mount=[src.as_repository()]) as session:
        logger.warning("Uploading {}".format(self.full_image_name()))
        session.upload(new_img)
    os.remove(TEMP_TAR_GZ_FILENAME)
    logger.warning("Pushed image {}".format(self.full_image_name()))
def main():
    """Publish the images named in --image_spec, then emit --template with
    each image reference resolved to its published digest.

    Exits non-zero if any published image reference is never used by the
    template.

    Fix: each 'key=value' part is now split on the FIRST '=' only.
    The previous maxsplit of 2 produced 3-element lists for values that
    contain '=' (e.g. base64 data), which dict() rejects with a
    ValueError.
    """
    args = parser.parse_args()
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    unseen_strings = set()
    overrides = {}
    # TODO(mattmoor): Execute these in a threadpool and
    # aggregate the results as they complete.
    for spec in args.image_spec or []:
        parts = spec.split(';')
        # Split on the first '=' only so values containing '=' still parse.
        kwargs = dict([x.split('=', 1) for x in parts])
        (tag, digest) = Publish(transport, args.image_chroot, **kwargs)
        overrides[tag] = digest
        unseen_strings.add(tag)
    with open(args.template, 'r') as f:
        inputs = f.read()

    def _StringToDigest(t):
        # Track which published references the template actually used.
        if t in unseen_strings:
            unseen_strings.remove(t)
        return StringToDigest(t, overrides, transport)

    content = _DOCUMENT_DELIMITER.join([
        Resolve(x, _StringToDigest)
        for x in inputs.split(_DOCUMENT_DELIMITER)
    ])
    if len(unseen_strings) > 0:
        print('The following image references were not found: [%s]' %
              "\n".join([str(x) for x in unseen_strings]),
              file=sys.stderr)
        sys.exit(1)
    print(content)
def main():
    """Pull an image from a registry and save it to --tarball (v2.2 save)."""
    args = parser.parse_args()
    if not (args.name and args.tarball):
        raise Exception('--name and --tarball are required arguments.')

    pool = transport_pool.Http(httplib2.Http, size=8)

    name = (docker_name.Digest(args.name) if '@' in args.name
            else docker_name.Tag(args.name))

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with tarfile.open(name=args.tarball, mode='w') as tar:
        # Prefer the registry's v2.2 manifest when one exists.
        with v2_2_image.FromRegistry(name, creds, pool) as v2_2_img:
            if v2_2_img.exists():
                save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                return
        # Otherwise upgrade the v2 manifest to v2.2 on the fly.
        with v2_image.FromRegistry(name, creds, pool) as v2_img:
            with v2_compat.V22FromV2(v2_img) as v2_2_img:
                save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                return
def main():
    """Append --tarball as a new layer on --src_image and push as --dst_image."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Push-by-digest is supported by the library, but users essentially
    # never supply digests by hand, so tags are assumed here.
    source = docker_name.Tag(args.src_image)
    target = docker_name.Tag(args.dst_image)

    # Credentials follow the standard Docker client resolution logic.
    source_creds = docker_creds.DefaultKeychain.Resolve(source)
    logging.info('Pulling v2.2 image from %r ...', source)
    with v2_2_image.FromRegistry(source, source_creds, pool) as base:
        with open(args.tarball, 'rb') as layer:
            appended = append.Layer(base, layer.read())

    target_creds = docker_creds.DefaultKeychain.Resolve(target)
    with docker_session.Push(target, target_creds, pool, threads=_THREADS,
                             mount=[source.as_repository()]) as session:
        logging.info('Starting upload ...')
        session.upload(appended)

    digest = appended.digest()
    print(('{name} was published with digest: {digest}'.format(
        name=target, digest=digest)))
def main():
    """Download an image and write its layers to --directory (fast format)."""
    args = parser.parse_args()
    if not (args.name and args.directory):
        raise Exception('--name and --directory are required arguments.')

    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    name = (docker_name.Digest(args.name) if '@' in args.name
            else docker_name.Tag(args.name))

    # OCI Image Manifest is compatible with Docker Image Manifest Version 2,
    # Schema 2; advertise support for both media types via 'Accept' headers.
    #
    # For reference:
    #   OCI: https://github.com/opencontainers/image-spec
    #   Docker: https://docs.docker.com/registry/spec/manifest-v2-2/
    accept = docker_http.SUPPORTED_MANIFEST_MIMES

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    # Prefer a v2.2 manifest; otherwise upgrade a v2 manifest on the fly.
    with v2_2_image.FromRegistry(name, creds, pool, accept) as v2_2_img:
        if v2_2_img.exists():
            save.fast(v2_2_img, args.directory, threads=_THREADS)
            return
    with v2_image.FromRegistry(name, creds, pool) as v2_img:
        with v2_compat.V22FromV2(v2_img) as v2_2_img:
            save.fast(v2_2_img, args.directory, threads=_THREADS)
            return
def main():
    """Push the v2.2 image stored in --tarball to the registry under --name."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not (args.name and args.tarball):
        raise Exception('--name and --tarball are required arguments.')

    factory = retry.Factory().WithSourceTransportCallable(httplib2.Http)
    pool = transport_pool.Http(factory.Build, size=_THREADS)

    # Push-by-digest is supported by the library, but users essentially
    # never supply digests by hand, so a (possibly stamped) tag is assumed.
    name = Tag(args.name, args.stamp_info_file)

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, pool, threads=_THREADS) as session:
        logging.info('Reading v2.2 image from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as v2_2_img:
            logging.info('Starting upload ...')
            if args.oci:
                # Convert to the OCI image format before uploading.
                with oci_compat.OCIFromV22(v2_2_img) as oci_img:
                    session.upload(oci_img)
                    digest = oci_img.digest()
            else:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()

    print('{name} was published with digest: {digest}'.format(
        name=name, digest=digest))
def main():
    """Download an image and write its layers to --directory (fast format)."""
    args = parser.parse_args()
    if not (args.name and args.directory):
        raise Exception('--name and --directory are required arguments.')

    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    name = (docker_name.Digest(args.name) if '@' in args.name
            else docker_name.Tag(args.name))

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    # Prefer a v2.2 manifest; otherwise upgrade a v2 manifest on the fly.
    with v2_2_image.FromRegistry(name, creds, pool) as v2_2_img:
        if v2_2_img.exists():
            save.fast(v2_2_img, args.directory, threads=_THREADS)
            return
    with v2_image.FromRegistry(name, creds, pool) as v2_img:
        with v2_compat.V22FromV2(v2_img) as v2_2_img:
            save.fast(v2_2_img, args.directory, threads=_THREADS)
            return
def main():
    """Append --tarball as a layer onto --src-image and push as --dst-image."""
    args = parser.parse_args()
    if not (args.src_image and args.tarball and args.dst_image):
        raise Exception('--src-image, --dst-image and --tarball are required '
                        'arguments.')

    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Push-by-digest is supported by the library, but users essentially
    # never supply digests by hand, so tags are assumed here.
    source = docker_name.Tag(args.src_image)
    target = docker_name.Tag(args.dst_image)

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(source)
    with v2_2_image.FromRegistry(source, creds, pool) as base:
        with open(args.tarball, 'rb') as layer:
            extended = append.Layer(base, layer.read())

    creds = docker_creds.DefaultKeychain.Resolve(target)
    with docker_session.Push(target, creds, pool,
                             threads=_THREADS) as session:
        session.upload(extended)
def main():
    """Assemble an image from --config/--layer/--digest (or --tarball) and
    push it to the registry under --name, printing the resulting digest."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not args.name:
        raise Exception('--name is a required arguments.')

    # Push-by-digest is supported by the library, but users essentially
    # never supply digests by hand, so a (possibly stamped) tag is assumed.
    name = Tag(args.name, args.stamp_info_file)

    if not args.config and (args.layer or args.digest):
        raise Exception(
            'Using --layer or --digest requires --config to be specified.')
    if not args.config and not args.tarball:
        raise Exception('Either --config or --tarball must be specified.')

    # Prefer an explicit config; otherwise fall back to the tarball's config.
    config = args.config
    if args.config:
        logging.info('Reading config from %r', args.config)
        with open(args.config, 'r') as reader:
            config = reader.read()
    elif args.tarball:
        logging.info('Reading config from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()

    if len(args.digest or []) != len(args.layer or []):
        raise Exception('--digest and --layer must have matching lengths.')

    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, pool, threads=_THREADS) as session:
        logging.info('Loading v2.2 image from disk ...')
        with v2_2_image.FromDisk(config,
                                 zip(args.digest or [], args.layer or []),
                                 legacy_base=args.tarball) as v2_2_img:
            logging.info('Starting upload ...')
            if args.oci:
                # Convert to the OCI image format before uploading.
                with oci_compat.OCIFromV22(v2_2_img) as oci_img:
                    session.upload(oci_img)
                    digest = oci_img.digest()
            else:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()

    print('{name} was published with digest: {digest}'.format(
        name=name, digest=digest))
def _get_digests(self, repo):
    """Return the hex sha256 digests (without the 'sha256:' prefix) of all
    manifests in *repo*.

    Fix: removed an unreachable 'raise AssertionError' that followed the
    unconditional return inside the 'with' block.
    """
    name = docker_name.Repository(repo)
    creds = docker_creds.DefaultKeychain.Resolve(name)
    transport = transport_pool.Http(httplib2.Http)
    with docker_image.FromRegistry(name, creds, transport) as img:
        return [d[len('sha256:'):] for d in img.manifests()]
def build(self):
    """Will be called when the build needs to start"""
    transport = transport_pool.Http(httplib2.Http)
    src = docker_name.Tag(self.base_image, strict=False)
    # Fix: Logger.warn is deprecated; use Logger.warning (this also
    # matches the sibling builder implementation in this codebase).
    logger.warning("Building image...")
    start = timer()
    new_img = self._build(transport, src)
    end = timer()
    logger.warning("Image successfully built in {}s.".format(end - start))
    self.timed_push(transport, src, new_img)
def _fetch_lyr_shas(self, img_name):
    """Return the set of layer digests of the image tagged *img_name*."""
    tag = docker_name.Tag(img_name)
    creds = docker_creds.DefaultKeychain.Resolve(tag)
    pool = transport_pool.Http(httplib2.Http, size=_THREADS)
    with docker_image.FromRegistry(tag, creds, pool) as image:
        manifest = json.loads(image.manifest())
        # Collect each layer's digest directly into a set.
        return {layer['digest'] for layer in manifest['layers']}
def _get_tags(self, repo, digest):
    """Return the tags currently pointing at repo@sha256:digest.

    Fix: removed an unreachable 'raise AssertionError' that followed the
    unconditional return inside the 'with' block.
    """
    full_digest = repo + '@sha256:' + digest
    name = docker_name.Digest(full_digest)
    creds = docker_creds.DefaultKeychain.Resolve(name)
    transport = transport_pool.Http(httplib2.Http)
    with docker_image.FromRegistry(name, creds, transport) as img:
        return img.tags()
def main():
    """Pull an image (v2.2 preferred, v2 fallback) and save it to --tarball.

    Exits with status 1 on missing arguments, credential-resolution
    failure, or any error during pull/save.
    """
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)
    if not args.name or not args.tarball:
        logging.fatal('--name and --tarball are required arguments.')
        sys.exit(1)
    # Wrap the HTTP transport in a retrying factory.
    retry_factory = retry.Factory()
    retry_factory = retry_factory.WithSourceTransportCallable(httplib2.Http)
    transport = transport_pool.Http(retry_factory.Build, size=8)
    # Accept either a by-digest or a by-tag reference.
    if '@' in args.name:
        name = docker_name.Digest(args.name)
    else:
        name = docker_name.Tag(args.name)
    # OCI Image Manifest is compatible with Docker Image Manifest Version 2,
    # Schema 2. We indicate support for both formats by passing both media
    # types as 'Accept' headers.
    #
    # For reference:
    #   OCI: https://github.com/opencontainers/image-spec
    #   Docker: https://docs.docker.com/registry/spec/manifest-v2-2/
    accept = docker_http.SUPPORTED_MANIFEST_MIMES
    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    try:
        creds = docker_creds.DefaultKeychain.Resolve(name)
    # pylint: disable=broad-except
    except Exception as e:
        logging.fatal('Error resolving credentials for %s: %s', name, e)
        sys.exit(1)
    try:
        with tarfile.open(name=args.tarball, mode='w') as tar:
            logging.info('Pulling v2.2 image from %r ...', name)
            with v2_2_image.FromRegistry(name, creds, transport,
                                         accept) as v2_2_img:
                if v2_2_img.exists():
                    save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                    return
            # No v2.2 manifest: fall back to v2, upgraded on the fly.
            logging.info('Pulling v2 image from %r ...', name)
            with v2_image.FromRegistry(name, creds, transport) as v2_img:
                with v2_compat.V22FromV2(v2_img) as v2_2_img:
                    save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                    return
    # pylint: disable=broad-except
    except Exception as e:
        logging.fatal('Error pulling and saving image %s: %s', name, e)
        sys.exit(1)
def main():
    """Retag a hard-coded test digest as a fixed tag in the fake registry."""
    digest = 'fake.gcr.io/test/test@sha256:' + DIGEST
    tag = 'fake.gcr.io/test/test:tag'
    source = docker_name.Digest(digest)
    target = docker_name.Tag(tag)
    source_creds = docker_creds.DefaultKeychain.Resolve(source)
    pool = transport_pool.Http(httplib2.Http)
    with docker_image.FromRegistry(source, source_creds, pool) as image:
        # Only push when the source image actually exists.
        if not image.exists():
            return
        target_creds = docker_creds.DefaultKeychain.Resolve(target)
        with docker_session.Push(target, target_creds, pool) as push:
            push.upload(image)
def _mk_transport_pool(
    size=8,
    disable_ssl_certificate_validation=False,
):
    """Build a pooled, retrying HTTP transport for registry access."""
    # XXX: should cache transport-pools iff image-references refer to same oauth-domain
    # XXX: pass `disable_ssl_certificate_validation`-arg from calling functions
    http_ctor = functools.partial(
        httplib2.Http,
        disable_ssl_certificate_validation=disable_ssl_certificate_validation)
    factory = retry.Factory().WithSourceTransportCallable(http_ctor)
    return transport_pool.Http(factory.Build, size=size)
def get_digest_from_prefix(self, repo, prefix):
    """Return the unique digest in *repo* that starts with *prefix*.

    Raises AssertionError when no digest, or more than one, matches.
    """
    name = docker_name.Repository(repo)
    creds = docker_creds.DefaultKeychain.Resolve(name)
    pool = transport_pool.Http(httplib2.Http)
    with docker_image.FromRegistry(name, creds, pool) as image:
        # Strip the 'sha256:' prefix, then keep only matching digests.
        matches = [
            d[len('sha256:'):] for d in image.manifests()
            if d[len('sha256:'):].startswith(prefix)
        ]
        if not matches:
            raise AssertionError(
                '{0} is not a valid prefix'.format(prefix))
        if len(matches) > 1:
            raise AssertionError(
                '{0} is not a unique digest prefix'.format(prefix))
        return matches[0]
def get_existing_tags(self, full_repo, digest):
    """Return the tags applied to full_repo@sha256:digest, or [] if the
    image cannot be found."""
    full_digest = full_repo + '@sha256:' + digest
    name = docker_name.Digest(full_digest)
    creds = docker_creds.DefaultKeychain.Resolve(name)
    pool = transport_pool.Http(httplib2.Http)
    existing_tags = []
    with docker_image.FromRegistry(name, creds, pool) as image:
        if image.exists():
            existing_tags = image.tags()
        else:
            logging.debug("""Unable to get existing tags for {0} as the image can't be found""".format(full_digest))
    return existing_tags
def _del_img_from_gcr(self, img_name):
    """Delete the tag *img_name* and the digest it points at from GCR."""
    tag = docker_name.Tag(img_name)
    creds = docker_creds.DefaultKeychain.Resolve(tag)
    pool = transport_pool.Http(httplib2.Http, size=_THREADS)
    with docker_image.FromRegistry(tag, creds, pool) as base_image:
        # Digest reference = repository part of self._name + '@' + digest.
        repo = self._name.split(":")[0]
        img_digest = docker_name.Digest(
            "{0}@{1}".format(repo, str(base_image.digest())))
        logging.info('Deleting tag {0}'.format(tag))
        docker_session.Delete(tag, creds, pool)
        logging.info('Deleting image {0}'.format(img_digest))
        docker_session.Delete(img_digest, creds, pool)
def main():
    """Publish the images named in --image_spec (with stamping), then emit
    --template with image references resolved to digests.

    Exits non-zero on publish failure, or when a published reference is
    unused by the template and --allow_unused_images is not set.

    Fix: each 'key=value' part is now split on the FIRST '=' only.
    The previous maxsplit of 2 produced 3-element lists for values that
    contain '=' (e.g. base64 data), which dict() rejects with a
    ValueError.
    """
    args = parser.parse_args()
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    unseen_strings = set()
    overrides = {}

    # Generate stamp info from workspace_status_files.
    stamp_files = _read_stamp_files(args.stamp_info_file)
    stamp_info = _generate_stamp_info(stamp_files)

    # TODO(mattmoor): Execute these in a threadpool and
    # aggregate the results as they complete.
    for spec in args.image_spec or []:
        parts = spec.split(';')
        # Split on the first '=' only so values containing '=' still parse.
        kwargs = dict([x.split('=', 1) for x in parts])
        try:
            (tag, published_tag, digest) = Publish(transport,
                                                   args.image_chroot,
                                                   stamp_info, **kwargs)
            overrides[tag] = digest
            unseen_strings.add(tag)
        except Exception as e:
            logging.fatal('Error publishing provided image: %s', e)
            sys.exit(1)

    with open(args.template, 'r') as f:
        inputs = f.read()

    def _StringToDigest(t):
        # Track which published references the template actually used.
        if t in unseen_strings:
            unseen_strings.remove(t)
        return StringToDigest(t, overrides, transport)

    content = Resolve(inputs, _StringToDigest)

    if len(unseen_strings) > 0 and not args.allow_unused_images:
        print('ERROR: The following image references were not found in %r:' %
              args.template, file=sys.stderr)
        for ref in unseen_strings:
            print(' %s' % ref, file=sys.stderr)
        sys.exit(1)
    print(content)
def main():
    """Assemble an image from --config/--layer/--digest (or --tarball),
    push it under --name, and print the resulting 'name=digest' mapping.

    Fix: replaced the Python 2-only 'print' STATEMENT with the print()
    function call. The statement form is a SyntaxError under Python 3;
    the call form produces identical output on both (a single formatted
    string argument).
    """
    args = parser.parse_args()

    if not args.name:
        raise Exception('--name is a required arguments.')

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    name = Tag(args.name, args.stamp_info_file)

    if not args.config and (args.layer or args.digest):
        raise Exception(
            'Using --layer or --digest requires --config to be specified.')
    if not args.config and not args.tarball:
        raise Exception('Either --config or --tarball must be specified.')

    # If config is specified, use that. Otherwise, fallback on reading
    # the config from the tarball.
    config = args.config
    if args.config:
        with open(args.config, 'r') as reader:
            config = reader.read()
    elif args.tarball:
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()

    if len(args.digest or []) != len(args.layer or []):
        raise Exception('--digest and --layer must have matching lengths.')

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, transport,
                             threads=_THREADS) as session:
        with v2_2_image.FromDisk(config,
                                 zip(args.digest or [], args.layer or []),
                                 legacy_base=args.tarball) as v2_2_img:
            session.upload(v2_2_img)
            print('%s=%s' % (
                name,
                docker_name.Digest('{repository}@{digest}'.format(
                    repository=name.as_repository(),
                    digest=util.Digest(v2_2_img.manifest())))))
def __init__(self, ctx, cache_namespace, args, descriptor_files):
    """Shared initialization for runtime builders.

    Args:
      ctx: the build context, passed through to the superclass.
      cache_namespace: namespace string keying entries in the layer cache.
      args: parsed command-line arguments; several fields are normalized
        in place below (entrypoint, exposed_ports, cache_key_version).
      descriptor_files: descriptor files used for cache-TTL lookup when
        --ttl is not given.
    """
    super(RuntimeBase, self).__init__(ctx)
    self._cache_namespace = cache_namespace
    if args.entrypoint:
        # Normalize the entrypoint string into an argv list.
        args.entrypoint = args.entrypoint.split(" ")
        if args.sh_c_prefix:
            # Wrap the entrypoint as: bash -c "<original entrypoint>".
            args.entrypoint = ['bash', '-c', " ".join(args.entrypoint)]
    if args.exposed_ports:
        args.exposed_ports = args.exposed_ports.split(",")
    # Fold the cache salt into the cache key version so changing the salt
    # invalidates previously cached layers.
    args.cache_key_version = "%s %s" % (args.cache_key_version,
                                        args.cache_salt)
    self._args = args
    self._base_name = docker_name.Tag(self._args.base, strict=False)
    self._base_creds = docker_creds.DefaultKeychain.Resolve(
        self._base_name)
    self._target_image = docker_name.Tag(self._args.name, strict=False)
    self._target_creds = docker_creds.DefaultKeychain.Resolve(
        self._target_image)
    self._transport = transport_pool.Http(
        httplib2.Http, size=constants.THREADS)
    # The base image comes either from a local tarball or the registry.
    if args.tar_base_image_path:
        self._base_image = docker_image.FromTarball(
            args.tar_base_image_path)
    else:
        self._base_image = docker_image.FromRegistry(
            self._base_name, self._base_creds, self._transport)
    # NOTE(review): the context manager is entered manually here;
    # presumably a matching __exit__/cleanup happens elsewhere -- confirm.
    self._base_image.__enter__()
    cache_repo = args.cache_repository
    if not cache_repo:
        # Default the cache repository to the target image's repository.
        cache_repo = self._target_image.as_repository()
    if args.ttl:
        ttl = args.ttl
    else:
        ttl = ftl_util.get_ttl(descriptor_files, ctx)
    self._cache = cache.Registry(
        repo=cache_repo,
        namespace=self._cache_namespace,
        creds=self._target_creds,
        transport=self._transport,
        ttl=ttl,
        threads=constants.THREADS,
        mount=[self._base_name],
        use_global=args.global_cache,
        should_cache=args.cache,
        should_upload=args.upload)
    self._descriptor_files = descriptor_files
def main(args):
    """Build an app image on top of --base and either save it to
    --output_path as a tarball or push it to --name.
    """
    args = parser.parse_args(args)
    logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])
    logging.basicConfig(
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d,%H:%M:%S')
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)
    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)
    ctx = context.Workspace(args.directory)
    # Layer cache lives in the target repository; base layers are mounted
    # from the base image's repository to avoid re-uploading them.
    cash = cache.Registry(target_image.as_repository(),
                          target_creds,
                          transport,
                          threads=_THREADS,
                          mount=[base_name])
    bldr = builder.From(ctx)
    with docker_image.FromRegistry(base_name, base_creds,
                                   transport) as base_image:
        # Create (or pull from cache) the base image with the
        # package descriptor installation overlaid.
        logging.info('Generating dependency layer...')
        with bldr.CreatePackageBase(base_image, cash, args.cache) as deps:
            # Construct the application layer from the context.
            logging.info('Generating app layer...')
            app_layer, diff_id = bldr.BuildAppLayer()
            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:
                if args.output_path:
                    # Tarball output requested: save locally, skip the push.
                    with tarfile.open(name=args.output_path,
                                      mode='w') as tar:
                        save.tarball(target_image, app_image, tar)
                    logging.info("{0} tarball located at {1}".format(
                        str(target_image), args.output_path))
                    return
                with docker_session.Push(target_image,
                                         target_creds,
                                         transport,
                                         threads=_THREADS,
                                         mount=[base_name]) as session:
                    logging.info('Pushing final image...')
                    session.upload(app_image)
def build(self):
    """Will be called when the build needs to start"""
    pool = transport_pool.Http(httplib2.Http)
    source = docker_name.Tag(self.base_image, strict=False)
    logger.warning("Building image using Append builder...")
    start = timer()
    image = self._build(pool, source)
    elapsed = timer() - start
    logger.warning("Image successfully built in {}s.".format(elapsed))
    target = docker_name.Tag(self.full_image_name(self.context_hash),
                             strict=False)
    if self.push:
        self.timed_push(pool, source, image, target)
    else:
        # TODO(r2d4):
        # Load image into local daemon. This wouldn't provide any speedup
        # over using the docker daemon directly.
        pass
def main(root_dir, dst_image):
    """Benchmark: build a node app image on the nodejs base 10 times,
    pushing each build to *dst_image* and printing per-iteration seconds.

    Fix: replaced the Python 2-only 'print' STATEMENT with the print()
    function call (SyntaxError under Python 3; output is unchanged).
    """
    transport = transport_pool.Http(httplib2.Http, size=32)

    # Local build cache lives under <root_dir>/.cache.
    cache_dir = os.path.join(root_dir, '.cache')
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    base = docker_name.Tag('gcr.io/google-appengine/nodejs:latest')
    creds = docker_creds.DefaultKeychain.Resolve(base)
    with docker_image.FromRegistry(base, creds, transport) as base_img:
        npm = NPM(root_dir, base_img)
        for i in range(10):
            start = time.time()
            dst = docker_name.Tag(dst_image)
            creds = docker_creds.DefaultKeychain.Resolve(dst)
            with docker_session.Push(dst, creds, transport,
                                     threads=32) as session:
                img = handle_app(npm, cache_dir)
                session.upload(img)
            print(time.time() - start)
def main():
    """Push the v2.2 image stored in --tarball to the registry under --name."""
    args = parser.parse_args()
    pool = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Push-by-digest is supported by the library, but users essentially
    # never supply digests by hand, so a tag is assumed here.
    name = docker_name.Tag(args.name)

    # Credentials follow the standard Docker client resolution logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, pool, threads=_THREADS) as session:
        with v2_2_image.FromTarball(args.tarball) as image:
            session.upload(image)
def add_tags(self, digest, tag, dry_run):
    """Apply *tag* to the image at *digest*; only log when dry_run is set."""
    if dry_run:
        logging.debug('Would have tagged {0} with {1}'.format(digest, tag))
        return
    source = docker_name.Digest(digest)
    target = docker_name.Tag(tag)
    source_creds = docker_creds.DefaultKeychain.Resolve(source)
    pool = transport_pool.Http(httplib2.Http)
    with docker_image.FromRegistry(source, source_creds, pool) as image:
        # Nothing to tag when the source image does not exist.
        if not image.exists():
            logging.debug("""Unable to tag {0} as the image can't be found""".format(digest))
            return
        target_creds = docker_creds.DefaultKeychain.Resolve(target)
        logging.debug('Tagging {0} with {1}'.format(digest, tag))
        with docker_session.Push(target, target_creds, pool) as push:
            push.upload(image)
def main():
    """Pull an image anonymously and save it to --tarball in v1 format."""
    args = parser.parse_args()
    creds = docker_creds.Anonymous()
    pool = transport_pool.Http(httplib2.Http, size=8)
    name = docker_name.Tag(args.name)
    with tarfile.open(name=args.tarball, mode='w') as tar:
        # Prefer the v2.2 manifest, downgrading v2.2 -> v2 -> v1 to save.
        with v2_2_image.FromRegistry(name, creds, pool) as v2_2_img:
            if v2_2_img.exists():
                with v2_compat.V2FromV22(v2_2_img) as v2_img:
                    with v1_compat.V1FromV2(v2_img) as v1_img:
                        v1_image.save(name, v1_img, tar)
                        return
        # Otherwise downgrade the registry's v2 manifest to v1.
        with v2_image.FromRegistry(name, creds, pool) as v2_img:
            with v1_compat.V1FromV2(v2_img) as v1_img:
                v1_image.save(name, v1_img, tar)
                return
def main():
    """One-shot build: overlay the package dependencies and the app layer
    on --base and push the result to --name.
    """
    args = parser.parse_args()
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)
    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)
    with context.Workspace(args.directory) as ctx:
        # Layer cache in the target repo; base layers are cross-repo
        # mounted from the base image's repository.
        with cache.Registry(target_image.as_repository(),
                            target_creds,
                            transport,
                            threads=_THREADS,
                            mount=[base_name]) as cash:
            with builder.From(ctx) as bldr:
                with docker_image.FromRegistry(base_name, base_creds,
                                               transport) as base_image:
                    # Create (or pull from cache) the base image with the
                    # package descriptor installation overlaid.
                    with bldr.CreatePackageBase(base_image,
                                                cash) as base_with_deps:
                        # Construct the application layer from the context.
                        app_layer = bldr.BuildAppLayer()
                        with append.Layer(base_with_deps,
                                          app_layer) as app_image:
                            with docker_session.Push(target_image,
                                                     target_creds,
                                                     transport,
                                                     threads=_THREADS,
                                                     mount=[base_name
                                                            ]) as session:
                                session.upload(app_image)
    print("Hi, One Build.")
def main():
    """Publish the images named in --image_spec, then emit --template with
    tag references resolved to their published digests.

    Fix: each 'key=value' part is now split on the FIRST '=' only.
    The previous maxsplit of 2 produced 3-element lists for values that
    contain '=' (e.g. base64 data), which dict() rejects with a
    ValueError.
    """
    args = parser.parse_args()
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    overrides = {}

    # TODO(mattmoor): Execute these in a threadpool and
    # aggregate the results as they complete.
    for spec in args.image_spec or []:
        parts = spec.split(';')
        # Split on the first '=' only so values containing '=' still parse.
        kwargs = dict([x.split('=', 1) for x in parts])
        (tag, digest) = Publish(transport, **kwargs)
        overrides[tag] = digest

    with open(args.template, 'r') as f:
        inputs = f.read()

    print(
        _DOCUMENT_DELIMITER.join([
            Resolve(x, lambda t: TagToDigest(t, overrides, transport))
            for x in inputs.split(_DOCUMENT_DELIMITER)
        ]))