def __init__(self, ctx, cache_namespace, args, descriptor_files):
    super(RuntimeBase, self).__init__(ctx)
    self._cache_namespace = cache_namespace

    # Normalize CLI strings: split the entrypoint on spaces and the
    # exposed ports on commas.
    if args.entrypoint:
        args.entrypoint = args.entrypoint.split(" ")
        if args.sh_c_prefix:
            args.entrypoint = ['bash', '-c', " ".join(args.entrypoint)]
    if args.exposed_ports:
        args.exposed_ports = args.exposed_ports.split(",")

    # Fold the user-supplied salt into the cache key version.
    args.cache_key_version = "%s %s" % (args.cache_key_version,
                                        args.cache_salt)
    self._args = args

    # Resolve base and target image names and their credentials.
    self._base_name = docker_name.Tag(self._args.base, strict=False)
    self._base_creds = docker_creds.DefaultKeychain.Resolve(
        self._base_name)
    self._target_image = docker_name.Tag(self._args.name, strict=False)
    self._target_creds = docker_creds.DefaultKeychain.Resolve(
        self._target_image)
    self._transport = transport_pool.Http(
        httplib2.Http, size=constants.THREADS)

    # Load the base image from a local tarball if one was given,
    # otherwise pull it from the registry.
    if args.tar_base_image_path:
        self._base_image = docker_image.FromTarball(
            args.tar_base_image_path)
    else:
        self._base_image = docker_image.FromRegistry(
            self._base_name, self._base_creds, self._transport)
    self._base_image.__enter__()

    # Default the cache repository to the target image's repository and
    # the TTL to the value derived from the descriptor files.
    cache_repo = args.cache_repository
    if not cache_repo:
        cache_repo = self._target_image.as_repository()
    if args.ttl:
        ttl = args.ttl
    else:
        ttl = ftl_util.get_ttl(descriptor_files, ctx)

    self._cache = cache.Registry(
        repo=cache_repo,
        namespace=self._cache_namespace,
        creds=self._target_creds,
        transport=self._transport,
        ttl=ttl,
        threads=constants.THREADS,
        mount=[self._base_name],
        use_global=args.global_cache,
        should_cache=args.cache,
        should_upload=args.upload)
    self._descriptor_files = descriptor_files
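# A minimal sketch of the flag surface the constructor above assumes. This is
# not the real FTL argument parser: the flag spellings, types, and defaults
# below are assumptions inferred from the args.* attributes read above.
import argparse

sketch_parser = argparse.ArgumentParser(description='FTL runtime builder (sketch)')
sketch_parser.add_argument('--base', required=True, help='base image tag')
sketch_parser.add_argument('--name', required=True, help='target image tag')
sketch_parser.add_argument('--entrypoint', default=None,
                           help='space-separated entrypoint, split by the constructor')
sketch_parser.add_argument('--sh-c-prefix', action='store_true',
                           help="wrap the entrypoint as ['bash', '-c', ...]")
sketch_parser.add_argument('--exposed-ports', default=None,
                           help='comma-separated ports, split by the constructor')
sketch_parser.add_argument('--cache-key-version', default='v1')
sketch_parser.add_argument('--cache-salt', default='',
                           help='folded into cache_key_version')
sketch_parser.add_argument('--tar-base-image-path', default=None,
                           help='load the base image from a local tarball')
sketch_parser.add_argument('--cache-repository', default=None,
                           help='defaults to the target image repository')
sketch_parser.add_argument('--ttl', type=int, default=None,
                           help='cache TTL, otherwise derived from descriptor files')
sketch_parser.add_argument('--global-cache', action='store_true')
sketch_parser.add_argument('--cache', action='store_true',
                           help='read dependency layers from the cache')
sketch_parser.add_argument('--upload', action='store_true',
                           help='write built layers back to the cache')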
def main(args):
    args = parser.parse_args(args)
    logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])
    logging.basicConfig(
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d,%H:%M:%S')

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)

    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)

    ctx = context.Workspace(args.directory)
    cash = cache.Registry(
        target_image.as_repository(),
        target_creds,
        transport,
        threads=_THREADS,
        mount=[base_name])
    bldr = builder.From(ctx)

    with docker_image.FromRegistry(base_name, base_creds,
                                   transport) as base_image:
        # Create (or pull from cache) the base image with the
        # package descriptor installation overlaid.
        logging.info('Generating dependency layer...')
        with bldr.CreatePackageBase(base_image, cash, args.cache) as deps:
            # Construct the application layer from the context.
            logging.info('Generating app layer...')
            app_layer, diff_id = bldr.BuildAppLayer()
            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:
                if args.output_path:
                    with tarfile.open(name=args.output_path, mode='w') as tar:
                        save.tarball(target_image, app_image, tar)
                    logging.info("{0} tarball located at {1}".format(
                        str(target_image), args.output_path))
                    return
                with docker_session.Push(
                        target_image,
                        target_creds,
                        transport,
                        threads=_THREADS,
                        mount=[base_name]) as session:
                    logging.info('Pushing final image...')
                    session.upload(app_image)
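# Hypothetical invocation sketch for the main() above. The real flag
# spellings come from the module-level `parser` (not shown here), so the
# flag names in the comments below are assumptions for illustration only.
#
#   python builder.py --base=gcr.io/example/base:latest \
#       --name=gcr.io/example/app:latest --directory=/workspace
#
# With --output_path the built image is written to a local tarball via
# save.tarball() and main() returns early; otherwise it is pushed to the
# target registry with docker_session.Push.
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])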
def __init__(self, ctx, namespace, args, cache_version_str,
             descriptor_files):
    super(RuntimeBase, self).__init__(ctx)
    self._namespace = namespace

    # Normalize CLI strings: split the entrypoint on spaces and the
    # exposed ports on commas.
    if args.entrypoint:
        args.entrypoint = args.entrypoint.split(" ")
    if args.exposed_ports:
        args.exposed_ports = args.exposed_ports.split(",")
    self._args = args

    # Resolve base and target image names and their credentials.
    self._base_name = docker_name.Tag(self._args.base, strict=False)
    self._base_creds = docker_creds.DefaultKeychain.Resolve(
        self._base_name)
    self._target_image = docker_name.Tag(self._args.name, strict=False)
    self._target_creds = docker_creds.DefaultKeychain.Resolve(
        self._target_image)
    self._transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Load the base image from a local tarball if one was given,
    # otherwise pull it from the registry.
    if args.tar_base_image_path:
        self._base_image = docker_image.FromTarball(
            args.tar_base_image_path)
    else:
        self._base_image = docker_image.FromRegistry(
            self._base_name, self._base_creds, self._transport)
    self._base_image.__enter__()

    # Default the cache repository to the target image's repository.
    cache_repo = args.cache_repository
    if not cache_repo:
        cache_repo = self._target_image.as_repository()

    self._cache = cache.Registry(
        repo=cache_repo,
        namespace=self._namespace,
        base_image=self._base_image,
        creds=self._target_creds,
        transport=self._transport,
        cache_version=cache_version_str,
        threads=_THREADS,
        mount=[self._base_name],
        use_global=args.global_cache,
        should_cache=args.cache,
        should_upload=args.upload)
    self._descriptor_files = descriptor_files
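# A hypothetical subclass sketch showing how this earlier constructor might be
# wired up by a concrete runtime builder. The class name, namespace, cache
# version string, and descriptor file list are illustrative assumptions, not
# taken from the FTL source.
class NodeBuilder(RuntimeBase):
    def __init__(self, ctx, args):
        super(NodeBuilder, self).__init__(
            ctx,
            namespace='node-deps',
            args=args,
            cache_version_str='v1',
            descriptor_files=['package-lock.json', 'package.json'])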
def main(args):
    args = parser.parse_args(args)
    logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)

    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)

    ctx = context.Workspace(args.directory)
    cash = cache.Registry(
        target_image.as_repository(),
        target_creds,
        transport,
        threads=_THREADS,
        mount=[base_name])
    bldr = builder.From(ctx)

    with docker_image.FromRegistry(base_name, base_creds,
                                   transport) as base_image:
        # Create (or pull from cache) the base image with the
        # package descriptor installation overlaid.
        logging.info('Generating dependency layer...')
        with bldr.CreatePackageBase(base_image, cash) as deps:
            # Construct the application layer from the context.
            logging.info('Generating app layer...')
            app_layer, diff_id = bldr.BuildAppLayer()
            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:
                with docker_session.Push(
                        target_image,
                        target_creds,
                        transport,
                        threads=_THREADS,
                        mount=[base_name]) as session:
                    logging.info('Pushing final image...')
                    session.upload(app_image)