def test_windows_image_manifest_with_foreign_layers(self):
    """The manifest of a tarball image retains its foreign-layer URL."""
    tar_path = TestRunfilePath(
        "tests/container/basic_windows_image_go_join_layers.tar")
    with v2_2_image.FromTarball(tar_path) as img:
        # Foreign (Windows base) layers are referenced by an external URL;
        # it must appear verbatim in the image manifest.
        self.assertIn(
            "https://go.microsoft.com/fwlink/?linkid=873595", img.manifest())
def main():
    """Load a v2.2 image from on-disk pieces and extract it to a tarball.

    Reads the image config either from --config or from --tarball, joins the
    compressed and uncompressed layer lists, then writes the flattened
    filesystem tar and the image metadata (config) file.
    """
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    # An explicit --config wins; otherwise fall back to the tarball's config.
    if args.config:
        logging.info('Reading config from %r', args.config)
        with open(args.config, 'r') as cfg_file:
            config = cfg_file.read()
    elif args.tarball:
        logging.info('Reading config from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()
    else:
        config = args.config

    compressed = list(zip(args.digest or [], args.layer or []))
    uncompressed = list(zip(args.diff_id or [], args.uncompressed_layer or []))

    logging.info('Loading v2.2 image From Disk ...')
    with v2_2_image.FromDisk(config_file=config,
                             layers=compressed,
                             uncompressed_layers=uncompressed,
                             legacy_base=args.tarball) as v2_2_img:
        with tarfile.open(args.filesystem, 'w') as tar:
            v2_2_image.extract(v2_2_img, tar)
        with open(args.metadata, 'w') as meta:
            meta.write(v2_2_img.config_file())
def _push_image(image_reference: str, image_file: str, threads=8):
    """Upload the image tarball at `image_file` to `image_reference`."""
    import ci.util
    ci.util.not_none(image_reference)
    ci.util.existing_file(image_file)

    transport = _mk_transport()

    # Normalise then parse the reference before resolving rw credentials.
    image_reference = normalise_image_reference(image_reference)
    image_reference = _parse_image_reference(image_reference)
    creds = _mk_credentials(
        image_reference=image_reference,
        privileges=Privileges.READ_WRITE,
    )

    with v2_2_image.FromTarball(image_file) as v2_2_img:
        try:
            push_session = docker_session.Push(
                image_reference,
                creds,
                transport,
                threads=threads,
            )
            with push_session as session:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()
                logger.info(
                    f'{image_reference} was uploaded - digest: {digest}')
        except Exception as e:
            # Surface the full traceback before propagating the failure.
            import traceback
            traceback.print_exc()
            raise e
def test_publish_fast_stamping(self):
    """Publish resolves {STABLE_GIT_COMMIT} stamp variables in the tag.

    Fix: removed a leftover debug `print(expected_digest)` and a line of
    commented-out code from the test body.
    """
    td = TestData(
        'io_bazel_rules_k8s/examples/hellogrpc/cc/server/server.tar')
    name = "fake.gcr.io/foo/bar:{STABLE_GIT_COMMIT}"
    stamp_info = {"STABLE_GIT_COMMIT": "9428a3b3"}
    expected_tag = 'fake.gcr.io/foo/bar:9428a3b3'
    with v2_2_image.FromTarball(td) as img:
        (config_path, layer_data) = save.fast(img, self._tmpdir, threads=16)
        expected_digest = img.digest()
        with mock.patch.object(v2_2_session, 'Push', return_value=NopPush()):
            (tag, published_tag, digest) = resolver.Publish(
                _BAD_TRANSPORT,
                None,
                stamp_info,
                name=name,
                config=config_path,
                digest=','.join([h for (h, unused) in layer_data]),
                layer=','.join([layer for (unused, layer) in layer_data]))
            # The returned tag keeps the unstamped template; the published
            # tag has the stamp variable substituted.
            self.assertEqual(tag, name)
            self.assertEqual(published_tag, expected_tag)
            self.assertEqual(digest.digest, expected_digest)
def main():
    """Rewrite an image config with Bazel-provided metadata overrides.

    Fix: `dict.iteritems()` is Python-2-only; `items()` behaves identically
    here and also works on Python 3. Re-assigning an existing key while
    iterating is safe in both versions.
    """
    args = parser.parse_args()

    # Start from the base image's config when one is given, else empty.
    base_json = '{}'
    if args.base:
        with v2_2_image.FromTarball(args.base) as v2_2_img:
            base_json = v2_2_img.config_file()
    data = json.loads(base_json)

    layers = [utils.ExtractValue(layer) for layer in args.layer]

    labels = KeyValueToDict(args.labels)
    # A label value of '@path' means: read the actual value from that file.
    for label, value in labels.items():
        if value.startswith('@'):
            with open(value[1:], 'r') as f:
                labels[label] = f.read()

    output = v2_2_metadata.Override(
        data,
        v2_2_metadata.Overrides(
            author='Bazel',
            created_by='bazel build ...',
            layers=layers,
            entrypoint=args.entrypoint,
            cmd=args.command,
            user=args.user,
            labels=labels,
            env=KeyValueToDict(args.env),
            ports=args.ports,
            volumes=args.volumes,
            workdir=args.workdir),
        architecture=_PROCESSOR_ARCHITECTURE,
        operating_system=_OPERATING_SYSTEM)

    with open(args.output, 'w') as fp:
        json.dump(output, fp, sort_keys=True)
        fp.write('\n')
def main():
    """Validate flags, assemble a v2.2 image from disk, and extract it."""
    args = parser.parse_args()

    # Flag validation: layers need a config, outputs are mandatory, and a
    # config source (--config or --tarball) must exist.
    if not args.config and (args.layer or args.digest):
        raise Exception(
            'Using --layer or --digest requires --config to be specified.')
    if not args.filesystem or not args.metadata:
        raise Exception('--filesystem and --metadata are required flags.')
    if not args.config and not args.tarball:
        raise Exception('Either --config or --tarball must be specified.')

    # An explicit --config wins; otherwise read it out of the tarball.
    if args.config:
        with open(args.config, 'r') as cfg_file:
            config = cfg_file.read()
    elif args.tarball:
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()
    else:
        config = args.config

    if len(args.digest or []) != len(args.layer or []):
        raise Exception('--digest and --layer must have matching lengths.')

    with v2_2_image.FromDisk(config,
                             zip(args.digest or [], args.layer or []),
                             legacy_base=args.tarball) as v2_2_img:
        with tarfile.open(args.filesystem, 'w') as tar:
            v2_2_image.extract(v2_2_img, tar)
        with open(args.metadata, 'w') as meta:
            meta.write(v2_2_img.config_file())
def main():
    """Push a v2.2 image tarball to a registry, optionally in OCI form."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not args.name or not args.tarball:
        raise Exception('--name and --tarball are required arguments.')

    # HTTP transport with retry support, shared by all upload threads.
    retry_factory = retry.Factory()
    retry_factory = retry_factory.WithSourceTransportCallable(httplib2.Http)
    transport = transport_pool.Http(retry_factory.Build, size=_THREADS)

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    name = Tag(args.name, args.stamp_info_file)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, transport,
                             threads=_THREADS) as session:
        logging.info('Reading v2.2 image from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as v2_2_img:
            logging.info('Starting upload ...')
            if args.oci:
                # Convert to OCI layout before uploading when requested.
                with oci_compat.OCIFromV22(v2_2_img) as oci_img:
                    session.upload(oci_img)
                    digest = oci_img.digest()
            else:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()

    print('{name} was published with digest: {digest}'.format(
        name=name, digest=digest))
def test_img_exists(self):
    """exists() is True for a registered image and False otherwise."""
    with docker_image.FromTarball('testing/lib/test.tar') as img:
        self.registry.setImage('gcr.io/foobar/baz', img)
    with docker_image.FromRegistry('gcr.io/foobar/baz') as img:
        self.assertTrue(img.exists())
    with docker_image.FromRegistry('does_not_exist') as img:
        self.assertFalse(img.exists())
def main():
    """Unpack a v2.2 image tarball into a directory (compressed layers)."""
    args = parser.parse_args()
    if not args.tarball or not args.directory:
        raise Exception('--tarball and --directory are required arguments.')
    with v2_2_image.FromTarball(args.tarball) as v2_2_img:
        save.fast(v2_2_img, args.directory, threads=_THREADS)
def main():
    """Write a tarball image's config and manifest to the output files."""
    args = parser.parse_args()
    with docker_image.FromTarball(args.tarball) as img:
        with open(args.output, 'w') as config_out:
            config_out.write(img.config_file())
        with open(args.manifestoutput, 'w') as manifest_out:
            manifest_out.write(img.manifest())
def main():
    """Assemble a v2.2 image from on-disk pieces and push it to a registry."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not args.name:
        raise Exception('--name is a required arguments.')

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    name = Tag(args.name, args.stamp_info_file)

    if not args.config and (args.layer or args.digest):
        raise Exception(
            'Using --layer or --digest requires --config to be specified.')
    if not args.config and not args.tarball:
        raise Exception('Either --config or --tarball must be specified.')

    # If config is specified, use that. Otherwise, fallback on reading
    # the config from the tarball.
    config = args.config
    if args.config:
        logging.info('Reading config from %r', args.config)
        with open(args.config, 'r') as cfg_file:
            config = cfg_file.read()
    elif args.tarball:
        logging.info('Reading config from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()

    if len(args.digest or []) != len(args.layer or []):
        raise Exception('--digest and --layer must have matching lengths.')

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, transport,
                             threads=_THREADS) as session:
        logging.info('Loading v2.2 image from disk ...')
        with v2_2_image.FromDisk(config,
                                 zip(args.digest or [], args.layer or []),
                                 legacy_base=args.tarball) as v2_2_img:
            logging.info('Starting upload ...')
            if args.oci:
                # Convert to OCI layout before uploading when requested.
                with oci_compat.OCIFromV22(v2_2_img) as oci_img:
                    session.upload(oci_img)
                    digest = oci_img.digest()
            else:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()

    print('{name} was published with digest: {digest}'.format(
        name=name, digest=digest))
def Publish(transport,
            image_chroot,
            name=None,
            tarball=None,
            config=None,
            digest=None,
            layer=None):
    """Push an image (config + layers, or a legacy tarball) and return
    the original name alongside the published digest reference.
    """
    if not name:
        raise Exception('Expected "name" kwarg')
    if not config and (layer or digest):
        raise Exception(
            name +
            ': Using "layer" or "digest" requires "config" to be specified.')

    # Load the config: an explicit file wins, then the tarball's embedded one.
    if config:
        with open(config, 'r') as cfg_file:
            config = cfg_file.read()
    elif tarball:
        with v2_2_image.FromTarball(tarball) as base:
            config = base.config_file()
    else:
        raise Exception(
            name + ': Either "config" or "tarball" must be specified.')

    # Comma-separated digest/layer lists must pair up one-to-one.
    if digest or layer:
        digest = digest.split(',')
        layer = layer.split(',')
        if len(digest) != len(layer):
            raise Exception(
                name + ': "digest" and "layer" must have matching lengths.')
    else:
        digest = []
        layer = []

    name_to_replace = name
    if image_chroot:
        name_to_publish = docker_name.Tag(
            os.path.join(image_chroot, name), strict=False)
    else:
        # Without a chroot, the left-hand-side must be a valid tag.
        name_to_publish = docker_name.Tag(name, strict=False)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name_to_publish)

    with v2_2_session.Push(name_to_publish, creds, transport,
                           threads=_THREADS) as session:
        with v2_2_image.FromDisk(config,
                                 zip(digest or [], layer or []),
                                 legacy_base=tarball) as v2_2_img:
            session.upload(v2_2_img)
            published = docker_name.Digest('{repository}@{digest}'.format(
                repository=name_to_publish.as_repository(),
                digest=v2_2_img.digest()))
            return (name_to_replace, published)
def main():
    """Compute the digest of an image assembled from disk and write it out."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    # Flag validation mirrors the pusher: layers require a config, and a
    # config source must exist. Failures exit with status 1.
    if not args.config and (args.layer or args.digest):
        logging.fatal(
            'Using --layer or --digest requires --config to be specified.')
        sys.exit(1)
    if not args.config and not args.tarball:
        logging.fatal('Either --config or --tarball must be specified.')
        sys.exit(1)

    # If config is specified, use that. Otherwise, fallback on reading
    # the config from the tarball.
    config = args.config
    manifest = args.manifest
    if args.config:
        logging.info('Reading config from %r', args.config)
        with open(args.config, 'r') as cfg_file:
            config = cfg_file.read()
    elif args.tarball:
        logging.info('Reading config from tarball %r', args.tarball)
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()

    if args.manifest:
        with open(args.manifest, 'r') as manifest_file:
            manifest = manifest_file.read()

    if len(args.digest or []) != len(args.layer or []):
        logging.fatal('--digest and --layer must have matching lengths.')
        sys.exit(1)

    logging.info('Loading v2.2 image from disk ...')
    with v2_2_image.FromDisk(
            config,
            list(zip(args.digest or [], args.layer or [])),
            legacy_base=args.tarball,
            foreign_layers_manifest=manifest) as v2_2_img:
        try:
            if args.oci:
                with oci_compat.OCIFromV22(v2_2_img) as oci_img:
                    digest = oci_img.digest()
            else:
                digest = v2_2_img.digest()
            with open(args.output_digest, 'w+') as digest_file:
                digest_file.write(digest)
        # pylint: disable=broad-except
        except Exception as e:
            logging.fatal('Error getting digest: %s', e)
            sys.exit(1)
def test_push(self):
    """An image registered under one tag can be pushed under another."""
    with docker_image.FromTarball('testing/lib/test.tar') as img:
        self.registry.setImage('gcr.io/foo/bar:latest', img)
    with docker_image.FromRegistry('gcr.io/foo/bar:latest') as img:
        with docker_session.Push('gcr.io/foo/bar:testing', "", None) as push:
            push.upload(img)
    self.AssertPushed(self.registry, 'gcr.io/foo/bar:testing')
def main():
    """Extract an image tarball into a directory of uncompressed layers."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not args.tarball or not args.directory:
        raise Exception('--tarball and --directory are required arguments.')

    logging.info('Reading v2.2 image from tarball %r', args.tarball)
    with v2_2_image.FromTarball(args.tarball) as v2_2_img:
        save.uncompressed(v2_2_img, args.directory, threads=_THREADS)
def test_from_registry_and_push(self):
    """Round-trip: pull an image from the registry and push it elsewhere."""
    with docker_image.FromTarball('testing/lib/test.tar') as img:
        self.registry.setImage('gcr.io/foobar/baz', img)
    with docker_image.FromRegistry('gcr.io/foobar/baz') as img:
        with docker_session.Push('gcr.io/foo/bar:testing', "", None) as push:
            push.upload(img)
    with docker_image.FromRegistry('gcr.io/foo/bar:testing') as img:
        self.assertTrue(img.exists())
def main():
    """Write an image's id (sha256 of its config) and its top v1 layer name.

    Fix: `hashlib.sha256()` requires bytes on Python 3, but `config_file()`
    returns text; encode it first. On Python 2 the config is an ascii str,
    so `.encode('utf8')` is a no-op there.
    """
    args = parser.parse_args()

    with docker_image.FromTarball(args.tarball) as v2_2_img:
        with open(args.output_id, 'w') as f:
            f.write(hashlib.sha256(
                v2_2_img.config_file().encode('utf8')).hexdigest())
        # Downgrade v2.2 -> v2 -> v1 to recover the legacy top layer id.
        with v2_compat.V2FromV22(v2_2_img) as v2_img:
            with v1_compat.V1FromV2(v2_img) as v1_img:
                with open(args.output_name, 'w') as f:
                    f.write(v1_img.top())
def main():
    """Extract an image tarball; --format picks compressed vs raw layers."""
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    # 'tar.gz' keeps layers gzipped (save.fast); anything else extracts
    # them uncompressed.
    saver = save.fast if args.format == 'tar.gz' else save.uncompressed

    logging.info('Reading v2.2 image from tarball %r', args.tarball)
    with v2_2_image.FromTarball(args.tarball) as v2_2_img:
        saver(v2_2_img, args.directory, threads=_THREADS)
def test_publish_legacy(self):
    """Publishing directly from a legacy tarball returns the right digest."""
    tarball = TestData(
        'io_bazel_rules_k8s/examples/hellogrpc/cc/server/server.tar')
    name = docker_name.Tag('fake.gcr.io/foo/bar:baz')
    with mock.patch.object(v2_2_session, 'Push', return_value=NopPush()):
        (tag, digest) = resolver.Publish(
            _BAD_TRANSPORT, name=str(name), tarball=tarball)
        self.assertEqual(tag, name)
        with v2_2_image.FromTarball(tarball) as img:
            self.assertEqual(digest.digest, img.digest())
def Get(self, base_image, namespace, checksum):
    """Return the cached image for this key, or None on a cache miss.

    Checks the in-memory registry first, then falls back to an on-disk
    tarball, registering it for subsequent lookups.
    """
    entry = self._tag(base_image, namespace, checksum)

    # Fast path: already registered in the registry.
    if self._registry.existsImage(entry):
        return self._registry.getImage(entry)

    # Slow path: a tarball on disk; load and register it.
    tarball = os.path.join(self._directory, str(entry))
    if os.path.isfile(tarball):
        logging.info('Found cached base image: %s.' % entry)
        self._registry.setImage(entry, docker_image.FromTarball(tarball))
        return self._registry.getImage(entry)

    logging.info('No cached base image found for entry: %s.' % entry)
    self._cache_miss += 1
    return None
def test_tag_to_digest_not_cached(self):
    """TagToDigest resolves a tag via the registry when not in the cache."""
    with v2_2_image.FromTarball(
            TestData(
                'io_bazel_rules_k8s/examples/hellogrpc/cc/server/server.tar'
            )) as img:
        # Add a fake exists method to look like FromRegistry
        img.exists = lambda: True
        with mock.patch.object(v2_2_image, 'FromRegistry', return_value=img):
            tag = docker_name.Tag('gcr.io/foo/bar:baz')
            want = docker_name.Digest('gcr.io/foo/bar@' + img.digest())
            got = resolver.TagToDigest(tag, {}, _BAD_TRANSPORT)
            self.assertEqual(got, str(want))
def main():
    """Assemble a v2.2 image from disk, push it, and print name=digest.

    Fix: replaced the Python-2-only `print` statement with the `print()`
    function so this module also parses on Python 3; with a single argument
    the output on Python 2 is byte-identical.
    """
    args = parser.parse_args()

    if not args.name:
        raise Exception('--name is a required arguments.')

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    name = Tag(args.name, args.stamp_info_file)

    if not args.config and (args.layer or args.digest):
        raise Exception(
            'Using --layer or --digest requires --config to be specified.')
    if not args.config and not args.tarball:
        raise Exception('Either --config or --tarball must be specified.')

    # If config is specified, use that. Otherwise, fallback on reading
    # the config from the tarball.
    config = args.config
    if args.config:
        with open(args.config, 'r') as reader:
            config = reader.read()
    elif args.tarball:
        with v2_2_image.FromTarball(args.tarball) as base:
            config = base.config_file()

    if len(args.digest or []) != len(args.layer or []):
        raise Exception('--digest and --layer must have matching lengths.')

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, transport,
                             threads=_THREADS) as session:
        with v2_2_image.FromDisk(config,
                                 zip(args.digest or [], args.layer or []),
                                 legacy_base=args.tarball) as v2_2_img:
            session.upload(v2_2_img)
            print('%s=%s' % (
                name,
                docker_name.Digest('{repository}@{digest}'.format(
                    repository=name.as_repository(),
                    digest=util.Digest(v2_2_img.manifest())))))
def __init__(self, ctx, cache_namespace, args, descriptor_files):
    """Set up the runtime: normalize args, resolve registry credentials,
    open the base image, and construct the layer cache.

    NOTE(review): statement order matters — the base image is entered
    (__enter__) before the cache is built and is not exited here;
    presumably the owner closes it elsewhere. TODO confirm.
    """
    super(RuntimeBase, self).__init__(ctx)
    self._cache_namespace = cache_namespace
    # Normalize the entrypoint from a shell-style string into argv form.
    if args.entrypoint:
        args.entrypoint = args.entrypoint.split(" ")
        # Optionally wrap the entrypoint in `bash -c '...'`.
        if args.sh_c_prefix:
            args.entrypoint = ['bash', '-c', " ".join(args.entrypoint)]
    if args.exposed_ports:
        args.exposed_ports = args.exposed_ports.split(",")
    # Fold the salt into the cache key so salted builds don't share entries.
    args.cache_key_version = "%s %s" % (args.cache_key_version,
                                        args.cache_salt)
    self._args = args
    self._base_name = docker_name.Tag(self._args.base, strict=False)
    self._base_creds = docker_creds.DefaultKeychain.Resolve(
        self._base_name)
    self._target_image = docker_name.Tag(self._args.name, strict=False)
    self._target_creds = docker_creds.DefaultKeychain.Resolve(
        self._target_image)
    self._transport = transport_pool.Http(
        httplib2.Http, size=constants.THREADS)
    # Base image: local tarball if given, otherwise pulled from a registry.
    if args.tar_base_image_path:
        self._base_image = docker_image.FromTarball(
            args.tar_base_image_path)
    else:
        self._base_image = docker_image.FromRegistry(
            self._base_name, self._base_creds, self._transport)
    self._base_image.__enter__()
    # Cache layers alongside the target repo unless one is set explicitly.
    cache_repo = args.cache_repository
    if not cache_repo:
        cache_repo = self._target_image.as_repository()
    if args.ttl:
        ttl = args.ttl
    else:
        ttl = ftl_util.get_ttl(descriptor_files, ctx)
    self._cache = cache.Registry(
        repo=cache_repo,
        namespace=self._cache_namespace,
        creds=self._target_creds,
        transport=self._transport,
        ttl=ttl,
        threads=constants.THREADS,
        mount=[self._base_name],
        use_global=args.global_cache,
        should_cache=args.cache,
        should_upload=args.upload)
    self._descriptor_files = descriptor_files
def main():
    """Push an image tarball to the registry named by --name."""
    args = parser.parse_args()

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    name = docker_name.Tag(args.name)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(name)

    with docker_session.Push(name, creds, transport,
                             threads=_THREADS) as session:
        with v2_2_image.FromTarball(args.tarball) as v2_2_img:
            session.upload(v2_2_img)
def test_publish_fast(self):
    """Publish from a config + loose-layer layout produced by save.fast."""
    tarball = TestData(
        'io_bazel_rules_k8s/examples/hellogrpc/cc/server/server.tar')
    name = docker_name.Tag('fake.gcr.io/foo/bar:baz')
    with v2_2_image.FromTarball(tarball) as img:
        (config_path, layer_data) = save.fast(img, self._tmpdir, threads=16)
        expected_digest = img.digest()
        with mock.patch.object(v2_2_session, 'Push', return_value=NopPush()):
            (tag, digest) = resolver.Publish(
                _BAD_TRANSPORT,
                name=str(name),
                config=config_path,
                digest=','.join([h for (h, unused) in layer_data]),
                layer=','.join([layer for (unused, layer) in layer_data]))
            self.assertEqual(tag, name)
            self.assertEqual(digest.digest, expected_digest)
def test_main(self, append_layer_mock):
    """End-to-end main() run with the layer-append step mocked out."""
    with docker_image.FromTarball('ftl/node_builder_base.tar') as img:
        self.registry.setImage('fake.gcr.io/base/image:initial', img)
    with docker_image.FromRegistry(
            'fake.gcr.io/base/image:initial') as img:
        self.registry.setImage('fake.gcr.io/base/image:appended', img)
    self.AssertPushed(self.registry, 'fake.gcr.io/base/image:initial')
    self.AssertPushed(self.registry, 'fake.gcr.io/base/image:appended')
    # main() should push the pre-appended image under the :latest tag.
    append_layer_mock.return_value = self.registry.getImage(
        'fake.gcr.io/base/image:appended')
    args = [
        "--base=fake.gcr.io/base/image:initial",
        "--name=fake.gcr.io/base/image:latest",
        "--directory= ",
    ]
    main.main(args)
    self.AssertPushed(self.registry, 'fake.gcr.io/base/image:latest')
def _push_image(
        image_reference: str,
        image_file: str,
        credentials_lookup: typing.Callable[
            [str, oa.Privileges, bool], oa.OciConfig],
        threads=8,
):
    """Upload a local image tarball to the given OCI image reference.

    Fixes: the `credentials_lookup` annotation previously referenced the
    name `image_reference`, which is undefined when the `def` is evaluated
    and would raise NameError at import time — the first callback parameter
    is the image reference string, i.e. `str`. Also corrected the
    'exiting' -> 'existing' typo in the error message.
    """
    if not image_reference:
        raise ValueError(image_reference)
    if not os.path.isfile(image_file):
        raise ValueError(f'not an existing file: {image_file=}')

    transport = _mk_transport_pool()

    image_reference = ou.normalise_image_reference(image_reference)
    image_reference = docker_name.from_string(image_reference)
    creds = _mk_credentials(
        image_reference=image_reference,
        credentials_lookup=credentials_lookup,
        privileges=oa.Privileges.READWRITE,
    )
    # XXX fail if no creds were found

    with v2_2_image.FromTarball(image_file) as v2_2_img:
        try:
            with docker_session.Push(
                image_reference,
                creds,
                transport,
                threads=threads,
            ) as session:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()
                logger.info(
                    f'{image_reference} was uploaded - digest: {digest}')
        except Exception as e:
            # Surface the full traceback before propagating the failure.
            import traceback
            traceback.print_exc()
            raise e
def __init__(self, ctx, namespace, args, cache_version_str,
             descriptor_files):
    """Set up the runtime: normalize args, resolve registry credentials,
    open the base image, and construct the versioned layer cache.

    NOTE(review): statement order matters — the base image is entered
    (__enter__) before the cache is built and is not exited here;
    presumably the owner closes it elsewhere. TODO confirm.
    """
    super(RuntimeBase, self).__init__(ctx)
    self._namespace = namespace
    # Normalize the entrypoint from a shell-style string into argv form.
    if args.entrypoint:
        args.entrypoint = args.entrypoint.split(" ")
    if args.exposed_ports:
        args.exposed_ports = args.exposed_ports.split(",")
    self._args = args
    self._base_name = docker_name.Tag(self._args.base, strict=False)
    self._base_creds = docker_creds.DefaultKeychain.Resolve(
        self._base_name)
    self._target_image = docker_name.Tag(self._args.name, strict=False)
    self._target_creds = docker_creds.DefaultKeychain.Resolve(
        self._target_image)
    self._transport = transport_pool.Http(httplib2.Http, size=_THREADS)
    # Base image: local tarball if given, otherwise pulled from a registry.
    if args.tar_base_image_path:
        self._base_image = docker_image.FromTarball(
            args.tar_base_image_path)
    else:
        self._base_image = docker_image.FromRegistry(
            self._base_name, self._base_creds, self._transport)
    self._base_image.__enter__()
    # Cache layers alongside the target repo unless one is set explicitly.
    cache_repo = args.cache_repository
    if not cache_repo:
        cache_repo = self._target_image.as_repository()
    self._cache = cache.Registry(repo=cache_repo,
                                 namespace=self._namespace,
                                 base_image=self._base_image,
                                 creds=self._target_creds,
                                 transport=self._transport,
                                 cache_version=cache_version_str,
                                 threads=_THREADS,
                                 mount=[self._base_name],
                                 use_global=args.global_cache,
                                 should_cache=args.cache,
                                 should_upload=args.upload)
    self._descriptor_files = descriptor_files
def _push_image(image_reference: str, image_file: str, threads=8):
    """Upload the image tarball at `image_file` to `image_reference`."""
    import util
    util.not_none(image_reference)
    util.existing_file(image_file)

    transport = _mk_transport()

    image_reference = normalise_image_reference(image_reference)
    name = _parse_image_reference(image_reference)

    try:
        # first try container_registry cfgs from available cfg
        creds = _credentials(
            image_reference=image_reference,
            privileges=Privileges.READ_WRITE)
        if not creds:
            print('could not find rw-creds')
            # fall-back to default docker lookup
            creds = docker_creds.DefaultKeychain.Resolve(name)
    except Exception as e:
        util.fail('Error resolving credentials for {name}: {e}'.format(
            name=name, e=e))

    with v2_2_image.FromTarball(image_file) as v2_2_img:
        try:
            with docker_session.Push(
                name,
                creds,
                transport,
                threads=threads,
            ) as session:
                session.upload(v2_2_img)
                digest = v2_2_img.digest()
                print(f'{name} was uploaded - digest: {digest}')
        except Exception as e:
            # Surface the full traceback before propagating the failure.
            import traceback
            traceback.print_exc()
            raise e
def TestBundleImage(name, image_name):
    """Open a bundled test-data tarball as a v2.2 image tagged image_name."""
    tag = docker_name.Tag(image_name, strict=False)
    return v2_2_image.FromTarball(TestData(name + '.tar'), name=tag)