def _populate_manifest_and_blobs(self):
  """Populates self._manifest and self._blob_names."""
  config_blob = docker_digest.SHA256(self.config_file().encode('utf8'))
  manifest = {
      'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
      'schemaVersion': 2,
      'config': {
          'digest': config_blob,
          'mediaType': docker_http.CONFIG_JSON_MIME,
          'size': len(self.config_file())
      },
      'layers': [
          # Populated below.
      ]
  }
  blob_names = {}

  for layer in self._layers:
    content = self._gzipped_content(layer)
    name = docker_digest.SHA256(content)
    blob_names[name] = layer
    manifest['layers'].append({
        'digest': name,
        # TODO(user): Do we need to sniff the file to detect this?
        'mediaType': docker_http.LAYER_MIME,
        'size': len(content),
    })

  with self._lock:
    self._manifest = manifest
    self._blob_names = blob_names
    self._config_blob = config_blob
def _populate_manifest_and_blobs(self):
  """Populates self._manifest and self._blob_names."""
  config_blob = docker_digest.SHA256(self.config_file().encode('utf8'))
  manifest = {
      'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
      'schemaVersion': 2,
      'config': {
          'digest': config_blob,
          'mediaType': docker_http.CONFIG_JSON_MIME,
          'size': len(self.config_file())
      },
      'layers': [
          # Populated below.
      ]
  }
  blob_names = {}

  config = json.loads(self.config_file())
  diff_ids = config['rootfs']['diff_ids']

  for i, layer in enumerate(self._layers):
    name = None
    diff_id = diff_ids[i]
    media_type = docker_http.LAYER_MIME
    size = 0
    urls = []

    if diff_id in self._layer_sources:
      # _layer_sources contains foreign layers from the base image.
      name = self._layer_sources[diff_id]['digest']
      media_type = self._layer_sources[diff_id]['mediaType']
      size = self._layer_sources[diff_id]['size']
      if 'urls' in self._layer_sources[diff_id]:
        urls = self._layer_sources[diff_id]['urls']
    else:
      content = self._gzipped_content(layer)
      name = docker_digest.SHA256(content)
      size = len(content)
      blob_names[name] = layer

    layer_manifest = {
        'digest': name,
        'mediaType': media_type,
        'size': size,
    }
    if urls:
      layer_manifest['urls'] = urls

    manifest['layers'].append(layer_manifest)

  with self._lock:
    self._manifest = manifest
    self._blob_names = blob_names
    self._config_blob = config_blob
def _populate_manifest(self):
  base_layers = []
  if self._legacy_base:
    base_layers = json.loads(self._legacy_base.manifest())['layers']
  elif self._foreign_layers_manifest:
    # Manifest files found in tar files are actually a json list.
    # This code iterates through that collection and appends any foreign
    # layers described in the order found in the config file.
    base_layers += self._get_foreign_layers()

  # TODO(user): Update mimes here for oci_compat.
  self._manifest = json.dumps(
      {
          'schemaVersion': 2,
          'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
          'config': {
              'mediaType': docker_http.CONFIG_JSON_MIME,
              'size': len(self.config_file()),
              'digest': docker_digest.SHA256(self.config_file().encode('utf8'))
          },
          'layers': base_layers + [{
              'mediaType': docker_http.LAYER_MIME,
              'size': self.blob_size(digest),
              'digest': digest
          } for digest in self._layers]
      },
      sort_keys=True)
def _ProcessImage(self):
  """Constructs schema 2 manifest from schema 1 manifest."""
  raw_manifest_schema1 = self._v2_image.manifest()
  manifest_schema1 = json.loads(raw_manifest_schema1)

  self._config_file = config_file([
      json.loads(history.get('v1Compatibility', '{}'))
      for history in reversed(manifest_schema1.get('history', []))
  ], [
      self._GetDiffId(digest)
      for digest in reversed(self._v2_image.fs_layers())
  ])

  config_bytes = self._config_file.encode('utf8')
  config_descriptor = {
      'mediaType': docker_http.CONFIG_JSON_MIME,
      'size': len(config_bytes),
      'digest': docker_digest.SHA256(config_bytes)
  }

  manifest_schema2 = {
      'schemaVersion': 2,
      'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
      'config': config_descriptor,
      'layers': [{
          'mediaType': docker_http.LAYER_MIME,
          'size': self._v2_image.blob_size(digest),
          'digest': digest
      } for digest in reversed(self._v2_image.fs_layers())]
  }
  self._manifest = json.dumps(manifest_schema2, sort_keys=True)
def manifest(self):
  """The JSON manifest referenced by the tag/digest.

  Returns:
    The raw json manifest
  """
  if self._manifest is None:
    content = self.config_file().encode('utf-8')
    self._manifest = json.dumps(
        {
            'schemaVersion': 2,
            'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
            'config': {
                'mediaType': docker_http.CONFIG_JSON_MIME,
                'size': len(content),
                'digest': docker_digest.SHA256(content)
            },
            'layers': [{
                'mediaType': docker_http.LAYER_MIME,
                'size': self.blob_size(digest),
                'digest': digest
            } for digest in self._digest_to_blob]
        },
        sort_keys=True)
  return self._manifest
def _populate_manifest(self):
  base_layers = []
  if self._legacy_base:
    base_layers = json.loads(self._legacy_base.manifest())['layers']

  # TODO(user): Update mimes here for oci_compat.
  self._manifest = json.dumps(
      {
          'schemaVersion': 2,
          'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
          'config': {
              'mediaType': docker_http.CONFIG_JSON_MIME,
              'size': len(self.config_file()),
              'digest': docker_digest.SHA256(self.config_file().encode('utf8'))
          },
          'layers': base_layers + [{
              'mediaType': docker_http.LAYER_MIME,
              'size': self.blob_size(digest),
              'digest': digest
          } for digest in self._layers]
      },
      sort_keys=True)
def _diff_id(v1_img, blob):
  """Returns the diff_id of a layer, tolerating missing foreign layers."""
  try:
    unzipped = v1_img.uncompressed_layer(blob)
    return docker_digest.SHA256(unzipped)
  except IOError:
    # For foreign layers, we do not have the layer.tar.
    return v1_img.diff_id(blob)
def __init__(self, base, tar_gz, diff_id=None, overrides=None):
  """Creates a new layer on top of a base with optional tar.gz.

  Args:
    base: a base DockerImage for a new layer.
    tar_gz: an optional gzipped tarball, passed as bytes, containing a
        filesystem changeset.
    diff_id: an optional string containing the digest of the uncompressed
        tar_gz.
    overrides: an optional metadata.Overrides object of properties to
        override on the base image.
  """
  self._base = base
  manifest = json.loads(self._base.manifest())
  config_file = json.loads(self._base.config_file())

  overrides = overrides or metadata.Overrides()
  overrides = overrides.Override(created_by=docker_name.USER_AGENT)

  if tar_gz:
    self._blob = tar_gz
    self._blob_sum = docker_digest.SHA256(self._blob)
    manifest['layers'].append({
        'digest': self._blob_sum,
        'mediaType': docker_http.LAYER_MIME,
        'size': len(self._blob),
    })
    if not diff_id:
      diff_id = docker_digest.SHA256(self.uncompressed_blob(self._blob_sum))

    # Takes naked hex.
    overrides = overrides.Override(layers=[diff_id[len('sha256:'):]])
  else:
    # The empty layer.
    overrides = overrides.Override(layers=[docker_digest.SHA256(b'', '')])

  config_file = metadata.Override(config_file, overrides)

  self._config_file = json.dumps(config_file, sort_keys=True)
  utf8_encoded_config = self._config_file.encode('utf8')
  manifest['config']['digest'] = docker_digest.SHA256(utf8_encoded_config)
  manifest['config']['size'] = len(utf8_encoded_config)
  self._manifest = json.dumps(manifest, sort_keys=True)
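# A minimal usage sketch for the constructor above. It appears to be the
# __init__ of a layer-appending image class; the class name `Layer` and the
# parameter `base_image` below are illustrative assumptions, not names
# confirmed by this snippet.
import gzip
import io
import tarfile


def _example_append(base_image):
  # Build a tiny gzipped tarball containing one file as the changeset.
  buf = io.BytesIO()
  with tarfile.open(fileobj=buf, mode='w') as tar:
    data = b'hello'
    info = tarfile.TarInfo('app/hello.txt')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))
  tar_gz = gzip.compress(buf.getvalue())

  # Passing content appends a real layer; passing a falsy tar_gz would take
  # the empty-layer branch, which records the digest of b''.
  return Layer(base_image, tar_gz)  # `Layer` is a hypothetical name here.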
def _diff_id(v1_img, blob):
  """Returns the layer's diff_id, computing it if the image records none."""
  diff_id = v1_img.diff_id(blob)
  if not diff_id:
    unzipped = v1_img.uncompressed_layer(blob)
    diff_id = docker_digest.SHA256(unzipped)
  return diff_id
def _gen_digest_to_blob_and_u_blob(self, blob_lst, u_layer_lst):
  """Maps each blob's digest to its compressed and uncompressed contents."""
  digest_to_blob = {}
  digest_to_u_blob = {}
  for blob, u_layer in zip(blob_lst, u_layer_lst):
    digest = docker_digest.SHA256(blob)
    digest_to_blob[digest] = blob
    digest_to_u_blob[digest] = u_layer
  return digest_to_blob, digest_to_u_blob
def blob(self, digest):
  """Override."""
  # GET server1/v2/<name>/blobs/<digest>
  c = self._content('blobs/' + digest, cache=False)
  computed = docker_digest.SHA256(c)
  if digest != computed:
    raise DigestMismatchedError(
        'The returned content\'s digest did not match its content-address, '
        '%s vs. %s' % (digest, computed if c else '(content was empty)'))
  return c
def _GenerateV1LayerId(self, digest, parent, raw_config=None):
  """Derives a deterministic v1 layer id from a digest and its parent."""
  parts = digest.rsplit(':', 1)
  if len(parts) != 2:
    raise BadDigestException('Invalid Digest: %s, must be in '
                             'algorithm:blobSumHex format.' % digest)

  data = parts[1] + ' ' + parent
  if raw_config:
    data += ' ' + raw_config
  return docker_digest.SHA256(data.encode('utf8'), '')
def manifest(self, validate=True):
  """Override."""
  # GET server1/v2/<name>/manifests/<tag_or_digest>
  if isinstance(self._name, docker_name.Tag):
    return self._content('manifests/' + self._name.tag, self._accepted_mimes)
  else:
    assert isinstance(self._name, docker_name.Digest)
    c = self._content('manifests/' + self._name.digest, self._accepted_mimes)
    computed = docker_digest.SHA256(c)
    if validate and computed != self._name.digest:
      raise DigestMismatchedError(
          'The returned manifest\'s digest did not match requested digest, '
          '%s vs. %s' % (self._name.digest, computed))
    return c
def digest(self):
  """The digest of the manifest."""
  return docker_digest.SHA256(self.manifest().encode('utf8'))
def _diff_id(v1_img, blob):
  unzipped = v1_img.uncompressed_layer(blob)
  return docker_digest.SHA256(unzipped)
def GetCacheKey(self):
  return docker_digest.SHA256(self.GetCacheKeyRaw())
def digest(self):
  """The digest of the manifest."""
  return docker_digest.SHA256(self.manifest())
def _gen_diff_id_to_u_layer(self, u_layer_lst):
  """Maps each uncompressed layer to its diff_id."""
  diff_id_to_u_layer = {}
  for u_layer in u_layer_lst:
    diff_id_to_u_layer[docker_digest.SHA256(u_layer)] = u_layer
  return diff_id_to_u_layer
def multi_image_tarball(tag_to_image, tar, tag_to_v1_image=None):
  """Produce a "docker save" compatible tarball from the DockerImages.

  Args:
    tag_to_image: A dictionary of tags to the images they label.
    tar: the open tarfile into which we are writing the image tarball.
    tag_to_v1_image: A dictionary of tags to the v1 form of the images
        they label. If this isn't provided, the image is simply converted.
  """

  def add_file(filename, contents):
    contents_bytes = contents.encode('utf8')
    info = tarfile.TarInfo(filename)
    info.size = len(contents_bytes)
    tar.addfile(tarinfo=info, fileobj=io.BytesIO(contents_bytes))

  tag_to_v1_image = tag_to_v1_image or {}

  # The manifest.json file contains a list of the images to load
  # and how to tag them. Each entry consists of three fields:
  #  - Config: the name of the image's config_file() within the
  #            saved tarball.
  #  - Layers: the list of filenames for the blobs constituting
  #            this image. The order is the reverse of the v1
  #            ancestry ordering.
  #  - RepoTags: the list of tags to apply to this image once it
  #              is loaded.
  manifests = []

  for (tag, image) in six.iteritems(tag_to_image):
    # The config file is stored in a blob file named with its digest.
    digest = docker_digest.SHA256(image.config_file().encode('utf8'), '')
    add_file(digest + '.json', image.config_file())

    cfg = json.loads(image.config_file())
    diffs = set(cfg.get('rootfs', {}).get('diff_ids', []))

    v1_img = tag_to_v1_image.get(tag)
    if not v1_img:
      v2_img = v2_compat.V2FromV22(image)
      v1_img = v1_compat.V1FromV2(v2_img)
      tag_to_v1_image[tag] = v1_img

    # Add the manifests entry for this image.
    manifest = {
        'Config': digest + '.json',
        'Layers': [
            layer_id + '/layer.tar'
            # We don't just exclude the empty tar because we leave its diff_id
            # in the set when coming through v2_compat.V22FromV2.
            for layer_id in reversed(v1_img.ancestry(v1_img.top()))
            if _diff_id(v1_img, layer_id) in diffs and
            not json.loads(v1_img.json(layer_id)).get('throwaway')
        ],
        'RepoTags': [str(tag)]
    }

    layer_sources = {}
    input_manifest = json.loads(image.manifest())
    input_layers = input_manifest['layers']

    for input_layer in input_layers:
      if input_layer['mediaType'] == docker_http.FOREIGN_LAYER_MIME:
        diff_id = image.digest_to_diff_id(input_layer['digest'])
        layer_sources[diff_id] = input_layer

    if layer_sources:
      manifest['LayerSources'] = layer_sources

    manifests.append(manifest)

  # v2.2 tarballs are a superset of v1 tarballs, so delegate
  # to v1 to save itself.
  v1_save.multi_image_tarball(tag_to_v1_image, tar)

  add_file('manifest.json', json.dumps(manifests, sort_keys=True))
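# For reference, a single manifest.json entry produced by the function above
# has roughly this shape (digests shortened, values illustrative only; the
# LayerSources key appears only when the image carries foreign layers):
#
# [
#   {
#     "Config": "3f2c...e1.json",
#     "Layers": [
#       "d4a5...9c/layer.tar",
#       "71b2...0f/layer.tar"
#     ],
#     "RepoTags": ["gcr.io/some-project/some-image:latest"],
#     "LayerSources": {
#       "sha256:ab12...": {
#         "mediaType": "<foreign layer MIME>",
#         "size": 1234,
#         "digest": "sha256:ab12...",
#         "urls": ["https://example.com/layer"]
#       }
#     }
#   }
# ]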
def _diff_id(v1_img, blob):
  try:
    return v1_img.diff_id(blob)
  except ValueError:
    # No diff_id is recorded for this layer; hash the uncompressed bytes.
    unzipped = v1_img.uncompressed_layer(blob)
    return docker_digest.SHA256(unzipped)
def valid(cached_layer, digest):
  """Returns whether the cached layer's content matches the given digest."""
  with io.open(cached_layer, u'rb') as f:
    current_digest = docker_digest.SHA256(f.read(), '')
  return current_digest == digest
def _GetDiffId(self, digest):
  """Hash the uncompressed layer blob."""
  return docker_digest.SHA256(self._v2_image.uncompressed_blob(digest))
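# Every snippet above leans on docker_digest.SHA256. Below is a minimal
# sketch of what such a helper plausibly looks like, inferred from its call
# sites (a second argument of '' yields a naked hex digest); this is an
# assumption for illustration, not the library's confirmed source.
import hashlib


def SHA256(content, prefix='sha256:'):
  """Returns the sha256 hex digest of `content` bytes, with `prefix`."""
  return prefix + hashlib.sha256(content).hexdigest()


# SHA256(b'') gives the familiar empty-input digest:
#   'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
# while SHA256(b'', '') gives the naked hex form, as used above for v1 layer
# ids, config-file blob names, and cached-layer validation.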