def upload(self, image):
  """Upload the layers of the given image.

  Args:
    image: the image to upload.
  """
  # A manifest that already exists (by digest) implies every blob it
  # references exists too, so skip the N per-layer existence checks.
  if self._manifest_exists(image):
    if isinstance(self._name, docker_name.Tag):
      if self._remote_tag_digest() == util.Digest(image.manifest()):
        logging.info('Tag points to the right manifest, skipping push.')
        return
      logging.info('Manifest exists, skipping blob uploads and pushing tag.')
    else:
      logging.info('Manifest exists, skipping upload.')
  elif self._threads == 1:
    # Sequential path: push each blob one at a time.
    for blob_digest in image.blob_set():
      self._upload_one(image, blob_digest)
  else:
    # Fan the blob uploads out across a thread pool.
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=self._threads) as executor:
      pending = [
          executor.submit(self._upload_one, image, blob_digest)
          for blob_digest in image.blob_set()
      ]
      for future in concurrent.futures.as_completed(pending):
        # Re-raise any exception from a worker thread.
        future.result()

  # This should complete the upload by uploading the manifest.
  self._put_manifest(image)
def ResolveV2Tag(tag):
  """Resolve a tag to the digest of its v2 manifest, or None if absent."""
  with v2_image.FromRegistry(
      basic_creds=CredentialProvider(), name=tag,
      transport=http.Http()) as v2_img:
    # Guard clause: an unknown tag resolves to nothing.
    if not v2_img.exists():
      return None
    return v2_util.Digest(v2_img.manifest())
def walk_string(s):
  """Resolve a string naming a Docker tag to a digest-pinned string.

  Args:
    s: a string that may name a Docker tag.

  Returns:
    The tag's repository pinned at its manifest digest, or s unchanged
    when s does not parse or resolve as a tag.
  """
  try:
    as_tag = docker_name.Tag(s)

    # Honor any explicit override before touching the network.
    if as_tag in overrides:
      return str(overrides[as_tag])

    # Resolve the tag to digest using the standard
    # Docker keychain logic.
    creds = docker_creds.DefaultKeychain.Resolve(as_tag)
    with v2_2_image.FromRegistry(as_tag, creds, transport) as img:
      if img.exists():
        digest = str(
            docker_name.Digest('{repository}@{digest}'.format(
                repository=as_tag.as_repository(),
                digest=util.Digest(img.manifest()))))
      else:
        # If the tag doesn't exist as v2.2, then try as v2.
        with v2_image.FromRegistry(as_tag, creds, transport) as img:
          digest = str(
              docker_name.Digest('{repository}@{digest}'.format(
                  repository=as_tag.as_repository(),
                  digest=v2_util.Digest(img.manifest()))))

    # Make sure we consistently resolve all instances of a tag,
    # since it is technically possible to have a race here.
    overrides[as_tag] = digest
    return digest
  # Was a bare `except:`, which also swallowed SystemExit and
  # KeyboardInterrupt; narrow to Exception so interrupts propagate.
  except Exception:  # pylint: disable=broad-except
    return s
def _manifest_exists(self, image):
  """Check the remote for the given manifest by digest."""
  digest = util.Digest(image.manifest())
  # GET the manifest by digest; a 200 means it is already present.
  url = '{base_url}/manifests/{digest}'.format(
      base_url=self._base_url(), digest=digest)
  resp, unused_content = self._transport.Request(
      url, method='GET', accepted_codes=[httplib.OK, httplib.NOT_FOUND])
  return resp.status == httplib.OK  # pytype: disable=attribute-error
def manifest(self, validate=True):
  """Override."""
  # GET server1/v2/<name>/manifests/<tag_or_digest>
  if isinstance(self._name, docker_name.Tag):
    return self._content('manifests/' + self._name.tag)

  assert isinstance(self._name, docker_name.Digest)
  content = self._content('manifests/' + self._name.digest)
  # v2 removes signatures to compute the manifest digest, this is hard.
  computed = util.Digest(content)
  if validate and computed != self._name.digest:
    raise DigestMismatchedError(
        'The returned manifest\'s digest did not match requested digest, '
        '%s vs. %s' % (self._name.digest, computed))
  return content
def upload(self, image):
  """Upload the layers of the given image.

  Args:
    image: docker_image.DockerImage, the image to upload.
  """
  # If the manifest (by digest) exists, then avoid N layer existence
  # checks (they must exist).
  if self._manifest_exists(image):
    # Only consult the remote tag when this session targets a tag;
    # mirrors the guard used by the threaded upload variant. A push
    # by digest has no tag to compare or update.
    if isinstance(self._name, docker_name.Tag):
      manifest_digest = util.Digest(image.manifest())
      if self._remote_tag_digest() == manifest_digest:
        logging.info('Tag points to the right manifest, skipping push.')
        return
      logging.info('Manifest exists, skipping blob uploads and pushing tag.')
    else:
      logging.info('Manifest exists, skipping upload.')
  else:
    # TODO(user): Parallelize this loop (e.g. futures.ThreadPoolExecutor)
    for digest in image.blob_set():
      self._upload_one(image, digest)

  # This should complete the upload by uploading the manifest.
  self._put_manifest(image)
def digest(self):
  """The digest of the manifest."""
  manifest_content = self.manifest()
  return util.Digest(manifest_content)