def create_callback(encoder: MultipartEncoder):
    """Create a callback suitable for upload_file."""
    with emit.progress_bar("Uploading...", encoder.len, delta=False) as progress:

        def progress_callback(monitor: MultipartEncoderMonitor):
            progress.advance(monitor.bytes_read)

        return progress_callback
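
# A minimal usage sketch (not part of the original source) showing how the callback
# plugs into requests_toolbelt: the monitor invokes the callback after every read from
# the encoder, so the progress bar tracks bytes as they leave the process. The
# `upload_with_progress` helper, the "binary" field name, and the `upload_file`
# parameter are assumptions for illustration.
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor


def upload_with_progress(filepath, upload_file):
    """Wire create_callback into a monitor and hand it to an uploader."""
    with open(filepath, "rb") as fh:
        encoder = MultipartEncoder(
            fields={"binary": (filepath, fh, "application/octet-stream")}
        )
        monitor = MultipartEncoderMonitor(encoder, create_callback(encoder))
        return upload_file(monitor)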
def push_file(self, filepath) -> str:
    """Push the bytes from filepath to the Storage."""
    emit.progress(f"Starting to push {str(filepath)!r}")
    with filepath.open("rb") as fh:
        encoder = MultipartEncoder(
            fields={"binary": (filepath.name, fh, "application/octet-stream")}
        )

        # create a monitor (so that progress can be displayed) and call the real pusher
        monitor = MultipartEncoderMonitor(encoder)
        with emit.progress_bar("Uploading...", monitor.len, delta=False) as progress:
            monitor.callback = lambda mon: progress.advance(mon.bytes_read)
            response = self._storage_push(monitor)

    result = response.json()
    if not result["successful"]:
        raise CommandError("Server error while pushing file: {}".format(result))
    upload_id = result["upload_id"]
    emit.progress(f"Uploading bytes ended, id {upload_id}")
    return upload_id
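
# The `_storage_push` method called above is not shown; this is a hedged sketch of what
# it plausibly does. requests can stream a MultipartEncoderMonitor directly as the
# request body (the monitor exposes read() and len), as long as Content-Type carries the
# multipart boundary. The `self._session` attribute and the URL path are assumptions.
def _storage_push(self, monitor):
    """POST the multipart body to the storage endpoint, streaming from the monitor."""
    url = self.storage_base_url + "/unscanned-upload/"  # hypothetical endpoint
    headers = {
        "Content-Type": monitor.content_type,  # includes the multipart boundary
        "Accept": "application/json",
    }
    return self._session.post(url, headers=headers, data=monitor)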
def upload_from_local(self, digest: str) -> Union[str, None]:
    """Upload the image from the local registry.

    Returns the new remote digest, or None if the image was not found locally.
    """
    dockerd = LocalDockerdInterface()

    # validate the image is present locally
    emit.progress("Checking image is present locally")
    image_info = dockerd.get_image_info(digest)
    if image_info is None:
        return None
    local_image_size = image_info["Size"]

    emit.progress(f"Getting the image from the local repo; size={local_image_size}")
    response = dockerd.get_streamed_image_content(digest)

    tmp_exported = tempfile.NamedTemporaryFile(mode="wb", delete=False)
    with emit.progress_bar("Reading image...", local_image_size) as progress:
        for chunk in response.iter_content(CHUNK_SIZE):
            progress.advance(len(chunk))
            tmp_exported.file.write(chunk)
    tmp_exported.close()

    # open the image tar and inspect it to get the config and layers from its only
    # manifest (it's a list of one)
    image_tar = tarfile.open(tmp_exported.name)
    local_manifest = json.load(image_tar.extractfile("manifest.json"))
    (local_manifest,) = local_manifest
    config_name = local_manifest.get("Config")
    layer_names = local_manifest["Layers"]
    manifest = {
        "mediaType": MANIFEST_V2_MIMETYPE,
        "schemaVersion": 2,
    }

    if config_name is not None:
        fpath, size, digest = self._extract_file(image_tar, config_name)
        emit.progress(f"Uploading config blob, size={size}, digest={digest}")
        self._upload_blob(fpath, size, digest)
        manifest["config"] = {
            "digest": digest,
            "mediaType": CONFIG_MIMETYPE,
            "size": size,
        }

    manifest["layers"] = manifest_layers = []
    len_layers = len(layer_names)
    for idx, layer_name in enumerate(layer_names, 1):
        fpath, size, digest = self._extract_file(image_tar, layer_name, compress=True)
        emit.progress(f"Uploading layer blob {idx}/{len_layers}, size={size}, digest={digest}")
        self._upload_blob(fpath, size, digest)
        manifest_layers.append(
            {
                "digest": digest,
                "mediaType": LAYER_MIMETYPE,
                "size": size,
            }
        )

    # remove the temp tar file
    os.unlink(tmp_exported.name)

    # upload the manifest
    manifest_data = json.dumps(manifest)
    digest = "sha256:{}".format(hashlib.sha256(manifest_data.encode("utf8")).hexdigest())
    self.registry.upload_manifest(manifest_data, digest)
    return digest
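
# The `_extract_file` helper used above is not shown; this is a minimal sketch of what
# it plausibly does given the signature used here: extract a member from the tar to a
# temporary file (optionally gzip-compressing it, since registries expect gzipped
# layers), returning the path, the resulting size, and the sha256 digest. All internals
# are assumptions for illustration.
import gzip
import hashlib
import shutil
import tarfile
import tempfile


def _extract_file(image_tar: tarfile.TarFile, name: str, compress: bool = False):
    """Extract a tar member to a temp file, returning (path, size, digest)."""
    src = image_tar.extractfile(name)
    tmp = tempfile.NamedTemporaryFile(delete=False)
    if compress:
        # wrap the destination so the bytes land gzip-compressed on disk
        with gzip.GzipFile(fileobj=tmp, mode="wb") as dst:
            shutil.copyfileobj(src, dst)
    else:
        shutil.copyfileobj(src, tmp)
    tmp.close()

    # hash and measure the file actually written (the compressed one, if compressed)
    hasher = hashlib.sha256()
    size = 0
    with open(tmp.name, "rb") as fh:
        for chunk in iter(lambda: fh.read(65536), b""):
            hasher.update(chunk)
            size += len(chunk)
    return tmp.name, size, "sha256:" + hasher.hexdigest()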
def upload_blob(self, filepath, size, digest):
    """Upload the blob from a file."""
    # get the first URL to start pushing the blob
    emit.progress("Getting URL to push the blob")
    url = self._get_url("blobs/uploads/")
    response = self._hit("POST", url)
    assert_response_ok(response, expected_status=202)
    upload_url = response.headers["Location"]
    range_from, range_to_inclusive = [int(x) for x in response.headers["Range"].split("-")]
    emit.progress(f"Got upload URL ok with range {range_from}-{range_to_inclusive}")
    if range_from != 0:
        raise CraftError(
            "Server error: bad range received",
            details=f"Range={response.headers['Range']!r}",
        )

    # this `range_to_inclusive` adjustment is a side effect of the range being inclusive:
    # when the server says it already has "0-80", it has 81 bytes (0 to 80 inclusive), so
    # we set from_position to 81 and read from there. Going down, "0-1" would mean it has
    # bytes 0 and 1; but "0-0" is special, it's what the server returns when it does not
    # have ANY bytes at all. So we comply with the Range header while handling that
    # special case; the worst that could happen is that we start from 0 when the server
    # already has 1 byte, which is not a problem.
    if range_to_inclusive == 0:
        range_to_inclusive = -1
    from_position = range_to_inclusive + 1

    # start the chunked upload
    with open(filepath, "rb") as fh:
        with emit.progress_bar("Uploading...", size) as progress:
            if from_position:
                fh.seek(from_position)
                progress.advance(from_position)
            while True:
                chunk = fh.read(CHUNK_SIZE)
                if not chunk:
                    break
                progress.advance(len(chunk))
                end_position = from_position + len(chunk)
                headers = {
                    "Content-Length": str(len(chunk)),
                    "Content-Range": "{}-{}".format(from_position, end_position),
                    "Content-Type": OCTET_STREAM_MIMETYPE,
                }
                response = self._hit(
                    "PATCH", upload_url, headers=headers, data=chunk, log=False
                )
                assert_response_ok(response, expected_status=202)
                upload_url = response.headers["Location"]
                from_position += len(chunk)

    headers = {
        "Content-Length": "0",
        "Connection": "close",
    }
    emit.progress("Closing the upload")
    closing_url = "{}&digest={}".format(upload_url, digest)
    response = self._hit("PUT", closing_url, headers=headers, data="")
    assert_response_ok(response, expected_status=201)
    emit.progress("Upload finished OK")
    if response.headers["Docker-Content-Digest"] != digest:
        raise CraftError("Server error: the upload is corrupted")
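
# A small worked example of the inclusive-Range arithmetic described in the comment
# above, factored into a standalone helper. The `resume_position` name is hypothetical,
# added here only to illustrate the two interesting cases.
def resume_position(range_header: str) -> int:
    """Translate the server's inclusive Range header into a file offset to resume at."""
    range_from, range_to_inclusive = [int(x) for x in range_header.split("-")]
    if range_from != 0:
        raise ValueError(f"bad range received: {range_header!r}")
    if range_to_inclusive == 0:
        # "0-0" is the server's way of saying it has no bytes at all
        range_to_inclusive = -1
    return range_to_inclusive + 1


assert resume_position("0-0") == 0    # nothing uploaded yet, start from scratch
assert resume_position("0-80") == 81  # server holds bytes 0..80, resume at byte 81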