Example 1
def test_none_read():
    class NoneReader(object):
        def read(self, size=None):
            return None

    stream = StreamSlice(NoneReader(), 0)
    assert stream.read(-1) is None
    assert stream.tell() == 0
Example 2
    def upload_chunk(self, app_config, input_fp, start_offset=0, length=-1):
        """
        Uploads a chunk of data found in the given input file-like interface. start_offset and
        length are optional and should match a range header if any was given.

        Returns the total number of bytes uploaded after this upload has completed. Raises a
        BlobUploadException if the upload failed.
        """
        assert start_offset is not None
        assert length is not None

        if start_offset > 0 and start_offset > self.blob_upload.byte_count:
            logger.error(
                "start_offset provided greater than blob_upload.byte_count")
            raise BlobRangeMismatchException()

        # Ensure that we won't go over the allowed maximum size for blobs.
        max_blob_size = bitmath.parse_string_unsafe(
            self.settings.maximum_blob_size)
        uploaded = bitmath.Byte(length + start_offset)
        if length > -1 and uploaded > max_blob_size:
            raise BlobTooLargeException(uploaded=uploaded.bytes,
                                        max_allowed=max_blob_size.bytes)

        location_set = {self.blob_upload.location_name}
        upload_error = None
        with CloseForLongOperation(app_config):
            if start_offset > 0 and start_offset < self.blob_upload.byte_count:
                # Skip the bytes which were received on a previous push, which are already stored and
                # included in the sha calculation
                overlap_size = self.blob_upload.byte_count - start_offset
                input_fp = StreamSlice(input_fp, overlap_size)

                # Update our upload bounds to reflect the skipped portion of the overlap
                start_offset = self.blob_upload.byte_count
                length = max(length - overlap_size, 0)

            # We use this to escape early in case we have already processed all of the bytes the user
            # wants to upload.
            if length == 0:
                return self.blob_upload.byte_count

            input_fp = wrap_with_handler(input_fp,
                                         self.blob_upload.sha_state.update)

            if self.extra_blob_stream_handlers:
                for handler in self.extra_blob_stream_handlers:
                    input_fp = wrap_with_handler(input_fp, handler)

            # If this is the first chunk and we're starting at the 0 offset, add a handler to gunzip the
            # stream so we can determine the uncompressed size. We'll throw out this data if another chunk
            # comes in, but in the common case the docker client only sends one chunk.
            size_info = None
            if start_offset == 0 and self.blob_upload.chunk_count == 0:
                size_info, fn = calculate_size_handler()
                input_fp = wrap_with_handler(input_fp, fn)

            start_time = time.time()
            length_written, new_metadata, upload_error = self.storage.stream_upload_chunk(
                location_set,
                self.blob_upload.upload_id,
                start_offset,
                length,
                input_fp,
                self.blob_upload.storage_metadata,
                content_type=BLOB_CONTENT_TYPE,
            )

            if upload_error is not None:
                logger.error("storage.stream_upload_chunk returned error %s",
                             upload_error)
                raise BlobUploadException(upload_error)

            # Update the chunk upload time and push bytes metrics.
            chunk_upload_duration.labels(
                list(location_set)[0]).observe(time.time() - start_time)
            pushed_bytes_total.inc(length_written)

        # Ensure we have not gone beyond the max layer size.
        new_blob_bytes = self.blob_upload.byte_count + length_written
        new_blob_size = bitmath.Byte(new_blob_bytes)
        if new_blob_size > max_blob_size:
            raise BlobTooLargeException(uploaded=new_blob_size.bytes,
                                        max_allowed=max_blob_size.bytes)

        # If we determined an uncompressed size and this is the first chunk, add it to the blob.
        # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
        uncompressed_byte_count = self.blob_upload.uncompressed_byte_count
        if size_info is not None and self.blob_upload.chunk_count == 0 and size_info.is_valid:
            uncompressed_byte_count = size_info.uncompressed_size
        elif length_written > 0:
            # Otherwise, if we wrote some bytes and the above conditions were not met, then we don't
            # know the uncompressed size.
            uncompressed_byte_count = None

        self.blob_upload = registry_model.update_blob_upload(
            self.blob_upload,
            uncompressed_byte_count,
            new_metadata,
            new_blob_bytes,
            self.blob_upload.chunk_count + 1,
            self.blob_upload.sha_state,
        )
        if self.blob_upload is None:
            raise BlobUploadException("Could not complete upload of chunk")

        return new_blob_bytes
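
For context on how this method is typically driven, a registry request handler would parse any range header on the PATCH request and feed the request body straight into upload_chunk. The sketch below only illustrates that call pattern under assumed names: handle_patch_chunk, app_config, request_headers, request_stream, uploader, and the (status, body) return values are hypothetical, while the exceptions are the ones the method above documents.

# Hypothetical call site for upload_chunk(); every name other than upload_chunk and the
# exception classes (handle_patch_chunk, app_config, request_headers, request_stream,
# uploader, the (status, body) return values) is an assumption made for illustration.
def handle_patch_chunk(app_config, request_headers, request_stream, uploader):
    # Parse an optional "<start>-<end>" range header; a length of -1 means "unknown".
    start_offset, length = 0, -1
    range_header = request_headers.get("Content-Range")
    if range_header:
        start_text, end_text = range_header.split("-", 1)
        start_offset = int(start_text)
        length = int(end_text) - start_offset + 1

    try:
        # upload_chunk returns the total number of bytes stored for this upload so far.
        total_bytes = uploader.upload_chunk(app_config, request_stream, start_offset, length)
    except BlobTooLargeException:
        return 413, "blob exceeds the configured maximum size"
    except BlobRangeMismatchException:
        return 416, "requested range does not match the upload state"
    except BlobUploadException as bue:
        return 500, str(bue)

    return 202, {"uploaded": total_bytes}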
Example 3
def test_slice_explictread():
    fileobj = StringIO('this is a cool test')
    stream = StreamSlice(fileobj, 5, 9)
    assert stream.read(2) == 'is'
    assert stream.read(5) == ' a'
    assert len('is a') == stream.tell()
Example 4
def test_startindex_limitedread():
    fileobj = StringIO('this is a cool test')
    stream = StreamSlice(fileobj, 5)
    assert stream.read(4) == 'is a'
    assert 4 == stream.tell()
Example 5
def test_startindex():
    fileobj = StringIO('this is a cool test')
    stream = StreamSlice(fileobj, 5)
    assert stream.read(-1) == 'is a cool test'
    assert len('is a cool test') == stream.tell()
Example 6
def test_noslice():
    fileobj = StringIO('this is a cool test')
    stream = StreamSlice(fileobj, 0)
    assert stream.read(-1) == 'this is a cool test'
    assert len('this is a cool test') == stream.tell()
Example 7
def test_startindex_limitedread():
    fileobj = BytesIO(b"this is a cool test")
    stream = StreamSlice(fileobj, 5)
    assert stream.read(4) == b"is a"
    assert 4 == stream.tell()
Example 8
def test_slice_explictread():
    fileobj = BytesIO(b"this is a cool test")
    stream = StreamSlice(fileobj, 5, 9)
    assert stream.read(2) == b"is"
    assert stream.read(5) == b" a"
    assert len(b"is a") == stream.tell()
Example 9
def test_startindex():
    fileobj = BytesIO(b"this is a cool test")
    stream = StreamSlice(fileobj, 5)
    assert stream.read(-1) == b"is a cool test"
    assert len(b"is a cool test") == stream.tell()
Example 10
def test_noslice():
    fileobj = BytesIO(b"this is a cool test")
    stream = StreamSlice(fileobj, 0)
    assert stream.read(-1) == b"this is a cool test"
    assert len(b"this is a cool test") == stream.tell()
Example 11
def test_slice_explictread():
    fileobj = StringIO("this is a cool test")
    stream = StreamSlice(fileobj, 5, 9)
    assert stream.read(2) == "is"
    assert stream.read(5) == " a"
    assert len("is a") == stream.tell()
Example 12
def test_startindex():
    fileobj = StringIO("this is a cool test")
    stream = StreamSlice(fileobj, 5)
    assert stream.read(-1) == "is a cool test"
    assert len("is a cool test") == stream.tell()
Example 13
def test_noslice():
    fileobj = StringIO("this is a cool test")
    stream = StreamSlice(fileobj, 0)
    assert stream.read(-1) == "this is a cool test"
    assert len("this is a cool test") == stream.tell()
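
The StreamSlice tests above only pin down the wrapper's observable contract: skip an initial offset, optionally stop at an end offset, pass through a None result from the underlying reader, and report the number of bytes handed out via tell(). The class below is a minimal sketch written to satisfy these tests; it is not the project's actual StreamSlice implementation, and the SimpleStreamSlice name is made up for illustration.

# Minimal sketch matching the behaviour exercised by the tests above; SimpleStreamSlice
# is an illustrative stand-in, not the project's real StreamSlice class.
class SimpleStreamSlice(object):
    def __init__(self, fileobj, start_offset=0, end_offset=None):
        self._fileobj = fileobj
        self._remaining = None if end_offset is None else end_offset - start_offset
        self._bytes_read = 0

        # Consume and discard everything before the slice's start offset.
        while start_offset > 0:
            skipped = fileobj.read(start_offset)
            if not skipped:
                break
            start_offset -= len(skipped)

    def read(self, size=-1):
        # Clamp the request to the slice's end offset, if one was given.
        if self._remaining is not None:
            size = self._remaining if size < 0 else min(size, self._remaining)

        result = self._fileobj.read(size)
        if result is None:
            return None

        self._bytes_read += len(result)
        if self._remaining is not None:
            self._remaining -= len(result)
        return result

    def tell(self):
        # Bytes returned from the slice so far, matching what the tests assert.
        return self._bytes_read

Substituting this class for StreamSlice would make every test above pass, for both the StringIO and BytesIO variants, since it never assumes a particular string type from the wrapped reader.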