Example #1
    def upload_blob(self, container: Container, filename: FileLike,
                    blob_name: str = None, acl: str = None,
                    meta_data: MetaData = None, content_type: str = None,
                    content_disposition: str = None, cache_control: str = None,
                    chunk_size: int = 1024,
                    extra: ExtraOptions = None) -> Blob:
        meta_data = {} if meta_data is None else meta_data
        extra = {} if extra is None else extra

        blob_name = blob_name or validate_file_or_path(filename)

        if not content_type:
            # Infer the content type from the file path, or fall back to
            # the blob name's extension when uploading from a stream.
            if isinstance(filename, str):
                content_type = file_content_type(filename)
            else:
                content_type = file_content_type(blob_name)

        if isinstance(filename, str):
            self.client.fput_object(container.name,
                                    blob_name,
                                    filename,
                                    content_type=content_type,
                                    metadata=meta_data)
        else:
            # Only consume the stream to measure it when the caller did not
            # supply an explicit 'length' in extra; the original default
            # argument evaluated len(filename.read()) unconditionally.
            length = extra.pop('length', None)
            if length is None:
                length = len(filename.read())
            filename.seek(0)
            self.client.put_object(container.name,
                                   blob_name,
                                   filename,
                                   length,
                                   content_type=content_type,
                                   metadata=meta_data)
        return self.get_blob(container, blob_name)
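
A minimal usage sketch for this method, assuming storage is an instance of this Minio-backed driver and that get_container behaves as elsewhere in the library; the container name and paths are hypothetical:

container = storage.get_container('avatars')  # hypothetical container

# Upload from a file path: blob_name and content_type are inferred.
blob = storage.upload_blob(container, '/tmp/picture.png')

# Upload from an open binary stream: pass blob_name explicitly so the
# content type can be guessed from its extension.
with open('/tmp/picture.png', 'rb') as stream:
    blob = storage.upload_blob(container, stream, blob_name='picture.png')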
Example #2
    def download_blob(self, blob: Blob, destination: FileLike) -> None:
        data = self.client.get_object(blob.container.name, blob.name)
        try:
            if isinstance(destination, str):
                with open(destination, "wb") as blob_data:
                    for d in data.stream(4096):
                        blob_data.write(d)
            else:
                for d in data.stream(4096):
                    destination.write(d)
        finally:
            # Return the underlying urllib3 connection to the pool.
            data.close()
            data.release_conn()
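
A companion sketch showing both destination forms, again with a hypothetical storage driver and container; io.BytesIO stands in for any writable binary stream:

import io

blob = storage.get_blob(container, 'picture.png')

# Download to a local file path.
storage.download_blob(blob, '/tmp/picture-copy.png')

# Or download into an in-memory stream.
buffer = io.BytesIO()
storage.download_blob(blob, buffer)
data = buffer.getvalue()  # raw bytes of the object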
Example #3
    def download_blob(self, blob: Blob, destination: FileLike) -> None:
        try:
            data = self.object_store.download_object(
                obj=blob.name, container=blob.container.name)

            if isinstance(destination, str):
                with open(destination, 'wb') as out:
                    out.write(data)
            else:
                destination.write(data)
        except ResourceNotFound as err:
            raise NotFoundError(messages.BLOB_NOT_FOUND %
                                (blob.name, blob.container.name)) from err
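
A sketch of the error path, assuming NotFoundError is importable from cloudstorage.exceptions as in the rest of the library; names are illustrative:

from cloudstorage.exceptions import NotFoundError

try:
    storage.download_blob(blob, '/tmp/copy.bin')
except NotFoundError as err:
    # Raised when the object no longer exists in the container.
    print('Blob missing:', err)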
Example #4
def file_checksum(filename: FileLike,
                  hash_type: str = 'md5',
                  block_size: int = 4096) -> HASH:
    """Returns checksum for file.

    .. code-block:: python

        from cloudstorage.helpers import file_checksum

        picture_path = '/path/picture.png'
        file_checksum(picture_path, hash_type='sha256')
        # '03ef90ba683795018e541ddfb0ae3e958a359ee70dd4fccc7e747ee29b5df2f8'

    Source: `get-md5-hash-of-big-files-in-python <https://stackoverflow.com/
    questions/1131220/get-md5-hash-of-big-files-in-python>`_

    :param filename: File path or stream.
    :type filename: str or FileLike

    :param hash_type: Hash algorithm function name.
    :type hash_type:  str

    :param block_size: (optional) Chunk size.
    :type block_size: int

    :return: Hash of file.
    :rtype: :class:`_hashlib.HASH`

    :raise RuntimeError: If the hash algorithm is not found in :mod:`hashlib`.

    .. versionchanged:: 0.4
      Returns :class:`_hashlib.HASH` instead of `HASH.hexdigest()`.
    """
    try:
        file_hash = getattr(hashlib, hash_type)()
    except AttributeError:
        raise RuntimeError('Invalid or unsupported hash type: %s' % hash_type)

    if isinstance(filename, str):
        with open(filename, 'rb') as file_:
            for chunk in read_in_chunks(file_, block_size=block_size):
                file_hash.update(chunk)
    else:
        for chunk in read_in_chunks(filename, block_size=block_size):
            file_hash.update(chunk)
        # rewind the stream so it can be re-read later
        if filename.seekable():
            filename.seek(0)

    return file_hash
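
A complementary sketch for the stream branch: because the helper rewinds a seekable stream, the caller can read it again afterwards. io.BytesIO is an illustrative stand-in for any binary stream:

import io

stream = io.BytesIO(b'hello world')
digest = file_checksum(stream, hash_type='sha256').hexdigest()
assert stream.read() == b'hello world'  # rewound by file_checksum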
Example #5
    def download_blob(self, blob: Blob, destination: FileLike) -> None:
        blob_path = self._get_file_path(blob)

        if isinstance(destination, str):
            base_name = os.path.basename(destination)
            # An empty basename means the destination ends with a path
            # separator, so it must be an existing directory.
            if not base_name and not os.path.exists(destination):
                raise CloudStorageError("Path %s does not exist." %
                                        destination)

            if not base_name:
                # Directory destination: keep the blob's own name.
                file_path = os.path.join(destination, blob.name)
            else:
                file_path = destination

            shutil.copy(blob_path, file_path)
        else:
            with open(blob_path, "rb") as blob_file:
                for data in read_in_chunks(blob_file):
                    destination.write(data)
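
A sketch of the two path forms this local driver accepts, with hypothetical paths: a trailing separator marks the destination as a directory, so the blob keeps its own name; otherwise the path names the target file.

# Directory destination: written to /tmp/downloads/<blob.name>.
storage.download_blob(blob, '/tmp/downloads/')

# File destination: written to exactly this path.
storage.download_blob(blob, '/tmp/downloads/copy.png')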
Example #6
def read_in_chunks(file_object: FileLike,
                   block_size: int = 4096) -> Generator[bytes, None, None]:
    """Return a generator which yields data in chunks.

    Source: `read-file-in-chunks-ram-usage-read-strings-from-binary-file
    <https://stackoverflow.com/questions/17056382/
    read-file-in-chunks-ram-usage-read-strings-from-binary-files>`_

    :param file_object: File object to read in chunks.
    :type file_object: file object

    :param block_size: (optional) Chunk size.
    :type block_size: int

    :yield: The next chunk in the file object.
    :yield type: `bytes`
    """
    for chunk in iter(lambda: file_object.read(block_size), b''):
        yield chunk
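
A short usage sketch; handle_chunk is a hypothetical per-chunk callback:

with open('/path/picture.png', 'rb') as file_:
    for chunk in read_in_chunks(file_, block_size=8192):
        handle_chunk(chunk)  # hypothetical processing step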