def save(self, name, fobj, max_length=None, blob_object=None):
    """Upload *fobj* to cloud storage under *name* and return *name*.

    Database files (per ``self.is_database_file``) are gzip-compressed into an
    in-memory buffer before upload and tagged with a short cache lifetime plus
    ``Content-Encoding: gzip``. Empty files are skipped with a warning (the
    name is still returned so callers proceed normally).

    :param name: destination object name/path in the bucket
    :param fobj: a binary file-like object positioned anywhere (we seek to 0)
    :param max_length: accepted for Django storage-API compatibility; unused here
    :param blob_object: optional pre-built Blob to upload into; when absent a
        new Blob is created against ``self.bucket``
    :returns: ``name``
    """
    blob = blob_object if blob_object else Blob(name, self.bucket)

    buffer = None
    try:
        # Set a short max-age if we're uploading to content/databases, and
        # compress the database file so that users save bandwidth and
        # download faster.
        if self.is_database_file(name):
            blob.cache_control = "private, max-age={}, no-transform".format(
                CONTENT_DATABASES_MAX_AGE)
            buffer = BytesIO()
            with GzipFile(fileobj=buffer, mode="w") as compressed:
                compressed.write(fobj.read())
            blob.content_encoding = "gzip"
            # From here on, upload the compressed bytes instead of the original.
            fobj = buffer

        # Determine the current file's mimetype based on the name.
        # Import determine_content_type lazily in here, so we don't get into
        # an infinite loop with circular dependencies.
        from contentcuration.utils.storage_common import determine_content_type
        content_type = determine_content_type(name)

        # Force the file position to 0, because that's what google wants.
        fobj.seek(0)

        if self._is_file_empty(fobj):
            logging.warning(
                "Stopping the upload of an empty file: {}".format(name))
            return name

        blob.upload_from_file(
            fobj,
            content_type=content_type,
        )
    finally:
        # Close the BytesIO buffer (if one was created) on every exit path —
        # including the empty-file early return, which previously leaked it.
        if buffer:
            buffer.close()
    return name