def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._get_valid_path(name)
    guessed_type, content_encoding = mimetypes.guess_type(name)
    content_type = (
        _content_type(content) or
        guessed_type or
        self.default_content_type)

    # Unwrap django file (wrapped by parent's save call)
    if isinstance(content, File):
        content = content.file

    content.seek(0)
    self.service.create_blob_from_stream(
        container_name=self.azure_container,
        blob_name=name,
        stream=content,
        content_settings=ContentSettings(
            content_type=content_type,
            content_encoding=content_encoding,
            cache_control=self.cache_control),
        max_connections=self.upload_max_conn,
        timeout=self.timeout)
    return cleaned_name
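# A standalone sketch of the content-type resolution order used in _save()
# above: an explicit type on the uploaded file wins, then the type guessed
# from the name, then a fallback default. _content_type() in the snippet is
# assumed to read content.content_type when the upload provides one; the
# helper name below is illustrative only.
import mimetypes


def resolve_content_type(explicit_type, name, default='application/octet-stream'):
    guessed_type, _encoding = mimetypes.guess_type(name)
    return explicit_type or guessed_type or default


assert resolve_content_type(None, 'report.pdf') == 'application/pdf'
assert resolve_content_type('text/plain', 'report.pdf') == 'text/plain'
assert resolve_content_type(None, 'no-extension') == 'application/octet-stream'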
def save(self, name, content, save=True):
    """
    The default FieldFile implementation of save uploads the file to storage.
    In our case we do not want to save the file to the storage, because there
    is none. Instead, we generate a presigned url which can be used by the
    frontend to upload directly to the storage.

    We do want to keep using the rest of the logic of saving the instance,
    such as the logic to see if a name is available on s3.
    """
    name = self.field.generate_filename(self.instance, name)

    # Get the proper name for the file, as it will actually be saved.
    if name is None:
        name = content.name

    if not hasattr(content, 'chunks'):
        content = File(content, name)

    # Below is from core.files.storage.save.
    name = self.storage.get_available_name(
        name, max_length=self.field.max_length)
    # Above is from core.files.storage.save.

    # Below is from backends.s3boto._save minus the actual storing to s3
    # with _save_content.
    self.name = clean_name(name)
    # Above is from backends.s3boto._save.

    setattr(self.instance, self.field.name, self.name)
    self._committed = True

    # Save the object because it has changed, unless save is False.
    if save:
        self.instance.save()
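# A minimal sketch (not taken from the snippets above) of how a FieldFile like
# the presigned-upload save() above is usually wired into a model field:
# Django selects the FieldFile subclass through the FileField's attr_class
# attribute. The names PresignedFieldFile and PresignedFileField are
# illustrative assumptions, not part of the original code.
from django.db.models.fields.files import FieldFile, FileField


class PresignedFieldFile(FieldFile):
    """FieldFile whose save() records the name but never uploads content."""

    def save(self, name, content, save=True):
        # The body would be the save() implementation shown above.
        raise NotImplementedError


class PresignedFileField(FileField):
    # attr_class tells Django which FieldFile subclass wraps instance values.
    attr_class = PresignedFieldFile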
def size(self, name):
    name = self._normalize_name(clean_name(name))
    return (
        BackblazeB2File.objects
        .values_list("content_length", flat=True)
        .get(name=name)
    )
def modified_time(self, name):
    name = self._normalize_name(clean_name(name))
    return timezone.make_naive(
        BackblazeB2File.objects
        .values_list("modified_time", flat=True)
        .get(name=name)
    )
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) blob_params = self.get_object_parameters(name) no_signed_url = ( blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: return '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b"/~"), ) elif not self.custom_endpoint: return blob.generate_signed_url( expiration=self.expiration, version="v4" ) else: return blob.generate_signed_url( bucket_bound_hostname=self.custom_endpoint, expiration=self.expiration, version="v4", )
def exists(self, name):
    name = self._normalize_name(clean_name(name))
    return (
        BackblazeB2File.objects
        .filter(name=name)
        .exists()
    )
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) no_signed_url = (self.default_acl == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: return '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b"/~"), ) elif not self.custom_endpoint: return blob.generate_signed_url(self.expiration) else: return blob.generate_signed_url( expiration=self.expiration, api_access_endpoint=self.custom_endpoint, )
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)

    content.name = cleaned_name
    file = GoogleCloudFile(name, 'rw', self)

    upload_params = {}
    blob_params = self.get_object_parameters(name, content)
    if 'cache_control' not in blob_params and self.cache_control:
        warnings.warn(
            'The GS_CACHE_CONTROL setting is deprecated. Use GS_OBJECT_PARAMETERS to set any '
            'writable blob property or override GoogleCloudStorage.get_object_parameters to '
            'vary the parameters per object.', DeprecationWarning)
        blob_params['cache_control'] = self.cache_control

    upload_params['predefined_acl'] = blob_params.pop('acl', self.default_acl)

    if CONTENT_TYPE not in blob_params:
        upload_params[CONTENT_TYPE] = file.mime_type

    for prop, val in blob_params.items():
        setattr(file.blob, prop, val)

    file.blob.upload_from_file(
        content, rewind=True, size=content.size, **upload_params)
    return cleaned_name
def _save(self, name, content):
    extension = name.split('.')[-1]
    name_without_extension = "".join(name.split('.')[:-1])
    path = ""
    if len(name_without_extension.split('/')) > 1:
        path = '/'.join(name_without_extension.split('/')[:-1]) + '/'
    name = "%s%s.%s" % (path, str(uuid.uuid4()).replace('-', ''), extension)

    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name

    if 'image' in content.__dict__:
        opened_image = PIL.Image.open(content.file)
        if opened_image.width > 1280:
            opened_image.thumbnail(
                (1280, (opened_image.height / opened_image.width) * 1280))
            new_file_object = io.BytesIO()
            opened_image.save(new_file_object, format=opened_image.format)
            content.file = new_file_object
            content.image = opened_image
            content.size = len(new_file_object.getbuffer())

    file = GoogleCloudFile(name, 'rw', self)
    file.blob.cache_control = self.cache_control  # pylint: disable=no-member
    file.blob.upload_from_file(  # pylint: disable=no-member
        content, rewind=True, size=content.size,
        content_type=file.mime_type,
        predefined_acl=self.default_acl)
    return cleaned_name
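# A standalone sketch of the resize step inside the _save() above: downscale
# any image wider than 1280 pixels, preserving aspect ratio, into an in-memory
# buffer. The 1280px limit mirrors the code above; the helper name and the
# rest of the structure are illustrative assumptions.
import io

import PIL.Image


def downscale_to_width(fileobj, max_width=1280):
    image = PIL.Image.open(fileobj)
    if image.width <= max_width:
        fileobj.seek(0)
        return fileobj
    # thumbnail() resizes in place, preserves aspect ratio, and never enlarges.
    image.thumbnail((max_width, int(image.height * max_width / image.width)))
    buffer = io.BytesIO()
    # image.format is set for images opened from a real file; the original
    # _save() relies on the same assumption when re-encoding.
    image.save(buffer, format=image.format)
    buffer.seek(0)
    return buffer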
def _open(self, name, mode="rb"): name = self._normalize_name(clean_name(name)) # TODO: Use streaming content = self.b2api.download_file(self.self.get_b2_id(name)) content_buffer = BytesIO() content_buffer.write(content) content_buffer.seek(0) return File(content_buffer, name)
def url(self, name):
    # Preserve the trailing slash after normalizing the path.
    name = self._normalize_name(clean_name(name))
    if self.base_url:
        return self.base_url + name
    else:
        blob = self._get_blob(self._encode_name(name))
        return blob.public_url
def url(self, name):
    # Preserve the trailing slash after normalizing the path.
    name = self._normalize_name(clean_name(name))
    blob = self._get_blob(self._encode_name(name))
    if self.expiry_time:
        client = (Client.from_service_account_json(self.keyfile_path)
                  if self.keyfile_path else None)
        return blob.generate_signed_url(
            datetime.timedelta(seconds=self.expiry_time), client=client)
    return blob.public_url
def get_created_time(self, name):
    name = self._normalize_name(clean_name(name))
    created = (
        BackblazeB2File.objects
        .values_list("created_time", flat=True)
        .get(name=name)
    )
    return created if setting("USE_TZ") else timezone.make_naive(created)
def url(self, name): """ The parent implementation calls GoogleCloudStorage on every request once for oauth and again to get the file. Since we're proxying requests through nginx's proxy_pass to GoogleCloudStorage we don't need their url nonsense or to use the actual domain or bucket name. """ name = self._normalize_name(clean_name(name)) return f"/{name.lstrip('/')}"
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name
    encoded_name = self._encode_name(name)
    file = GoogleCloudFile(encoded_name, 'rw', self)
    file.blob.upload_from_file(content, size=content.size)
    return cleaned_name
def get_created_time(self, name): """ Return the creation time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. """ name = self._normalize_name(clean_name(name)) blob = self._get_blob(self._encode_name(name)) created = blob.time_created return created if setting('USE_TZ') else timezone.make_naive(created)
def get_available_name(self, name, max_length=_AZURE_NAME_MAX_LEN):
    """
    Returns a filename that's free on the target storage system, and
    available for new content to be written to.
    """
    name = clean_name(name)
    if self.overwrite_files:
        return get_available_overwrite_name(name, max_length)
    return super(AzureStorage, self).get_available_name(name, max_length)
def exists(self, name):
    if not name:  # root element aka the bucket
        try:
            self.client.get_bucket(self.bucket)
            return True
        except NotFound:
            return False

    name = self._normalize_name(clean_name(name))
    return bool(self.bucket.get_blob(name))
def exists(self, name):
    if not name:  # root element aka the bucket
        try:
            self.bucket
            return True
        except ImproperlyConfigured:
            return False

    name = self._normalize_name(clean_name(name))
    return bool(self.bucket.get_blob(self._encode_name(name)))
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(self._encode_name(name)) name = self._normalize_name(clean_name(name)) if self.default_acl == 'publicRead': object_url = blob.public_url filepath = urlparse(object_url).path.lstrip('/') prefix = 'tophatch/media/' if filepath.startswith(prefix): filepath = filepath[len(prefix):] return setting('MEDIA_URL') + filepath return blob.generate_signed_url(self.expiration)
def exists(self, name):
    if not name:  # root element aka the bucket
        try:
            self.bucket
            return True
        except ImproperlyConfigured:
            return False

    name = self._normalize_name(clean_name(name))
    return bool(Image.objects.filter(link=self._encode_name(name)).first())
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name
    file = GoogleCloudFile(name, 'rw', self)
    file.blob.cache_control = self.cache_control
    file.blob.upload_from_file(
        content, rewind=True, size=content.size,
        content_type=file.mime_type, predefined_acl=self.default_acl)
    return cleaned_name
def url(self, name):
    # Preserve the trailing slash after normalizing the path.
    name = self._normalize_name(clean_name(name))
    # Using the standard request uri we can avoid an API call to GCS.
    if setting('GS_USE_STANDARD_REQUEST_URI'):
        return 'https://%s.storage.googleapis.com/%s' % (
            self.bucket_name, filepath_to_uri(name))
    blob = self._get_blob(self._encode_name(name))
    return blob.public_url
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(self._encode_name(name)) if self.default_acl == 'publicRead': return blob.public_url return blob.generate_signed_url(self.expiration)
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name
    encoded_name = self._encode_name(name)
    gcloud_file = GoogleCloudFile(encoded_name, 'rw', self)
    gcloud_file.blob.upload_from_file(
        content, size=content.size, content_type=gcloud_file.mime_type)
    if self.public_blob:
        gcloud_file.blob.make_public()
    return cleaned_name
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name
    encoded_name = self._encode_name(name)
    file = GoogleCloudFile(encoded_name, 'rw', self)
    file.blob.upload_from_file(
        content, size=content.size, content_type=file.mime_type)
    file.blob.acl.save_predefined(self.auto_create_acl)
    return cleaned_name
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._normalize_name(cleaned_name)
    content.name = cleaned_name
    encoded_name = self._encode_name(name)
    file = GoogleCloudFile(encoded_name, 'rw', self)
    file.blob.cache_control = self.cache_control
    content.seek(0)
    file.blob.upload_from_file(
        content, size=content.size, content_type=file.mime_type)
    if self.default_acl:
        file.blob.acl.save_predefined(self.default_acl)
    return cleaned_name
def _save(self, name, content):
    cleaned_name = clean_name(name)
    name = self._get_valid_path(name)
    params = self._get_content_settings_parameters(name, content)

    # Unwrap django file (wrapped by parent's save call)
    if isinstance(content, File):
        content = content.file

    content.seek(0)
    self.client.upload_blob(
        name,
        content,
        content_settings=ContentSettings(**params),
        max_concurrency=self.upload_max_conn,
        timeout=self.timeout,
        overwrite=self.overwrite_files)
    return cleaned_name
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) custom_endpoint = setting("GS_CUSTOM_ENDPOINT") if custom_endpoint == None: return blob.public_url else: custom_url = '{storage_base_url}/{quoted_name}'.format( storage_base_url=custom_endpoint, quoted_name=_quote(name, safe=b"/~")) print(custom_url) return custom_url
def url(self, name): """ Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. """ name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(self._encode_name(name)) if self.default_acl == 'publicRead': url = blob.public_url else: url = blob.generate_signed_url(self.expiration) return url.replace( MyBlob._API_ACCESS_ENDPOINT + "/" + self.bucket_name, self.custom_url)
def listdir(self, name):
    name = self._normalize_name(clean_name(name))
    # For bucket.list_blobs and the logic below, name needs to end in '/',
    # but for the root path "" we leave it as an empty string.
    if name and not name.endswith('/'):
        name += '/'

    files_list = list(self.bucket.list_blobs(prefix=self._encode_name(name)))
    files = []
    dirs = set()

    base_parts = name.split("/")[:-1]
    for item in files_list:
        parts = item.name.split("/")
        parts = parts[len(base_parts):]
        if len(parts) == 1 and parts[0]:
            # File
            files.append(parts[0])
        elif len(parts) > 1 and parts[0]:
            # Directory
            dirs.add(parts[0])
    return list(dirs), files
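# A self-contained sketch of the prefix-splitting logic in listdir() above,
# driven by a plain list of blob names instead of a live bucket, to show how
# immediate files and sub-"directories" are separated. The function name and
# sample names are illustrative only.
def split_listing(blob_names, prefix):
    if prefix and not prefix.endswith('/'):
        prefix += '/'
    base_parts = prefix.split('/')[:-1]
    files, dirs = [], set()
    for blob_name in blob_names:
        if not blob_name.startswith(prefix):
            continue  # list_blobs(prefix=...) would already filter these out
        parts = blob_name.split('/')[len(base_parts):]
        if len(parts) == 1 and parts[0]:
            files.append(parts[0])
        elif len(parts) > 1 and parts[0]:
            dirs.add(parts[0])
    return sorted(dirs), files


names = ['docs/a.txt', 'docs/img/b.png', 'top.txt']
assert split_listing(names, '') == (['docs'], ['top.txt'])
assert split_listing(names, 'docs') == (['img'], ['a.txt'])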
def _open(self, name, mode='rb'):
    name = self._normalize_name(clean_name(name))
    file_object = GoogleCloudFile(name, mode, self)
    if not file_object.blob:
        raise IOError(u'File does not exist: %s' % name)
    return file_object
def test_clean_name(self):
    """
    Test the base case of clean_name
    """
    path = utils.clean_name("path/to/somewhere")
    self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
    """
    Test the normalization of clean_name
    """
    path = utils.clean_name("path/to/../somewhere")
    self.assertEqual(path, "path/somewhere")
def _clean_name(self, name):
    """
    Cleans the name so that Windows style paths work
    """
    return clean_name(name)
def test_clean_name_trailing_slash(self):
    """
    Test the clean_name when the path has a trailing slash
    """
    path = utils.clean_name("path/to/somewhere/")
    self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
    """
    Test clean_name when the path uses Windows-style separators
    """
    path = utils.clean_name("path\\to\\somewhere")
    self.assertEqual(path, "path/to/somewhere")
def _clean_name_dance(name):
    # `get_valid_path` may return `foo/../bar`
    name = name.replace('\\', '/')
    return clean_name(_get_valid_path(clean_name(name)))
def delete(self, name):
    name = self._normalize_name(clean_name(name))
    self.bucket.delete_blob(self._encode_name(name))
def get_available_name(self, name, max_length=None):
    if self.file_overwrite:
        name = clean_name(name)
        return name
    return super(GoogleCloudStorage, self).get_available_name(name, max_length)
def url(self, name):
    # Preserve the trailing slash after normalizing the path.
    name = self._normalize_name(clean_name(name))
    blob = self._get_blob(self._encode_name(name))
    return blob.public_url
def get_modified_time(self, name):
    name = self._normalize_name(clean_name(name))
    blob = self._get_blob(self._encode_name(name))
    updated = blob.updated
    return updated if setting('USE_TZ') else timezone.make_naive(updated)
def modified_time(self, name):
    name = self._normalize_name(clean_name(name))
    blob = self._get_blob(self._encode_name(name))
    return timezone.make_naive(blob.updated)
def size(self, name):
    name = self._normalize_name(clean_name(name))
    blob = self._get_blob(self._encode_name(name))
    return blob.size