Example #1
 def __init__(self, oauth2_access_token=None, root_path=None):
     oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')
     self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')
     if oauth2_access_token is None:
         raise ImproperlyConfigured("You must configure a token auth at"
                                    "'settings.DROPBOX_OAUTH2_TOKEN'.")
     self.client = DropboxClient(oauth2_access_token)
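
A note on the helper itself: every example on this page reads optional configuration through the same small `setting()` function. Below is a minimal sketch of such a helper, assuming it simply proxies django.conf.settings and returns a supplied default when the attribute is missing (illustrative only, not necessarily the exact upstream implementation):

from django.conf import settings

def setting(name, default=None):
    # Look the name up on django.conf.settings and fall back to
    # `default` when the project does not define it.
    return getattr(settings, name, default)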
Example #2
    def __init__(self, host=None, params=None, interactive=None, file_mode=None,
                 dir_mode=None, uid=None, gid=None, known_host_file=None,
                 root_path=None, base_url=None):
        self._host = host or setting('SFTP_STORAGE_HOST')

        self._params = params or setting('SFTP_STORAGE_PARAMS', {})
        self._interactive = setting('SFTP_STORAGE_INTERACTIVE', False) \
            if interactive is None else interactive
        self._file_mode = setting('SFTP_STORAGE_FILE_MODE') \
            if file_mode is None else file_mode
        self._dir_mode = setting('SFTP_STORAGE_DIR_MODE') if \
            dir_mode is None else dir_mode

        self._uid = setting('SFTP_STORAGE_UID') if uid is None else uid
        self._gid = setting('SFTP_STORAGE_GID') if gid is None else gid
        self._known_host_file = setting('SFTP_KNOWN_HOST_FILE') \
            if known_host_file is None else known_host_file

        self._root_path = setting('SFTP_STORAGE_ROOT', '') \
            if root_path is None else root_path
        self._base_url = setting('MEDIA_URL') if base_url is None else base_url

        # for now it's all posix paths.  Maybe someday we'll support figuring
        # out if the remote host is windows.
        self._pathmod = posixpath
Example #3
    def url(self, name, headers=None, response_headers=None):
        url = '{}/{}'.format(self.base_url, name)

        expires = int(time.time()) + setting('AWS_CLOUDFRONT_LINK_EXPIRES_TIME')
        if setting('AWS_CLOUDFRONT_SIGNED_URL'):
            return self.cf_dist.create_signed_url(
                url,
                setting('AWS_CLOUDFRONT_KEY_PAIR_ID'),
                expires,
                private_key_file=setting('AWS_CLOUDFRONT_PRIV_KEY_FILE')
            )
        return url
Example #4
    def __init__(self, acl=None, bucket=None, bucket_alias=None, **settings):
        # check if some of the settings we've provided as class attributes
        # need to be overwritten with values passed in here
        for name, value in settings.items():
            if hasattr(self, name):
                setattr(self, name, value)

        # For backward-compatibility of old differing parameter names
        if acl is not None:
            self.default_acl = acl
        if bucket is not None:
            self.bucket_name = bucket
        if bucket_alias is not None:
            self.bucket_name = setting('AWS_STORAGE_BUCKET_ALIASES', {})[bucket_alias]

        self.location = (self.location or '').lstrip('/')
        # Backward-compatibility: the SECURE_URL setting predates URL_PROTOCOL, so
        # we fall back to https when it is set in order to avoid constructing
        # insecure urls.
        if self.secure_urls:
            self.url_protocol = 'https:'

        self._entries = {}
        self._bucket = None
        self._connection = None

        if not self.access_key and not self.secret_key:
            self.access_key, self.secret_key = self._get_access_keys()
Example #5
 def url(self, name):
     if hasattr(self.connection, 'make_blob_url'):
         return self.connection.make_blob_url(
             container_name=self.azure_container,
             blob_name=name,
             protocol=self.azure_protocol,
         )
     else:
         return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
Example #6
 def url(self, name):
     cdn_url = setting('AZURE_CDN_URL')
     if cdn_url:
        full_url = "{}{}/{}".format(cdn_url, self.azure_container, name)
     elif hasattr(self.connection, 'make_blob_url'):
         return self.connection.make_blob_url(
             container_name=self.azure_container,
             blob_name=name,
             protocol=self.azure_protocol,
         )
     else:
         full_url = "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
     key = setting('AZURE_CDN_TOKEN_KEY')
     timeout = setting('AZURE_CDN_TOKEN_TIMEOUT')
     if key and timeout:
         # Get GMT timestamp.
         timestamp = int(datetime(*(time.gmtime()[:-3])).strftime("%s"))
         full_url += '?{}'.format(encrypt_v3(key, 'ec_expire={}'.format(timestamp + timeout)))
     return full_url
Example #7
 def __init__(self, location=None, base_url=None):
     location = location or setting("FTP_STORAGE_LOCATION")
     if location is None:
         raise ImproperlyConfigured(
             "You must set a location at " "instanciation or at " " settings.FTP_STORAGE_LOCATION'."
         )
     self.location = location
     base_url = base_url or settings.MEDIA_URL
     self._config = self._decode_location(location)
     self._base_url = base_url
     self._connection = None
Example #8
 def _get_file(self):
     if self._file is None:
         self._file = SpooledTemporaryFile(
             max_size=self._storage.max_memory_size,
             suffix=".GSStorageFile",
             dir=setting("FILE_UPLOAD_TEMP_DIR", None)
         )
         if 'r' in self._mode:
             self._is_dirty = False
             self.blob.download_to_file(self._file)
             self._file.seek(0)
     return self._file
Example #9
 def __init__(self, *args, **kwargs):
     super(S3CloudFrontStorage, self).__init__(*args, **kwargs)
     if setting('AWS_CLOUDFRONT_SIGNED_URL'):
         self.cf_connection = cloudfront.CloudFrontConnection(
             setting('AWS_ACCESS_KEY_ID'), setting('AWS_SECRET_ACCESS_KEY')
         )
         self.cf_dist = self.cf_connection.get_distribution_info(setting('AWS_CLOUDFRONT_DISTRIBUTION_ID'))
         self.base_url = "%s://%s" % (setting('AWS_CLOUDFRONT_DISTRIBUTION_PROTOCOL'), self.cf_dist.domain_name)
     else:
         self.base_url = "%s://%s" % (setting('AWS_CLOUDFRONT_DISTRIBUTION_PROTOCOL'),
                                      setting('AWS_CLOUDFRONT_DISTRIBUTION_DOMAIN'))
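
Examples #3 and #9 assume a block of CloudFront-related settings. A hypothetical settings.py fragment matching the names they read (only the setting names come from the code above; all values are placeholders):

# Hypothetical values for illustration only.
AWS_CLOUDFRONT_SIGNED_URL = True
AWS_CLOUDFRONT_KEY_PAIR_ID = 'APKA-EXAMPLE'
AWS_CLOUDFRONT_PRIV_KEY_FILE = '/etc/keys/cloudfront.pem'
AWS_CLOUDFRONT_LINK_EXPIRES_TIME = 300  # seconds a signed link stays valid
AWS_CLOUDFRONT_DISTRIBUTION_ID = 'E1EXAMPLE'
AWS_CLOUDFRONT_DISTRIBUTION_PROTOCOL = 'https'
AWS_CLOUDFRONT_DISTRIBUTION_DOMAIN = 'dexample.cloudfront.net'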
Example #10
    def __init__(self, host=None, params=None, interactive=None, file_mode=None,
                 dir_mode=None, uid=None, gid=None, known_host_file=None,
                 root_path=None, base_url=None):
        self._host = host or setting('SFTP_STORAGE_HOST')

        self._params = params or setting('SFTP_STORAGE_PARAMS', {})
        self._interactive = setting('SFTP_STORAGE_INTERACTIVE', False) \
            if interactive is None else interactive
        self._file_mode = setting('SFTP_STORAGE_FILE_MODE') \
            if file_mode is None else file_mode
        self._dir_mode = setting('SFTP_STORAGE_DIR_MODE') if \
            dir_mode is None else dir_mode

        self._uid = setting('SFTP_STORAGE_UID') if uid is None else uid
        self._gid = setting('SFTP_STORAGE_GID') if gid is None else gid
        self._known_host_file = setting('SFTP_KNOWN_HOST_FILE') \
            if known_host_file is None else known_host_file

        self._root_path = setting('SFTP_STORAGE_ROOT', '') \
            if root_path is None else root_path
        self._base_url = setting('MEDIA_URL') if base_url is None else base_url

        self._sftp = None
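
The SFTP constructors in Examples #2 and #10 pull their defaults from a block of SFTP_* settings. A hypothetical settings.py fragment covering most of the names used above (values are placeholders; SFTP_STORAGE_PARAMS is presumably forwarded to the underlying SSH client):

# Hypothetical values for illustration only.
SFTP_STORAGE_HOST = 'sftp.example.com'
SFTP_STORAGE_PARAMS = {'username': 'deploy', 'port': 22}
SFTP_STORAGE_INTERACTIVE = False
SFTP_STORAGE_FILE_MODE = 0o644
SFTP_STORAGE_DIR_MODE = 0o755
SFTP_KNOWN_HOST_FILE = '~/.ssh/known_hosts'
SFTP_STORAGE_ROOT = '/var/www/media'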
Example #11
 def _get_file(self):
     if self._file is None:
         self._file = SpooledTemporaryFile(
             max_size=self._storage.max_memory_size,
             suffix=".S3Boto3StorageFile",
             dir=setting("FILE_UPLOAD_TEMP_DIR", None)
         )
         if 'r' in self._mode:
             self._is_dirty = False
             self._file.write(self.obj.get()['Body'].read())
             self._file.seek(0)
         if self._storage.gzip and self.obj.content_encoding == 'gzip':
             self._file = GzipFile(mode=self._mode, fileobj=self._file, mtime=0.0)
     return self._file
Example #12
 def _get_file(self):
     if self._file is None:
         self._file = SpooledTemporaryFile(
             max_size=self._storage.max_memory_size,
             suffix=".S3BotoStorageFile",
             dir=setting("FILE_UPLOAD_TEMP_DIR", None),
         )
         if "r" in self._mode:
             self._is_dirty = False
             self.key.get_contents_to_file(self._file)
             self._file.seek(0)
         if self._storage.gzip and self.key.content_encoding == "gzip":
             self._file = GzipFile(mode=self._mode, fileobj=self._file)
     return self._file
Example #13
 def get_modified_time(self, name):
     """
     Returns an (aware) datetime object containing the last modified time if
     USE_TZ is True, otherwise returns a naive datetime in the local timezone.
     """
     name = self._normalize_name(self._clean_name(name))
     entry = self.entries.get(name)
     # only call self.bucket.Object() if the key is not found
     # in the preloaded metadata.
     if entry is None:
         entry = self.bucket.Object(self._encode_name(name))
     if setting('USE_TZ'):
         # boto3 returns TZ aware timestamps
         return entry.last_modified
     else:
         return localtime(entry.last_modified).replace(tzinfo=None)
Example #14
 def get_modified_time(self, name):
     """
     Returns an (aware) datetime object containing the last modified time if
     USE_TZ is True, otherwise returns a naive datetime in the local timezone.
     """
     name = self._normalize_name(self._clean_name(name))
     entry = self.entries.get(name)
     # only call self.bucket.Object() if the key is not found
     # in the preloaded metadata.
     if entry is None:
         entry = self.bucket.Object(self._encode_name(name))
     if setting('USE_TZ'):
         # boto3 returns TZ aware timestamps
         return entry.last_modified
     else:
         return localtime(entry.last_modified).replace(tzinfo=None)
Example #15
class MediaBoto3Storage(S3Boto3Storage):  # pragma: no cover
    """
    Custom storage to allow an S3 bucket to be configured specifically
    for uploaded media.
    """

    bucket_name = setting(
        "MEDIA_STORAGE_BUCKET_NAME", setting("AWS_STORAGE_BUCKET_NAME")
    )
    location = setting("MEDIA_LOCATION", setting("AWS_LOCATION", ""))
    custom_domain = setting("MEDIA_S3_CUSTOM_DOMAIN", setting("AWS_S3_CUSTOM_DOMAIN"))
Example #16
 def url(self, name):
     """
     Return public url or a signed url for the Blob.
     This DOES NOT check for existence of the Blob - that makes the code too slow
     for many use cases.
     """
     name = self._normalize_name(clean_name(name))
     blob = self.bucket.blob(name)
     custom_endpoint = setting("GS_CUSTOM_ENDPOINT")
     if custom_endpoint is None:
         return blob.public_url
     else:
         custom_url = '{storage_base_url}/{quoted_name}'.format(
             storage_base_url=custom_endpoint,
             quoted_name=_quote(name, safe=b"/~"))
         return custom_url
Example #17
    def get_modified_time(self, name):
        """
        Returns an (aware) datetime object containing the last modified time if
        USE_TZ is True, otherwise returns a naive datetime in the local timezone.
        """
        properties = self.client.get_blob_properties(
            self._get_valid_path(name), timeout=self.timeout)
        if not setting('USE_TZ', False):
            return timezone.make_naive(properties.last_modified)

        tz = timezone.get_current_timezone()
        if timezone.is_naive(properties.last_modified):
            return timezone.make_aware(properties.last_modified, tz)

        # `last_modified` is in UTC time_zone, we
        # must convert it to settings time_zone
        return properties.last_modified.astimezone(tz)
Example #18
    def _get_file(self):
        if self._file is None:
            self._file = SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix=".AzureBoto3StorageFile",
                dir=setting("FILE_UPLOAD_TEMP_DIR", None)
            )
            if 'r' in self._mode:
                self._is_dirty = False
                # Set max_connections to 1 since SpooledTemporaryFile is not seekable,
                # which is required if we use max_connections > 1
                self._storage.connection.get_blob_to_stream(container_name=self._storage.azure_container,
                                                            blob_name=self._name, stream=self._file,
                                                            max_connections=1)

                self._file.seek(0)
        return self._file
Example #19
    def _get_file(self):
        if self._file is not None:
            return self._file

        file = SpooledTemporaryFile(max_size=self._storage.max_memory_size,
                                    suffix=".AzureStorageFile",
                                    dir=setting("FILE_UPLOAD_TEMP_DIR", None))

        if 'r' in self._mode or 'a' in self._mode:
            download_stream = self._storage.client.download_blob(
                self._path, timeout=self._storage.timeout)
            download_stream.readinto(file)
        if 'r' in self._mode:
            file.seek(0)

        self._file = file
        return self._file
Example #20
class CustomPublicGoogleCloudStorage(GoogleCloudStorage):
    """
    Override the `_save` method of `GoogleCloudStorage` to make paths that match a glob in the
    `GS_PUBLIC_READABLE_PATHS` setting list publicly readable after upload.
    """
    public_readable_paths = setting('GS_PUBLIC_READABLE_PATHS', [])

    def _save(self, name, content):
        cleaned_name = super()._save(name, content)
        for public_readable_path in self.public_readable_paths:
            if fnmatch.fnmatch(cleaned_name, public_readable_path):
                encoded_name = self._encode_name(name)
                blob = self.bucket.blob(encoded_name)
                blob.make_public()

                break

        return cleaned_name
Example #21
    def get_modified_time(self, name):
        """
        Returns an (aware) datetime object containing the last modified time if
        USE_TZ is True, otherwise returns a naive datetime in the local timezone.
        """
        properties = self.service.get_blob_properties(
            self.azure_container,
            self._get_valid_path(name),
            timeout=self.timeout).properties
        if not setting('USE_TZ', False):
            return timezone.make_naive(properties.last_modified)

        tz = timezone.get_current_timezone()
        if timezone.is_naive(properties.last_modified):
            return timezone.make_aware(properties.last_modified, tz)

        # `last_modified` is in UTC time_zone, we
        # must convert it to settings time_zone
        return properties.last_modified.astimezone(tz)
Example #22
    def url(self, name):
        """
        Return public url or a signed url for the Blob.
        This DOES NOT check for existence of the Blob - that makes the code too slow
        for many use cases.
        """
        name = self._normalize_name(clean_name(name))
        blob = self.bucket.blob(self._encode_name(name))

        if self.default_acl == 'publicRead':
            object_url = blob.public_url
            filepath = urlparse(object_url).path.lstrip('/')
            prefix = 'tophatch/media/'
            if filepath.startswith(prefix):
                filepath = filepath[len(prefix):]
            return setting('MEDIA_URL') + filepath
        return blob.generate_signed_url(self.expiration)
Example #23
    def _get_file(self):
        if self._file is not None:
            return self._file

        file = SpooledTemporaryFile(max_size=self._storage.max_memory_size,
                                    suffix=".AzureStorageFile",
                                    dir=setting("FILE_UPLOAD_TEMP_DIR", None))

        if 'r' in self._mode or 'a' in self._mode:
            # Use max_concurrency=1 since SpooledTemporaryFile is not
            # seekable, which is required if we use max_concurrency > 1
            download_stream = self._storage.client.download_blob(
                self._path, timeout=self._storage.timeout)
            download_stream.download_to_stream(file, max_concurrency=1)
        if 'r' in self._mode:
            file.seek(0)

        self._file = file
        return self._file
Example #24
class CustomAzureStorage(AzureStorage):
    # This is a workaround for AzureStorage to support custom domains
    custom_domain = setting("AZURE_CUSTOM_DOMAIN", None)

    @property
    def service(self):
        if self._service is None:
            self._service = BlockBlobService(
                self.account_name,
                self.account_key,
                is_emulated=self.is_emulated,
                custom_domain=self.custom_domain,
            )
        return self._service

    def url(self, name, expire=None):
        url = super().url(name, expire=expire)
        if self.is_emulated and url.startswith("https://azurite:"):
            url = url.replace("https://azurite:", "http://localhost:")
        return url
Example #25
    def url(self, name, expire=None, mode='r'):
        if hasattr(self.connection, 'make_blob_url'):
            sas_token = None
            make_blob_url_kwargs = {}
            if expire:
                now, now_plus_delta = self._expire_at(expire)
                sas_token = self.connection.generate_blob_shared_access_signature(self.azure_container,
                                                                                  name, 'r',
                                                                                  expiry=now_plus_delta)
                make_blob_url_kwargs['sas_token'] = sas_token

            if self.azure_protocol:
                make_blob_url_kwargs['protocol'] = self.azure_protocol
            return self.connection.make_blob_url(
                container_name=self.azure_container,
                blob_name=name,
                **make_blob_url_kwargs
            )
        else:
            return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
Example #26
 def __init__(self, name, mode, storage, buffer_size=None):
     if 'r' in mode and 'w' in mode:
         raise ValueError("Can't combine 'r' and 'w' in mode.")
     self._storage = storage
     self.name = name[len(self._storage.location):].lstrip('/')
     self._mode = mode
     self._force_mode = (lambda b: b) if 'b' in mode else (lambda b: b.decode())
     self.obj = storage.bucket.Object(name)
     if 'w' not in mode:
         # Force early RAII-style exception if object does not exist
         self.obj.load()
     self._is_dirty = False
     self._raw_bytes_written = 0
     self._file = None
     self._multipart = None
     # 5 MB is the minimum part size (if there is more than one part).
     # Amazon allows up to 10,000 parts.  The default supports uploads
     # up to roughly 50 GB.  Increase the part size to accommodate
     # files larger than this.
     self.buffer_size = buffer_size or setting('AWS_S3_FILE_BUFFER_SIZE', 5242880)
     self._write_counter = 0
Example #27
class CustomPublicGoogleCloudStorage(GoogleCloudStorage):
    """
    Override the `_save` method of `GoogleCloudStorage` to set readable permission after upload.

    This is for objects that should be world readable, e.g. avatar images.
    The paths are those that match the `GS_PUBLIC_READABLE_PATHS` setting glob.
    """

    public_readable_paths = setting("GS_PUBLIC_READABLE_PATHS", [])

    def _save(self, name, content):
        cleaned_name = super()._save(name, content)
        for public_readable_path in self.public_readable_paths:
            if fnmatch.fnmatch(cleaned_name, public_readable_path):
                encoded_name = self._encode_name(name)
                blob = self.bucket.blob(encoded_name)
                blob.make_public()

                break

        return cleaned_name
Example #28
    def _get_file(self):
        if self._file is not None:
            return self._file

        file = SpooledTemporaryFile(max_size=self._storage.max_memory_size,
                                    suffix=".AzureStorageFile",
                                    dir=setting("FILE_UPLOAD_TEMP_DIR", None))

        if 'r' in self._mode or 'a' in self._mode:
            # Set max_connections to 1 since SpooledTemporaryFile is not
            # seekable, which is required if we use max_connections > 1
            self._storage.service.get_blob_to_stream(
                container_name=self._storage.azure_container,
                blob_name=self._path,
                stream=file,
                max_connections=1,
                timeout=self._storage.timeout)
        if 'r' in self._mode:
            file.seek(0)

        self._file = file
        return self._file
Example #29
 def url(self, name, expire=None):
     if hasattr(self.service, 'make_blob_url'):
         if self.auto_sign:
             start = (datetime.utcnow() + timedelta(seconds=-120)).strftime('%Y-%m-%dT%H:%M:%SZ')
             expiry = (datetime.utcnow() + timedelta(seconds=self.ap_expiry)).strftime('%Y-%m-%dT%H:%M:%SZ')
             sas_token = self.service.generate_blob_shared_access_signature(
                 self.azure_container,
                 name,
                 permission=self.azure_access_policy_permission,
                 expiry=expiry,
                 start=start,
             )
         else:
             sas_token = None
         return self.service.make_blob_url(
             container_name=self.azure_container,
             blob_name=name,
             protocol=self.azure_protocol,
             sas_token=sas_token,
         )
     else:
         return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
Example #30
class EToolsAzureStorage(AzureStorage):
    account_name = setting("AZURE_ACCOUNT_NAME")
    account_key = setting("AZURE_ACCOUNT_KEY")
    azure_container = setting("AZURE_CONTAINER")
    azure_ssl = setting("AZURE_SSL")

    auto_sign = setting("AZURE_AUTO_SIGN")
    azure_access_policy_permission = setting("AZURE_ACCESS_POLICY_PERMISSION")
    ap_expiry = setting("AZURE_ACCESS_POLICY_EXPIRY")

    def __init__(self, *args, **kwargs):
        super(EToolsAzureStorage, self).__init__(*args, **kwargs)
        self._connection = None

    def url(self, name):
        if hasattr(self.connection, 'make_blob_url'):
            if self.auto_sign:
                access_policy = AccessPolicy()
                access_policy.start = (
                    datetime.utcnow() +
                    timedelta(seconds=-120)).strftime('%Y-%m-%dT%H:%M:%SZ')
                access_policy.expiry = (datetime.utcnow() + timedelta(
                    seconds=self.ap_expiry)).strftime('%Y-%m-%dT%H:%M:%SZ')
                access_policy.permission = self.azure_access_policy_permission
                sap = SharedAccessPolicy(access_policy)
                sas_token = self.connection.generate_shared_access_signature(
                    self.azure_container,
                    blob_name=name,
                    shared_access_policy=sap,
                )
            else:
                sas_token = None
            return self.connection.make_blob_url(
                container_name=self.azure_container,
                blob_name=name,
                protocol=self.azure_protocol,
                sas_token=sas_token,
            )
        else:
            return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container,
                                    name)
Example #31
    def _get_file(self):
        if self._file is not None:
            return self._file

        file = SpooledTemporaryFile(
            max_size=self._storage.max_memory_size,
            suffix=".AzureStorageFile",
            dir=setting("FILE_UPLOAD_TEMP_DIR", None))

        if 'r' in self._mode or 'a' in self._mode:
            # Set max_connections to 1 since SpooledTemporaryFile is not
            # seekable, which is required if we use max_connections > 1
            self._storage.service.get_blob_to_stream(
                container_name=self._storage.azure_container,
                blob_name=self._path,
                stream=file,
                max_connections=1,
                timeout=10)
        if 'r' in self._mode:
            file.seek(0)

        self._file = file
        return self._file
Example #32
 def url(self, name):
     if hasattr(self.connection, 'make_blob_url'):
         if self.auto_sign:
             access_policy = AccessPolicy()
             access_policy.start = (datetime.utcnow() + timedelta(seconds=-120)).strftime('%Y-%m-%dT%H:%M:%SZ')
             access_policy.expiry = (datetime.utcnow() + timedelta(seconds=self.ap_expiry)).strftime('%Y-%m-%dT%H:%M:%SZ')
             access_policy.permission = self.azure_access_policy_permission
             sap = SharedAccessPolicy(access_policy)
             
             sas_token = self.connection.generate_shared_access_signature(
                 self.azure_container,
                 blob_name=name,
                 shared_access_policy=sap,
             )
         else:
             sas_token = None
         return self.connection.make_blob_url(
             container_name=self.azure_container,
             blob_name=name,
             protocol=self.azure_protocol,
             sas_token=sas_token
         )
     else:
         return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
Example #33
        def __init__(self, file_id, name, original_name, size, metadata=None, **kwargs):
            boto_storage = S3Boto3Storage(
                bucket_name=setting("AWS_STORAGE_BUCKET_NAME"),
                endpoint_url=setting("AWS_S3_ENDPOINT_URL"),
                access_key=setting(
                    "AWS_S3_ACCESS_KEY_ID", setting("AWS_ACCESS_KEY_ID")
                ),
                secret_key=setting(
                    "AWS_S3_SECRET_ACCESS_KEY", setting("AWS_SECRET_ACCESS_KEY")
                ),
            )

            super().__init__(name=name, mode="rb", storage=boto_storage, **kwargs)

            self.file_id = file_id
            self.original_name = original_name
            # self.size is derived from S3Boto3StorageFile
            # but size is passed for consistency, and potentially
            # for validation
            self.is_placeholder = False
            self.is_s3direct = True
            self.metadata = metadata
Example #34
class AzureStorage(Storage):
    account_name = setting("AZURE_ACCOUNT_NAME")
    account_key = setting("AZURE_ACCOUNT_KEY")
    azure_container = setting("AZURE_CONTAINER")
    azure_ssl = setting("AZURE_SSL")

    def __init__(self, *args, **kwargs):
        super(AzureStorage, self).__init__(*args, **kwargs)
        self._connection = None

    @property
    def connection(self):
        if self._connection is None:
            self._connection = BlockBlobService(
                account_name=self.account_name, account_key=self.account_key)
            #self._connection = BlobService(
            #    self.account_name, self.account_key)
        return self._connection

    @property
    def azure_protocol(self):
        if self.azure_ssl:
            return 'https'
        return 'http' if self.azure_ssl is not None else None

    def __get_blob_properties(self, name):
        try:
            return self.connection.get_blob_properties(
                self.azure_container,
                name
            )
        except AzureMissingResourceHttpError:
            return None

    def _open(self, name, mode="rb"):
        contents = self.connection.get_blob(self.azure_container, name)
        return ContentFile(contents)

    def exists(self, name):
        return self.__get_blob_properties(name) is not None

    def delete(self, name):
        try:
            self.connection.delete_blob(self.azure_container, name)
        except AzureMissingResourceHttpError:
            pass

    def size(self, name):
        properties = self.connection.get_blob_properties(
            self.azure_container, name)
        return properties["content-length"]

    def _save(self, name, content):
        if hasattr(content.file, 'content_type'):
            content_type = content.file.content_type
        else:
            content_type = mimetypes.guess_type(name)[0]

        if hasattr(content, 'chunks'):
            content_data = b''.join(chunk for chunk in content.chunks())
        else:
            content_data = content.read()

        self.connection.create_blob_from_bytes(self.azure_container, name,
              content_data, content_settings=ContentSettings(content_type=content_type))
        #self.connection.put_blob(self.azure_container, name,
        #                         content_data, "BlockBlob",
        #                         x_ms_blob_content_type=content_type)
        return name

    def url(self, name):
        if hasattr(self.connection, 'make_blob_url'):
            return self.connection.make_blob_url(
                container_name=self.azure_container,
                blob_name=name,
                protocol=self.azure_protocol,
            )
        else:
            return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)

    def modified_time(self, name):
        try:
            modified = self.__get_blob_properties(name)['last-modified']
        except (TypeError, KeyError):
            return super(AzureStorage, self).modified_time(name)

        modified = time.strptime(modified, '%a, %d %b %Y %H:%M:%S %Z')
        modified = datetime.fromtimestamp(mktime(modified))

        return modified
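
To use a storage class like the one in Example #34 project-wide, Django needs to be pointed at it and the AZURE_* settings it reads at class level need to exist. A hypothetical settings fragment, assuming the class is importable as myproject.storage.AzureStorage (module path and values are placeholders):

# Hypothetical values for illustration only.
DEFAULT_FILE_STORAGE = 'myproject.storage.AzureStorage'
AZURE_ACCOUNT_NAME = 'mystorageaccount'
AZURE_ACCOUNT_KEY = '<base64 account key>'
AZURE_CONTAINER = 'media'
AZURE_SSL = True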
Example #35
 def __init__(self):
     bucket = setting('COURSE_IMPORT_EXPORT_BUCKET', settings.AWS_STORAGE_BUCKET_NAME)
     super(ImportExportS3Storage, self).__init__(bucket=bucket, custom_domain=None, querystring_auth=True)
Example #36
class GoogleCloudStorage(Storage):
    project_id = setting('GS_PROJECT_ID', None)
    credentials = setting('GS_CREDENTIALS', None)
    bucket_name = setting('GS_BUCKET_NAME', None)
    location = setting('GS_LOCATION', '')
    auto_create_bucket = setting('GS_AUTO_CREATE_BUCKET', False)
    auto_create_acl = setting('GS_AUTO_CREATE_ACL', 'projectPrivate')
    file_name_charset = setting('GS_FILE_NAME_CHARSET', 'utf-8')
    file_overwrite = setting('GS_FILE_OVERWRITE', True)
    # The max amount of memory a returned file can take up before being
    # rolled over into a temporary file on disk. Default is 0: Do not roll over.
    max_memory_size = setting('GS_MAX_MEMORY_SIZE', 0)
    base_url = setting('GS_CDN_BASE_URL', None)


    def __init__(self, **settings):
        # check if some of the settings we've provided as class attributes
        # need to be overwritten with values passed in here
        for name, value in settings.items():
            if hasattr(self, name):
                setattr(self, name, value)

        self.location = (self.location or '').lstrip('/')
        self._bucket = None
        self._client = None

    @property
    def client(self):
        if self._client is None:
            self._client = Client(
                project=self.project_id,
                credentials=self.credentials
            )
        return self._client

    @property
    def bucket(self):
        if self._bucket is None:
            self._bucket = self._get_or_create_bucket(self.bucket_name)
        return self._bucket

    def _get_or_create_bucket(self, name):
        """
        Retrieves a bucket if it exists, otherwise creates it.
        """
        try:
            return self.client.get_bucket(name)
        except NotFound:
            if self.auto_create_bucket:
                bucket = self.client.create_bucket(name)
                bucket.acl.save_predefined(self.auto_create_acl)
                return bucket
            raise ImproperlyConfigured("Bucket %s does not exist. Buckets "
                                       "can be automatically created by "
                                       "setting GS_AUTO_CREATE_BUCKET to "
                                       "``True``." % name)

    def _normalize_name(self, name):
        """
        Normalizes the name so that paths like /path/to/ignored/../something.txt
        and ./file.txt work.  Note that clean_name adds ./ to some paths so
        they need to be fixed here. We check to make sure that the path pointed
        to is not outside the directory specified by the LOCATION setting.
        """
        try:
            return safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." %
                                      name)

    def _encode_name(self, name):
        return smart_str(name, encoding=self.file_name_charset)

    def _open(self, name, mode='rb'):
        name = self._normalize_name(clean_name(name))
        file_object = GoogleCloudFile(name, mode, self)
        if not file_object.blob:
            raise IOError(u'File does not exist: %s' % name)
        return file_object

    def _save(self, name, content):
        cleaned_name = clean_name(name)
        name = self._normalize_name(cleaned_name)

        content.name = cleaned_name
        encoded_name = self._encode_name(name)
        file = GoogleCloudFile(encoded_name, 'rw', self)
        file.blob.upload_from_file(content, size=content.size,
                                   content_type=file.mime_type)
        return cleaned_name

    def delete(self, name):
        name = self._normalize_name(clean_name(name))
        self.bucket.delete_blob(self._encode_name(name))

    def exists(self, name):
        if not name:  # root element aka the bucket
            try:
                self.bucket
                return True
            except ImproperlyConfigured:
                return False

        name = self._normalize_name(clean_name(name))
        return bool(self.bucket.get_blob(self._encode_name(name)))

    def listdir(self, name):
        name = self._normalize_name(clean_name(name))
        # for the bucket.list and logic below name needs to end in /
        # But for the root path "" we leave it as an empty string
        if name and not name.endswith('/'):
            name += '/'

        files_list = list(self.bucket.list_blobs(prefix=self._encode_name(name)))
        files = []
        dirs = set()

        base_parts = name.split("/")[:-1]
        for item in files_list:
            parts = item.name.split("/")
            parts = parts[len(base_parts):]
            if len(parts) == 1 and parts[0]:
                # File
                files.append(parts[0])
            elif len(parts) > 1 and parts[0]:
                # Directory
                dirs.add(parts[0])
        return list(dirs), files

    def _get_blob(self, name):
        # Wrap google.cloud.storage's blob to raise if the file doesn't exist
        blob = self.bucket.get_blob(name)

        if blob is None:
            raise NotFound(u'File does not exist: {}'.format(name))

        return blob

    def size(self, name):
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(self._encode_name(name))
        return blob.size

    def modified_time(self, name):
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(self._encode_name(name))
        return timezone.make_naive(blob.updated)

    def get_modified_time(self, name):
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(self._encode_name(name))
        updated = blob.updated
        return updated if setting('USE_TZ') else timezone.make_naive(updated)

    def get_created_time(self, name):
        """
        Return the creation time (as a datetime) of the file specified by name.
        The datetime will be timezone-aware if USE_TZ=True.
        """
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(self._encode_name(name))
        created = blob.time_created
        return created if setting('USE_TZ') else timezone.make_naive(created)

    def url(self, name):
        # Preserve the trailing slash after normalizing the path.
        name = self._normalize_name(clean_name(name))
        if self.base_url:
            return self.base_url + name
        else:
            blob = self._get_blob(self._encode_name(name))
            return blob.public_url

    def get_available_name(self, name, max_length=None):
        if self.file_overwrite:
            name = clean_name(name)
            return name
        return super(GoogleCloudStorage, self).get_available_name(name, max_length)
Example #37
class GoogleCloudStorage(StorageWithTransactionSupportMixin, gcloud.GoogleCloudStorage):
    preserve_acl = setting('GS_PRESERVE_ACL', True)

    def __init__(self, location=settings.LOCATION, storage_url=None, **settings):
        self._location = location
        self._storage_url = storage_url
        super().__init__(**settings)

    def internal_url(self, name):
        if self._storage_url:
            return self._storage_url + filepath_to_uri(name)

        # Remove unnecessary :443 from url that is created by boto for python > 2.7
        url = super().url(name)
        if ':443' in url:
            return url.replace(':443', '')
        return url

    def copy(self, src_name, dest_name):
        src_name = self._normalize_name(gcloud.clean_name(src_name))
        dest_name = self._normalize_name(gcloud.clean_name(dest_name))
        bucket = self.bucket

        source_blob = bucket.blob(self._encode_name(src_name))
        destination_blob = bucket.copy_blob(source_blob,
                                            bucket, self._encode_name(dest_name))

        if self.preserve_acl and self.default_acl:
            destination_blob.acl.save(acl=source_blob.acl)

        return dest_name

    def size(self, name):
        name = self._normalize_name(gcloud.clean_name(name))

        return self.bucket.get_blob(self._encode_name(name)).size

    def delete(self, name):
        name = self._normalize_name(gcloud.clean_name(name))

        self.bucket.delete_blobs([self._encode_name(name)], on_error=lambda blob: None)

    def delete_files(self, prefix, buckets, **kwargs):
        if not prefix.endswith('/'):
            prefix += '/'

        for bucket_name in buckets:
            bucket_name = get_loc_env(self._location, bucket_name)
            bucket = self.client.get_bucket(bucket_name)

            for blob in bucket.list_blobs(prefix=prefix):
                blob.delete()
        return

    def _save(self, name, content):
        storage = getattr(content, '_storage', None)

        if storage and storage == self:
            # If already exists, just copy it.
            return self.copy(content.name, name)

        return super()._save(name, content)
Example #38
class S3Boto3StorageFile(File):
    """
    The default file object used by the S3Boto3Storage backend.

    This file implements file streaming using boto's multipart
    uploading functionality. The file can be opened in read or
    write mode.

    This class extends Django's File class. However, the contained
    data is only the data contained in the current buffer. So you
    should not access the contained file object directly. You should
    access the data via this class.

    Warning: This file *must* be closed using the close() method in
    order to properly write the file to S3. Be sure to close the file
    in your application.
    """
    buffer_size = setting('AWS_S3_FILE_BUFFER_SIZE', 5242880)

    def __init__(self, name, mode, storage, buffer_size=None):
        if 'r' in mode and 'w' in mode:
            raise ValueError("Can't combine 'r' and 'w' in mode.")
        self._storage = storage
        self.name = name[len(self._storage.location):].lstrip('/')
        self._mode = mode
        self._force_mode = (lambda b: b) if 'b' in mode else (
            lambda b: b.decode())
        self.obj = storage.bucket.Object(name)
        if 'w' not in mode:
            # Force early RAII-style exception if object does not exist
            self.obj.load()
        self._is_dirty = False
        self._raw_bytes_written = 0
        self._file = None
        self._multipart = None
        # 5 MB is the minimum part size (if there is more than one part).
        # Amazon allows up to 10,000 parts.  The default supports uploads
        # up to roughly 50 GB.  Increase the part size to accommodate
        # files larger than this.
        if buffer_size is not None:
            self.buffer_size = buffer_size
        self._write_counter = 0

    @property
    def size(self):
        return self.obj.content_length

    def _get_file(self):
        if self._file is None:
            self._file = SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix=".S3Boto3StorageFile",
                dir=setting("FILE_UPLOAD_TEMP_DIR"))
            if 'r' in self._mode:
                self._is_dirty = False
                self.obj.download_fileobj(self._file)
                self._file.seek(0)
            if self._storage.gzip and self.obj.content_encoding == 'gzip':
                self._file = GzipFile(mode=self._mode,
                                      fileobj=self._file,
                                      mtime=0.0)
        return self._file

    def _set_file(self, value):
        self._file = value

    file = property(_get_file, _set_file)

    def read(self, *args, **kwargs):
        if 'r' not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        return self._force_mode(super().read(*args, **kwargs))

    def readline(self, *args, **kwargs):
        if 'r' not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        return self._force_mode(super().readline(*args, **kwargs))

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        if self._multipart is None:
            self._multipart = self.obj.initiate_multipart_upload(
                **self._storage._get_write_parameters(self.obj.key))
        if self.buffer_size <= self._buffer_file_size:
            self._flush_write_buffer()
        bstr = force_bytes(content)
        self._raw_bytes_written += len(bstr)
        return super().write(bstr)

    @property
    def _buffer_file_size(self):
        pos = self.file.tell()
        self.file.seek(0, os.SEEK_END)
        length = self.file.tell()
        self.file.seek(pos)
        return length

    def _flush_write_buffer(self):
        """
        Flushes the write buffer.
        """
        if self._buffer_file_size:
            self._write_counter += 1
            self.file.seek(0)
            part = self._multipart.Part(self._write_counter)
            part.upload(Body=self.file.read())
            self.file.seek(0)
            self.file.truncate()

    def _create_empty_on_close(self):
        """
        Attempt to create an empty file for this key when this File is closed if no bytes
        have been written and no object already exists on S3 for this key.

        This behavior is meant to mimic the behavior of Django's builtin FileSystemStorage,
        where files are always created after they are opened in write mode:

            f = storage.open("file.txt", mode="w")
            f.close()
        """
        assert "w" in self._mode
        assert self._raw_bytes_written == 0

        try:
            # Check if the object exists on the server; if so, don't do anything
            self.obj.load()
        except ClientError as err:
            if err.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                self.obj.put(Body=b"",
                             **self._storage._get_write_parameters(
                                 self.obj.key))
            else:
                raise

    def close(self):
        if self._is_dirty:
            self._flush_write_buffer()
            # TODO: Possibly cache the part ids as they're being uploaded
            # instead of requesting parts from server. For now, emulating
            # s3boto's behavior.
            parts = [{
                'ETag': part.e_tag,
                'PartNumber': part.part_number
            } for part in self._multipart.parts.all()]
            self._multipart.complete(MultipartUpload={'Parts': parts})
        else:
            if self._multipart is not None:
                self._multipart.abort()
            if 'w' in self._mode and self._raw_bytes_written == 0:
                self._create_empty_on_close()
        if self._file is not None:
            self._file.close()
            self._file = None
Example #39
 def test_get_setting(self):
     value = utils.setting('SECRET_KEY')
     self.assertEqual(settings.SECRET_KEY, value)
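
A companion assertion for the default-argument behaviour relied on throughout these examples, assuming the helper returns the supplied default when the setting is undefined (the setting name below is deliberately made up):

 def test_get_setting_default(self):
     # 'NOT_A_REAL_SETTING' is a hypothetical, undefined name
     value = utils.setting('NOT_A_REAL_SETTING', 'fallback')
     self.assertEqual('fallback', value)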
Example #40
class S3Boto3Storage(Storage):
    """
    Amazon Simple Storage Service using Boto3

    This storage backend supports opening files in read or write
    mode and supports streaming (buffering) data in chunks to S3
    when writing.
    """
    connection_service_name = 's3'
    default_content_type = 'application/octet-stream'
    connection_response_error = ClientError
    file_class = S3Boto3StorageFile
    # If config provided in init, signature_version and addressing_style settings/args are ignored.
    config = None

    # used for looking up the access and secret key from env vars
    access_key_names = ['AWS_S3_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID']
    secret_key_names = ['AWS_S3_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY']

    access_key = setting('AWS_S3_ACCESS_KEY_ID', setting('AWS_ACCESS_KEY_ID'))
    secret_key = setting('AWS_S3_SECRET_ACCESS_KEY',
                         setting('AWS_SECRET_ACCESS_KEY'))
    file_overwrite = setting('AWS_S3_FILE_OVERWRITE', True)
    object_parameters = setting('AWS_S3_OBJECT_PARAMETERS', {})
    bucket_name = setting('AWS_STORAGE_BUCKET_NAME')
    auto_create_bucket = setting('AWS_AUTO_CREATE_BUCKET', False)
    default_acl = setting('AWS_DEFAULT_ACL', 'public-read')
    bucket_acl = setting('AWS_BUCKET_ACL', default_acl)
    querystring_auth = setting('AWS_QUERYSTRING_AUTH', True)
    querystring_expire = setting('AWS_QUERYSTRING_EXPIRE', 3600)
    signature_version = setting('AWS_S3_SIGNATURE_VERSION')
    reduced_redundancy = setting('AWS_REDUCED_REDUNDANCY', False)
    location = setting('AWS_LOCATION', '')
    encryption = setting('AWS_S3_ENCRYPTION', False)
    custom_domain = setting('AWS_S3_CUSTOM_DOMAIN')
    addressing_style = setting('AWS_S3_ADDRESSING_STYLE')
    secure_urls = setting('AWS_S3_SECURE_URLS', True)
    file_name_charset = setting('AWS_S3_FILE_NAME_CHARSET', 'utf-8')
    gzip = setting('AWS_IS_GZIPPED', False)
    preload_metadata = setting('AWS_PRELOAD_METADATA', False)
    gzip_content_types = setting('GZIP_CONTENT_TYPES', (
        'text/css',
        'text/javascript',
        'application/javascript',
        'application/x-javascript',
        'image/svg+xml',
    ))
    url_protocol = setting('AWS_S3_URL_PROTOCOL', 'http:')
    endpoint_url = setting('AWS_S3_ENDPOINT_URL', None)
    region_name = setting('AWS_S3_REGION_NAME', None)
    use_ssl = setting('AWS_S3_USE_SSL', True)

    # The max amount of memory a returned file can take up before being
    # rolled over into a temporary file on disk. Default is 0: Do not roll over.
    max_memory_size = setting('AWS_S3_MAX_MEMORY_SIZE', 0)

    def __init__(self, acl=None, bucket=None, **settings):
        # check if some of the settings we've provided as class attributes
        # need to be overwritten with values passed in here
        for name, value in settings.items():
            if hasattr(self, name):
                setattr(self, name, value)

        # For backward-compatibility of old differing parameter names
        if acl is not None:
            self.default_acl = acl
        if bucket is not None:
            self.bucket_name = bucket

        self.location = (self.location or '').lstrip('/')
        # Backward-compatibility: the SECURE_URL setting predates URL_PROTOCOL, so
        # we fall back to https when it is set in order to avoid constructing
        # insecure urls.
        if self.secure_urls:
            self.url_protocol = 'https:'

        self._entries = {}
        self._bucket = None
        self._connection = None

        if not self.access_key and not self.secret_key:
            self.access_key, self.secret_key = self._get_access_keys()

        if not self.config:
            self.config = Config(
                s3={'addressing_style': self.addressing_style},
                signature_version=self.signature_version)

    @property
    def connection(self):
        # TODO: Support host, port like in s3boto
        # Note that proxies are handled by environment variables that the underlying
        # urllib/requests libraries read. See https://github.com/boto/boto3/issues/338
        # and http://docs.python-requests.org/en/latest/user/advanced/#proxies
        if self._connection is None:
            session = boto3.session.Session()
            self._connection = session.resource(
                self.connection_service_name,
                aws_access_key_id=self.access_key,
                aws_secret_access_key=self.secret_key,
                region_name=self.region_name,
                use_ssl=self.use_ssl,
                endpoint_url=self.endpoint_url,
                config=self.config)
        return self._connection

    @property
    def bucket(self):
        """
        Get the current bucket. If there is no current bucket object
        create it.
        """
        if self._bucket is None:
            self._bucket = self._get_or_create_bucket(self.bucket_name)
        return self._bucket

    @property
    def entries(self):
        """
        Get the locally cached files for the bucket.
        """
        if self.preload_metadata and not self._entries:
            self._entries = dict(
                (self._decode_name(entry.key), entry)
                for entry in self.bucket.objects.filter(Prefix=self.location))
        return self._entries

    def _get_access_keys(self):
        """
        Gets the access keys to use when accessing S3. If none
        are provided to the class in the constructor or in the
        settings then get them from the environment variables.
        """
        def lookup_env(names):
            for name in names:
                value = os.environ.get(name)
                if value:
                    return value

        access_key = self.access_key or lookup_env(self.access_key_names)
        secret_key = self.secret_key or lookup_env(self.secret_key_names)
        return access_key, secret_key

    def _get_or_create_bucket(self, name):
        """
        Retrieves a bucket if it exists, otherwise creates it.
        """
        bucket = self.connection.Bucket(name)
        if self.auto_create_bucket:
            try:
                # Directly call head_bucket instead of bucket.load() because head_bucket()
                # fails on wrong region, while bucket.load() does not.
                bucket.meta.client.head_bucket(Bucket=name)
            except self.connection_response_error as err:
                if err.response['ResponseMetadata']['HTTPStatusCode'] == 301:
                    raise ImproperlyConfigured(
                        "Bucket %s exists, but in a different "
                        "region than we are connecting to. Set "
                        "the region to connect to by setting "
                        "AWS_S3_REGION_NAME to the correct region." % name)

                elif err.response['ResponseMetadata']['HTTPStatusCode'] == 404:
                    # Notes: When using the us-east-1 Standard endpoint, you can create
                    # buckets in other regions. The same is not true when hitting region specific
                    # endpoints. However, if you create the bucket in a different region, the
                    # connection will fail all future requests to the Bucket after the creation
                    # (301 Moved Permanently).
                    #
                    # For simplicity, we enforce in S3Boto3Storage that any auto-created
                    # bucket must match the region that the connection is for.
                    #
                    # Also note that Amazon specifically disallows "us-east-1" when passing bucket
                    # region names; LocationConstraint *must* be blank to create in US Standard.
                    bucket_params = {'ACL': self.bucket_acl}
                    region_name = self.connection.meta.client.meta.region_name
                    if region_name != 'us-east-1':
                        bucket_params['CreateBucketConfiguration'] = {
                            'LocationConstraint': region_name
                        }
                    bucket.create(**bucket_params)
                else:
                    raise ImproperlyConfigured(
                        "Bucket %s does not exist. Buckets "
                        "can be automatically created by "
                        "setting AWS_AUTO_CREATE_BUCKET to "
                        "``True``." % name)
        return bucket

    def _clean_name(self, name):
        """
        Cleans the name so that Windows style paths work
        """
        # Normalize Windows style paths
        clean_name = posixpath.normpath(name).replace('\\', '/')

        # os.path.normpath() can strip trailing slashes so we implement
        # a workaround here.
        if name.endswith('/') and not clean_name.endswith('/'):
            # Add a trailing slash as it was stripped.
            return clean_name + '/'
        else:
            return clean_name

    def _normalize_name(self, name):
        """
        Normalizes the name so that paths like /path/to/ignored/../something.txt
        work. We check to make sure that the path pointed to is not outside
        the directory specified by the LOCATION setting.
        """
        try:
            return safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." %
                                      name)

    def _encode_name(self, name):
        return smart_str(name, encoding=self.file_name_charset)

    def _decode_name(self, name):
        return force_text(name, encoding=self.file_name_charset)

    def _compress_content(self, content):
        """Gzip a given string content."""
        zbuf = BytesIO()
        zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
        try:
            zfile.write(force_bytes(content.read()))
        finally:
            zfile.close()
        zbuf.seek(0)
        # Boto 2 returned the InMemoryUploadedFile with the file pointer replaced,
        # but Boto 3 seems to have issues with that. No need for fp.name in Boto3
        # so just returning the BytesIO directly
        return zbuf

    def _open(self, name, mode='rb'):
        name = self._normalize_name(self._clean_name(name))
        try:
            f = self.file_class(name, mode, self)
        except self.connection_response_error as err:
            if err.response['ResponseMetadata']['HTTPStatusCode'] == 404:
                raise IOError('File does not exist: %s' % name)
            raise  # Let it bubble up if it was some other error
        return f

    def _save(self, name, content):
        cleaned_name = self._clean_name(name)
        name = self._normalize_name(cleaned_name)
        parameters = self.object_parameters.copy()
        content_type = getattr(
            content, 'content_type',
            mimetypes.guess_type(name)[0] or self.default_content_type)

        # setting the content_type in the key object is not enough.
        parameters.update({'ContentType': content_type})

        if self.gzip and content_type in self.gzip_content_types:
            content = self._compress_content(content)
            parameters.update({'ContentEncoding': 'gzip'})

        encoded_name = self._encode_name(name)
        obj = self.bucket.Object(encoded_name)
        if self.preload_metadata:
            self._entries[encoded_name] = obj

        self._save_content(obj, content, parameters=parameters)
        # Note: In boto3, after a put, last_modified is automatically reloaded
        # the next time it is accessed; no need to specifically reload it.
        return cleaned_name

    def _save_content(self, obj, content, parameters):
        # only pass backwards incompatible arguments if they vary from the default
        put_parameters = parameters.copy() if parameters else {}
        if self.encryption:
            put_parameters['ServerSideEncryption'] = 'AES256'
        if self.reduced_redundancy:
            put_parameters['StorageClass'] = 'REDUCED_REDUNDANCY'
        if self.default_acl:
            put_parameters['ACL'] = self.default_acl
        content.seek(0, os.SEEK_SET)
        obj.upload_fileobj(content, ExtraArgs=put_parameters)

    def delete(self, name):
        name = self._normalize_name(self._clean_name(name))
        self.bucket.Object(self._encode_name(name)).delete()

    def exists(self, name):
        if not name:
            try:
                self.bucket
                return True
            except ImproperlyConfigured:
                return False
        name = self._normalize_name(self._clean_name(name))
        if self.entries:
            return name in self.entries
        obj = self.bucket.Object(self._encode_name(name))
        try:
            obj.load()
            return True
        except self.connection_response_error:
            return False

    def listdir(self, name):
        name = self._normalize_name(self._clean_name(name))
        # For bucket.objects.filter and the logic below, the name needs to end in '/',
        # but for the root path "" we leave it as an empty string.
        if name and not name.endswith('/'):
            name += '/'

        files = []
        dirs = set()
        base_parts = name.split("/")[:-1]
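        # e.g. with name 'media/docs/' a key 'media/docs/a/b.txt' yields
        # parts ['a', 'b.txt'] below, so 'a' is recorded as a directory.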
        for item in self.bucket.objects.filter(Prefix=self._encode_name(name)):
            parts = item.key.split("/")
            parts = parts[len(base_parts):]
            if len(parts) == 1:
                # File
                files.append(parts[0])
            elif len(parts) > 1:
                # Directory
                dirs.add(parts[0])
        return list(dirs), files

    def size(self, name):
        name = self._normalize_name(self._clean_name(name))
        if self.entries:
            entry = self.entries.get(name)
            if entry:
                return entry.content_length
            return 0
        return self.bucket.Object(self._encode_name(name)).content_length

    def get_modified_time(self, name):
        """
        Returns an (aware) datetime object containing the last modified time if
        USE_TZ is True, otherwise returns a naive datetime in the local timezone.
        """
        name = self._normalize_name(self._clean_name(name))
        entry = self.entries.get(name)
        # only call self.bucket.Object() if the key is not found
        # in the preloaded metadata.
        if entry is None:
            entry = self.bucket.Object(self._encode_name(name))
        if setting('USE_TZ'):
            # boto3 returns TZ aware timestamps
            return entry.last_modified
        else:
            return localtime(entry.last_modified).replace(tzinfo=None)

    def modified_time(self, name):
        """Returns a naive datetime object containing the last modified time."""
        # If USE_TZ=False then get_modified_time returns a naive datetime, so we
        # just return that; otherwise we have to localize and strip the tzinfo.
        mtime = self.get_modified_time(name)
        return mtime if is_naive(mtime) else localtime(mtime).replace(
            tzinfo=None)

    def _strip_signing_parameters(self, url):
        # Boto3 does not currently support generating URLs that are unsigned. Instead we
        # take the signed URLs and strip any querystring params related to signing and expiration.
        # Note that this may end up with URLs that are still invalid, especially if params are
        # passed in that only work with signed URLs, e.g. response header params.
        # The code attempts to strip all query parameters that match names of known parameters
        # from v2 and v4 signatures, regardless of the actual signature version used.
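        # For example (hypothetical values), a presigned URL such as
        #   https://bucket.s3.amazonaws.com/f.txt?X-Amz-Signature=abc&X-Amz-Expires=3600&response-content-type=text/plain
        # is reduced to
        #   https://bucket.s3.amazonaws.com/f.txt?response-content-type=text/plain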
        split_url = urlparse.urlsplit(url)
        qs = urlparse.parse_qsl(split_url.query, keep_blank_values=True)
        blacklist = set([
            'x-amz-algorithm', 'x-amz-credential', 'x-amz-date',
            'x-amz-expires', 'x-amz-signedheaders', 'x-amz-signature',
            'x-amz-security-token', 'awsaccesskeyid', 'expires', 'signature'
        ])
        filtered_qs = ((key, val) for key, val in qs
                       if key.lower() not in blacklist)
        # Note: Parameters that did not have a value in the original query string will
        # have an '=' sign appended to them, e.g. ?foo&bar becomes ?foo=&bar=
        joined_qs = ('='.join(keyval) for keyval in filtered_qs)
        split_url = split_url._replace(query="&".join(joined_qs))
        return split_url.geturl()

    def url(self, name, parameters=None, expire=None):
        # Preserve the trailing slash after normalizing the path.
        # TODO: Handle force_http=not self.secure_urls like in s3boto
        name = self._normalize_name(self._clean_name(name))
        if self.custom_domain:
            return "%s//%s/%s" % (self.url_protocol, self.custom_domain,
                                  filepath_to_uri(name))
        if expire is None:
            expire = self.querystring_expire

        params = parameters.copy() if parameters else {}
        params['Bucket'] = self.bucket.name
        params['Key'] = self._encode_name(name)
        url = self.bucket.meta.client.generate_presigned_url('get_object',
                                                             Params=params,
                                                             ExpiresIn=expire)
        if self.querystring_auth:
            return url
        return self._strip_signing_parameters(url)

    def get_available_name(self, name, max_length=None):
        """Overwrite existing file with the same name."""
        if self.file_overwrite:
            name = self._clean_name(name)
            return name
        return super(S3Boto3Storage, self).get_available_name(name, max_length)
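# A brief usage sketch for the url() method above; the object key and expiry
# below are hypothetical placeholders, not values from the original code.
storage = S3Boto3Storage()
# With AWS_QUERYSTRING_AUTH enabled this returns a presigned URL valid for
# 5 minutes; with it disabled, _strip_signing_parameters() removes the
# signing query parameters before the URL is returned.
url = storage.url('reports/2020-01.pdf', expire=300)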
Example #41
0
class S3BotoStorageFile(File):
    """
    The default file object used by the S3BotoStorage backend.

    This file implements file streaming using boto's multipart
    uploading functionality. The file can be opened in read or
    write mode.

    This class extends Django's File class. However, the contained
    data is only the data contained in the current buffer. So you
    should not access the contained file object directly. You should
    access the data via this class.

    Warning: This file *must* be closed using the close() method in
    order to properly write the file to S3. Be sure to close the file
    in your application.
    """
    # TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.
    # TODO: When Django drops support for Python 2.5, rewrite to use the
    #       BufferedIO streams in the Python 2.6 io module.
    buffer_size = setting('AWS_S3_FILE_BUFFER_SIZE', 5242880)

    def __init__(self, name, mode, storage, buffer_size=None):
        self._storage = storage
        self.name = name[len(self._storage.location):].lstrip('/')
        self._mode = mode
        self.key = storage.bucket.get_key(self._storage._encode_name(name))
        if not self.key and 'w' in mode:
            self.key = storage.bucket.new_key(storage._encode_name(name))
        self._is_dirty = False
        self._file = None
        self._multipart = None
        # 5 MB is the minimum part size (if there is more than one part).
        # Amazon allows up to 10,000 parts.  The default supports uploads
        # up to roughly 50 GB.  Increase the part size to accommodate
        # files larger than this.
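        # With the 5 MB default that is 5,242,880 bytes * 10,000 parts,
        # i.e. roughly 48.8 GiB.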
        if buffer_size is not None:
            self.buffer_size = buffer_size
        self._write_counter = 0

        if not hasattr(django_settings, 'AWS_DEFAULT_ACL'):
            warnings.warn(
                "The default behavior of S3BotoStorage is insecure. By default files "
                "and new buckets are saved with an ACL of 'public-read' (globally "
                "publicly readable). To change to using the bucket's default ACL "
                "set AWS_DEFAULT_ACL = None, otherwise to silence this warning "
                "explicitly set AWS_DEFAULT_ACL."
            )

    @property
    def size(self):
        return self.key.size

    def _get_file(self):
        if self._file is None:
            self._file = SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix='.S3BotoStorageFile',
                dir=setting('FILE_UPLOAD_TEMP_DIR')
            )
            if 'r' in self._mode:
                self._is_dirty = False
                self.key.get_contents_to_file(self._file)
                self._file.seek(0)
            if self._storage.gzip and self.key.content_encoding == 'gzip':
                self._file = GzipFile(mode=self._mode, fileobj=self._file)
        return self._file

    def _set_file(self, value):
        self._file = value

    file = property(_get_file, _set_file)

    def read(self, *args, **kwargs):
        if 'r' not in self._mode:
            raise AttributeError('File was not opened in read mode.')
        return super(S3BotoStorageFile, self).read(*args, **kwargs)

    def write(self, content, *args, **kwargs):
        if 'w' not in self._mode:
            raise AttributeError('File was not opened in write mode.')
        self._is_dirty = True
        if self._multipart is None:
            provider = self.key.bucket.connection.provider
            upload_headers = {}
            if self._storage.default_acl:
                upload_headers[provider.acl_header] = self._storage.default_acl
            upload_headers.update({
                'Content-Type': mimetypes.guess_type(self.key.name)[0] or self._storage.key_class.DefaultContentType
            })
            upload_headers.update(self._storage.headers)
            self._multipart = self._storage.bucket.initiate_multipart_upload(
                self.key.name,
                headers=upload_headers,
                reduced_redundancy=self._storage.reduced_redundancy,
                encrypt_key=self._storage.encryption,
            )
        if self.buffer_size <= self._buffer_file_size:
            self._flush_write_buffer()
        return super(S3BotoStorageFile, self).write(force_bytes(content), *args, **kwargs)

    @property
    def _buffer_file_size(self):
        pos = self.file.tell()
        self.file.seek(0, os.SEEK_END)
        length = self.file.tell()
        self.file.seek(pos)
        return length

    def _flush_write_buffer(self):
        if self._buffer_file_size:
            self._write_counter += 1
            self.file.seek(0)
            headers = self._storage.headers.copy()
            self._multipart.upload_part_from_file(
                self.file, self._write_counter, headers=headers)
            self.file.seek(0)
            self.file.truncate()

    def close(self):
        if self._is_dirty:
            self._flush_write_buffer()
            self._multipart.complete_upload()
        else:
            if self._multipart is not None:
                self._multipart.cancel_upload()
        self.key.close()
        if self._file is not None:
            self._file.close()
            self._file = None
Example #42
0
 def url(self, name):
     return u"{}/{}".format(setting('MEDIA_URL').rstrip('/'), name)
Example #43
0
class DigitalOceanSpacesPrivateMediaStorage(DigitalOceanSpacesStorage):
    location = setting('DO_SPACES_PRIVATE_MEDIA_LOCATION')
    default_acl = 'private'
    file_overwrite = False
    custom_domain = False
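# A hedged usage sketch for the private-media subclass above; the model and
# field names are hypothetical.
from django.db import models

class Invoice(models.Model):
    pdf = models.FileField(storage=DigitalOceanSpacesPrivateMediaStorage())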
Example #44
0
 def get_modified_time(self, name):
     name = self._normalize_name(clean_name(name))
     blob = self._get_blob(self._encode_name(name))
     updated = blob.updated
     return updated if setting('USE_TZ') else timezone.make_naive(updated)
Example #45
0
 def get_modified_time(self, name):
     dt = tz.make_aware(parse_ts(self._get_key(name).last_modified), tz.utc)
     return dt if setting('USE_TZ') else tz.make_naive(dt)
Example #46
0
 def test_setting_unfound(self):
     self.assertIsNone(utils.setting('FOO'))
     self.assertEqual(utils.setting('FOO', 'bar'), 'bar')
     with self.assertRaises(ImproperlyConfigured):
         utils.setting('FOO', strict=True)
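# The test above pins down the contract of the setting() helper used throughout
# these examples; a minimal sketch consistent with that contract (not
# necessarily the library's exact implementation):
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

def setting(name, default=None, strict=False):
    """Return settings.<name> if present, else `default`; raise when strict."""
    if not hasattr(settings, name):
        if strict:
            raise ImproperlyConfigured("You must provide settings.%s" % name)
        return default
    return getattr(settings, name)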
Example #47
0
class S3BotoStorage(Storage):
    """
    Amazon Simple Storage Service using Boto

    This storage backend supports opening files in read or write
    mode and supports streaming(buffering) data in chunks to S3
    when writing.
    """
    connection_class = S3Connection
    connection_response_error = S3ResponseError
    file_class = S3BotoStorageFile
    key_class = S3Key

    # used for looking up the access and secret key from env vars
    access_key_names = ['AWS_S3_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID']
    secret_key_names = ['AWS_S3_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY']
    security_token_names = ['AWS_SESSION_TOKEN', 'AWS_SECURITY_TOKEN']
    security_token = None

    access_key = setting('AWS_S3_ACCESS_KEY_ID', setting('AWS_ACCESS_KEY_ID'))
    secret_key = setting('AWS_S3_SECRET_ACCESS_KEY', setting('AWS_SECRET_ACCESS_KEY'))
    file_overwrite = setting('AWS_S3_FILE_OVERWRITE', True)
    headers = setting('AWS_HEADERS', {})
    bucket_name = setting('AWS_STORAGE_BUCKET_NAME')
    auto_create_bucket = setting('AWS_AUTO_CREATE_BUCKET', False)
    default_acl = setting('AWS_DEFAULT_ACL', 'public-read')
    bucket_acl = setting('AWS_BUCKET_ACL', default_acl)
    querystring_auth = setting('AWS_QUERYSTRING_AUTH', True)
    querystring_expire = setting('AWS_QUERYSTRING_EXPIRE', 3600)
    reduced_redundancy = setting('AWS_REDUCED_REDUNDANCY', False)
    location = setting('AWS_LOCATION', '')
    origin = setting('AWS_ORIGIN', Location.DEFAULT)
    encryption = setting('AWS_S3_ENCRYPTION', False)
    custom_domain = setting('AWS_S3_CUSTOM_DOMAIN')
    calling_format = setting('AWS_S3_CALLING_FORMAT', SubdomainCallingFormat())
    secure_urls = setting('AWS_S3_SECURE_URLS', True)
    file_name_charset = setting('AWS_S3_FILE_NAME_CHARSET', 'utf-8')
    gzip = setting('AWS_IS_GZIPPED', False)
    preload_metadata = setting('AWS_PRELOAD_METADATA', False)
    gzip_content_types = setting('GZIP_CONTENT_TYPES', (
        'text/css',
        'text/javascript',
        'application/javascript',
        'application/x-javascript',
        'image/svg+xml',
    ))
    url_protocol = setting('AWS_S3_URL_PROTOCOL', 'http:')
    host = setting('AWS_S3_HOST', S3Connection.DefaultHost)
    use_ssl = setting('AWS_S3_USE_SSL', True)
    port = setting('AWS_S3_PORT')
    proxy = setting('AWS_S3_PROXY_HOST')
    proxy_port = setting('AWS_S3_PROXY_PORT')
    max_memory_size = setting('AWS_S3_MAX_MEMORY_SIZE', 0)

    def __init__(self, acl=None, bucket=None, **settings):
        # check if some of the settings we've provided as class attributes
        # need to be overwritten with values passed in here
        for name, value in settings.items():
            if hasattr(self, name):
                setattr(self, name, value)

        # For backward-compatibility of old differing parameter names
        if acl is not None:
            self.default_acl = acl
        if bucket is not None:
            self.bucket_name = bucket

        check_location(self)

        # Backward-compatibility: because the SECURE_URLS setting predates the
        # URL_PROTOCOL setting, we fall back to https when it is enabled in
        # order to avoid building insecure URLs.
        if self.secure_urls:
            self.url_protocol = 'https:'

        self._entries = {}
        self._bucket = None
        self._connection = None
        self._loaded_meta = False

        self.access_key, self.secret_key = self._get_access_keys()
        self.security_token = self._get_security_token()

    @property
    def connection(self):
        if self._connection is None:
            kwargs = self._get_connection_kwargs()

            self._connection = self.connection_class(
                self.access_key,
                self.secret_key,
                **kwargs
            )
        return self._connection

    def _get_connection_kwargs(self):
        return dict(
            security_token=self.security_token,
            is_secure=self.use_ssl,
            calling_format=self.calling_format,
            host=self.host,
            port=self.port,
            proxy=self.proxy,
            proxy_port=self.proxy_port
        )

    @property
    def bucket(self):
        """
        Get the current bucket. If there is no current bucket object
        create it.
        """
        if self._bucket is None:
            self._bucket = self._get_or_create_bucket(self.bucket_name)
        return self._bucket

    @property
    def entries(self):
        """
        Get the locally cached files for the bucket.
        """
        if self.preload_metadata and not self._loaded_meta:
            self._entries.update({
                self._decode_name(entry.key): entry
                for entry in self.bucket.list(prefix=self.location)
            })
            self._loaded_meta = True
        return self._entries

    def _get_access_keys(self):
        """
        Gets the access keys to use when accessing S3. If they are not
        provided in the settings, they are looked up from the environment
        variables.
        """
        access_key = self.access_key or lookup_env(S3BotoStorage.access_key_names)
        secret_key = self.secret_key or lookup_env(S3BotoStorage.secret_key_names)
        return access_key, secret_key

    def _get_security_token(self):
        """
        Gets the security token to use when accessing S3. If it is not
        set, it is looked up from the environment variables.
        """
        security_token = self.security_token or lookup_env(S3BotoStorage.security_token_names)
        return security_token

    def _get_or_create_bucket(self, name):
        """
        Retrieves a bucket if it exists, otherwise creates it.
        """
        try:
            return self.connection.get_bucket(name, validate=self.auto_create_bucket)
        except self.connection_response_error:
            if self.auto_create_bucket:
                bucket = self.connection.create_bucket(name, location=self.origin)
                if not hasattr(django_settings, 'AWS_BUCKET_ACL'):
                    warnings.warn(
                        "The default behavior of S3BotoStorage is insecure. By default new buckets "
                        "are saved with an ACL of 'public-read' (globally publicly readable). To change "
                        "to using Amazon's default of the bucket owner set AWS_DEFAULT_ACL = None, "
                        "otherwise to silence this warning explicitly set AWS_DEFAULT_ACL."
                    )
                if self.bucket_acl:
                    bucket.set_acl(self.bucket_acl)
                return bucket
            raise ImproperlyConfigured('Bucket %s does not exist. Buckets '
                                       'can be automatically created by '
                                       'setting AWS_AUTO_CREATE_BUCKET to '
                                       '``True``.' % name)

    def _clean_name(self, name):
        """
        Cleans the name so that Windows style paths work
        """
        return clean_name(name)

    def _normalize_name(self, name):
        """
        Normalizes the name so that paths like /path/to/ignored/../something.txt
        work. We check to make sure that the path pointed to is not outside
        the directory specified by the LOCATION setting.
        """
        try:
            return safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." %
                                      name)

    def _encode_name(self, name):
        return smart_str(name, encoding=self.file_name_charset)

    def _decode_name(self, name):
        return force_text(name, encoding=self.file_name_charset)

    def _compress_content(self, content):
        """Gzip a given string content."""
        zbuf = io.BytesIO()
        #  The GZIP header has a modification time attribute (see http://www.zlib.org/rfc-gzip.html)
        #  This means each time a file is compressed it changes even if the other contents don't change
        #  For S3 this defeats detection of changes using MD5 sums on gzipped files
        #  Fixing the mtime at 0.0 at compression time avoids this problem
        zfile = GzipFile(mode='wb', fileobj=zbuf, mtime=0.0)
        try:
            zfile.write(force_bytes(content.read()))
        finally:
            zfile.close()
        zbuf.seek(0)
        content.file = zbuf
        content.seek(0)
        return content

    def _open(self, name, mode='rb'):
        name = self._normalize_name(self._clean_name(name))
        f = self.file_class(name, mode, self)
        if not f.key:
            raise IOError('File does not exist: %s' % name)
        return f

    def _save(self, name, content):
        cleaned_name = self._clean_name(name)
        name = self._normalize_name(cleaned_name)
        headers = self.headers.copy()
        _type, encoding = mimetypes.guess_type(name)
        content_type = getattr(content, 'content_type', None)
        content_type = content_type or _type or self.key_class.DefaultContentType

        # setting the content_type in the key object is not enough.
        headers.update({'Content-Type': content_type})

        if self.gzip and content_type in self.gzip_content_types:
            content = self._compress_content(content)
            headers.update({'Content-Encoding': 'gzip'})
        elif encoding:
            # If the content already has a particular encoding, set it
            headers.update({'Content-Encoding': encoding})

        content.name = cleaned_name
        encoded_name = self._encode_name(name)
        key = self.bucket.get_key(encoded_name)
        if not key:
            key = self.bucket.new_key(encoded_name)
        if self.preload_metadata:
            self._entries[encoded_name] = key
            key.last_modified = datetime.utcnow().strftime(ISO8601)

        key.set_metadata('Content-Type', content_type)
        self._save_content(key, content, headers=headers)
        return cleaned_name

    def _save_content(self, key, content, headers):
        # only pass backwards incompatible arguments if they vary from the default
        kwargs = {}
        if self.encryption:
            kwargs['encrypt_key'] = self.encryption
        key.set_contents_from_file(content, headers=headers,
                                   policy=self.default_acl,
                                   reduced_redundancy=self.reduced_redundancy,
                                   rewind=True, **kwargs)

    def _get_key(self, name):
        name = self._normalize_name(self._clean_name(name))
        if self.entries:
            return self.entries.get(name)
        return self.bucket.get_key(self._encode_name(name))

    def delete(self, name):
        name = self._normalize_name(self._clean_name(name))
        self.bucket.delete_key(self._encode_name(name))

    def exists(self, name):
        if not name:  # root element aka the bucket
            try:
                self.bucket
                return True
            except ImproperlyConfigured:
                return False

        return self._get_key(name) is not None

    def listdir(self, name):
        name = self._normalize_name(self._clean_name(name))
        # For bucket.list and the logic below, the name needs to end in '/',
        # but for the root path "" we leave it as an empty string.
        if name and not name.endswith('/'):
            name += '/'

        dirlist = self.bucket.list(self._encode_name(name))
        files = []
        dirs = set()
        base_parts = name.split('/')[:-1]
        for item in dirlist:
            parts = item.name.split('/')
            parts = parts[len(base_parts):]
            if len(parts) == 1:
                # File
                files.append(parts[0])
            elif len(parts) > 1:
                # Directory
                dirs.add(parts[0])
        return list(dirs), files

    def size(self, name):
        return self._get_key(name).size

    def get_modified_time(self, name):
        dt = tz.make_aware(parse_ts(self._get_key(name).last_modified), tz.utc)
        return dt if setting('USE_TZ') else tz.make_naive(dt)

    def modified_time(self, name):
        dt = tz.make_aware(parse_ts(self._get_key(name).last_modified), tz.utc)
        return tz.make_naive(dt)

    def url(self, name, headers=None, response_headers=None, expire=None):
        # Preserve the trailing slash after normalizing the path.
        name = self._normalize_name(self._clean_name(name))
        if self.custom_domain:
            return '{}//{}/{}'.format(self.url_protocol,
                                      self.custom_domain, filepath_to_uri(name))

        if expire is None:
            expire = self.querystring_expire

        return self.connection.generate_url(
            expire,
            method='GET',
            bucket=self.bucket.name,
            key=self._encode_name(name),
            headers=headers,
            query_auth=self.querystring_auth,
            force_http=not self.secure_urls,
            response_headers=response_headers,
        )

    def get_available_name(self, name, max_length=None):
        """ Overwrite existing file with the same name. """
        name = self._clean_name(name)
        if self.file_overwrite:
            return get_available_overwrite_name(name, max_length)
        return super(S3BotoStorage, self).get_available_name(name, max_length)
Example #48
0
class S3Boto3StorageFile(File):
    """
    The default file object used by the S3Boto3Storage backend.

    This file implements file streaming using boto's multipart
    uploading functionality. The file can be opened in read or
    write mode.

    This class extends Django's File class. However, the contained
    data is only the data contained in the current buffer. So you
    should not access the contained file object directly. You should
    access the data via this class.

    Warning: This file *must* be closed using the close() method in
    order to properly write the file to S3. Be sure to close the file
    in your application.
    """
    # TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.
    # TODO: When Django drops support for Python 2.5, rewrite to use the
    #       BufferedIO streams in the Python 2.6 io module.
    buffer_size = setting('AWS_S3_FILE_BUFFER_SIZE', 5242880)

    def __init__(self, name, mode, storage, buffer_size=None):
        self._storage = storage
        self.name = name[len(self._storage.location):].lstrip('/')
        self._mode = mode
        self.obj = storage.bucket.Object(storage._encode_name(name))
        if 'w' not in mode:
            # Force early RAII-style exception if object does not exist
            self.obj.load()
        self._is_dirty = False
        self._file = None
        self._multipart = None
        # 5 MB is the minimum part size (if there is more than one part).
        # Amazon allows up to 10,000 parts.  The default supports uploads
        # up to roughly 50 GB.  Increase the part size to accommodate
        # files larger than this.
        if buffer_size is not None:
            self.buffer_size = buffer_size
        self._write_counter = 0

    @property
    def size(self):
        return self.obj.content_length

    def _get_file(self):
        if self._file is None:
            self._file = SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix=".S3Boto3StorageFile",
                dir=setting("FILE_UPLOAD_TEMP_DIR", None))
            if 'r' in self._mode:
                self._is_dirty = False
                self._file.write(self.obj.get()['Body'].read())
                self._file.seek(0)
            if self._storage.gzip and self.obj.content_encoding == 'gzip':
                self._file = GzipFile(mode=self._mode,
                                      fileobj=self._file,
                                      mtime=0.0)
        return self._file

    def _set_file(self, value):
        self._file = value

    file = property(_get_file, _set_file)

    def read(self, *args, **kwargs):
        if 'r' not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        return super(S3Boto3StorageFile, self).read(*args, **kwargs)

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        if self._multipart is None:
            parameters = self._storage.object_parameters.copy()
            parameters['ACL'] = self._storage.default_acl
            parameters['ContentType'] = (mimetypes.guess_type(self.obj.key)[0]
                                         or self._storage.default_content_type)
            if self._storage.reduced_redundancy:
                parameters['StorageClass'] = 'REDUCED_REDUNDANCY'
            if self._storage.encryption:
                parameters['ServerSideEncryption'] = 'AES256'
            self._multipart = self.obj.initiate_multipart_upload(**parameters)
        if self.buffer_size <= self._buffer_file_size:
            self._flush_write_buffer()
        return super(S3Boto3StorageFile, self).write(force_bytes(content))

    @property
    def _buffer_file_size(self):
        pos = self.file.tell()
        self.file.seek(0, os.SEEK_END)
        length = self.file.tell()
        self.file.seek(pos)
        return length

    def _flush_write_buffer(self):
        """
        Flushes the write buffer.
        """
        if self._buffer_file_size:
            self._write_counter += 1
            self.file.seek(0)
            part = self._multipart.Part(self._write_counter)
            part.upload(Body=self.file.read())

    def close(self):
        if self._is_dirty:
            self._flush_write_buffer()
            # TODO: Possibly cache the part ids as they're being uploaded
            # instead of requesting parts from server. For now, emulating
            # s3boto's behavior.
            parts = [{
                'ETag': part.e_tag,
                'PartNumber': part.part_number
            } for part in self._multipart.parts.all()]
            self._multipart.complete(MultipartUpload={'Parts': parts})
        else:
            if self._multipart is not None:
                self._multipart.abort()
        if self._file is not None:
            self._file.close()
            self._file = None
Example #49
0
 def get_modified_time(self, name):
     dt = tz.make_aware(parse_ts(self._get_key(name).last_modified), tz.utc)
     return dt if setting('USE_TZ') else tz.make_naive(dt)
Example #50
0
 def url(self, name):
     return "{}{}/{}".format(setting('MEDIA_URL'), self.azure_container, name)
Example #51
0
    def get_default_settings(self):
        cloudfront_key_id = setting('AWS_CLOUDFRONT_KEY_ID')
        cloudfront_key = setting('AWS_CLOUDFRONT_KEY')
        if bool(cloudfront_key_id) ^ bool(cloudfront_key):
            raise ImproperlyConfigured(
                'Both AWS_CLOUDFRONT_KEY_ID and AWS_CLOUDFRONT_KEY must be '
                'provided together.')

        if cloudfront_key_id:
            cloudfront_signer = self.get_cloudfront_signer(
                cloudfront_key_id, cloudfront_key)
        else:
            cloudfront_signer = None

        return {
            "access_key":
            setting('AWS_S3_ACCESS_KEY_ID', setting('AWS_ACCESS_KEY_ID')),
            "secret_key":
            setting('AWS_S3_SECRET_ACCESS_KEY',
                    setting('AWS_SECRET_ACCESS_KEY')),
            "file_overwrite":
            setting('AWS_S3_FILE_OVERWRITE', True),
            "object_parameters":
            setting('AWS_S3_OBJECT_PARAMETERS', {}),
            "bucket_name":
            setting('AWS_STORAGE_BUCKET_NAME'),
            "querystring_auth":
            setting('AWS_QUERYSTRING_AUTH', True),
            "querystring_expire":
            setting('AWS_QUERYSTRING_EXPIRE', 3600),
            "signature_version":
            setting('AWS_S3_SIGNATURE_VERSION'),
            "location":
            setting('AWS_LOCATION', ''),
            "custom_domain":
            setting('AWS_S3_CUSTOM_DOMAIN'),
            "cloudfront_signer":
            cloudfront_signer,
            "addressing_style":
            setting('AWS_S3_ADDRESSING_STYLE'),
            "secure_urls":
            setting('AWS_S3_SECURE_URLS', True),
            "file_name_charset":
            setting('AWS_S3_FILE_NAME_CHARSET', 'utf-8'),
            "gzip":
            setting('AWS_IS_GZIPPED', False),
            "gzip_content_types":
            setting('GZIP_CONTENT_TYPES', (
                'text/css',
                'text/javascript',
                'application/javascript',
                'application/x-javascript',
                'image/svg+xml',
            )),
            "url_protocol":
            setting('AWS_S3_URL_PROTOCOL', 'http:'),
            "endpoint_url":
            setting('AWS_S3_ENDPOINT_URL'),
            "proxies":
            setting('AWS_S3_PROXIES'),
            "region_name":
            setting('AWS_S3_REGION_NAME'),
            "use_ssl":
            setting('AWS_S3_USE_SSL', True),
            "verify":
            setting('AWS_S3_VERIFY', None),
            "max_memory_size":
            setting('AWS_S3_MAX_MEMORY_SIZE', 0),
        }
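# For reference, a hedged Django settings sketch exercising a few of the
# defaults above; the bucket name, key id, and key material are hypothetical
# placeholders.
AWS_STORAGE_BUCKET_NAME = 'my-example-bucket'   # hypothetical
AWS_QUERYSTRING_EXPIRE = 900                    # seconds; the default above is 3600
# AWS_CLOUDFRONT_KEY_ID and AWS_CLOUDFRONT_KEY must be provided together,
# otherwise get_default_settings() raises ImproperlyConfigured.
AWS_CLOUDFRONT_KEY_ID = 'APKEXAMPLEKEYID'                       # hypothetical
AWS_CLOUDFRONT_KEY = b'-----BEGIN RSA PRIVATE KEY-----\n...'    # hypothetical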
Example #52
0
 def get_modified_time(self, name):
     name = self._normalize_name(clean_name(name))
     blob = self._get_blob(self._encode_name(name))
     updated = blob.updated
     return updated if setting('USE_TZ') else timezone.make_naive(updated)
Example #53
0
 def __init__(self, oauth2_access_token=setting('DROPBOX_OAUTH2_TOKEN')):
     if oauth2_access_token is None:
         raise ImproperlyConfigured("You must configure a token auth at"
                                    "'settings.DROPBOX_OAUTH2_TOKEN'.")
     self.client = DropboxClient(oauth2_access_token)
Example #54
0
 def __init__(self, *args, **kwargs):
     self.account_name = kwargs.pop('account_name', setting("AZURE_ACCOUNT_NAME"))
     self.account_key = kwargs.pop('account_key', setting("AZURE_ACCOUNT_KEY"))
     self.azure_container = kwargs.pop('azure_container', setting("AZURE_CONTAINER"))
     super(AzureStorage, self).__init__(*args, **kwargs)
     self._connection = None
Example #55
0
 def __init__(self, *args, **kwargs):
     if not settings.MEDIA_URL:
         raise Exception('MEDIA_URL has not been configured')
     kwargs['bucket_name'] = setting('GS_MEDIA_BUCKET_NAME', strict=True)
     super(GoogleCloudMediaStorage, self).__init__(*args, **kwargs)
Example #56
0
 def __init__(self, base_url=setting("AWS_CLOUDFRONT_URL"), *args, **kwargs):
     super(S3CloudFrontStorage, self).__init__(*args, **kwargs)
     self.base_url = base_url