Example #1
 def remove_private_acl(self, permission, username,
                        bucket, objectname=None):
     # Remove a private (CanonicalUser) ACL from the bucket or file
     # Returns the ACL of the bucket or file
     grant_type = 'CanonicalUser'
     b = self.get_bucket(bucket)
     if objectname:
         k = Key(b)
         k.key = objectname
         policy = k.get_acl()
         new_grants = []
         for grant in policy.acl.grants:
             if grant.permission == permission and \
                grant.type == grant_type and grant.id == username:
                 continue
             else:
                 new_grants.append(grant)
         policy.acl.grants = new_grants
         k.set_acl(policy)
         return k.get_acl()
     policy = b.get_acl()
     new_grants = []
     for grant in policy.acl.grants:
         if grant.permission == permission and \
            grant.type == grant_type and grant.id == username:
             continue
         else:
             new_grants.append(grant)
     policy.acl.grants = new_grants
     b.set_acl(policy)
     return b.get_acl()
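For reference, the same grant-filtering idea can be expressed directly against boto without the helper class; a minimal sketch, assuming boto 2 credentials are available in the environment and using placeholder bucket, object, and user names:

import boto
from boto.s3.key import Key

conn = boto.connect_s3()                      # picks up credentials from the environment
b = conn.get_bucket('example-bucket')         # placeholder bucket name
k = Key(b)
k.key = 'example-object'                      # placeholder object name

policy = k.get_acl()
# keep every grant except the CanonicalUser grant we want to revoke
policy.acl.grants = [
    g for g in policy.acl.grants
    if not (g.type == 'CanonicalUser' and
            g.id == 'canonical-user-id' and   # placeholder canonical user id
            g.permission == 'READ')
]
k.set_acl(policy)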
Example #2
 def remove_public_acl(self, permission, bucket, objectname=None):
     # Removes a public (Group) ACL from the bucket or file, then
     # returns the ACL of the bucket or file
     grant_type = 'Group'
     uri = 'http://acs.amazonaws.com/groups/global/AllUsers'
     b = self.get_bucket(bucket)
     if objectname:
         k = Key(b)
         k.key = objectname
         policy = k.get_acl()
         new_grants = []
         for grant in policy.acl.grants:
             if grant.permission == permission and grant.uri == uri \
                and grant.type == grant_type:
                 continue
             else:
                 new_grants.append(grant)
         policy.acl.grants = new_grants
         k.set_acl(policy)
         return k.get_acl()
     policy = b.get_acl()
     new_grants = []
     for grant in policy.acl.grants:
         if grant.permission == permission and grant.uri == uri \
            and grant.type == grant_type:
             continue
         else:
             new_grants.append(grant)
     policy.acl.grants = new_grants
     b.set_acl(policy)
     return b.get_acl()
Example #3
 def test_get_object_acl(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = self.key_name
     k.set_contents_from_string(self.data)
     self.assertEqual(k.get_contents_as_string(), self.data)
     self.assertEqual(k.get_acl().to_xml(), self.defaultAcl(self.user1))
Example #4
 def add_public_acl(self, permission, bucket, objectname=None):
     # Adds a public (Group) ACL to the bucket or file, then
     # returns the ACL of the bucket or file
     grant = Grant(permission=permission, type='Group',
                   uri='http://acs.amazonaws.com/groups/global/AllUsers')
     b = self.get_bucket(bucket)
     if objectname:
         k = Key(b)
         k.key = objectname
         policy = k.get_acl()
         policy.acl.add_grant(grant)
         k.set_acl(policy)
         return k.get_acl()
     policy = b.get_acl()
     policy.acl.add_grant(grant)
     b.set_acl(policy)
     return b.get_acl()
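For comparison, boto also ships canned ACLs, so the AllUsers READ grant above can often be replaced by a one-liner. Note the difference in behavior, though: a canned ACL replaces the whole policy, while add_public_acl appends a grant to the existing one. A small sketch (the bucket b and object name are placeholders):

k = Key(b)                    # b is an existing boto bucket object
k.key = 'example-object'      # placeholder object name
k.set_acl('public-read')      # canned ACL: owner FULL_CONTROL + AllUsers READ
# k.make_public() applies the same canned 'public-read' ACL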
Example #5
 def add_private_acl(self, permission, username, bucket, objectname=None):
     # Adds a private (CanonicalUser) ACL to the bucket or file, then
     # returns the ACL of the bucket or file
     grant = Grant(permission=permission, type='CanonicalUser',
                   id=username, display_name=username)
     b = self.get_bucket(bucket)
     if objectname:
         k = Key(b)
         k.key = objectname
         policy = k.get_acl()
         policy.acl.add_grant(grant)
         k.set_acl(policy)
         return k.get_acl()
     policy = b.get_acl()
     policy.acl.add_grant(grant)
     b.set_acl(policy)
     return b.get_acl()
Example #6
 def get_acl(self, bucket, objectname=None):
     # Returns the ACL of the bucket or file
     b = self.get_bucket(bucket)
     if objectname:
         k = Key(b)
         k.key = objectname
         return k.get_acl()
     return b.get_acl()
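Assuming the methods above live on a single helper object (here called s3, a hypothetical wrapper exposing get_bucket), they might be combined like this:

# grant public READ on one object, private FULL_CONTROL on the bucket,
# inspect the result, then revoke the public grant again
s3.add_public_acl('READ', 'example-bucket', objectname='report.csv')
s3.add_private_acl('FULL_CONTROL', 'canonical-user-id', 'example-bucket')
print(s3.get_acl('example-bucket', objectname='report.csv').to_xml())
s3.remove_public_acl('READ', 'example-bucket', objectname='report.csv')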
Example #7
File: test_s3.py  Project: netors/moto
def test_acl_switching():
    conn = boto.connect_s3()
    bucket = conn.create_bucket('foobar')
    content = b'imafile'
    keyname = 'test.txt'

    key = Key(bucket, name=keyname)
    key.content_type = 'text/plain'
    key.set_contents_from_string(content, policy='public-read')
    key.set_acl('private')

    grants = key.get_acl().acl.grants
    assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
                   g.permission == 'READ' for g in grants), grants
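These tests come from moto, which stubs out S3 in memory; in the test suite they are normally wrapped with moto's mock_s3 decorator, which this listing omits. A minimal sketch of that setup:

import boto
from moto import mock_s3      # moto's in-memory S3 stub (classic boto-era API)

@mock_s3
def test_bucket_roundtrip():  # hypothetical test name
    # every boto call below is served by moto's mock backend, not real AWS
    conn = boto.connect_s3()
    conn.create_bucket('foobar')
    assert conn.get_bucket('foobar').name == 'foobar'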
Example #8
File: test_s3.py  Project: netors/moto
def test_acl_setting():
    conn = boto.connect_s3()
    bucket = conn.create_bucket('foobar')
    content = b'imafile'
    keyname = 'test.txt'

    key = Key(bucket, name=keyname)
    key.content_type = 'text/plain'
    key.set_contents_from_string(content)
    key.make_public()

    key = bucket.get_key(keyname)

    assert key.get_contents_as_string() == content

    grants = key.get_acl().acl.grants
    assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
               g.permission == 'READ' for g in grants), grants
Example #9
File: test_s3.py  Project: netors/moto
def test_acl_setting_via_headers():
    conn = boto.connect_s3()
    bucket = conn.create_bucket('foobar')
    content = b'imafile'
    keyname = 'test.txt'

    key = Key(bucket, name=keyname)
    key.content_type = 'text/plain'
    key.set_contents_from_string(content, headers={
        'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
    })

    key = bucket.get_key(keyname)

    assert key.get_contents_as_string() == content

    grants = key.get_acl().acl.grants
    assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
               g.permission == 'FULL_CONTROL' for g in grants), grants
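The x-amz-grant-full-control header used above is one of a family of grant headers S3 accepts (x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp); a sketch of the read-only variant of the same upload, with a hypothetical key name:

key = Key(bucket, name='test-read.txt')   # hypothetical key name
key.content_type = 'text/plain'
key.set_contents_from_string(b'imafile', headers={
    'x-amz-grant-read': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
})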
Example #10
class S3utils(object):

    """
    S3 Utils

    A simple, user-friendly interface to Amazon S3.
    S3utils methods are modeled on Linux commands,
    so they are easier to use and remember for simple
    file operations on S3 buckets.
    """

    def __init__(
        self, AWS_ACCESS_KEY_ID=getattr(settings, "AWS_ACCESS_KEY_ID", ""),
        AWS_SECRET_ACCESS_KEY=getattr(settings, "AWS_SECRET_ACCESS_KEY", ""),
        AWS_STORAGE_BUCKET_NAME=getattr(settings, "AWS_STORAGE_BUCKET_NAME", ""),
        S3UTILS_DEBUG_LEVEL=getattr(settings, "S3UTILS_DEBUG_LEVEL", 0),
    ):
        """
        Parameters
        ----------

        AWS_ACCESS_KEY_ID : string
            AWS Access key. If it is defined in your Django settings, it will grab it from there.
            Otherwise you need to specify it here.

        AWS_SECRET_ACCESS_KEY : string
            AWS secret. If it is defined in your Django settings, it will grab it from there.
            Otherwise you need to specify it here.

        AWS_STORAGE_BUCKET_NAME : string
            AWS Bucket name. If it is defined in your Django settings, it will grab it from there.
            Otherwise you need to specify it here.

        """

        self.AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID
        self.AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY
        self.AWS_STORAGE_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME
        self.S3UTILS_DEBUG_LEVEL = S3UTILS_DEBUG_LEVEL
        self.conn = None
        self.conn_cloudfront = None

        # setting the logging level based on S3UTILS_DEBUG_LEVEL
        try:
            if (S3UTILS_DEBUG_LEVEL == 0):
                logger.setLevel(logging.ERROR)
            else:
                logger.setLevel(logging.INFO)
        except AttributeError:
            pass

    def __del__(self):
        if self.conn:
            self.disconnect()

    def printv(self, msg):
        if self.S3UTILS_DEBUG_LEVEL:
            print(msg)
            logger.info(msg)

    def connect(self):
        """
        Establish the connection. This is done automatically for you.

        If you lose the connection, you can manually run this to be re-connected.
        """
        self.conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)

        self.bucket = self.conn.get_bucket(self.AWS_STORAGE_BUCKET_NAME)

        self.k = Key(self.bucket)

    def disconnect(self):
        """
        Close the connection.

        This is normally done automatically when the garbage collector deletes the S3utils object.
        """
        self.bucket.connection.connection.close()
        self.conn = None

    def connect_cloudfront(self):
        "Connect to Cloud Front. This is done automatically for you when needed."
        self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)

    @connectit
    def mkdir(self, target_folder):
        """
        Create a folder on S3.

        Examples
        --------
            >>> s3utils.mkdir("path/to/my_folder")
            Making directory: path/to/my_folder
        """
        self.printv("Making directory: %s" % target_folder)
        self.k.key = re.sub(r"^/|/$", "", target_folder) + "/"
        self.k.set_contents_from_string('')
        self.k.close()

    @connectit
    def rm(self, path):
        """
        Delete the path and anything under the path.

        Example
        -------
            >>> s3utils.rm("path/to/file_or_folder")
        """

        list_of_files = list(self.ls(path))

        if list_of_files:
            if len(list_of_files) == 1:
                self.bucket.delete_key(list_of_files[0])
            else:
                self.bucket.delete_keys(list_of_files)
            self.printv("Deleted: %s" % list_of_files)
        else:
            logger.error("There was nothing to remove under %s", path)

    @connectit
    def __put_key(self, local_file, target_file, acl='public-read', del_after_upload=False, overwrite=True, source="filename"):
        """Copy a file to s3."""
        action_word = "moving" if del_after_upload else "copying"

        try:
            self.k.key = target_file  # setting the path (key) of file in the container

            if source == "filename":
                # grabs the contents from local_file address. Note that it loads the whole file into memory
                self.k.set_contents_from_filename(local_file)
            elif source == "fileobj":
                self.k.set_contents_from_file(local_file)
            elif source == "string":
                self.k.set_contents_from_string(local_file)
            else:
                raise Exception("%s is not implemented as a source." % source)
            self.k.set_acl(acl)  # setting the file permissions
            self.k.close()  # not sure if it is needed. Somewhere I read it is recommended.

            self.printv("%s %s to %s" % (action_word, local_file, target_file))
            # if it is supposed to delete the local file after uploading
            if del_after_upload and source == "filename":
                try:
                    os.remove(local_file)
                except OSError:
                    logger.error("Unable to delete the file: %s", local_file, exc_info=True)

            return True

        except Exception:
            logger.error("Error in writing to %s", target_file, exc_info=True)
            return False

    def cp(self, local_path, target_path, acl='public-read',
           del_after_upload=False, overwrite=True, invalidate=False):
        """
        Copy a file or folder from local to s3.

        Parameters
        ----------

        local_path : string
            Path to a file or folder. To copy only the contents of a folder, append /* to the end of the folder name.

        target_path : string
            Target path on S3 bucket.

        acl : string, optional
            File permissions on S3. Default is public-read

            options:
                - private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
                - public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
                - authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access


        del_after_upload : boolean, optional
            delete the local file after uploading. This is effectively like moving the file.
            You can use s3utils.mv instead of s3utils.cp to move files from local to S3.
            It basically sets this flag to True.
            default = False

        overwrite : boolean, optional
            overwrites files on S3 if set to True. Default is True

        invalidate : boolean, optional
            invalidates the CDN (a.k.a Distribution) cache if the file already exists on S3
            default = False
            Note that invalidation might take up to 15 minutes to take effect. It is usually easier and faster to use a
            cache buster to fetch the latest version of your file from the CDN than to rely on invalidation.

        **Returns**

        Nothing on success; otherwise it returns a dict describing what went wrong.

        Examples
        --------
            >>> s3utils.cp("path/to/folder","/test/")
            copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
            copying /path/to/myfolder/test.txt to test/myfolder/test.txt
            copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
            copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff

            >>> # When overwrite is set to False, it returns the file(s) that already existed on S3 and were not overwritten.
            >>> s3utils.cp("/tmp/test3.txt", "test3.txt", overwrite=False)
            ERROR:root:test3.txt already exists. Not overwriting.
            >>> {'existing_files': {'test3.txt'}}

            >>> # To overwrite the files on S3 and invalidate the CDN (cloudfront) cache so the new file goes on CDN:
            >>> s3utils.cp("path/to/folder","/test/", invalidate=True)
            copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
            copying /path/to/myfolder/test.txt to test/myfolder/test.txt
            copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
            copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff

            >>> # When file does not exist, it returns a dictionary of what went wrong.
            >>> s3utils.cp("/tmp/does_not_exist", "somewhere")
            ERROR:root:trying to upload to s3 but file doesn't exist: /tmp/does_not_exist
            >>> {'file_does_not_exist': '/tmp/does_not_exist'}
        """
        result = None
        if overwrite:
            list_of_files = []
        else:
            list_of_files = self.ls(folder=target_path, begin_from_file="", num=-1, get_grants=False, all_grant_data=False)

        # copying the contents of the folder and not folder itself
        if local_path.endswith("/*"):
            local_path = local_path[:-2]
            target_path = re.sub(r"^/|/$", "", target_path)  # Amazon S3 doesn't let the key name begin with /
        # copying folder too
        else:
            local_base_name = os.path.basename(local_path)

            local_path = re.sub(r"/$", "", local_path)
            target_path = re.sub(r"^/", "", target_path)

            if not target_path.endswith(local_base_name):
                target_path = os.path.join(target_path, local_base_name)

        if os.path.exists(local_path):

            result = self.__find_files_and_copy(local_path, target_path, acl, del_after_upload, overwrite, invalidate, list_of_files)

        else:
            result = {'file_does_not_exist': local_path}
            logger.error("trying to upload to s3 but file doesn't exist: %s" % local_path)

        return result

    def __find_files_and_copy(self, local_path, target_path, acl='public-read', del_after_upload=False, overwrite=True, invalidate=False, list_of_files=[]):
        files_to_be_invalidated = []
        failed_to_copy_files = set([])
        existing_files = set([])

        def check_for_overwrite_then_write():

            if overwrite or target_file not in list_of_files:
                success = self.__put_key(
                    local_file,
                    target_file=target_file,
                    acl=acl,
                    del_after_upload=del_after_upload,
                    overwrite=overwrite,
                )
                if not success:
                    failed_to_copy_files.add(target_file)
            else:
                existing_files.add(target_file)
                logger.error("%s already exist. Not overwriting.", target_file)

            if overwrite and target_file in list_of_files and invalidate:
                files_to_be_invalidated.append(target_file)

        first_local_root = None

        # if it is a folder
        if os.path.isdir(local_path):

            for local_root, directories, files in os.walk(local_path):

                if not first_local_root:
                    first_local_root = local_root

                # if folder is not empty
                if files:
                    # iterating over the files in the folder
                    for a_file in files:
                        local_file = os.path.join(local_root, a_file)
                        target_file = os.path.join(
                            target_path + local_root.replace(first_local_root, ""),
                            a_file
                        )
                        check_for_overwrite_then_write()

                # if folder is empty
                else:
                    target_file = target_path + local_root.replace(first_local_root, "") + "/"

                    if target_file not in list_of_files:
                        self.mkdir(target_file)

            if del_after_upload:
                rmtree(local_path)

        # if it is a file
        else:
            local_file = local_path
            target_file = target_path
            check_for_overwrite_then_write()

        if invalidate and files_to_be_invalidated:
            self.invalidate(files_to_be_invalidated)

        items = ('failed_to_copy_files', 'existing_files')
        local_vars = locals()
        result = {}
        for i in items:
            val = local_vars.get(i)
            if val:
                result[i] = val

        result = None if result == {} else result
        return result

    def echo(self, content, target_path, acl='public-read',
             overwrite=True, invalidate=False):
        """

        Similar to the Linux echo command.

        Puts the string into the target path on S3.

        Parameters
        ----------

        content : string
            The content to be put on the s3 bucket.

        target_path : string
            Target path on S3 bucket.

        acl : string, optional
            File permissions on S3. Default is public-read

            options:
                - private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: (Default) Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
                - public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
                - authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access

        overwrite : boolean, optional
            overwrites files on S3 if set to True. Default is True

        invalidate : boolean, optional
            invalidates the CDN (a.k.a Distribution) cache if the file already exists on S3
            default = False
            Note that invalidation might take up to 15 minutes to take effect. It is usually easier and faster to use a
            cache buster to serve the latest version of your file from the CDN than to rely on invalidation.


        **Returns:**

        Nothing on success, otherwise it returns what went wrong.

        Return type:
        dict

        Examples
        --------
            >>> # On success returns nothing:
            >>> s3utils.echo("Hello World!","/test.txt")
            >>> # On failure returns what went wrong
            >>> s3utils.echo("Hello World!","/test/")
            {'InvalidS3Path': "path on S3 can not end in /"}
        """

        result = None
        if target_path.endswith('/') or target_path.endswith('*'):
            result = {'InvalidS3Path': "Path on S3 can not end in /"}
        if not overwrite and not result:
            file_exists = self.ls(target_path)
            if file_exists:
                logger.error("%s already exist. Not overwriting.", target_path)
                result = {'existing_files': target_path}

        if content and not result:
            if isinstance(content, strings):
                result = self.__put_key(content, target_path, acl=acl,
                                        del_after_upload=False, overwrite=overwrite,
                                        source="string")
            else:
                result = {"TypeError": "Content is not string"}
        return result

    def mv(self, local_file, target_file, acl='public-read', overwrite=True, invalidate=False):
        """
        Similar to the Linux mv command.

        Moves the file to S3 and deletes the local copy.

        It is basically s3utils.cp with del_after_upload=True.

        Examples
        --------
            >>> s3utils.mv("path/to/folder","/test/")
            moving /path/to/myfolder/test2.txt to test/myfolder/test2.txt
            moving /path/to/myfolder/test.txt to test/myfolder/test.txt
            moving /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
            moving /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff

        **Returns:**

        Nothing on success, otherwise what went wrong.

        Return type:
        dict

        """
        return self.cp(local_file, target_file, acl=acl, del_after_upload=True, overwrite=overwrite, invalidate=invalidate)

    @connectit
    def cp_cropduster_image(self, the_image_path, del_after_upload=False, overwrite=False, invalidate=False):
        """
        Deal with saving cropduster images to S3. Cropduster is a Django library for resizing editorial images.
        S3utils was originally written to put cropduster images on S3 bucket.

        Extra Items in your Django Settings
        -----------------------------------

        MEDIA_ROOT : string
            Django media root.
            Currently it is ONLY used in cp_cropduster_image method.
            NOT any other method as this library was originally made to put Django cropduster images on s3 bucket.

        S3_ROOT_BASE : string
            S3 media root base. This will be the root folder in S3.
            Currently it is ONLY used in cp_cropduster_image method.
            NOT any other method as this library was originally made to put Django cropduster images on s3 bucket.


        """

        local_file = os.path.join(settings.MEDIA_ROOT, the_image_path)

        # only try to upload things if the origin cropduster file exists (so it is not already uploaded to the CDN)
        if os.path.exists(local_file):

            the_image_crops_path = os.path.splitext(the_image_path)[0]
            the_image_crops_path_full_path = os.path.join(settings.MEDIA_ROOT, the_image_crops_path)

            self.cp(local_path=local_file,
                    target_path=os.path.join(settings.S3_ROOT_BASE, the_image_path),
                    del_after_upload=del_after_upload,
                    overwrite=overwrite,
                    invalidate=invalidate,
                    )

            self.cp(local_path=the_image_crops_path_full_path + "/*",
                    target_path=os.path.join(settings.S3_ROOT_BASE, the_image_crops_path),
                    del_after_upload=del_after_upload,
                    overwrite=overwrite,
                    invalidate=invalidate,
                    )

    def __get_grants(self, target_file, all_grant_data):
        """
        Return grant permission, grant owner, grant owner email and grant id as a list.
        self.k.key must be set to a key (file path) on Amazon before calling this.
        Note that Amazon returns a list of grants for each file.

        options:
            - private: Owner gets FULL_CONTROL. No one else has any access rights.
            - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
            - public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
            - authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access

        """
        self.k.key = target_file

        the_grants = self.k.get_acl().acl.grants

        grant_list = []

        for grant in the_grants:
            if all_grant_data:
                grant_list.append(
                    {"permission": grant.permission, "name": grant.display_name, "email": grant.email_address, "id": grant.id})
            else:
                grant_list.append({"permission": grant.permission, "name": grant.display_name})

        return grant_list

    @connectit
    def chmod(self, target_file, acl='public-read'):
        """
        sets permissions for a file on S3

        Parameters
        ----------

        target_file : string
            Path to file on S3

        acl : string, optional
            File permissions on S3. Default is public-read

            options:
                - private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
                - public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
                - authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access


        Examples
        --------
            >>> s3utils.chmod("path/to/file","private")


        """
        self.k.key = target_file  # setting the path (key) of file in the container
        self.k.set_acl(acl)  # setting the file permissions
        self.k.close()

    @connectit
    def ls(self, folder="", begin_from_file="", num=-1, get_grants=False, all_grant_data=False):
        """
        gets the list of file names (keys) in an S3 folder

        Parameters
        ----------

        folder : string
            Path to file on S3

        num: integer, optional
            number of results to return, by default it returns all results.

        begin_from_file: string, optional
            which file to start from on S3.
            This is useful when you are iterating over lists of files and need to page the results by
            starting the listing from a certain file and fetching a certain number (num) of files.


        Examples
        --------

            >>> from s3utils import S3utils
            >>> s3utils = S3utils(
            ... AWS_ACCESS_KEY_ID = 'your access key',
            ... AWS_SECRET_ACCESS_KEY = 'your secret key',
            ... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
            ... S3UTILS_DEBUG_LEVEL = 1,  #change it to 0 for less verbose
            ... )
            >>> print(s3utils.ls("test/"))
            {u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'}

        """
        # S3 object key can't start with /
        folder = re.sub(r"^/", "", folder)

        bucket_files = self.bucket.list(prefix=folder, marker=begin_from_file)

        # in case listing grants
        if get_grants:
            list_of_files = OrderedDict()
            for (i, v) in enumerate(bucket_files):
                file_info = {v.name: self.__get_grants(v.name, all_grant_data)}
                list_of_files.update(file_info)
                if i == num:
                    break

        else:
            list_of_files = set([])
            for (i, v) in enumerate(bucket_files):
                list_of_files.add(v.name)
                if i == num:
                    break

        return list_of_files

    def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
        """
        Get the list of files and permissions from S3.

        This is similar to ll (ls -lah) in Linux: a list of files with permissions.

        Parameters
        ----------

        folder : string
            Path to file on S3

        num: integer, optional
            number of results to return, by default it returns all results.

        begin_from_file : string, optional
            which file to start from on S3.
            This is useful when you are iterating over lists of files and need to page the results by
            starting the listing from a certain file and fetching a certain number (num) of files.

        all_grant_data : Boolean, optional
            More detailed file permission data will be returned.

        Examples
        --------

            >>> from s3utils import S3utils
            >>> s3utils = S3utils(
            ... AWS_ACCESS_KEY_ID = 'your access key',
            ... AWS_SECRET_ACCESS_KEY = 'your secret key',
            ... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
            ... S3UTILS_DEBUG_LEVEL = 1,  #change it to 0 for less verbose
            ... )
            >>> import json
            >>> # We use json.dumps to print the results in a more readable way:
            >>> my_folder_stuff = s3utils.ll("/test/")
            >>> print(json.dumps(my_folder_stuff, indent=2))
            {
              "test/myfolder/": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                }
              ],
              "test/myfolder/em/": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                }
              ],
              "test/myfolder/hoho/": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                }
              ],
              "test/myfolder/hoho/.DS_Store": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                },
                {
                  "name": null,
                  "permission": "READ"
                }
              ],
              "test/myfolder/hoho/haha/": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                }
              ],
              "test/myfolder/hoho/haha/ff": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                },
                {
                  "name": null,
                  "permission": "READ"
                }
              ],
              "test/myfolder/hoho/photo.JPG": [
                {
                  "name": "owner's name",
                  "permission": "FULL_CONTROL"
                },
                {
                  "name": null,
                  "permission": "READ"
                }
              ]
            }

        """
        return self.ls(folder=folder, begin_from_file=begin_from_file, num=num, get_grants=True, all_grant_data=all_grant_data)

    @connectit_cloudfront
    def invalidate(self, files_to_be_invalidated):
        """
        Invalidate the CDN (distribution) cache for a certain file or files. This might take up to 15 minutes to take effect.

        You can check for the invalidation status using check_invalidation_request.

        Examples
        --------

            >>> from s3utils import S3utils
            >>> s3utils = S3utils(
            ... AWS_ACCESS_KEY_ID = 'your access key',
            ... AWS_SECRET_ACCESS_KEY = 'your secret key',
            ... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
            ... S3UTILS_DEBUG_LEVEL = 1,  #change it to 0 for less verbose
            ... )
            >>> aa = s3utils.invalidate("test/myfolder/hoho/photo.JPG")
            >>> print(aa)
            ('your distro id', u'your request id')
            >>> invalidation_request_id = aa[1]
            >>> bb = s3utils.check_invalidation_request(*aa)
            >>> for inval in bb:
            ...     print('Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status))


        """
        # a plain string is Iterable too, so wrap it explicitly
        if isinstance(files_to_be_invalidated, str) or \
                not isinstance(files_to_be_invalidated, Iterable):
            files_to_be_invalidated = (files_to_be_invalidated,)

        # Your CDN is called a distribution on Amazon, and you can have more than one distro
        all_distros = self.conn_cloudfront.get_all_distributions()

        for distro in all_distros:
            invalidation_request = self.conn_cloudfront.create_invalidation_request(distro.id, files_to_be_invalidated)

        return (distro.id, invalidation_request.id)

    @connectit_cloudfront
    def check_invalidation_request(self, distro, request_id):

        return self.conn_cloudfront.get_invalidation_requests(distro, request_id)
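A compact end-to-end sketch of how S3utils might be driven; the bucket name, credentials, and paths are placeholders, and as the __init__ docstring notes they can also come from Django settings:

s3utils = S3utils(
    AWS_ACCESS_KEY_ID='your access key',
    AWS_SECRET_ACCESS_KEY='your secret key',
    AWS_STORAGE_BUCKET_NAME='your bucket name',
)
s3utils.mkdir("uploads/reports/")                      # hypothetical folder
s3utils.cp("/tmp/report.csv", "uploads/reports/")      # hypothetical local file
s3utils.chmod("uploads/reports/report.csv", "private")
print(s3utils.ls("uploads/"))
s3utils.rm("uploads/reports/")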
Example #11
class FileDB(object):
    """
    This class represents a poor man's database that stores data in a properties-like file.
    It should be used as a last resort.
    """
    TABLE_ASSIGNEMENTS='assignements'
    TABLE_MAPPING='mappings'

    def __init__(self, CFG):
        logging.basicConfig(
            format='%(asctime)s [%(levelname)s] %(message)s',
            datefmt='%Y%m%d %T', level=logging.DEBUG)
        self.CFG = CFG

        if CFG.S3_HA:
            conn = S3Connection(profile_name=CFG.S3_CREDENTIALS_PROFILE)
            bucket = conn.get_bucket(CFG.S3_BUCKET)
            self.k = Key(bucket)
            self.k.key = CFG.DB_FILE

        self.__initialize_db_file()

        self.__configDB = ConfigParser.ConfigParser()
        self.load_db()

    def add_address(self, address, fw_addr):
        self.db_file.set(self.TABLE_MAPPING, address, fw_addr)
        self.save_db()

    def del_address(self, address):
        self.db_file.remove_option(self.TABLE_MAPPING, address)
        self.save_db()

    def add_assignement(self, fw_adr, elb_adr):
        self.db_file.set(self.TABLE_ASSIGNEMENTS, fw_adr, elb_adr)
        self.save_db()

    def del_assignement(self, fw_adr):
        self.db_file.remove_option(self.TABLE_ASSIGNEMENTS, fw_adr)
        self.save_db()

    def clear_assignements(self):
        self.db_file.remove_section(self.TABLE_ASSIGNEMENTS)
        self.db_file.add_section(self.TABLE_ASSIGNEMENTS)
        self.save_db()

    def get_elb_addrs(self):
        elb_addrs_tuples = self.db_file.items(self.TABLE_MAPPING)

        return dict(elb_addrs_tuples).keys()

    def get_assigned_fw(self, elb_addr):
        firewalls = dict(self.db_file.items(self.TABLE_ASSIGNEMENTS))

        for fw in firewalls:
            if elb_addr in firewalls[fw]:
                return fw

        return None

    def get_assigned_addresses(self, fw_addr=None):
        assigned_addr_tuples = dict(self.db_file.items(self.TABLE_ASSIGNEMENTS))
        assigned_addr = []
        if fw_addr is None:
            assigned_addr = assigned_addr_tuples.values()
        else:
            try:
                assigned_addr = assigned_addr_tuples[fw_addr]
            except KeyError:
                # swallow
                assigned_addr = []

        return assigned_addr

    def is_fw_occupied(self, fw_addr):
        assigned_addr = self.get_assigned_addresses(fw_addr)

        return len(assigned_addr) > 0

    def save_db(self):
        with open(self.CFG.DB_FILE, 'wb') as configfile:
            self.db_file.write(configfile)

        if self.CFG.S3_HA:
            with open(self.CFG.DB_FILE, 'r') as myfile:
                data = myfile.read()
            self.k.set_contents_from_string(data)

    def load_db(self):
        if self.CFG.S3_HA:
            self.k.get_contents_to_filename(self.CFG.DB_FILE)
        self.db_file.read(self.CFG.DB_FILE)

    def __initialize_db_file(self):
        """This is called only once at the beginning. If HA is enabled it tries to
        download a file from the S3 bucket. If it cannot find one it will attempt
        to initialize an empty db file and upload it to S3."""

        # if we do not need HA bail out
        if not self.CFG.S3_HA:
            return

        try:
            self.k.get_acl()
        except S3ResponseError as ex:
            if ex.status == 404:
                LOG.warn('Database file %s not found in S3 bucket [%s]. Initializing a new one',
                         self.CFG.DB_FILE, self.CFG.S3_BUCKET)
                self.k.set_contents_from_string('[mappings]\n[assignements]\n')
            else:
                LOG.fatal('There was a communication issue with S3 bucket [%s] accessing the file %s. Exception: %s',
                          self.CFG.S3_BUCKET, self.CFG.DB_FILE, ex)
                import sys
                sys.exit(1)


    @property
    def db_file(self):
        """
        Return the property once we are assured that we have the latest version.
        Given that the HA is done ad hoc and that we do not have a real database,
        the only way to achieve this (and it is not bulletproof) is to download the file
        every time someone accesses it. This is ugly and bad.
        """
        return self.__configDB

    def get_inverse_idx(self):
        """given that we dont use real database this will build us our inverse index"""
        addresses = self.db_file.items(self.TABLE_MAPPING)

        fw_reverse_idx = dict()

        for adr in addresses:
            if adr[1] in fw_reverse_idx:
                fw_reverse_idx[adr[1]].append(adr[0])
            else:
                fw_reverse_idx[adr[1]] = [adr[0]]

        return fw_reverse_idx
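A minimal sketch of driving FileDB locally, assuming Python 2 (the class uses the ConfigParser module) and a hypothetical configuration object carrying the attributes the class reads; with S3_HA disabled no AWS access is needed, and the two sections are seeded by hand since that normally happens via S3:

class CFG(object):
    S3_HA = False                       # skip the S3 high-availability path
    S3_CREDENTIALS_PROFILE = None
    S3_BUCKET = None
    DB_FILE = '/tmp/fw_mappings.db'     # hypothetical local db file

# seed the sections FileDB expects (normally initialized from the S3 copy)
with open(CFG.DB_FILE, 'w') as f:
    f.write('[mappings]\n[assignements]\n')

db = FileDB(CFG)
db.add_address('10.0.0.12', 'fw-1')
db.add_assignement('fw-1', 'elb-front')
print(db.get_assigned_fw('elb-front'))  # -> 'fw-1'
db.del_assignement('fw-1')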
Example #12
    print "\tCreating obj %d" % (i)
    keyv = 'keynum' + str(i)
    valv = 'Contents of object' + str(i)
    m.key = keyv
    m.set_contents_from_string(valv)

print "\nU2: Listing keys from U1 bucket to see its own objects"
for i in b2.list():
    print "bucket key: " + str(i)

print "\nU2 tries to fetch its own object:"
for i in range(21, 31):
    keyv = 'keynum' + str(i)
    m.key = keyv
    print "My Object " + str(
        i) + ": " + m.get_contents_as_string() + " . ACL: " + str(m.get_acl())

print "\nU2: Attemping to delete own object"
m.key = 'keynum29'
m.delete()
print "\nU2: Listing keys from U1 bucket after delete"
for i in b2.list():
    print "U1 bucket key: " + str(i)

print "\nU2: Attemping to get own object"
m.key = 'keynum28'
print "My Object: " + m.get_contents_as_string()

print "U2: Attempting to fetch objects of other user:"
for i in range(1, 11):
    keyv = 'keynum' + str(i)
Example #13
 def test_set_object_acl(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = self.key_name
     k.set_canned_acl('public-read')
     self.assertEqual(k.get_acl().to_xml(), self.prAcl(self.user1))
Example #14
'''
usr_str = "user804"
obj = dsslib.getConnection(dsslib.USER_keystone804)
dsslib.listBucket(obj, usr_str)
b = obj.create_bucket('docsbuck001')
dsslib.listBucket(obj, usr_str)
b.set_acl('public-read-write')

k = Key(b)
k.key = 'obj1'
k.set_contents_from_string('Data of object')
print "Setting ACL on obj"
k.set_acl('public-read')

print "\nObject has ACL============="
print str(k.get_acl())
##====================================================================
'''=========================================================
## AWS user policy prevents user from listing bucket but allows getting objects inside the particular bucket
## Make listing buckets allowed. Generate signed URLs then make listing buckets not allowed in user policy.
## Check if the URLs still work.
obj = dsslib.getConnection(dsslib.USER_aws2)
b = obj.get_bucket('shivanshubucketauto4')
for i in b.list():
    print 'New key: ' + str(i)
    try:
        print str(i)
        #print "Trying to get object URL..."
        #print i.generate_url(1000);
    except:
        print "No permission!"
Example #15
#bucket.set_acl('public-read')

###

from boto.s3.key import Key
k = Key(bucket)
k.key = 'test1.dat'
k.set_contents_from_string('hello123')
#k.set_acl('public-read')
k.set_remote_metadata({'x-scal-test': {'key1': 'value1'}}, [], True)

c = Key(bucket)
c.key = 'test1.dat'
print c.get_contents_as_string()
print c.get_acl()
print c.metadata

###

###

full_bucket = conn.get_bucket('test-bucket')
# It's full of keys. Delete them all.
for key in full_bucket.list():
    key.delete()

# The bucket is empty now. Delete it.
conn.delete_bucket('test-bucket')

###
Example #16
def launch(name,
           region,
           node_type,
           engine,
           engine_version=None,
           num_nodes=1,
           subnet_group=None,
           cache_security_groups=None,
           security_group_ids=None,
           snapshot=None,
           snapshot_optional=False,
           preferred_availability_zone=None,
           preferred_maintenance_window=None,
           notification_topic_arn=None,
           parameter_group=None,
           port=None,
           auto_minor_version_upgrade=True,
           aws_key=None,
           aws_secret=None,
           ecconn=None):
    """
    Launch an Elasticache cluster

    Most arguments are the same as :meth:`.manage`

    """
    if ecconn is None:
        ecconn = __salt__['aws_util.ecconn'](region, aws_key, aws_secret)

    if snapshot is not None:
        snapshots = [snapshot]
        if snapshot_optional:
            s3conn = __salt__['aws_util.s3conn'](aws_key, aws_secret)
            # If the snapshot doesn't exist, ignore it
            i = 0
            while i < len(snapshots):
                path = snapshots[i]
                path_components = path.split('/')
                bucket = s3conn.get_bucket(path_components[0])
                key = Key(bucket, '/'.join(path_components[1:]))
                if not key.exists():
                    del snapshots[i]
                else:
                    # Add read-only access to the snapshot if necessary
                    acl = key.get_acl().acl
                    can_read = False
                    for grant in acl.grants:
                        if grant.permission.lower() == 'read' and \
                                grant.email_address == '*****@*****.**':
                            can_read = True
                            break
                    if not can_read:
                        key.add_email_grant('READ',
                                            '*****@*****.**')
                    i += 1
        for i in range(len(snapshots)):
            snapshots[i] = 'arn:aws:s3:::' + snapshots[i]
    else:
        snapshots = []

    ecconn.create_cache_cluster(
        name,
        num_nodes,
        node_type,
        engine,
        engine_version=engine_version,
        cache_parameter_group_name=parameter_group,
        cache_subnet_group_name=subnet_group,
        cache_security_group_names=cache_security_groups,
        security_group_ids=security_group_ids,
        snapshot_arns=snapshots,
        preferred_availability_zone=preferred_availability_zone,
        preferred_maintenance_window=preferred_maintenance_window,
        port=port,
        notification_topic_arn=notification_topic_arn,
        auto_minor_version_upgrade=auto_minor_version_upgrade)
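The snapshot-permission step in the middle of launch() is worth reading in isolation; a sketch of just that logic as a standalone helper, assuming an existing boto S3 connection and a placeholder grantee address (the address in the listing above is redacted):

from boto.s3.key import Key

def ensure_snapshot_read_grant(s3conn, snapshot_path, grantee_email):
    """Hypothetical helper: grant READ on an S3 snapshot object if the grantee lacks it."""
    bucket_name, _, key_name = snapshot_path.partition('/')
    key = Key(s3conn.get_bucket(bucket_name), key_name)
    if not key.exists():
        return False                      # missing snapshots are simply skipped
    acl = key.get_acl().acl
    can_read = any(grant.permission.lower() == 'read' and
                   grant.email_address == grantee_email
                   for grant in acl.grants)
    if not can_read:
        key.add_email_grant('READ', grantee_email)
    return True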