Example 1
    def setUp(self):
        # redirect stdout so printed output can be captured and asserted
        self.capturedOutput = io.StringIO()
        sys.stdout = self.capturedOutput
        s3 = S3()
        s3.bucket_name = "hello"
        s3.path_list = ["hello.json"]
        self.s3_args = S3Args(s3)
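A matching tearDown is needed to restore the real stdout once each test finishes; a minimal sketch (not part of the original listing):

    def tearDown(self):
        sys.stdout = sys.__stdout__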
Example 2
def update_object_version(
    s3: S3,
    allversion: bool = False,
    acl: bool = False,
    tagging: bool = False,
) -> None:
    """Update versions of object's attributes.

    Note: this operation only allow update of acl and tagging, because
    this won't introduce new objects.

    :param s3: S3 instance
    :type s3: S3
    :param allversion: update all versions?
    :type allversion: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    """
    obj_versions = s3.get_object_version(select_all=allversion)
    s3_args = S3Args(s3)
    s3_args.set_extra_args(acl, tagging, version=obj_versions)
    # check if only tags or acl is being updated
    # this way it won't create extra versions on the object
    check_result = s3_args.check_tag_acl()

    for obj_version in obj_versions:
        print(
            "(dryrun) update: s3://%s/%s with version %s"
            % (s3.bucket_name, obj_version.get("Key"), obj_version.get("VersionId"))
        )
    if get_confirmation("Confirm?"):
        for obj_version in obj_versions:
            print(
                "update: s3://%s/%s with version %s"
                % (
                    s3.bucket_name,
                    obj_version.get("Key"),
                    obj_version.get("VersionId"),
                )
            )
            if check_result:
                if check_result.get("Tags"):
                    s3.client.put_object_tagging(
                        Bucket=s3.bucket_name,
                        Key=obj_version.get("Key"),
                        VersionId=obj_version.get("VersionId"),
                        Tagging={"TagSet": check_result.get("Tags")},
                    )
                if check_result.get("Grants"):
                    grant_args = {
                        "Bucket": s3.bucket_name,
                        "Key": obj_version.get("Key"),
                        "VersionId": obj_version.get("VersionId"),
                    }
                    grant_args.update(check_result.get("Grants", {}))
                    s3.client.put_object_acl(**grant_args)
            else:
                print("Nothing to update")
Example 3
    def test_get_copy_args_with_version(self):
        data_path1 = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  "../data/s3_obj.json")
        with open(data_path1, "r") as file:
            response1 = json.load(file)
        data_path2 = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  "../data/s3_acl.json")
        with open(data_path2, "r") as file:
            response2 = json.load(file)

        # with version
        s3_client = boto3.client("s3")
        stubber = Stubber(s3_client)
        stubber.add_response("get_object", response1)
        stubber.add_response("get_object_acl", response2)
        stubber.activate()
        s3 = S3()
        s3._client = s3_client
        s3.bucket_name = "hello"
        s3_args = S3Args(s3)
        result = get_copy_args(s3, "hello.json", s3_args, False)
        self.assertEqual(
            result,
            {
                "Bucket": "hello",
                "Key": "hello.json",
                "CopySource": {"Bucket": "hello", "Key": "hello.json"},
                "StorageClass": "REDUCED_REDUNDANCY",
                "ServerSideEncryption": "aws:kms",
                "SSEKMSKeyId": "arn:aws:kms:ap-southeast-2:11111111:key/11111111-f48d-48b8-90d4-d5bd03a603d4",
                "GrantRead": "uri=http://acs.amazonaws.com/groups/global/AllUsers",
            },
        )
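Stubber consumes the queued responses in call order (get_object first, then get_object_acl). To also verify that every stubbed response was used, botocore offers a check that could be appended at the end of the test:

        stubber.assert_no_pending_responses()
        stubber.deactivate()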
Example 4
def update_object_name(s3: S3, version: bool = False) -> None:
    """Update object name.

    :param s3: S3 class instance
    :type s3: S3
    :param version: whether to rename version's name, this will create a new object
    :type version: bool, optional
    """
    print(
        "Enter the new name below (format: newname or path/newname for a new path)"
    )
    new_name = input("Name (Original: %s): " % s3.path_list[0])

    if not version:
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s" %
              (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s" %
                  (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
            # initialise empty s3_args so that get_copy_args will use all the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(s3,
                                             s3.path_list[0],
                                             s3_args,
                                             extra_args=True)
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": s3.path_list[0],
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(s3.path_list[0], s3.bucket_name,
                                    s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            s3.client.delete_object(
                Bucket=s3.bucket_name,
                Key=s3.path_list[0],
            )

    else:
        # get version
        obj_version = s3.get_object_version(key=s3.path_list[0])[0]
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s with version %s" % (
            s3.bucket_name,
            obj_version.get("Key"),
            s3.bucket_name,
            new_name,
            obj_version.get("VersionId"),
        ))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s with version %s" % (
                s3.bucket_name,
                obj_version.get("Key"),
                s3.bucket_name,
                new_name,
                obj_version.get("VersionId"),
            ))
            # initialise empty s3_args so that get_copy_args will use all the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(
                s3,
                s3.path_list[0],
                s3_args,
                extra_args=True,
                version=obj_version.get("VersionId"),
            )
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": obj_version.get("Key"),
                "VersionId": obj_version.get("VersionId"),
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(
                    obj_version.get("Key", ""),
                    s3.bucket_name,
                    s3.client,
                    version_id=obj_version.get("VersionId"),
                ),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
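The rename is implemented as a server-side copy followed by a delete of the original key (the versioned branch only copies, so existing versions stay in place). The same pattern with a bare boto3 client, using made-up bucket and key names:

import boto3

client = boto3.client("s3")
# server-side copy, then remove the old key to complete the rename
client.copy({"Bucket": "my-bucket", "Key": "old.txt"}, "my-bucket", "new.txt")
client.delete_object(Bucket="my-bucket", Key="old.txt")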
Example 5
def update_object_recursive(
    s3: S3,
    storage: bool = False,
    acl: bool = False,
    metadata: bool = False,
    encryption: bool = False,
    tagging: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
) -> None:
    """Recursive update object attributes.

    :param s3: S3 class instance
    :type s3: S3
    :param storage: update storage
    :type storage: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    s3_args = S3Args(s3)
    s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
    # check if only tags or acl is being updated
    # this way it won't create extra versions on the object
    check_result = s3_args.check_tag_acl()

    file_list = walk_s3_folder(
        s3.client,
        s3.bucket_name,
        s3.path_list[0],
        s3.path_list[0],
        [],
        exclude,
        include,
        "object",
        s3.path_list[0],
        s3.bucket_name,
    )
    if get_confirmation("Confirm?"):
        if check_result:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                if check_result.get("Tags"):
                    s3.client.put_object_tagging(
                        Bucket=s3.bucket_name,
                        Key=original_key,
                        Tagging={"TagSet": check_result.get("Tags")},
                    )
                if check_result.get("Grants"):
                    grant_args = {
                        "Bucket": s3.bucket_name,
                        "Key": original_key
                    }
                    grant_args.update(check_result.get("Grants", {}))
                    s3.client.put_object_acl(**grant_args)

        else:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                # Note: this will create new version if version is enabled
                copy_object_args = get_copy_args(s3,
                                                 original_key,
                                                 s3_args,
                                                 extra_args=True)
                copy_source = {"Bucket": s3.bucket_name, "Key": original_key}
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    s3.bucket_name,
                    original_key,
                    Callback=S3Progress(original_key, s3.bucket_name,
                                        s3.client),
                    ExtraArgs=copy_object_args,
                    Config=s3transferwrapper.transfer_config,
                )
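An illustrative recursive call that updates only ACLs for .json keys under a prefix, assuming the exclude list is applied before the include list (as documented for upload_s3 below); all names are made up:

s3 = S3()
s3.bucket_name = "my-bucket"   # illustrative
s3.path_list = ["logs/"]       # illustrative prefix
# exclude everything, then re-include .json keys
update_object_recursive(s3, acl=True, exclude=["*"], include=["*.json"])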
Example 6
def object_s3(
    profile: Union[str, bool] = False,
    bucket: Optional[str] = None,
    recursive: bool = False,
    version: bool = False,
    allversion: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    name: bool = False,
    storage: bool = False,
    encryption: bool = False,
    metadata: bool = False,
    tagging: bool = False,
    acl: bool = False,
) -> None:
    """Update selected object settings.

    Display a menu based on the recursive and version requirements.
    If name is true, only handle renaming.

    :param profile: use a different profile for this operation
    :type profile: Union[str, bool], optional
    :param bucket: bucket name that contains the object
    :type bucket: str, optional
    :param recursive: recursively update object attributes
    :type recursive: bool, optional
    :param version: select and update a specific version of the object(s)
    :type version: bool, optional
    :param allversion: update all versions of an object
    :type allversion: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    :param name: update name
    :type name: bool, optional
    :param storage: update storage
    :type storage: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    if allversion:
        version = True

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if recursive and not s3.path_list[0]:
        s3.set_s3_path()
    elif name and not s3.path_list[0]:
        s3.set_s3_object(version)
    elif not s3.path_list[0]:
        s3.set_s3_object(version, multi_select=True)

    # handle rename
    if name:
        update_object_name(s3, version)

    elif recursive:
        update_object_recursive(s3, storage, acl, metadata, encryption,
                                tagging, exclude, include)

    elif version:
        update_object_version(s3, allversion, acl, tagging)

    else:
        # update single object
        s3_args = S3Args(s3)
        s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
        # check if only tags or acl is being updated
        # this way it won't create extra versions on the object, if versioning is enabled
        check_result = s3_args.check_tag_acl()

        for s3_key in s3.path_list:
            print("(dryrun) update: s3://%s/%s" % (s3.bucket_name, s3_key))
        if get_confirmation("Confirm?"):
            for s3_key in s3.path_list:
                print("update: s3://%s/%s" % (s3.bucket_name, s3_key))
                if check_result:
                    if check_result.get("Tags"):
                        s3.client.put_object_tagging(
                            Bucket=s3.bucket_name,
                            Key=s3_key,
                            Tagging={"TagSet": check_result.get("Tags")},
                        )
                    if check_result.get("Grants"):
                        grant_args = {"Bucket": s3.bucket_name, "Key": s3_key}
                        grant_args.update(check_result.get("Grants", {}))
                        s3.client.put_object_acl(**grant_args)

                else:
                    # Note: this will create new version if version is enabled
                    copy_object_args = get_copy_args(s3,
                                                     s3_key,
                                                     s3_args,
                                                     extra_args=True)
                    copy_source = {"Bucket": s3.bucket_name, "Key": s3_key}
                    s3transferwrapper = S3TransferWrapper()
                    s3.client.copy(
                        copy_source,
                        s3.bucket_name,
                        s3_key,
                        Callback=S3Progress(s3_key, s3.bucket_name, s3.client),
                        ExtraArgs=copy_object_args,
                        Config=s3transferwrapper.transfer_config,
                    )
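The dispatch order is rename first, then recursive, then versioned, and finally the single-object path. A hypothetical single-object call, assuming set_bucket_and_path accepts a "bucket/key" spec (all names are illustrative):

# re-encrypt one object in place; this creates a new version if versioning is enabled
object_s3(profile="dev", bucket="my-bucket/data/file.txt", encryption=True)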
Example 7
def copy_and_preserve(
    s3: S3,
    target_bucket: str,
    target_path: str,
    dest_bucket: str,
    dest_path: str,
    version: Optional[str] = None,
) -> None:
    """Copy object to other buckets and preserve previous details.

    :param s3: S3 instance, make sure contains bucket name
    :type s3: S3
    :param target_bucket: source bucket for upload
    :type target_bucket: str
    :param dest_bucket: destination bucket
    :type dest_bucket: str
    :param dest_path: destination key name
    :type dest_path: str
    :param version: versionID of the object
    :type version: str
    :raises ClientError: clienterror will raise when coping KMS encrypted file, handled internally
    """
    copy_source: Dict[str, str] = {"Bucket": target_bucket, "Key": target_path}
    if version:
        copy_source["VersionId"] = version
    s3_args = S3Args(s3)
    copy_object_args = get_copy_args(s3,
                                     target_path,
                                     s3_args,
                                     extra_args=True,
                                     version=version)

    # limit to one retry
    attempt_count: int = 0
    while attempt_count < 2:
        try:
            attempt_count += 1
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                dest_bucket,
                dest_path,
                Callback=S3Progress(target_path, s3.bucket_name, s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            break
        except ClientError as e:
            error_pattern = r"^.*\((.*)\).*$"
            error_name = re.match(error_pattern, str(e)).group(1)
            if error_name == "AccessDenied":
                print(80 * "-")
                print(e)
                print(
                    "You may have ACL policies that enable public access, but "
                    "the destination bucket is blocking all public access. "
                    "Either uncheck 'block all public access', update your object ACL settings, "
                    "try again without the -p flag, or continue without preserving the ACL."
                )
                if not get_confirmation("Continue without preserving ACL?"):
                    raise
                copy_object_args.pop("GrantFullControl", None)
                copy_object_args.pop("GrantRead", None)
                copy_object_args.pop("GrantReadACP", None)
                copy_object_args.pop("GrantWriteACP", None)
            # handle the case where a KMS encrypted object is copied to a bucket in a different region
            elif error_name == "KMS.NotFoundException":
                copy_object_args["ServerSideEncryption"] = "AES256"
                copy_object_args.pop("SSEKMSKeyId", None)
            else:
                raise
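The regex pulls the error name out of str(e), but re.match can return None if the message format changes; botocore also exposes the parsed error code directly on the exception. A sketch of the more direct lookup:

from botocore.exceptions import ClientError

def error_code(e: ClientError) -> str:
    # botocore attaches the parsed error payload to the exception
    return e.response["Error"]["Code"]  # e.g. "AccessDenied"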
Example 8
def upload_s3(
    profile: bool = False,
    bucket: Optional[str] = None,
    local_paths: Optional[Union[str, list]] = None,
    recursive: bool = False,
    hidden: bool = False,
    search_root: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    extra_config: bool = False,
) -> None:
    """Upload local files/directories to s3.

    Upload through the boto3 s3 client.
    The glob pattern exclude list is handled first, then the include list.

    :param profile: profile to use for this operation
    :type profile: bool, optional
    :param bucket: specify bucket to upload
    :type bucket: str, optional
    :param local_paths: local file paths to upload
    :type local_paths: list, optional
    :param recursive: upload directory
    :type recursive: bool, optional
    :param hidden: include hidden files during search
    :type hidden: bool, optional
    :param search_root: search from root
    :type search_root: bool, optional
    :param sync: use aws cli s3 sync
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param extra_config: configure extra settings during upload
    :type extra_config: bool, optional
    """
    if not local_paths:
        local_paths = []
    if not exclude:
        exclude = []
    if not include:
        include = []

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if not s3.path_list[0]:
        s3.set_s3_path()

    if not local_paths:
        fzf = Pyfzf()
        recursive = recursive or sync
        # don't allow multi_select for recursive operation
        multi_select = not recursive
        local_paths = fzf.get_local_file(
            search_from_root=search_root,
            directory=recursive,
            hidden=hidden,
            multi_select=multi_select,
        )

    # get the first item from the array since recursive operation doesn't support multi_select
    # local_path is used for sync and recursive operation
    # local_paths is used for single file operation
    if isinstance(local_paths, list):
        local_path = str(local_paths[0])
    else:
        local_path = str(local_paths)

    # construct extra argument
    extra_args = S3Args(s3)
    if extra_config:
        extra_args.set_extra_args(upload=True)

    if sync:
        sync_s3(
            exclude=exclude,
            include=include,
            from_path=local_path,
            to_path="s3://%s/%s" % (s3.bucket_name, s3.path_list[0]),
        )

    elif recursive:
        recursive_upload(s3, local_path, exclude, include, extra_args)

    else:
        for filepath in local_paths:
            # get the formatted s3 destination
            destination_key = s3.get_s3_destination_key(filepath)
            print("(dryrun) upload: %s to s3://%s/%s" %
                  (filepath, s3.bucket_name, destination_key))

        if get_confirmation("Confirm?"):
            for filepath in local_paths:
                destination_key = s3.get_s3_destination_key(filepath)
                print("upload: %s to s3://%s/%s" %
                      (filepath, s3.bucket_name, destination_key))
                transfer = S3TransferWrapper(s3.client)
                transfer.s3transfer.upload_file(
                    filepath,
                    s3.bucket_name,
                    destination_key,
                    callback=S3Progress(filepath),
                    extra_args=extra_args.extra_args,
                )