Example #1
def recursive_copy(
    s3: S3,
    target_bucket: str,
    target_path: str,
    dest_bucket: str,
    dest_path: str,
    exclude: List[str],
    include: List[str],
    preserve: bool,
) -> None:
    """Recursive copy object to other bucket.

    :param s3: S3 instance
    :type s3: S3
    :param target_bucket: source bucket
    :type target_bucket: str
    :param target_path: source folder path
    :type target_path: str
    :param dest_bucket: destination bucket
    :type dest_bucket: str
    :param dest_path: dest folder path
    :type dest_path: str
    :param exclude: glob pattern to exclude
    :type exclude: List[str]
    :param include: glob pattern to include
    :type include: List[str]
    :param preserve: preserve previous object config
    :type preserve: bool
    """
    file_list = walk_s3_folder(
        s3.client,
        target_bucket,
        target_path,
        target_path,
        [],
        exclude,
        include,
        "bucket",
        dest_path,
        dest_bucket,
    )

    if get_confirmation("Confirm?"):
        for s3_key, dest_pathname in file_list:
            print("copy: s3://%s/%s to s3://%s/%s" %
                  (target_bucket, s3_key, dest_bucket, dest_pathname))
            copy_source = {"Bucket": target_bucket, "Key": s3_key}
            if not preserve:
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    dest_bucket,
                    dest_pathname,
                    Callback=S3Progress(s3_key, target_bucket, s3.client),
                    Config=s3transferwrapper.transfer_config,
                )
            else:
                s3.bucket_name = target_bucket
                copy_and_preserve(s3, target_bucket, s3_key, dest_bucket,
                                  dest_pathname)
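
A hypothetical invocation, assuming an S3 wrapper instance from this codebase is already constructed; the bucket names and paths below are placeholders rather than values from the source.

# hypothetical usage sketch: copy everything under "photos/" to another
# bucket, skipping *.tmp files and preserving each object's settings
recursive_copy(
    s3,                        # an existing S3 wrapper instance
    target_bucket="source-bucket",
    target_path="photos/",
    dest_bucket="dest-bucket",
    dest_path="backup/photos/",
    exclude=["*.tmp"],
    include=[],
    preserve=True,
)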
Example #2
    def test_constructor(self, mocked_size):
        mocked_size.return_value = 10

        client = boto3.client("s3")
        stubber = Stubber(client)
        stubber.add_response("head_object", {"ContentLength": 100})
        stubber.activate()

        progress = S3Progress(filename=__file__)
        self.assertEqual(progress._filename, __file__)
        self.assertEqual(progress._seen_so_far, 0)
        self.assertEqual(progress._size, 10)

        progress = S3Progress(filename=__file__, client=client, bucket="hello")
        self.assertEqual(progress._filename, __file__)
        self.assertEqual(progress._seen_so_far, 0)
        self.assertEqual(progress._size, 100)
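
The test above relies on botocore's Stubber to fake the head_object call. A minimal self-contained sketch of that pattern (the bucket and key names are placeholders):

import boto3
from botocore.stub import Stubber

client = boto3.client("s3", region_name="us-east-1")
stubber = Stubber(client)
# queue a canned head_object response; Stubber validates it against the API model
stubber.add_response("head_object", {"ContentLength": 100})
with stubber:
    response = client.head_object(Bucket="hello", Key="some-key")
    assert response["ContentLength"] == 100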
Example #3
    def test_call(self, mocked_size):
        mocked_size.return_value = 1000
        self.capturedOutput.truncate(0)
        self.capturedOutput.seek(0)
        progress = S3Progress(filename=__file__)
        progress(bytes_amount=20)
        self.assertRegex(
            self.capturedOutput.getvalue(), r"test_s3progress.py  20 Bytes / 1000 Bytes"
        )
Example #4
    def test_human_readable_size(self):
        progress = S3Progress(filename=__file__)
        result = progress.human_readable_size(1000)
        self.assertEqual(result, "1000 Bytes")
        result = progress.human_readable_size(1024)
        self.assertEqual(result, "1.0 KiB")
        result = progress.human_readable_size(1048576)
        self.assertEqual(result, "1.0 MiB")
        result = progress.human_readable_size(1073741824)
        self.assertEqual(result, "1.0 GiB")
        result = progress.human_readable_size(10737418991)
        self.assertEqual(result, "10.0 GiB")
Example #5
def recursive_upload(s3: S3, local_path: str, exclude: List[str],
                     include: List[str], extra_args: S3Args) -> None:
    """Recursive upload local directory to s3.

    Perform a os.walk to upload everyfile under a directory.

    :param s3: S3 instance
    :type s3: S3
    :param local_path: local directory
    :type local_path: str
    :param exclude: glob pattern to exclude
    :type exclude: List[str]
    :param include: glob pattern to include
    :type include: List[str]
    :param extra_args: S3Args instance to set extra arguments
    :type extra_args: S3Args
    """
    upload_list: List[Dict[str, str]] = []
    for root, _, files in os.walk(local_path):
        for filename in files:
            full_path = os.path.join(root, filename)
            relative_path = os.path.relpath(full_path, local_path)

            if not exclude_file(exclude, include, relative_path):
                destination_key = s3.get_s3_destination_key(relative_path,
                                                            recursive=True)
                print("(dryrun) upload: %s to s3://%s/%s" %
                      (relative_path, s3.bucket_name, destination_key))
                upload_list.append({
                    "local_path": full_path,
                    "bucket": s3.bucket_name,
                    "key": destination_key,
                    "relative": relative_path,
                })

    if get_confirmation("Confirm?"):
        for item in upload_list:
            print("upload: %s to s3://%s/%s" %
                  (item["relative"], item["bucket"], item["key"]))
            transfer = S3TransferWrapper(s3.client)
            transfer.s3transfer.upload_file(
                item["local_path"],
                item["bucket"],
                item["key"],
                callback=S3Progress(item["local_path"]),
                extra_args=extra_args.extra_args,
            )
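
The exclude/include semantics used here (exclude is applied first, include then re-includes matching files) can be sketched with fnmatch; exclude_file_sketch is a hypothetical stand-in for the exclude_file helper called above:

from fnmatch import fnmatch
from typing import List

def exclude_file_sketch(exclude: List[str], include: List[str], filename: str) -> bool:
    """Return True if the file should be skipped (hypothetical sketch)."""
    excluded = any(fnmatch(filename, pattern) for pattern in exclude)
    if excluded and any(fnmatch(filename, pattern) for pattern in include):
        return False  # an include pattern re-includes the file
    return excluded

# exclude everything, then re-include only python files
assert exclude_file_sketch(["*"], ["*.py"], "notes.txt") is True
assert exclude_file_sketch(["*"], ["*.py"], "app.py") is False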
Example #6
def download_version(s3: S3, obj_versions: List[Dict[str, str]],
                     local_path: str) -> None:
    """Download versions of a object.

    :param s3: instance of S3
    :type s3: S3
    :param obj_versions: list of objects and their versions to download
    :type obj_versions: List[Dict[str, str]]
    :param local_path: local directory to download to
    :type local_path: str
    """
    for obj_version in obj_versions:
        destination_path = os.path.join(
            local_path, os.path.basename(obj_version.get("Key", "")))
        print("(dryrun) download: s3://%s/%s to %s with version %s" % (
            s3.bucket_name,
            obj_version.get("Key"),
            destination_path,
            obj_version.get("VersionId"),
        ))

    if get_confirmation("Confirm"):
        for obj_version in obj_versions:
            destination_path = os.path.join(
                local_path, os.path.basename(obj_version.get("Key", "")))
            print("download: s3://%s/%s to %s with version %s" % (
                s3.bucket_name,
                obj_version.get("Key"),
                destination_path,
                obj_version.get("VersionId"),
            ))
            transfer = S3TransferWrapper(s3.client)
            transfer.s3transfer.download_file(
                s3.bucket_name,
                obj_version.get("Key"),
                destination_path,
                extra_args={"VersionId": obj_version.get("VersionId")},
                callback=S3Progress(
                    obj_version.get("Key", ""),
                    s3.bucket_name,
                    s3.client,
                    obj_version.get("VersionId"),
                ),
            )
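
The shape of obj_versions is implied by the .get("Key") and .get("VersionId") calls above; a hypothetical call might look like this (the key and version id are placeholders):

obj_versions = [
    {"Key": "reports/2020.csv", "VersionId": "3HL4kqtJlcpXroDTDmJ"},
]
download_version(s3, obj_versions, local_path="/tmp/downloads")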
Example #7
def download_recusive(s3: S3, exclude: List[str], include: List[str],
                      local_path: str) -> None:
    """Download s3 recursive.

    :param s3: S3 instance
    :type s3: S3
    :param exclude: glob pattern to exclude
    :type exclude: List[str]
    :param include: glob pattern to include
    :type include: List[str]
    :param local_path: local directory to download to
    :type local_path: str
    """
    download_list = walk_s3_folder(
        s3.client,
        s3.bucket_name,
        s3.path_list[0],
        s3.path_list[0],
        [],
        exclude,
        include,
        "download",
        local_path,
    )

    if get_confirmation("Confirm?"):
        for s3_key, dest_pathname in download_list:
            if not os.path.exists(os.path.dirname(dest_pathname)):
                os.makedirs(os.path.dirname(dest_pathname))
            print("download: s3://%s/%s to %s" %
                  (s3.bucket_name, s3_key, dest_pathname))
            transfer = S3TransferWrapper(s3.client)
            transfer.s3transfer.download_file(
                s3.bucket_name,
                s3_key,
                dest_pathname,
                callback=S3Progress(s3_key, s3.bucket_name, s3.client),
            )
Example #8
def update_object_name(s3: S3, version: bool = False) -> None:
    """Update object name.

    :param s3: S3 class instance
    :type s3: S3
    :param version: whether to rename a specific version of the object; this creates a new object
    :type version: bool, optional
    """
    print(
        "Enter the new name below (format: newname or path/newname for a new path)"
    )
    new_name = input("Name(Orignal: %s): " % s3.path_list[0])

    if not version:
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s" %
              (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s" %
                  (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
            # initialise empty s3_args so that get_copy_args will use all of the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(s3,
                                             s3.path_list[0],
                                             s3_args,
                                             extra_args=True)
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": s3.path_list[0],
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(s3.path_list[0], s3.bucket_name,
                                    s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            s3.client.delete_object(
                Bucket=s3.bucket_name,
                Key=s3.path_list[0],
            )

    else:
        # get version
        obj_version = s3.get_object_version(key=s3.path_list[0])[0]
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s with version %s" % (
            s3.bucket_name,
            obj_version.get("Key"),
            s3.bucket_name,
            new_name,
            obj_version.get("VersionId"),
        ))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s with version %s" % (
                s3.bucket_name,
                obj_version.get("Key"),
                s3.bucket_name,
                new_name,
                obj_version.get("VersionId"),
            ))
            # initialise empty s3_args so that get_copy_args will use all of the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(
                s3,
                s3.path_list[0],
                s3_args,
                extra_args=True,
                version=obj_version.get("VersionId"),
            )
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": obj_version.get("Key"),
                "VersionId": obj_version.get("VersionId"),
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(
                    obj_version.get("Key", ""),
                    s3.bucket_name,
                    s3.client,
                    version_id=obj_version.get("VersionId"),
                ),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
Example #9
def update_object_recursive(
    s3: S3,
    storage: bool = False,
    acl: bool = False,
    metadata: bool = False,
    encryption: bool = False,
    tagging: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
) -> None:
    """Recursive update object attributes.

    :param s3: S3 class instance
    :type s3: S3
    :param storage: update storage
    :type storage: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    s3_args = S3Args(s3)
    s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
    # check if only tags or acl is being updated
    # this way it won't create extra versions on the object
    check_result = s3_args.check_tag_acl()

    file_list = walk_s3_folder(
        s3.client,
        s3.bucket_name,
        s3.path_list[0],
        s3.path_list[0],
        [],
        exclude,
        include,
        "object",
        s3.path_list[0],
        s3.bucket_name,
    )
    if get_confirmation("Confirm?"):
        if check_result:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                if check_result.get("Tags"):
                    s3.client.put_object_tagging(
                        Bucket=s3.bucket_name,
                        Key=original_key,
                        Tagging={"TagSet": check_result.get("Tags")},
                    )
                if check_result.get("Grants"):
                    grant_args = {
                        "Bucket": s3.bucket_name,
                        "Key": original_key
                    }
                    grant_args.update(check_result.get("Grants", {}))
                    s3.client.put_object_acl(**grant_args)

        else:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                # Note: this will create new version if version is enabled
                copy_object_args = get_copy_args(s3,
                                                 original_key,
                                                 s3_args,
                                                 extra_args=True)
                copy_source = {"Bucket": s3.bucket_name, "Key": original_key}
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    s3.bucket_name,
                    original_key,
                    Callback=S3Progress(original_key, s3.bucket_name,
                                        s3.client),
                    ExtraArgs=copy_object_args,
                    Config=s3transferwrapper.transfer_config,
                )
Example #10
def object_s3(
    profile: Union[str, bool] = False,
    bucket: str = None,
    recursive: bool = False,
    version: bool = False,
    allversion: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    name: bool = False,
    storage: bool = False,
    encryption: bool = False,
    metadata: bool = False,
    tagging: bool = False,
    acl: bool = False,
) -> None:
    """Update selected object settings.

    Display a menu based on the recursive and version requirements.
    If name is true, only handle rename.

    :param profile: use a different profile for this operation
    :type profile: Union[str, bool], optional
    :param bucket: bucket name that contains the object
    :type bucket: str, optional
    :param recursive: recursive update object attr
    :type recursive: bool, optional
    :param version: update a specific version of the object
    :type version: bool, optional
    :param allversion: update all versions of an object
    :type allversion: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    :param name: update name
    :type name: bool, optional
    :param storage: update storage
    :type storage: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    if allversion:
        version = True

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if recursive and not s3.path_list[0]:
        s3.set_s3_path()
    elif name and not s3.path_list[0]:
        s3.set_s3_object(version)
    elif not s3.path_list[0]:
        s3.set_s3_object(version, multi_select=True)

    # handle rename
    if name:
        update_object_name(s3, version)

    elif recursive:
        update_object_recursive(s3, storage, acl, metadata, encryption,
                                tagging, exclude, include)

    elif version:
        update_object_version(s3, allversion, acl, tagging)

    else:
        # update single object
        s3_args = S3Args(s3)
        s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
        # check if only tags or acl is being updated
        # this way it won't create extra versions on the object, if versioning is enabled
        check_result = s3_args.check_tag_acl()

        for s3_key in s3.path_list:
            print("(dryrun) update: s3://%s/%s" % (s3.bucket_name, s3_key))
        if get_confirmation("Confirm?"):
            for s3_key in s3.path_list:
                print("update: s3://%s/%s" % (s3.bucket_name, s3_key))
                if check_result:
                    if check_result.get("Tags"):
                        s3.client.put_object_tagging(
                            Bucket=s3.bucket_name,
                            Key=s3_key,
                            Tagging={"TagSet": check_result.get("Tags")},
                        )
                    if check_result.get("Grants"):
                        grant_args = {"Bucket": s3.bucket_name, "Key": s3_key}
                        grant_args.update(check_result.get("Grants", {}))
                        s3.client.put_object_acl(**grant_args)

                else:
                    # Note: this will create new version if version is enabled
                    copy_object_args = get_copy_args(s3,
                                                     s3_key,
                                                     s3_args,
                                                     extra_args=True)
                    copy_source = {"Bucket": s3.bucket_name, "Key": s3_key}
                    s3transferwrapper = S3TransferWrapper()
                    s3.client.copy(
                        copy_source,
                        s3.bucket_name,
                        s3_key,
                        Callback=S3Progress(s3_key, s3.bucket_name, s3.client),
                        ExtraArgs=copy_object_args,
                        Config=s3transferwrapper.transfer_config,
                    )
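
Hypothetical calls with placeholder values; that bucket accepts a "bucket-name/key" style path is an assumption based on the set_bucket_and_path call above:

# update only the tags of a single object (no new version is created)
object_s3(bucket="my-bucket/data/report.csv", tagging=True)
# recursively update the ACL of everything under a prefix
object_s3(bucket="my-bucket/data/", recursive=True, acl=True)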
Example #11
def copy_and_preserve(
    s3: S3,
    target_bucket: str,
    target_path: str,
    dest_bucket: str,
    dest_path: str,
    version: str = None,
) -> None:
    """Copy object to other buckets and preserve previous details.

    :param s3: S3 instance, make sure contains bucket name
    :type s3: S3
    :param target_bucket: source bucket for the copy
    :type target_bucket: str
    :param target_path: source object key to copy
    :type target_path: str
    :param dest_bucket: destination bucket
    :type dest_bucket: str
    :param dest_path: destination key name
    :type dest_path: str
    :param version: version id of the object
    :type version: str, optional
    :raises ClientError: raised when copying a KMS encrypted file; handled internally
    """
    copy_source: Dict[str, str] = {"Bucket": target_bucket, "Key": target_path}
    if version:
        copy_source["VersionId"] = version
    s3_args = S3Args(s3)
    copy_object_args = get_copy_args(s3,
                                     target_path,
                                     s3_args,
                                     extra_args=True,
                                     version=version)

    # limit to one retry
    attempt_count: int = 0
    while attempt_count < 2:
        try:
            attempt_count += 1
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                dest_bucket,
                dest_path,
                Callback=S3Progress(target_path, s3.bucket_name, s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            break
        except ClientError as e:
            error_pattern = r"^.*\((.*)\).*$"
            error_name = re.match(error_pattern, str(e)).group(1)
            if error_name == "AccessDenied":
                print(80 * "-")
                print(e)
                print(
                    "You may have ACL policies that enable public access, but "
                    "the destination bucket is blocking all public access. "
                    "Either uncheck 'block all public access' on the destination, "
                    "update your object ACL settings, retry without the -p flag, "
                    "or continue without preserving the ACL."
                )
                if not get_confirmation("Continue without preserving ACL?"):
                    raise
                copy_object_args.pop("GrantFullControl", None)
                copy_object_args.pop("GrantRead", None)
                copy_object_args.pop("GrantReadACP", None)
                copy_object_args.pop("GrantWriteACP", None)
            # handle the case where a KMS encrypted object moves to a bucket in a different region
            elif error_name == "KMS.NotFoundException":
                copy_object_args["ServerSideEncryption"] = "AES256"
                copy_object_args.pop("SSEKMSKeyId", None)
            else:
                raise
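
A small illustration of the error-name extraction used above. Note that boto3 also exposes the code in structured form via e.response["Error"]["Code"], which avoids the regex entirely:

import re
from botocore.exceptions import ClientError

def error_name_from(e: ClientError) -> str:
    # same regex as above: pulls the code out of text like
    # "An error occurred (AccessDenied) when calling the CopyObject operation: ..."
    match = re.match(r"^.*\((.*)\).*$", str(e))
    return match.group(1) if match else ""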
Example #12
def bucket_s3(
    profile: bool = False,
    from_bucket: str = None,
    to_bucket: str = None,
    recursive: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    version: bool = False,
    preserve: bool = False,
) -> None:
    """Transfer file between buckets.

    Handle transfer file between buckets or even within the same bucket.
    Handle glob pattern through exclude list first than it will process the include to explicit include files.

    :param profile: use a different profile for this operation
    :type profile: str, optional
    :param from_bucket: source bucket
    :type from_bucket: str, optional
    :param to_bucket: destination bucket
    :type to_bucket: str, optional
    :param recursive: recursive copy a folder
    :type recursive: bool, optional
    :param sync: sync s3 buckets
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param version: transfer object versions
    :type version: bool, optional
    :param preserve: preserve the original objects' configuration instead of using the destination bucket's settings
    :type preserve: bool, optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    s3 = S3(profile)

    # initialise variables rather than using the s3 instance directly, since two buckets are processed
    target_bucket: str = ""
    target_path: str = ""
    target_path_list: List[str] = []
    dest_bucket: str = ""
    dest_path = ""
    obj_versions: List[Dict[str, str]] = []

    search_folder: bool = recursive or sync

    if from_bucket:
        target_bucket, target_path, target_path_list = process_path_param(
            from_bucket, s3, search_folder, version=version)
    else:
        s3.set_s3_bucket(
            header="set the source bucket which contains the file to transfer")
        target_bucket = s3.bucket_name
        if search_folder:
            s3.set_s3_path()
            target_path = s3.path_list[0]
        else:
            s3.set_s3_object(multi_select=True, version=version)
            target_path_list = s3.path_list[:]
    if version and not search_folder:
        obj_versions = s3.get_object_version()
    # clean up the s3 attributes for next operation
    s3.bucket_name = ""
    s3.path_list[0] = ""

    if to_bucket:
        dest_bucket, dest_path, _ = process_path_param(to_bucket, s3, True)
    else:
        s3.set_s3_bucket(
            header=
            "set the destination bucket where the file should be transfered")
        s3.set_s3_path()
        dest_bucket = s3.bucket_name
        dest_path = s3.path_list[0]

    if sync:
        sync_s3(
            exclude,
            include,
            "s3://%s/%s" % (target_bucket, target_path),
            "s3://%s/%s" % (dest_bucket, dest_path),
        )
    elif recursive:
        recursive_copy(
            s3,
            target_bucket,
            target_path,
            dest_bucket,
            dest_path,
            exclude,
            include,
            preserve,
        )

    elif version:
        copy_version(
            s3,
            dest_bucket,
            dest_path,
            obj_versions,
            target_bucket,
            target_path,
            preserve,
        )

    else:
        # point the s3 instance's bucket name and path at the destination bucket
        s3.bucket_name = dest_bucket
        s3.path_list[0] = dest_path
        for target_path in target_path_list:
            # process the target key path and get the destination key path
            s3_key = s3.get_s3_destination_key(target_path)
            print("(dryrun) copy: s3://%s/%s to s3://%s/%s" %
                  (target_bucket, target_path, dest_bucket, s3_key))
        if get_confirmation("Confirm?"):
            for target_path in target_path_list:
                s3_key = s3.get_s3_destination_key(target_path)
                print("copy: s3://%s/%s to s3://%s/%s" %
                      (target_bucket, target_path, dest_bucket, s3_key))
                copy_source = {"Bucket": target_bucket, "Key": target_path}
                if not preserve:
                    s3transferwrapper = S3TransferWrapper()
                    s3.client.copy(
                        copy_source,
                        dest_bucket,
                        s3_key,
                        Callback=S3Progress(target_path, target_bucket,
                                            s3.client),
                        Config=s3transferwrapper.transfer_config,
                    )
                else:
                    s3.bucket_name = target_bucket
                    copy_and_preserve(s3, target_bucket, target_path,
                                      dest_bucket, s3_key)
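
A hypothetical invocation, assuming from_bucket/to_bucket accept a "bucket-name/prefix" form via process_path_param; all values are placeholders:

bucket_s3(
    from_bucket="source-bucket/photos/",
    to_bucket="dest-bucket/backup/",
    recursive=True,
    exclude=["*"],
    include=["*.jpg"],  # exclude everything, then re-include jpg files
)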
Example #13
def copy_version(
    s3: S3,
    dest_bucket: str,
    dest_path: str,
    obj_versions: List[Dict[str, str]],
    target_bucket: str,
    target_path: str,
    preserve: bool,
) -> None:
    """Copy versions of object to other bucket.

    :param s3: S3 instance
    :type s3: S3
    :param dest_bucket: the destination bucket to transfer the object
    :type dest_bucket: str
    :param dest_path: the destination path in the destination bucket
    :type dest_path: str
    :param obj_versions: the versions selected through get_object_version()
    :type obj_versions: List[Dict[str, str]]
    :param target_bucket: the bucket containing the object to transfer
    :type target_bucket: str
    :param target_path: the key of the object to be transferred
    :type target_path: str
    :param preserve: preserve previous object details after transfer
    :type preserve: bool
    """
    # set s3 attributes for getting destination key
    s3.bucket_name = dest_bucket
    s3.path_list[0] = dest_path
    for obj_version in obj_versions:
        s3_key = s3.get_s3_destination_key(obj_version.get("Key", ""))
        print("(dryrun) copy: s3://%s/%s to s3://%s/%s with version %s" % (
            target_bucket,
            obj_version.get("Key"),
            dest_bucket,
            s3_key,
            obj_version.get("VersionId"),
        ))

    if get_confirmation("Confirm?"):
        for obj_version in obj_versions:
            s3_key = s3.get_s3_destination_key(obj_version.get("Key", ""))
            print("copy: s3://%s/%s to s3://%s/%s with version %s" % (
                target_bucket,
                obj_version.get("Key"),
                dest_bucket,
                s3_key,
                obj_version.get("VersionId"),
            ))
            copy_source = {
                "Bucket": target_bucket,
                "Key": obj_version.get("Key"),
                "VersionId": obj_version.get("VersionId"),
            }
            if not preserve:
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    dest_bucket,
                    s3_key,
                    Callback=S3Progress(
                        obj_version.get("Key", ""),
                        target_bucket,
                        s3.client,
                        version_id=obj_version.get("VersionId"),
                    ),
                    Config=s3transferwrapper.transfer_config,
                )
            else:
                s3.bucket_name = target_bucket
                copy_and_preserve(
                    s3,
                    target_bucket,
                    obj_version.get("Key", ""),
                    dest_bucket,
                    s3_key,
                    version=obj_version.get("VersionId"),
                )
Example #14
def upload_s3(
    profile: bool = False,
    bucket: str = None,
    local_paths: Optional[Union[str, list]] = None,
    recursive: bool = False,
    hidden: bool = False,
    search_root: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    extra_config: bool = False,
) -> None:
    """Upload local files/directories to s3.

    Uploads through the boto3 s3 client.
    Glob patterns in the exclude list are handled first, then the include list.

    :param profile: profile to use for this operation
    :type profile: bool, optional
    :param bucket: specify bucket to upload
    :type bucket: str, optional
    :param local_paths: local file paths to upload
    :type local_paths: list, optional
    :param recursive: upload directory
    :type recursive: bool, optional
    :param hidden: include hidden files during search
    :type hidden: bool, optional
    :param search_root: search from root
    :type search_root: bool, optional
    :param sync: use aws cli s3 sync
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param extra_config: configure extra settings during upload
    :type extra_config: bool, optional
    """
    if not local_paths:
        local_paths = []
    if not exclude:
        exclude = []
    if not include:
        include = []

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if not s3.path_list[0]:
        s3.set_s3_path()

    if not local_paths:
        fzf = Pyfzf()
        recursive = recursive or sync
        # don't allow multi_select for recursive operation
        multi_select = not recursive
        local_paths = fzf.get_local_file(
            search_from_root=search_root,
            directory=recursive,
            hidden=hidden,
            multi_select=multi_select,
        )

    # get the first item from the array since recursive operation doesn't support multi_select
    # local_path is used for sync and recursive operation
    # local_paths is used for single file operation
    if isinstance(local_paths, list):
        local_path = str(local_paths[0])
    else:
        local_path = str(local_paths)

    # construct extra argument
    extra_args = S3Args(s3)
    if extra_config:
        extra_args.set_extra_args(upload=True)

    if sync:
        sync_s3(
            exclude=exclude,
            include=include,
            from_path=local_path,
            to_path="s3://%s/%s" % (s3.bucket_name, s3.path_list[0]),
        )

    elif recursive:
        recursive_upload(s3, local_path, exclude, include, extra_args)

    else:
        for filepath in local_paths:
            # get the formatted s3 destination
            destination_key = s3.get_s3_destination_key(filepath)
            print("(dryrun) upload: %s to s3://%s/%s" %
                  (filepath, s3.bucket_name, destination_key))

        if get_confirmation("Confirm?"):
            for filepath in local_paths:
                destination_key = s3.get_s3_destination_key(filepath)
                print("upload: %s to s3://%s/%s" %
                      (filepath, s3.bucket_name, destination_key))
                transfer = S3TransferWrapper(s3.client)
                transfer.s3transfer.upload_file(
                    filepath,
                    s3.bucket_name,
                    destination_key,
                    callback=S3Progress(filepath),
                    extra_args=extra_args.extra_args,
                )
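
A hypothetical invocation with placeholder values; the "bucket-name/prefix" form for bucket is an assumption based on set_bucket_and_path:

upload_s3(
    bucket="my-bucket/uploads/",
    local_paths=["./report.csv"],
    extra_config=True,  # configure extra upload settings through S3Args
)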
Example #15
def download_s3(
    profile: Union[str, bool] = False,
    bucket: str = None,
    local_path: str = None,
    recursive: bool = False,
    search_from_root: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    hidden: bool = False,
    version: bool = False,
) -> None:
    """Download files/'directory' from s3.

    Handles sync, download file and download recursive from a s3 bucket.
    Glob pattern are first handled through exclude list and then include list.

    :param profile: profile to use for this operation
    :type profile: bool, optional
    :param bucket: specify bucket to download
    :type bucket: str, optional
    :param local_path: local directory for the download
    :type local_path: str, optional
    :param recursive: download s3 directory
    :type recursive: bool, optional
    :param search_from_root: search from root
    :type search_from_root: bool, optional
    :param sync: use aws cli s3 sync
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param hidden: include hidden files during search
    :type hidden: bool, optional
    :param version: download version object
    :type version: bool, optional
    """
    if not exclude:
        exclude = []
    if not include:
        include = []

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if recursive or sync:
        if not s3.path_list[0]:
            s3.set_s3_path(download=True)
    else:
        if not s3.path_list[0]:
            s3.set_s3_object(multi_select=True, version=version)

    obj_versions: List[Dict[str, str]] = []
    if version:
        obj_versions = s3.get_object_version()

    if not local_path:
        fzf = Pyfzf()
        local_path = str(
            fzf.get_local_file(search_from_root, directory=True,
                               hidden=hidden))

    if sync:
        sync_s3(
            exclude=exclude,
            include=include,
            from_path="s3://%s/%s" % (s3.bucket_name, s3.path_list[0]),
            to_path=local_path,
        )
    elif recursive:
        download_recusive(s3, exclude, include, local_path)

    elif version:
        download_version(s3, obj_versions, local_path)

    else:
        for s3_path in s3.path_list:
            destination_path = os.path.join(local_path,
                                            os.path.basename(s3_path))
            # without the recursive flag, s3.path_list[0] is set by s3.set_s3_object,
            # so each bucket path is already a valid s3 key and we don't need s3.get_s3_destination_key
            print("(dryrun) download: s3://%s/%s to %s" %
                  (s3.bucket_name, s3_path, destination_path))
        if get_confirmation("Confirm?"):
            for s3_path in s3.path_list:
                destination_path = os.path.join(local_path,
                                                os.path.basename(s3_path))
                print("download: s3://%s/%s to %s" %
                      (s3.bucket_name, s3_path, destination_path))
                transfer = S3TransferWrapper(s3.client)
                transfer.s3transfer.download_file(
                    s3.bucket_name,
                    s3_path,
                    destination_path,
                    callback=S3Progress(s3_path, s3.bucket_name, s3.client),
                )
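
A hypothetical invocation with placeholder values (same "bucket-name/prefix" assumption as above):

download_s3(
    bucket="my-bucket/logs/",
    local_path="./logs",
    recursive=True,
    exclude=["*.gz"],  # skip compressed logs
)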