Example #1
    @patch("builtins.input")  # assumed patch target; the decorator is missing from the original snippet
    def test_get_confirmation(self, mocked_input):
        mocked_input.return_value = "y"
        response = get_confirmation("Confirm?")
        self.assertTrue(response)
        mocked_input.return_value = "n"
        response = get_confirmation("Confirm?")
        self.assertFalse(response)
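
For reference, a minimal sketch of the function under test, inferred from the assertions above (the real fzfaws implementation may differ):

def get_confirmation(message: str) -> bool:
    # keep prompting until the user answers "y" or "n"
    confirmation = ""
    while confirmation not in ("y", "n"):
        confirmation = input("%s(y/n): " % message).lower()
    return confirmation == "y"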
Example #2
def terminate_instance(
    profile: Union[str, bool] = False,
    region: Union[str, bool] = False,
    wait: bool = False,
) -> None:
    """Terminate the instance.

    :param profile: profile to use for this operation
    :type profile: Union[str, bool], optional
    :param region: region to use for this operation
    :type region: Union[str, bool], optional
    :param wait: wait for instance to be terminated
    :type wait: bool, optional
    """
    ec2 = EC2(profile, region)
    ec2.set_ec2_instance()

    ec2.print_instance_details()
    if get_confirmation("Above instance will be terminated, continue?"):
        response = ec2.client.terminate_instances(InstanceIds=ec2.instance_ids)
        response.pop("ResponseMetadata", None)
        print(json.dumps(response, indent=4, default=str))
        print(80 * "-")
        print("Instance termination initiated")

        if wait:
            ec2.wait("instance_terminated",
                     "Wating for instance to be terminated ...")
            print("Instance terminated")
Example #3
def update_object_version(
    s3: S3,
    allversion: bool = False,
    acl: bool = False,
    tagging: bool = False,
) -> None:
    """Update versions of object's attributes.

    Note: this operation only allows updating acl and tagging, because
    these updates won't introduce new objects.

    :param s3: S3 instance
    :type s3: S3
    :param allversion: update all versions of the object
    :type allversion: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    """
    obj_versions = s3.get_object_version(select_all=allversion)
    s3_args = S3Args(s3)
    s3_args.set_extra_args(acl, tagging, version=obj_versions)
    # check if only tags or acl is being updated
    # this way it won't create extra versions on the object
    check_result = s3_args.check_tag_acl()

    for obj_version in obj_versions:
        print(
            "(dryrun) update: s3://%s/%s with version %s"
            % (s3.bucket_name, obj_version.get("Key"), obj_version.get("VersionId"))
        )
    if get_confirmation("Confirm?"):
        for obj_version in obj_versions:
            print(
                "update: s3://%s/%s with version %s"
                % (
                    s3.bucket_name,
                    obj_version.get("Key"),
                    obj_version.get("VersionId"),
                )
            )
            if check_result:
                if check_result.get("Tags"):
                    s3.client.put_object_tagging(
                        Bucket=s3.bucket_name,
                        Key=obj_version.get("Key"),
                        VersionId=obj_version.get("VersionId"),
                        Tagging={"TagSet": check_result.get("Tags")},
                    )
                if check_result.get("Grants"):
                    grant_args = {
                        "Bucket": s3.bucket_name,
                        "Key": obj_version.get("Key"),
                        "VersionId": obj_version.get("VersionId"),
                    }
                    grant_args.update(check_result.get("Grants", {}))
                    s3.client.put_object_acl(**grant_args)
            else:
                print("Nothing to update")
Example #4
    def execute_with_capabilities(
        self, cloudformation_action: Optional[Callable[..., Dict[str, Any]]] = None, **kwargs
    ) -> Dict[str, Any]:
        """Execute the cloudformation_action with capabilities handled.

        When creating stacks that involve IAM roles or nested stacks, cloudformation
        requires extra capabilities to be acknowledged before creating or updating the stack.
        This method attempts to submit the request and handle the capabilities
        exception, then calls _get_capabilities to have the user acknowledge the capabilities.

        :param cloudformation_action: the function to execute for boto3
        :type cloudformation_action: Callable[..., Dict[str, Any]]
        :raises InsufficientCapabilitiesException: when the stack action requires extra acknowledgement
        :return: boto3 response of the cloudformation action
        :rtype: Dict[str, Any]
        """
        try:
            print(json.dumps({**kwargs}, indent=4, default=str))
            if get_confirmation("Confirm?"):
                response = cloudformation_action(**kwargs)
            else:
                sys.exit(1)
        except self.client.exceptions.InsufficientCapabilitiesException as e:
            pattern = r"^.*(Requires capabilities.*)$"
            error_msg = re.match(pattern, str(e)).group(1)
            response = cloudformation_action(
                **kwargs, Capabilities=self._get_capabilities(message=error_msg)
            )
        return response
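
A hedged usage sketch mirroring the pattern used elsewhere in this codebase; the stack name and template variable are illustrative:

# submit a create_stack request; capability exceptions are handled internally
response = cloudformation.execute_with_capabilities(
    cloudformation_action=cloudformation.client.create_stack,
    StackName="my-stack",
    TemplateBody=template_body,
)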
Example #5
def stop_instance(
    profile: Union[str, bool] = False,
    region: Union[str, bool] = False,
    hibernate: bool = False,
    wait: bool = False,
) -> None:
    """Stop the selected instance.

    :param profile: profile to use for this operation
    :type profile: Union[bool, str], optional
    :param region: region to use for this operation
    :type region: Union[bool, str], optional
    :param hibernate: hibernate the instance if the instance supports hibernation
    :type hibernate: bool, optional
    :param wait: wait for instance to be stopped
    :type wait: bool, optional
    """
    ec2 = EC2(profile, region)
    ec2.set_ec2_instance()

    ec2.print_instance_details()
    if get_confirmation("Above instance will be stopped, continue?"):
        response = ec2.client.stop_instances(InstanceIds=ec2.instance_ids,
                                             Hibernate=hibernate)
        response.pop("ResponseMetadata", None)
        print(json.dumps(response, indent=4, default=str))
        print(80 * "-")
        print("Instance stop initiated")

        if wait:
            ec2.wait("instance_stopped",
                     "Wating for instance to be stopped ...")
            print("Instance stopped")
Example #6
def delete_stack(
    profile: Union[str, bool] = False,
    region: Union[str, bool] = False,
    wait: bool = False,
    iam: Union[str, bool] = False,
) -> None:
    """Handle deletion of the stack.

    There are two situations: normal deletion and retained deletion.
    When the selected stack is already in a 'DELETE_FAILED' state, an extra
    fzf operation is triggered so the user can select logical ids to retain
    in order for the deletion to succeed.

    :param profile: use a different profile for this operation
    :type profile: Union[str, bool], optional
    :param region: use a different region for this operation
    :type region: Union[str, bool], optional
    :param wait: pause the function and wait for stack delete complete
    :type wait: bool, optional
    :param iam: specify an IAM ARN to use when deleting this stack
    :type iam: Union[str, bool], optional
    """
    cloudformation = Cloudformation(profile, region)
    cloudformation.set_stack()

    logical_id_list: List[str] = []
    if cloudformation.stack_details["StackStatus"] == "DELETE_FAILED":
        header: str = "stack is in the failed state, specify any resource to skip during deletion"
        logical_id_list = cloudformation.get_stack_resources(empty_allow=True,
                                                             header=header)

    cloudformation_args: Dict[str, Any] = {
        "StackName": cloudformation.stack_name
    }
    if logical_id_list:
        cloudformation_args["RetainResources"] = logical_id_list

    if iam and isinstance(iam, str):
        cloudformation_args["RoleARN"] = iam
    elif iam and isinstance(iam, bool):
        iam_instance = IAM(profile=cloudformation.profile)
        iam_instance.set_arns(
            header="select an iam role with permissions to delete the current stack",
            service="cloudformation.amazonaws.com",
        )
        if iam_instance.arns[0]:
            cloudformation_args["RoleARN"] = iam_instance.arns[0]

    if not get_confirmation("Are you sure you want to delete the stack '%s'?" %
                            cloudformation.stack_name):
        sys.exit(1)

    cloudformation.client.delete_stack(**cloudformation_args)
    print("Stack deletion initiated")

    if wait:
        cloudformation.wait("stack_delete_complete",
                            "Wating for stack to be deleted ...")
        print("Stack deleted")
Example #7
def recursive_copy(
    s3: S3,
    target_bucket: str,
    target_path: str,
    dest_bucket: str,
    dest_path: str,
    exclude: List[str],
    include: List[str],
    preserve: bool,
) -> None:
    """Recursive copy object to other bucket.

    :param s3: S3 instance
    :type s3: S3
    :param target_bucket: source bucket
    :type target_bucket: str
    :param target_path: source folder path
    :type target_path: str
    :param dest_bucket: destination bucket
    :type dest_bucket: str
    :param dest_path: dest folder path
    :type dest_path: str
    :param exclude: glob pattern to exclude
    :type exclude: List[str]
    :param include: glob pattern to include
    :type include: List[str]
    :param preserve: preserve previous object config
    :type preserve: bool
    """
    file_list = walk_s3_folder(
        s3.client,
        target_bucket,
        target_path,
        target_path,
        [],
        exclude,
        include,
        "bucket",
        dest_path,
        dest_bucket,
    )

    if get_confirmation("Confirm?"):
        for s3_key, dest_pathname in file_list:
            print("copy: s3://%s/%s to s3://%s/%s" %
                  (target_bucket, s3_key, dest_bucket, dest_pathname))
            copy_source = {"Bucket": target_bucket, "Key": s3_key}
            if not preserve:
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    dest_bucket,
                    dest_pathname,
                    Callback=S3Progress(s3_key, target_bucket, s3.client),
                    Config=s3transferwrapper.transfer_config,
                )
            else:
                s3.bucket_name = target_bucket
                copy_and_preserve(s3, target_bucket, s3_key, dest_bucket,
                                  dest_pathname)
Example #8
def sync_s3(
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    from_path: str = "",
    to_path: str = "",
) -> None:
    """Sync from_path with to_path using awscli.

    Uses subprocess to call the aws cli s3 sync command, as boto3 doesn't
    provide a sync operation.

    A custom sync using S3 ETag comparison may be implemented later; it
    requires more time and benchmarking to evaluate the time difference.

    For now, sync is the only operation that requires the aws cli to be installed.

    :param exclude: list of files to exclude
    :type exclude: List[str], optional
    :param include: list of files to explicitly include
    :type include: List[str], optional
    :param from_path: original file location
    :type from_path: str
    :param to_path: destination file location
    :type to_path: str
    :raises InvalidS3PathPattern: when from_path or to_path is empty
    """
    if not from_path or not to_path:
        raise InvalidS3PathPattern(
            "Invalid S3 path pattern for sync, example: s3://bucketname/path/")

    if not exclude:
        exclude = []
    if not include:
        include = []
    # add the exclude and include flags into the command list
    # (the aws cli requires the flag to be repeated for each pattern)
    exclude_list: List[str] = []
    include_list: List[str] = []
    for pattern in exclude:
        exclude_list.extend(["--exclude", pattern])
    for pattern in include:
        include_list.extend(["--include", pattern])

    cmd_list: List[str] = ["aws", "s3", "sync", from_path, to_path]
    cmd_list.extend(exclude_list)
    cmd_list.extend(include_list)
    cmd_list.append("--dryrun")

    sync_dry = subprocess.Popen(cmd_list)
    sync_dry.communicate()
    if get_confirmation("Confirm?"):
        # remove the dryrun flag and actually invoke it
        cmd_list.pop()
        sync = subprocess.Popen(cmd_list)
        sync.communicate()
        print("%s synced with %s" % (from_path, to_path))
Example #9
def recursive_upload(s3: S3, local_path: str, exclude: List[str],
                     include: List[str], extra_args: S3Args) -> None:
    """Recursive upload local directory to s3.

    Performs an os.walk to upload every file under the directory.

    :param s3: S3 instance
    :type s3: S3
    :param local_path: local directory
    :type local_path: str
    :param exclude: glob pattern to exclude
    :type exclude: List[str]
    :param include: glob pattern to include
    :type include: List[str]
    :param extra_args: S3Args instance to set extra argument
    :type extra_args: S3Args
    """
    upload_list: List[Dict[str, str]] = []
    for root, _, files in os.walk(local_path):
        for filename in files:
            full_path = os.path.join(root, filename)
            relative_path = os.path.relpath(full_path, local_path)

            if not exclude_file(exclude, include, relative_path):
                destination_key = s3.get_s3_destination_key(relative_path,
                                                            recursive=True)
                print("(dryrun) upload: %s to s3://%s/%s" %
                      (relative_path, s3.bucket_name, destination_key))
                upload_list.append({
                    "local_path": full_path,
                    "bucket": s3.bucket_name,
                    "key": destination_key,
                    "relative": relative_path,
                })

    if get_confirmation("Confirm?"):
        for item in upload_list:
            print("upload: %s to s3://%s/%s" %
                  (item["relative"], item["bucket"], item["key"]))
            transfer = S3TransferWrapper(s3.client)
            transfer.s3transfer.upload_file(
                item["local_path"],
                item["bucket"],
                item["key"],
                callback=S3Progress(item["local_path"]),
                extra_args=extra_args.extra_args,
            )
Example #10
def reboot_instance(profile: Union[str, bool] = False,
                    region: Union[str, bool] = False) -> None:
    """Reboot the selected instances.

    :param profile: profile to use for this operation
    :type profile: Union[bool, str], optional
    :param region: region to use for this operation
    :type region: Union[bool, str], optional
    """
    ec2 = EC2(profile, region)
    ec2.set_ec2_instance()

    ec2.print_instance_details()
    if get_confirmation("Above instance will be rebooted, continue?"):
        ec2.client.reboot_instances(InstanceIds=ec2.instance_ids)
        print(80 * "-")
        print("Instance in being placed in the reboot queue")
        print("It may take aws up to 4mins before it is rebooted")
        print("Instance will remain in running state")
Example #11
def start_instance(
    profile: Union[str, bool] = False,
    region: Union[str, bool] = False,
    wait: bool = False,
    check: bool = False,
) -> None:
    """Start the selected instance.

    :param profile: profile to use for this operation
    :type profile: Union[bool, str], optional
    :param region: region to use for this operation
    :type region: Union[bool, str], optional
    :param wait: wait for instance start
    :type wait: bool, optional
    :param check: wait for all checks to be finished
    :type check: bool, optional
    """
    ec2 = EC2(profile, region)
    ec2.set_ec2_instance()

    ec2.print_instance_details()
    if get_confirmation("Above instance will be started, continue?"):
        response = ec2.client.start_instances(InstanceIds=ec2.instance_ids)
        response.pop("ResponseMetadata", None)
        print(json.dumps(response, indent=4, default=str))
        print(80 * "-")
        print("Instance start initiated")

        if check:
            print(
                "Wating for instance to be running and 2/2 status checked ...")
            ec2.wait("instance_status_ok")
            print("Instance is ready")
        elif wait:
            ec2.wait("instance_running",
                     "Wating for instance to be running ...")
            print("Instance is running")
Example #12
def upload_s3(
    profile: bool = False,
    bucket: Optional[str] = None,
    local_paths: Optional[Union[str, list]] = None,
    recursive: bool = False,
    hidden: bool = False,
    search_root: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    extra_config: bool = False,
) -> None:
    """Upload local files/directories to s3.

    Upload through boto3 s3 client.
    The glob patterns in the exclude list are handled first, then the include list.

    :param profile: profile to use for this operation
    :type profile: bool, optional
    :param bucket: specify bucket to upload
    :type bucket: str, optional
    :param local_paths: local file paths to upload
    :type local_paths: Union[str, list], optional
    :param recursive: upload directory
    :type recursive: bool, optional
    :param hidden: include hidden files during search
    :type hidden: bool, optional
    :param search_root: search from root
    :type search_root: bool, optional
    :param sync: use aws cli s3 sync
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param extra_config: configure extra settings during upload
    :type extra_config: bool, optional
    """
    if not local_paths:
        local_paths = []
    if not exclude:
        exclude = []
    if not include:
        include = []

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if not s3.path_list[0]:
        s3.set_s3_path()

    if not local_paths:
        fzf = Pyfzf()
        recursive = recursive or sync
        # don't allow multi_select for recursive operation
        multi_select = not recursive
        local_paths = fzf.get_local_file(
            search_from_root=search_root,
            directory=recursive,
            hidden=hidden,
            multi_select=multi_select,
        )

    # get the first item from the array since recursive operation doesn't support multi_select
    # local_path is used for sync and recursive operation
    # local_paths is used for single file operation
    if isinstance(local_paths, list):
        local_path = str(local_paths[0])
    else:
        local_path = str(local_paths)

    # construct extra argument
    extra_args = S3Args(s3)
    if extra_config:
        extra_args.set_extra_args(upload=True)

    if sync:
        sync_s3(
            exclude=exclude,
            include=include,
            from_path=local_path,
            to_path="s3://%s/%s" % (s3.bucket_name, s3.path_list[0]),
        )

    elif recursive:
        recursive_upload(s3, local_path, exclude, include, extra_args)

    else:
        for filepath in local_paths:
            # get the formatted s3 destination
            destination_key = s3.get_s3_destination_key(filepath)
            print("(dryrun) upload: %s to s3://%s/%s" %
                  (filepath, s3.bucket_name, destination_key))

        if get_confirmation("Confirm?"):
            for filepath in local_paths:
                destination_key = s3.get_s3_destination_key(filepath)
                print("upload: %s to s3://%s/%s" %
                      (filepath, s3.bucket_name, destination_key))
                transfer = S3TransferWrapper(s3.client)
                transfer.s3transfer.upload_file(
                    filepath,
                    s3.bucket_name,
                    destination_key,
                    callback=S3Progress(filepath),
                    extra_args=extra_args.extra_args,
                )
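
A hypothetical invocation; the bucket path and glob pattern are illustrative:

# recursively upload an interactively chosen directory, skipping .git files
upload_s3(bucket="my-bucket/backup/", recursive=True, exclude=["*.git/*"])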
Example #13
def update_object_name(s3: S3, version: bool = False) -> None:
    """Update object name.

    :param s3: S3 class instance
    :type s3: S3
    :param version: whether to rename a specific version of the object; this will create a new object
    :type version: bool, optional
    """
    print(
        "Enter the new name below (format: newname or path/newname for a new path)"
    )
    new_name = input("Name(Orignal: %s): " % s3.path_list[0])

    if not version:
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s" %
              (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s" %
                  (s3.bucket_name, s3.path_list[0], s3.bucket_name, new_name))
            # initialise empty s3_args so that get_copy_args will use all the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(s3,
                                             s3.path_list[0],
                                             s3_args,
                                             extra_args=True)
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": s3.path_list[0],
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(s3.path_list[0], s3.bucket_name,
                                    s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            s3.client.delete_object(
                Bucket=s3.bucket_name,
                Key=s3.path_list[0],
            )

    else:
        # get version
        obj_version = s3.get_object_version(key=s3.path_list[0])[0]
        print("(dryrun) rename: s3://%s/%s to s3://%s/%s with version %s" % (
            s3.bucket_name,
            obj_version.get("Key"),
            s3.bucket_name,
            new_name,
            obj_version.get("VersionId"),
        ))
        if get_confirmation("Confirm?"):
            print("rename: s3://%s/%s to s3://%s/%s with version %s" % (
                s3.bucket_name,
                obj_version.get("Key"),
                s3.bucket_name,
                new_name,
                obj_version.get("VersionId"),
            ))
            # initialise empty s3_args so that get_copy_args will use all the original values
            s3_args = S3Args(s3)
            copy_object_args = get_copy_args(
                s3,
                s3.path_list[0],
                s3_args,
                extra_args=True,
                version=obj_version.get("VersionId"),
            )
            copy_source = {
                "Bucket": s3.bucket_name,
                "Key": obj_version.get("Key"),
                "VersionId": obj_version.get("VersionId"),
            }
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                s3.bucket_name,
                new_name,
                Callback=S3Progress(
                    obj_version.get("Key", ""),
                    s3.bucket_name,
                    s3.client,
                    version_id=obj_version.get("VersionId"),
                ),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
Example #14
def update_object_recursive(
    s3: S3,
    storage: bool = False,
    acl: bool = False,
    metadata: bool = False,
    encryption: bool = False,
    tagging: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
) -> None:
    """Recursive update object attributes.

    :param s3: S3 class instance
    :type s3: S3
    :param storage: update storage
    :type storage: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    s3_args = S3Args(s3)
    s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
    # check if only tags or acl is being updated
    # this way it won't create extra versions on the object
    check_result = s3_args.check_tag_acl()

    file_list = walk_s3_folder(
        s3.client,
        s3.bucket_name,
        s3.path_list[0],
        s3.path_list[0],
        [],
        exclude,
        include,
        "object",
        s3.path_list[0],
        s3.bucket_name,
    )
    if get_confirmation("Confirm?"):
        if check_result:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                if check_result.get("Tags"):
                    s3.client.put_object_tagging(
                        Bucket=s3.bucket_name,
                        Key=original_key,
                        Tagging={"TagSet": check_result.get("Tags")},
                    )
                if check_result.get("Grants"):
                    grant_args = {
                        "Bucket": s3.bucket_name,
                        "Key": original_key
                    }
                    grant_args.update(check_result.get("Grants", {}))
                    s3.client.put_object_acl(**grant_args)

        else:
            for original_key, _ in file_list:
                print("update: s3://%s/%s" % (s3.bucket_name, original_key))
                # Note: this will create new version if version is enabled
                copy_object_args = get_copy_args(s3,
                                                 original_key,
                                                 s3_args,
                                                 extra_args=True)
                copy_source = {"Bucket": s3.bucket_name, "Key": original_key}
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    s3.bucket_name,
                    original_key,
                    Callback=S3Progress(original_key, s3.bucket_name,
                                        s3.client),
                    ExtraArgs=copy_object_args,
                    Config=s3transferwrapper.transfer_config,
                )
Example #15
def bucket_s3(
    profile: bool = False,
    from_bucket: Optional[str] = None,
    to_bucket: Optional[str] = None,
    recursive: bool = False,
    sync: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    version: bool = False,
    preserve: bool = False,
) -> None:
    """Transfer file between buckets.

    Handles file transfer between buckets, or even within the same bucket.
    Glob patterns in the exclude list are processed first, then the include list explicitly includes files.

    :param profile: use a different profile for this operation
    :type profile: str, optional
    :param from_bucket: source bucket
    :type from_bucket: str, optional
    :param to_bucket: destination bucket
    :type to_bucket: str, optional
    :param recursive: recursive copy a folder
    :type recursive: bool, optional
    :param sync: sync s3 buckets
    :type sync: bool, optional
    :param exclude: glob patterns to exclude
    :type exclude: List[str], optional
    :param include: glob patterns to include
    :type include: List[str], optional
    :param version: move object versions
    :type version: bool, optional
    :param preserve: save all object's config instead of using the new bucket's settings
    :type preserve: bool, optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    s3 = S3(profile)

    # initialise variables instead of using the s3 instance directly, since two buckets are processed
    target_bucket: str = ""
    target_path: str = ""
    target_path_list: List[str] = []
    dest_bucket: str = ""
    dest_path = ""
    obj_versions: List[Dict[str, str]] = []

    search_folder: bool = recursive or sync

    if from_bucket:
        target_bucket, target_path, target_path_list = process_path_param(
            from_bucket, s3, search_folder, version=version)
    else:
        s3.set_s3_bucket(
            header="set the source bucket which contains the file to transfer")
        target_bucket = s3.bucket_name
        if search_folder:
            s3.set_s3_path()
            target_path = s3.path_list[0]
        else:
            s3.set_s3_object(multi_select=True, version=version)
            target_path_list = s3.path_list[:]
    if version and not search_folder:
        obj_versions = s3.get_object_version()
    # clean up the s3 attributes for next operation
    s3.bucket_name = ""
    s3.path_list[0] = ""

    if to_bucket:
        dest_bucket, dest_path, _ = process_path_param(to_bucket, s3, True)
    else:
        s3.set_s3_bucket(
            header="set the destination bucket where the file should be transferred")
        s3.set_s3_path()
        dest_bucket = s3.bucket_name
        dest_path = s3.path_list[0]

    if sync:
        sync_s3(
            exclude,
            include,
            "s3://%s/%s" % (target_bucket, target_path),
            "s3://%s/%s" % (dest_bucket, dest_path),
        )
    elif recursive:
        recursive_copy(
            s3,
            target_bucket,
            target_path,
            dest_bucket,
            dest_path,
            exclude,
            include,
            preserve,
        )

    elif version:
        copy_version(
            s3,
            dest_bucket,
            dest_path,
            obj_versions,
            target_bucket,
            target_path,
            preserve,
        )

    else:
        # point the s3 instance's bucket name and path at the destination bucket
        s3.bucket_name = dest_bucket
        s3.path_list[0] = dest_path
        for target_path in target_path_list:
            # process the target key path and get the destination key path
            s3_key = s3.get_s3_destination_key(target_path)
            print("(dryrun) copy: s3://%s/%s to s3://%s/%s" %
                  (target_bucket, target_path, dest_bucket, s3_key))
        if get_confirmation("Confirm?"):
            for target_path in target_path_list:
                s3_key = s3.get_s3_destination_key(target_path)
                print("copy: s3://%s/%s to s3://%s/%s" %
                      (target_bucket, target_path, dest_bucket, s3_key))
                copy_source = {"Bucket": target_bucket, "Key": target_path}
                if not preserve:
                    s3transferwrapper = S3TransferWrapper()
                    s3.client.copy(
                        copy_source,
                        dest_bucket,
                        s3_key,
                        Callback=S3Progress(target_path, target_bucket,
                                            s3.client),
                        Config=s3transferwrapper.transfer_config,
                    )
                else:
                    s3.bucket_name = target_bucket
                    copy_and_preserve(s3, target_bucket, target_path,
                                      dest_bucket, s3_key)
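
A hypothetical invocation; bucket names and paths are illustrative:

# recursively copy a folder between buckets, preserving each object's config
bucket_s3(
    from_bucket="src-bucket/data/",
    to_bucket="dst-bucket/backup/",
    recursive=True,
    preserve=True,
)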
Example #16
    def _set_explicit_ACL(
            self,
            original: bool = False,
            version: Optional[List[Dict[str, str]]] = None) -> None:
        """Set explicit ACL for grantees and permissions.

        Get the user id/email first, then display fzf with multi_select
        enabled to select permissions.

        example version: [{"Key": key, "VersionId": versionid}]

        :param original: display original value
        :type original: bool, optional
        :param version: version of the object
        :type version: List[Dict[str, str]], optional
        """
        original_acl: Dict[str, List[str]] = {
            "FULL_CONTROL": [],
            "WRITE_ACP": [],
            "READ": [],
            "READ_ACP": [],
        }

        # get original values
        if original:
            acls = None
            if not version:
                acls = self.s3.client.get_object_acl(
                    Bucket=self.s3.bucket_name, Key=self.s3.path_list[0])
            elif len(version) == 1:
                acls = self.s3.client.get_object_acl(
                    Bucket=self.s3.bucket_name,
                    Key=self.s3.path_list[0],
                    VersionId=version[0].get("VersionId"),
                )
            if acls:
                owner = acls["Owner"]["ID"]
                for grantee in acls.get("Grants", []):
                    if grantee["Grantee"].get("EmailAddress"):
                        original_acl[grantee["Permission"]].append(
                            "%s=%s" % ("emailAddress",
                                       grantee["Grantee"].get("EmailAddress")))
                    elif (grantee["Grantee"].get("ID")
                          and grantee["Grantee"].get("ID") != owner):
                        original_acl[grantee["Permission"]].append(
                            "%s=%s" % ("id", grantee["Grantee"].get("ID")))
                    elif grantee["Grantee"].get("URI"):
                        original_acl[grantee["Permission"]].append(
                            "%s=%s" % ("uri", grantee["Grantee"].get("URI")))

                print("Current ACL:")
                print(json.dumps(original_acl, indent=4, default=str))
                print("Note: fzf.aws cannot preserve previous ACL permission")
                if not get_confirmation("Continue?"):
                    return

        # get what permission to set
        fzf = Pyfzf()
        fzf.append_fzf("GrantFullControl\n")
        fzf.append_fzf("GrantRead\n")
        fzf.append_fzf("GrantReadACP\n")
        fzf.append_fzf("GrantWriteACP\n")
        results: List[str] = list(
            fzf.execute_fzf(empty_allow=True, print_col=1, multi_select=True))
        if not results:
            print(
                "No permission is set, default ACL settings of the bucket would be used"
            )
        else:
            for result in results:
                print("Set permisstion for %s" % result)
                print(
                    "Enter a list of either the Canonical ID, Account email, or Predefined Group URL to grant permission (Separate by comma)"
                )
                print(
                    "Format: id=XXX,id=XXX,[email protected],uri=http://acs.amazonaws.com/groups/global/AllUsers"
                )
                if original:
                    print(80 * "-")
                    if result == "GrantFullControl" and original_acl.get(
                            "FULL_CONTROL"):
                        print("Orignal: %s" %
                              ",".join(original_acl.get("FULL_CONTROL", [])))
                    elif result == "GrantRead" and original_acl.get("READ"):
                        print("Orignal: %s" %
                              ",".join(original_acl.get("READ", [])))
                    elif result == "GrantReadACP" and original_acl.get(
                            "READ_ACP"):
                        print("Orignal: %s" %
                              ",".join(original_acl.get("READ_ACP", [])))
                    elif result == "GrantWriteACP" and original_acl.get(
                            "WRITE_ACP"):
                        print("Orignal: %s" %
                              ",".join(original_acl.get("WRITE_ACP", [])))
                accounts = input("Accounts: ")
                print(80 * "-")
                self._extra_args[result] = str(accounts)
Example #17
def copy_and_preserve(
    s3: S3,
    target_bucket: str,
    target_path: str,
    dest_bucket: str,
    dest_path: str,
    version: Optional[str] = None,
) -> None:
    """Copy object to other buckets and preserve previous details.

    :param s3: S3 instance, make sure contains bucket name
    :type s3: S3
    :param target_bucket: source bucket for upload
    :type target_bucket: str
    :param target_path: the object key to copy
    :type target_path: str
    :param dest_bucket: destination bucket
    :type dest_bucket: str
    :param dest_path: destination key name
    :type dest_path: str
    :param version: versionID of the object
    :type version: str, optional
    :raises ClientError: raised when copying a KMS-encrypted file; handled internally
    """
    copy_source: Dict[str, str] = {"Bucket": target_bucket, "Key": target_path}
    if version:
        copy_source["VersionId"] = version
    s3_args = S3Args(s3)
    copy_object_args = get_copy_args(s3,
                                     target_path,
                                     s3_args,
                                     extra_args=True,
                                     version=version)

    # limit to one retry
    attempt_count: int = 0
    while attempt_count < 2:
        try:
            attempt_count += 1
            s3transferwrapper = S3TransferWrapper()
            s3.client.copy(
                copy_source,
                dest_bucket,
                dest_path,
                Callback=S3Progress(target_path, s3.bucket_name, s3.client),
                ExtraArgs=copy_object_args,
                Config=s3transferwrapper.transfer_config,
            )
            break
        except ClientError as e:
            error_pattern = r"^.*\((.*)\).*$"
            error_name = re.match(error_pattern, str(e)).group(1)
            if error_name == "AccessDenied":
                print(80 * "-")
                print(e)
                print(
                    "You may have ACL policies that enable public access, but "
                    "the destination bucket is blocking all public access. "
                    "You need to either uncheck 'block all public access', "
                    "update your object ACL settings, retry without the -p flag, "
                    "or continue without preserving the ACL."
                )
                if not get_confirmation("Continue without preserving ACL?"):
                    raise
                copy_object_args.pop("GrantFullControl", None)
                copy_object_args.pop("GrantRead", None)
                copy_object_args.pop("GrantReadACP", None)
                copy_object_args.pop("GrantWriteACP", None)
            # handle the case when a KMS-encrypted object moves to a bucket in a different region
            elif error_name == "KMS.NotFoundException":
                copy_object_args["ServerSideEncryption"] = "AES256"
                copy_object_args.pop("SSEKMSKeyId", None)
            else:
                raise
Example #18
def object_s3(
    profile: Union[str, bool] = False,
    bucket: Optional[str] = None,
    recursive: bool = False,
    version: bool = False,
    allversion: bool = False,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    name: bool = False,
    storage: bool = False,
    encryption: bool = False,
    metadata: bool = False,
    tagging: bool = False,
    acl: bool = False,
) -> None:
    """Update selected object settings.

    Displays a menu based on the recursive and version requirements.
    If name is true, only rename is handled.

    :param profile: use a different profile for this operation
    :type profile: Union[str, bool], optional
    :param bucket: bucket name that contains the object
    :type bucket: str, optional
    :param recursive: recursively update object attributes
    :type recursive: bool, optional
    :param version: update a version of the object
    :type version: bool, optional
    :param allversion: update all versions of an object
    :type allversion: bool, optional
    :param exclude: glob pattern to exclude
    :type exclude: List[str], optional
    :param include: glob pattern to include
    :type include: List[str], optional
    :param name: update name
    :type name: bool, optional
    :param storage: update storage
    :type storage: bool, optional
    :param encryption: update encryption
    :type encryption: bool, optional
    :param metadata: update metadata
    :type metadata: bool, optional
    :param tagging: update tagging
    :type tagging: bool, optional
    :param acl: update acl
    :type acl: bool, optional
    """
    if exclude is None:
        exclude = []
    if include is None:
        include = []

    if allversion:
        version = True

    s3 = S3(profile)
    s3.set_bucket_and_path(bucket)
    if not s3.bucket_name:
        s3.set_s3_bucket()
    if recursive and not s3.path_list[0]:
        s3.set_s3_path()
    elif name and not s3.path_list[0]:
        s3.set_s3_object(version)
    elif not s3.path_list[0]:
        s3.set_s3_object(version, multi_select=True)

    # handle rename
    if name:
        update_object_name(s3, version)

    elif recursive:
        update_object_recursive(s3, storage, acl, metadata, encryption,
                                tagging, exclude, include)

    elif version:
        update_object_version(s3, allversion, acl, tagging)

    else:
        # update single object
        s3_args = S3Args(s3)
        s3_args.set_extra_args(storage, acl, metadata, encryption, tagging)
        # check if only tags or acl is being updated
        # this way it won't create extra versions on the object, if versioning is enabled
        check_result = s3_args.check_tag_acl()

        for s3_key in s3.path_list:
            print("(dryrun) update: s3://%s/%s" % (s3.bucket_name, s3_key))
        if get_confirmation("Confirm?"):
            for s3_key in s3.path_list:
                print("update: s3://%s/%s" % (s3.bucket_name, s3_key))
                if check_result:
                    if check_result.get("Tags"):
                        s3.client.put_object_tagging(
                            Bucket=s3.bucket_name,
                            Key=s3_key,
                            Tagging={"TagSet": check_result.get("Tags")},
                        )
                    if check_result.get("Grants"):
                        grant_args = {"Bucket": s3.bucket_name, "Key": s3_key}
                        grant_args.update(check_result.get("Grants", {}))
                        s3.client.put_object_acl(**grant_args)

                else:
                    # Note: this will create new version if version is enabled
                    copy_object_args = get_copy_args(s3,
                                                     s3_key,
                                                     s3_args,
                                                     extra_args=True)
                    copy_source = {"Bucket": s3.bucket_name, "Key": s3_key}
                    s3transferwrapper = S3TransferWrapper()
                    s3.client.copy(
                        copy_source,
                        s3.bucket_name,
                        s3_key,
                        Callback=S3Progress(s3_key, s3.bucket_name, s3.client),
                        ExtraArgs=copy_object_args,
                        Config=s3transferwrapper.transfer_config,
                    )
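
A hypothetical invocation; the bucket and key are illustrative:

# rename a single object, prompting interactively for the new name
object_s3(bucket="my-bucket/photos/cat.jpg", name=True)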
Example #19
def changeset_stack(
    profile: Union[str, bool] = False,
    region: Union[str, bool] = False,
    replace: bool = False,
    local_path: Union[str, bool] = False,
    root: bool = False,
    wait: bool = False,
    info: bool = False,
    execute: bool = False,
    delete: bool = False,
    extra: bool = False,
    bucket: Optional[str] = None,
    version: Union[str, bool] = False,
) -> None:
    """Handle changeset actions.

    Main function for interacting with changesets; arguments
    control the actions.

    This function uses update_stack to handle all the dirty
    work, as both functions process cloudformation arguments
    and share the same parameters.

    :param profile: use a different profile for this operation
    :type profile: Union[bool, str], optional
    :param region: use a different region for this operation
    :type region: Union[bool, str], optional
    :param replace: replace the template during update
    :type replace: bool, optional
    :param local_path: Select a template from local machine
    :type local_path: Union[bool, str], optional
    :param root: Search local file from root directory
    :type root: bool, optional
    :param wait: wait for stack to be completed before exiting the program
    :type wait: bool, optional
    :param info: display result of a changeset
    :type info: bool, optional
    :param execute: execute changeset
    :type execute: bool, optional
    :param delete: delete changeset
    :type delete: bool, optional
    :param extra: configure extra options for the stack, (tags, IAM, termination protection etc..)
    :type extra: bool, optional
    :param bucket: specify a bucket/bucketpath to skip s3 selection
    :type bucket: str, optional
    :param version: use previous version of template in s3 bucket
    :type version: Union[bool, str], optional
    :raises NoNameEntered: if no changeset name is entered
    """
    cloudformation = Cloudformation(profile, region)
    cloudformation.set_stack()

    # if not creating new changeset
    if info or execute or delete:
        fzf = Pyfzf()
        response: Dict[str, Any] = cloudformation.client.list_change_sets(
            StackName=cloudformation.stack_name)
        # get the changeset name
        fzf.process_list(
            response.get("Summaries", []),
            "ChangeSetName",
            "StackName",
            "ExecutionStatus",
            "Status",
            "Description",
        )

        if info:
            selected_changeset = str(fzf.execute_fzf())
            describe_changes(cloudformation, selected_changeset)

        # execute the change set
        elif execute:
            selected_changeset = fzf.execute_fzf()
            if get_confirmation("Execute changeset %s?" % selected_changeset):
                response = cloudformation.client.execute_change_set(
                    ChangeSetName=selected_changeset,
                    StackName=cloudformation.stack_name,
                )
                cloudformation.wait("stack_update_complete",
                                    "Wating for stack to be updated ...")
                print("Stack updated")

        elif delete:
            selected_changeset = fzf.execute_fzf(multi_select=True)
            for changeset in selected_changeset:
                print("(dryrun) Delete changeset %s" % changeset)
            if get_confirmation("Confirm?"):
                for changeset in selected_changeset:
                    cloudformation.client.delete_change_set(
                        ChangeSetName=changeset,
                        StackName=cloudformation.stack_name)

    else:
        changeset_name = input("Enter name of this changeset: ")
        if not changeset_name:
            raise NoNameEntered("No changeset name specified")
        changeset_description = input("Description: ")
        # since this is almost the same operation as updating a stack,
        # let update_stack handle it, but return the update details instead of executing
        cloudformation_args = update_stack(
            cloudformation.profile,
            cloudformation.region,
            replace,
            local_path,
            root,
            wait,
            extra,
            bucket,
            version,
            dryrun=True,
            cloudformation=cloudformation,
        )
        cloudformation_args[
            "cloudformation_action"] = cloudformation.client.create_change_set
        cloudformation_args["ChangeSetName"] = changeset_name
        if changeset_description:
            cloudformation_args["Description"] = changeset_description

        response = cloudformation.execute_with_capabilities(
            **cloudformation_args)

        response.pop("ResponseMetadata", None)
        print(json.dumps(response, indent=4, default=str))
        print(80 * "-")
        print("Changeset create initiated")

        if wait:
            cloudformation.wait(
                "change_set_create_complete",
                "Wating for changset to be created ...",
                ChangeSetName=changeset_name,
            )
            print("Changeset created")
            describe_changes(cloudformation, changeset_name)
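
A hypothetical invocation; the stack is selected interactively via fzf:

# list the stack's changesets and execute the chosen one
changeset_stack(execute=True)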
Example #20
def copy_version(
    s3: S3,
    dest_bucket: str,
    dest_path: str,
    obj_versions: List[Dict[str, str]],
    target_bucket: str,
    target_path: str,
    preserve: bool,
) -> None:
    """Copy versions of object to other bucket.

    :param s3: S3 instance
    :type s3: S3
    :param dest_bucket: the destination bucket to transfer the object
    :type dest_bucket: str
    :param dest_path: the destination path in the destination bucket
    :type dest_path: str
    :param obj_versions: the selected versions through get_object_version()
    :type obj_versions: List[Dict[str, str]]
    :param target_bucket: the bucket that contains the object to transfer
    :type target_bucket: str
    :param target_path: the object path of the object to be transferred
    :type target_path: str
    :param preserve: preserve previous object details after transfer
    :type preserve: bool
    """
    # set s3 attributes for getting destination key
    s3.bucket_name = dest_bucket
    s3.path_list[0] = dest_path
    for obj_version in obj_versions:
        s3_key = s3.get_s3_destination_key(obj_version.get("Key", ""))
        print("(dryrun) copy: s3://%s/%s to s3://%s/%s with version %s" % (
            target_bucket,
            obj_version.get("Key"),
            dest_bucket,
            s3_key,
            obj_version.get("VersionId"),
        ))

    if get_confirmation("Confirm?"):
        for obj_version in obj_versions:
            s3_key = s3.get_s3_destination_key(obj_version.get("Key", ""))
            print("copy: s3://%s/%s to s3://%s/%s with version %s" % (
                target_bucket,
                obj_version.get("Key"),
                dest_bucket,
                s3_key,
                obj_version.get("VersionId"),
            ))
            copy_source = {
                "Bucket": target_bucket,
                "Key": obj_version.get("Key"),
                "VersionId": obj_version.get("VersionId"),
            }
            if not preserve:
                s3transferwrapper = S3TransferWrapper()
                s3.client.copy(
                    copy_source,
                    dest_bucket,
                    s3_key,
                    Callback=S3Progress(
                        obj_version.get("Key", ""),
                        target_bucket,
                        s3.client,
                        version_id=obj_version.get("VersionId"),
                    ),
                    Config=s3transferwrapper.transfer_config,
                )
            else:
                s3.bucket_name = target_bucket
                copy_and_preserve(
                    s3,
                    target_bucket,
                    obj_version.get("Key", ""),
                    dest_bucket,
                    s3_key,
                    version=obj_version.get("VersionId"),
                )