def _add_additional_subscribers(self, subscribers, fileinfo):
    subscribers.append(ProvideSizeSubscriber(fileinfo.size))
    if self._should_inject_content_type():
        subscribers.append(ProvideCopyContentTypeSubscriber())
    if self._cli_params.get('is_move', False):
        subscribers.append(DeleteCopySourceObjectSubscriber(
            fileinfo.source_client))
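The snippet above comes from the copy path: it provides the transfer size up front, optionally injects a Content-Type, and, when the command is a move, deletes the copy source after the copy succeeds. The next snippet is the download-side counterpart, which additionally creates any missing local directories and preserves the source object's last-modified time.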
def _add_additional_subscribers(self, subscribers, fileinfo):
    subscribers.append(ProvideSizeSubscriber(fileinfo.size))
    subscribers.append(DirectoryCreatorSubscriber())
    subscribers.append(ProvideLastModifiedTimeSubscriber(
        fileinfo.last_update, self._result_queue))
    if self._cli_params.get('is_move', False):
        subscribers.append(DeleteSourceObjectSubscriber(
            fileinfo.source_client))
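When `is_move` is set, the source object has to be removed once the download succeeds. A minimal sketch of such a delete-source subscriber on top of s3transfer's subscriber hooks; the real awscli class routes failures back through the transfer future, so treat the details below as illustrative:

from s3transfer.subscribers import BaseSubscriber

class DeleteSourceObjectSubscriber(BaseSubscriber):
    """Delete the source object after the transfer completes successfully."""

    def __init__(self, client):
        self._client = client

    def on_done(self, future, **kwargs):
        try:
            future.result()  # re-raises if the transfer failed
        except Exception:
            return  # keep the source when the transfer did not succeed
        # For a download, call_args.bucket/key identify the source object.
        call_args = future.meta.call_args
        self._client.delete_object(Bucket=call_args.bucket,
                                   Key=call_args.key)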
Example #3
    def _upload(self, manager, bucket, key):
        """
        Upload stdin to the specified location.

        :type manager: s3transfer.manager.TransferManager
        :param manager: The transfer manager to use for the upload.

        :type bucket: str
        :param bucket: The bucket to upload the stream to.

        :type key: str
        :param key: The name of the key to upload the stream to.

        :return: A CommandResult representing the upload status.
        """
        expected_size = self.params.get('expected_size', None)
        subscribers = None
        if expected_size is not None:
            # `expected_size` comes in as a string
            expected_size = int(expected_size)

            # set the size of the transfer if we know it ahead of time.
            subscribers = [ProvideSizeSubscriber(expected_size)]

            # TODO: remove when this happens in s3transfer
            # If we have the expected size, we can calculate an appropriate
            # chunksize based on max parts and chunksize limits
            chunksize = find_chunksize(expected_size,
                                       self.config.multipart_chunksize)
        else:
            # TODO: remove when this happens in s3transfer
            # Otherwise, we can still adjust for chunksize limits
            chunksize = adjust_chunksize_to_upload_limits(
                self.config.multipart_chunksize)
        self.config.multipart_chunksize = chunksize

        params = {}
        RequestParamsMapper.map_put_object_params(params, self.params)

        fileobj = NonSeekableStream(binary_stdin)
        with manager:
            future = manager.upload(fileobj=fileobj,
                                    bucket=bucket,
                                    key=key,
                                    extra_args=params,
                                    subscribers=subscribers)

            return self._process_transfer(future)
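Because stdin is wrapped in a NonSeekableStream, the transfer manager cannot determine the payload size on its own; ProvideSizeSubscriber supplies it. A minimal sketch of the subscriber's shape, assuming s3transfer's BaseSubscriber interface:

from s3transfer.subscribers import BaseSubscriber

class ProvideSizeSubscriber(BaseSubscriber):
    """Set the expected transfer size before the transfer starts."""

    def __init__(self, size):
        self.size = size

    def on_queued(self, future, **kwargs):
        # Record the size on the future's metadata so the manager can
        # decide between a single PUT and a multipart upload.
        future.meta.provide_transfer_size(self.size)

Providing the size early is also what lets the chunksize adjustment above pick a part size that stays within S3's 10,000-part limit.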
Example #4
def test_size_set(self):
    self.transfer_meta.provide_transfer_size(5)
    subscriber = ProvideSizeSubscriber(10)
    subscriber.on_queued(self.transfer_future)
    self.assertEqual(self.transfer_meta.size, 10)
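The test confirms that the size supplied by the subscriber (10) overrides a size that was already recorded on the transfer metadata (5).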
def _add_additional_subscribers(self, subscribers, fileinfo):
    expected_size = self._cli_params.get('expected_size', None)
    if expected_size is not None:
        subscribers.append(ProvideSizeSubscriber(int(expected_size)))
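This variant handles streaming uploads, where the size cannot be read from a local file and is only set when the user supplies it explicitly. With the AWS CLI that is the `--expected-size` option (bucket and key below are placeholders):

    aws s3 cp - s3://mybucket/mykey --expected-size 54760833024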
Example #7
def _add_additional_subscribers(self, subscribers, fileinfo):
    subscribers.append(ProvideSizeSubscriber(fileinfo.size))
    if self._should_inject_content_type():
        subscribers.append(ProvideCopyContentTypeSubscriber())
Example #8
def _add_additional_subscribers(self, subscribers, fileinfo):
    subscribers.append(ProvideSizeSubscriber(fileinfo.size))
    subscribers.append(DirectoryCreatorSubscriber())
    subscribers.append(
        ProvideLastModifiedTimeSubscriber(fileinfo.last_update,
                                          self._result_queue))
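A sketch of what ProvideLastModifiedTimeSubscriber does after a successful download: stamp the local file with the source object's LastModified time. The real awscli class also reports errors through the result queue; the version below is illustrative only:

import os
import time

from s3transfer.subscribers import BaseSubscriber

class ProvideLastModifiedTimeSubscriber(BaseSubscriber):
    """Apply the source's last-modified time to the downloaded file."""

    def __init__(self, last_modified_time, result_queue):
        self._last_modified_time = last_modified_time
        self._result_queue = result_queue

    def on_done(self, future, **kwargs):
        try:
            future.result()  # skip files whose download failed
        except Exception:
            return
        filename = future.meta.call_args.fileobj
        timestamp = time.mktime(self._last_modified_time.timetuple())
        os.utime(filename, (timestamp, timestamp))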