Example #1
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        # chunk_size must be set before it is used to compute the number of parts
        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024

        if self.file.name != "<stdin>":
            size_left = file_size = os.stat(self.file.name)[ST_SIZE]
            # integer ceiling: how many chunk_size parts are needed to cover file_size
            nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % (self.file.name))

        seq = 1
        if self.file.name != "<stdin>":
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels)
                except:
                    error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
                    self.abort_upload()
                    raise
                seq += 1
        else:
            while True:
                buffer = self.file.read(self.chunk_size)
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = len(buffer)
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(buffer) == 0: # EOF
                    break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, buffer)
                except:
                    error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
                    self.abort_upload()
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)
Example #2
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        size_left = file_size = os.stat(self.file.name)[ST_SIZE]
        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
        debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        # Pool(1000) is presumably a green-thread pool (e.g. gevent.pool.Pool);
        # its import is outside this snippet
        pool = Pool(1000)

        seq = 1
        while size_left > 0:
            offset = self.chunk_size * (seq - 1)
            current_chunk_size = min(file_size - offset, self.chunk_size)
            size_left -= current_chunk_size
            labels = {
                'source' : unicodise(self.file.name),
                'destination' : unicodise(self.uri.uri()),
                'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
            }
            pool.spawn(self.upload_part, seq, offset, current_chunk_size, labels)
            seq += 1

        try:
            pool.join()
        except Exception:
            error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
            self.abort_upload()
            raise
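Example #2 replaces the sequential loop with a pool that spawns one worker per part and joins at the end. A rough standalone sketch of the same idea using the standard library's thread pool; upload_part here is a hypothetical callable standing in for MultiPartUpload.upload_part, not the s3cmd API itself:

from concurrent.futures import ThreadPoolExecutor

def upload_parts_concurrently(parts, upload_part, max_workers=8):
    """Upload (seq, offset, size) parts in parallel; re-raise the first failure.

    Sketch only: upload_part is a hypothetical callable, not s3cmd's own API.
    """
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(upload_part, seq, offset, size): seq
                   for seq, offset, size in parts}
        for future in futures:
            future.result()  # blocks; re-raises any exception from that part

In Example #2 the equivalent "wait for everything" step is pool.join(), and a failure there triggers abort_upload() before the exception is re-raised.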
Example #3
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError(
                "Attempting to use a multipart upload that has not been initiated."
            )

        size_left = file_size = os.stat(self.file.name)[ST_SIZE]
        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
        debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))

        seq = 1
        while size_left > 0:
            offset = self.chunk_size * (seq - 1)
            current_chunk_size = min(file_size - offset, self.chunk_size)
            size_left -= current_chunk_size
            labels = {
                'source' : unicodise(self.file.name),
                'destination' : unicodise(self.uri.uri()),
                'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable=True))
            }
            try:
                self.upload_part(seq, offset, current_chunk_size, labels)
            except:
                error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
                self.abort_upload()
                raise
            seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)
Example #4
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        filename = unicodise(self.file.name)

        if filename != "<stdin>":
                size_left = file_size = os.stat(deunicodise(filename))[ST_SIZE]
                nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
                debug("MultiPart: Uploading %s in %d parts" % (filename, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % filename)

        remote_statuses = dict()
        if self.s3.config.put_continue:
            remote_statuses = self.get_parts_information(self.uri, self.upload_id)

        seq = 1
        if filename != "<stdin>":
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort the upload, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            while True:
                buffer = self.file.read(self.chunk_size)
                offset = 0 # send from start of the buffer
                current_chunk_size = len(buffer)
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(buffer) == 0: # EOF
                    break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)
Example #5
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError(
                "Attempting to use a multipart upload that has not been initiated."
            )

        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024

        if self.file.name != "<stdin>":
            size_left = file_size = os.stat(self.file.name)[ST_SIZE]
            nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % (self.file.name))

        remote_statuses = defaultdict(lambda: None)
        if self.s3.config.put_continue:
            remote_statuses = self.get_parts_information(
                self.uri, self.upload_id)

        seq = 1
        if self.file.name != "<stdin>":
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable=True))
                }
                try:
                    self.upload_part(seq,
                                     offset,
                                     current_chunk_size,
                                     labels,
                                     remote_status=remote_statuses[seq])
                except:
                    error(
                        u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort the upload, or\n  %s --upload-id %s put ...\nto continue the upload."
                        % (self.file.name, seq, sys.argv[0], self.uri,
                           self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            while True:
                buffer = self.file.read(self.chunk_size)
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = len(buffer)
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable=True))
                }
                if len(buffer) == 0:  # EOF
                    break
                try:
                    self.upload_part(seq,
                                     offset,
                                     current_chunk_size,
                                     labels,
                                     buffer,
                                     remote_status=remote_statuses[seq])
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, sys.argv[0], self.uri,
                             self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)
Example #6
    def upload_all_parts(self):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024

        if self.file.name != "<stdin>":
            size_left = file_size = os.stat(self.file.name)[ST_SIZE]
            nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % (self.file.name))

        remote_statuses = dict()
        if self.s3.config.put_continue:
            remote_statuses = self.get_parts_information(self.uri, self.upload_id)

        seq = 1
        if self.file.name != "<stdin>":
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort the upload, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            if self.s3.config.encrypt == True:
                # encrypt stdin through GPG and upload the encrypted stream instead
                args = {
                    "gpg_command" : self.s3.config.gpg_command,
                    "passphrase" : self.s3.config.gpg_passphrase,
                }
                info(u"Encrypting stdin...")
                command = resolve_list(self.s3.config.gpg_stream.split(" "), args)
                debug("GPG command: " + " ".join(command))
                p = subprocess.Popen(command, stdin = sys.stdin, stdout = subprocess.PIPE)
                stream = p.stdout
            else:
                stream = self.file
            cont = True
            while cont:
                buffer = stream.read(self.chunk_size)
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = len(buffer)
                labels = {
                    'source' : unicodise(self.file.name),
                    'destination' : unicodise(self.uri.uri()),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(buffer) == 0: # EOF
                    if self.s3.config.encrypt == True:
                        # GPG may still hold buffered output: wait for it to exit,
                        # drain the rest of the pipe and make this the last pass
                        p.wait()
                        buffer = stream.read()
                        cont = False
                        if len(buffer) == 0:
                            break
                    else:
                        break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, self.uri, sys.argv[0], self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)