예제 #1
0
파일: test_writer.py 프로젝트: hzy/boto
    def check_no_resume(self, data, resume_set=frozenset()):
        """Upload *data* via resume_file_upload and verify vault calls.

        Parts whose indexes appear in *resume_set* are treated as already
        uploaded: their tree hashes are precomputed and handed to
        resume_file_upload, and the mock vault is expected to receive
        upload calls only for the remaining parts.

        :type data: str
        :param data: complete payload being uploaded.

        :type resume_set: iterable of int
        :param resume_set: indexes of parts considered already uploaded.
            Default is an immutable empty frozenset (a mutable ``set()``
            default is a Python pitfall even when never mutated).
        """
        fobj = StringIO(data)
        # Precompute the tree hash of each "already uploaded" part so the
        # uploader can skip it.
        part_hash_map = {}
        for part_index in resume_set:
            start = self.part_size * part_index
            end = start + self.part_size
            part_data = data[start:end]
            part_hash_map[part_index] = tree_hash(
                chunk_hashes(part_data, self.chunk_size))

        resume_file_upload(
            self.vault, sentinel.upload_id, self.part_size, fobj,
            part_hash_map, self.chunk_size)

        upload_part_calls, data_tree_hashes = calculate_mock_vault_calls(
            data, self.part_size, self.chunk_size)
        # Only parts NOT in resume_set should actually have been uploaded.
        resume_upload_part_calls = [
            call for part_index, call in enumerate(upload_part_calls)
                    if part_index not in resume_set]
        check_mock_vault_calls(
            self.vault, resume_upload_part_calls, data_tree_hashes, len(data))
예제 #2
0
    def check_no_resume(self, data, resume_set=frozenset()):
        """Upload *data* via resume_file_upload and verify vault calls.

        Parts whose indexes appear in *resume_set* are treated as already
        uploaded: their tree hashes are precomputed and handed to
        resume_file_upload, and the mock vault is expected to receive
        upload calls only for the remaining parts.

        :type data: str
        :param data: complete payload being uploaded.

        :type resume_set: iterable of int
        :param resume_set: indexes of parts considered already uploaded.
            Default is an immutable empty frozenset (a mutable ``set()``
            default is a Python pitfall even when never mutated).
        """
        fobj = StringIO(data)
        # Precompute the tree hash of each "already uploaded" part so the
        # uploader can skip it.
        part_hash_map = {}
        for part_index in resume_set:
            start = self.part_size * part_index
            end = start + self.part_size
            part_data = data[start:end]
            part_hash_map[part_index] = tree_hash(
                chunk_hashes(part_data, self.chunk_size))

        resume_file_upload(self.vault, sentinel.upload_id, self.part_size,
                           fobj, part_hash_map, self.chunk_size)

        upload_part_calls, data_tree_hashes = calculate_mock_vault_calls(
            data, self.part_size, self.chunk_size)
        # Only parts NOT in resume_set should actually have been uploaded.
        resume_upload_part_calls = [
            call for part_index, call in enumerate(upload_part_calls)
            if part_index not in resume_set
        ]
        check_mock_vault_calls(self.vault, resume_upload_part_calls,
                               data_tree_hashes, len(data))
예제 #3
0
    def resume_archive_from_file(self,
                                 upload_id,
                                 filename=None,
                                 file_obj=None):
        """Resume upload of a file already part-uploaded to Glacier.

        The resumption of an upload where the part-uploaded section is empty
        is a valid degenerate case that this function can handle.

        One and only one of filename or file_obj must be specified.

        :type upload_id: str
        :param upload_id: existing Glacier upload id of upload being resumed.

        :type filename: str
        :param filename: file to open for resume

        :type file_obj: file
        :param file_obj: file-like object containing local data to resume.
            This must read from the start of the entire upload, not just
            from the point being resumed. Use file_obj.seek(0) to achieve
            this if necessary.

        :rtype: str
        :return: The archive id of the newly created archive

        """
        part_list_response = self.list_all_parts(upload_id)
        part_size = part_list_response['PartSizeInBytes']

        # Map each already-uploaded part's index to its raw (binary) tree
        # hash so the uploader can verify and skip those parts.
        part_hash_map = {}
        for part_desc in part_list_response['Parts']:
            part_index = self._range_string_to_part_index(
                part_desc['RangeInBytes'], part_size)
            part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'],
                                           'hex_codec')
            part_hash_map[part_index] = part_tree_hash

        if file_obj:
            # Caller supplied the file object; caller owns closing it.
            return resume_file_upload(self, upload_id, part_size, file_obj,
                                      part_hash_map)

        # We opened the file ourselves, so make sure it is closed once the
        # upload finishes (the previous version leaked this handle).
        with open(filename, "rb") as file_obj:
            return resume_file_upload(self, upload_id, part_size, file_obj,
                                      part_hash_map)
예제 #4
0
파일: vault.py 프로젝트: CashStar/boto
    def resume_archive_from_file(self, upload_id, filename=None,
                                 file_obj=None):
        """Resume upload of a file already part-uploaded to Glacier.

        The resumption of an upload where the part-uploaded section is empty
        is a valid degenerate case that this function can handle.

        One and only one of filename or file_obj must be specified.

        :type upload_id: str
        :param upload_id: existing Glacier upload id of upload being resumed.

        :type filename: str
        :param filename: file to open for resume

        :type file_obj: file
        :param file_obj: file-like object containing local data to resume.
            This must read from the start of the entire upload, not just
            from the point being resumed. Use file_obj.seek(0) to achieve
            this if necessary.

        :rtype: str
        :return: The archive id of the newly created archive

        """
        part_list_response = self.list_all_parts(upload_id)
        part_size = part_list_response['PartSizeInBytes']

        # Map each already-uploaded part's index to its raw (binary) tree
        # hash so the uploader can verify and skip those parts.
        part_hash_map = {}
        for part_desc in part_list_response['Parts']:
            part_index = self._range_string_to_part_index(
                part_desc['RangeInBytes'], part_size)
            part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec')
            part_hash_map[part_index] = part_tree_hash

        if file_obj:
            # Caller supplied the file object; caller owns closing it.
            return resume_file_upload(
                self, upload_id, part_size, file_obj, part_hash_map)

        # We opened the file ourselves, so make sure it is closed once the
        # upload finishes (the previous version leaked this handle).
        with open(filename, "rb") as file_obj:
            return resume_file_upload(
                self, upload_id, part_size, file_obj, part_hash_map)
예제 #5
0
파일: test_writer.py 프로젝트: hzy/boto
 def test_returns_archive_id(self):
     """resume_file_upload should return the vault's archive id unchanged."""
     archive_id = resume_file_upload(
         self.vault, sentinel.upload_id, self.part_size, StringIO('1'), {},
         self.chunk_size)
     # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
     self.assertEqual(sentinel.archive_id, archive_id)
예제 #6
0
 def test_returns_archive_id(self):
     """resume_file_upload should return the vault's archive id unchanged."""
     archive_id = resume_file_upload(self.vault,
                                     sentinel.upload_id, self.part_size,
                                     StringIO('1'), {}, self.chunk_size)
     # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
     self.assertEqual(sentinel.archive_id, archive_id)