Example #1
    def calculate_tree_hash(self, bytestring):
        start = time.time()
        calculated = bytes_to_hex(tree_hash(chunk_hashes(bytestring)))
        end = time.time()
        logging.debug("Tree hash calc time for length %s: %s", len(bytestring),
                      end - start)
        return calculated
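
Every example on this page builds on boto's tree-hash helpers (tree_hash, chunk_hashes, and bytes_to_hex, found in boto.glacier.utils in boto's source tree). For reference, a minimal sketch of the tree-hash reduction itself, pairwise SHA-256 over the chunk digests with an odd leftover carried up unchanged, could look like the code below. This is an illustration of the algorithm, not boto's implementation.

import hashlib

def reference_tree_hash(chunk_digests):
    # Reduce a list of SHA-256 chunk digests to a single root digest.
    # Assumes chunk_digests contains at least one digest.
    hashes = list(chunk_digests)
    while len(hashes) > 1:
        paired = [hashlib.sha256(hashes[i] + hashes[i + 1]).digest()
                  for i in range(0, len(hashes) - 1, 2)]
        if len(hashes) % 2:
            paired.append(hashes[-1])  # odd digest is carried up unchanged
        hashes = paired
    return hashes[0]
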
Example #2
File: writer.py Project: 0t3dWCE/boto
def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
                       chunk_size=_ONE_MEGABYTE):
    """Resume upload of a file already part-uploaded to Glacier.

    The resumption of an upload where the part-uploaded section is empty is a
    valid degenerate case that this function can handle. In this case,
    part_hash_map should be an empty dict.

    :param vault: boto.glacier.vault.Vault object.
    :param upload_id: existing Glacier upload id of upload being resumed.
    :param part_size: part size of existing upload.
    :param fobj: file object containing local data to resume. This must read
        from the start of the entire upload, not just from the point being
        resumed. Use fobj.seek(0) to achieve this if necessary.
    :param part_hash_map: {part_index: part_tree_hash, ...} of data already
        uploaded. Each supplied part_tree_hash will be verified and the part
        re-uploaded if there is a mismatch.
    :param chunk_size: chunk size of tree hash calculation. This must be
        1 MiB for Amazon.

    """
    uploader = _Uploader(vault, upload_id, part_size, chunk_size)
    for part_index, part_data in enumerate(
            generate_parts_from_fobj(fobj, part_size)):
        part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
        if (part_index not in part_hash_map or
                part_hash_map[part_index] != part_tree_hash):
            uploader.upload_part(part_index, part_data)
        else:
            uploader.skip_part(part_index, part_tree_hash, len(part_data))
    uploader.close()
    return uploader.archive_id
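
Before calling resume_file_upload, the caller has to rebuild part_hash_map from whatever was uploaded previously. A hedged sketch of that step is below; the file name is hypothetical, and vault and upload_id are assumed to come from the original initiate_multipart_upload call, so the final call is left commented out.

from boto.glacier.utils import chunk_hashes, tree_hash

_ONE_MEGABYTE = 1024 * 1024
part_size = 4 * _ONE_MEGABYTE  # must match the part size of the interrupted upload

with open('archive.bin', 'rb') as fobj:
    # Suppose only part 0 reached Glacier before the original upload died.
    part_hash_map = {0: tree_hash(chunk_hashes(fobj.read(part_size)))}
    fobj.seek(0)  # resume_file_upload reads from the start of the file
    # archive_id = resume_file_upload(vault, upload_id, part_size,
    #                                 fobj, part_hash_map)
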
Example #3
File: writer.py Project: 0t3dWCE/boto
    def upload_part(self, part_index, part_data):
        """Upload a part to Glacier.

        :param part_index: part number where 0 is the first part
        :param part_data: data to upload corresponding to this part

        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # Create a request and sign it
        part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
        self._insert_tree_hash(part_index, part_tree_hash)

        hex_tree_hash = bytes_to_hex(part_tree_hash)
        linear_hash = hashlib.sha256(part_data).hexdigest()
        start = self.part_size * part_index
        content_range = (start,
                         (start + len(part_data)) - 1)
        response = self.vault.layer1.upload_part(self.vault.name,
                                                 self.upload_id,
                                                 linear_hash,
                                                 hex_tree_hash,
                                                 content_range, part_data)
        response.read()
        self._uploaded_size += len(part_data)
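
upload_part sends two digests for each part: linear_hash, a single SHA-256 over the whole part, and hex_tree_hash, the tree hash over the part's 1 MiB chunks, along with an inclusive byte range. A standalone sketch of those three values, with an arbitrary part index and size, might look like this:

import hashlib
from boto.glacier.utils import bytes_to_hex, chunk_hashes, tree_hash

part_index = 2
part_size = 4 * 1024 * 1024
part_data = b'\x00' * part_size  # stand-in for the real part contents

linear_hash = hashlib.sha256(part_data).hexdigest()               # whole part, one SHA-256
hex_tree_hash = bytes_to_hex(tree_hash(chunk_hashes(part_data)))  # tree hash of 1 MiB chunks

start = part_size * part_index
content_range = (start, start + len(part_data) - 1)               # inclusive byte range
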
Example #4
def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
                       chunk_size=_ONE_MEGABYTE):
    """Resume upload of a file already part-uploaded to Glacier.

    The resumption of an upload where the part-uploaded section is empty is a
    valid degenerate case that this function can handle. In this case,
    part_hash_map should be an empty dict.

    :param vault: boto.glacier.vault.Vault object.
    :param upload_id: existing Glacier upload id of upload being resumed.
    :param part_size: part size of existing upload.
    :param fobj: file object containing local data to resume. This must read
        from the start of the entire upload, not just from the point being
        resumed. Use fobj.seek(0) to achieve this if necessary.
    :param part_hash_map: {part_index: part_tree_hash, ...} of data already
        uploaded. Each supplied part_tree_hash will be verified and the part
        re-uploaded if there is a mismatch.
    :param chunk_size: chunk size of tree hash calculation. This must be
        1 MiB for Amazon.

    """
    uploader = _Uploader(vault, upload_id, part_size, chunk_size)
    for part_index, part_data in enumerate(
            generate_parts_from_fobj(fobj, part_size)):
        part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
        if (part_index not in part_hash_map or
                part_hash_map[part_index] != part_tree_hash):
            uploader.upload_part(part_index, part_data)
        else:
            uploader.skip_part(part_index, part_tree_hash, len(part_data))
    uploader.close()
    return uploader.archive_id
Example #5
    def upload_part(self, part_index, part_data):
        """Upload a part to Glacier.

        :param part_index: part number where 0 is the first part
        :param part_data: data to upload corresponding to this part

        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # Create a request and sign it
        part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
        self._insert_tree_hash(part_index, part_tree_hash)

        hex_tree_hash = bytes_to_hex(part_tree_hash)
        linear_hash = hashlib.sha256(part_data).hexdigest()
        start = self.part_size * part_index
        content_range = (start,
                         (start + len(part_data)) - 1)
        response = self.vault.layer1.upload_part(self.vault.name,
                                                 self.upload_id,
                                                 linear_hash,
                                                 hex_tree_hash,
                                                 content_range, part_data)
        response.read()
        self._uploaded_size += len(part_data)
Example #6
File: test_utils.py Project: 10sr/hue
    def calculate_tree_hash(self, bytestring):
        start = time.time()
        calculated = bytes_to_hex(tree_hash(chunk_hashes(bytestring)))
        end = time.time()
        logging.debug("Tree hash calc time for length %s: %s",
                      len(bytestring), end - start)
        return calculated
Example #7
File: writer.py Project: 2mind/boto
    def current_tree_hash(self):
        """
        Returns the current tree hash for the data that's been written
        **so far**.

        Only once the writing is complete is the final tree hash returned.
        """
        return tree_hash(self.uploader._tree_hashes)
Example #8
    def current_tree_hash(self):
        """
        Returns the current tree hash for the data that's been written
        **so far**.

        Only once the writing is complete is the final tree hash returned.
        """
        return tree_hash(self.uploader._tree_hashes)
Example #9
def check_mock_vault_calls(vault, upload_part_calls, data_tree_hashes,
                           data_len):
    vault.layer1.upload_part.assert_has_calls(upload_part_calls,
                                              any_order=True)
    assert_equal(len(upload_part_calls), vault.layer1.upload_part.call_count)

    data_tree_hash = bytes_to_hex(tree_hash(data_tree_hashes))
    vault.layer1.complete_multipart_upload.assert_called_once_with(
        sentinel.vault_name, sentinel.upload_id, data_tree_hash, data_len)
Example #10
File: test_writer.py Project: 0t3dWCE/boto
def check_mock_vault_calls(vault, upload_part_calls, data_tree_hashes,
                           data_len):
    vault.layer1.upload_part.assert_has_calls(
        upload_part_calls, any_order=True)
    assert_equal(
        len(upload_part_calls), vault.layer1.upload_part.call_count)

    data_tree_hash = bytes_to_hex(tree_hash(data_tree_hashes))
    vault.layer1.complete_multipart_upload.assert_called_once_with(
        sentinel.vault_name, sentinel.upload_id, data_tree_hash, data_len)
Example #11
    def close(self):
        if self.closed:
            return
        if None in self._tree_hashes:
            raise RuntimeError("Some parts were not uploaded.")
        # Complete the multipart glacier upload
        hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
        response = self.vault.layer1.complete_multipart_upload(
            self.vault.name, self.upload_id, hex_tree_hash,
            self._uploaded_size)
        self.archive_id = response['ArchiveId']
        self.closed = True
Example #12
File: writer.py Project: 0t3dWCE/boto
    def close(self):
        if self.closed:
            return
        if None in self._tree_hashes:
            raise RuntimeError("Some parts were not uploaded.")
        # Complete the multipart glacier upload
        hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
        response = self.vault.layer1.complete_multipart_upload(
            self.vault.name, self.upload_id, hex_tree_hash,
            self._uploaded_size)
        self.archive_id = response['ArchiveId']
        self.closed = True
Example #13
    def upload(self, filename, description=None):
        """Concurrently create an archive.

        The part_size value specified when the class was constructed
        will be used *unless* it is smaller than the minimum required
        part size needed for the size of the given file.  In that case,
        the part size used will be the minimum part size required
        to properly upload the given file.

        :type filename: str
        :param filename: The filename to upload.

        :type description: str
        :param description: The description of the archive.

        :rtype: str
        :return: The archive id of the newly created archive.

        """
        total_size = os.stat(filename).st_size
        total_parts, part_size = self._calculate_required_part_size(total_size)
        hash_chunks = [None] * total_parts
        worker_queue = Queue()
        result_queue = Queue()
        response = self._api.initiate_multipart_upload(self._vault_name,
                                                       part_size,
                                                       description)
        upload_id = response['UploadId']
        # The basic idea is to add the chunks (the offsets, not the actual
        # contents) to a work queue, start up a thread pool, let the threads
        # crank through the items in the work queue, and then place their
        # results in a result queue which we use to complete the multipart
        # upload.
        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
        self._start_upload_threads(result_queue, upload_id,
                                   worker_queue, filename)
        try:
            self._wait_for_upload_threads(hash_chunks, result_queue,
                                          total_parts)
        except UploadArchiveError as e:
            log.debug("An error occurred while uploading an archive, "
                      "aborting multipart upload.")
            self._api.abort_multipart_upload(self._vault_name, upload_id)
            raise e
        log.debug("Completing upload.")
        response = self._api.complete_multipart_upload(
            self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
            total_size)
        log.debug("Upload finished.")
        return response['ArchiveId']
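
_calculate_required_part_size is not shown in this example, but the constraint it has to respect comes from Glacier's limits: an upload may have at most 10,000 parts, and part sizes are a power-of-two number of mebibytes. A rough sketch of that rule (not boto's actual helper) is:

_ONE_MEGABYTE = 1024 * 1024
_MAX_PARTS = 10000

def required_part_size(total_size, default_part_size=4 * _ONE_MEGABYTE):
    # Keep doubling the part size until the file fits within 10,000 parts;
    # assumes default_part_size is already a power-of-two number of MiB.
    part_size = default_part_size
    while total_size > part_size * _MAX_PARTS:
        part_size *= 2
    total_parts = (total_size + part_size - 1) // part_size
    return total_parts, part_size
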
Example #14
File: concurrent.py Project: CashStar/boto
    def upload(self, filename, description=None):
        """Concurrently create an archive.

        The part_size value specified when the class was constructed
        will be used *unless* it is smaller than the minimum required
        part size needed for the size of the given file.  In that case,
        the part size used will be the minimum part size required
        to properly upload the given file.

        :type filename: str
        :param filename: The filename to upload.

        :type description: str
        :param description: The description of the archive.

        :rtype: str
        :return: The archive id of the newly created archive.

        """
        total_size = os.stat(filename).st_size
        total_parts, part_size = self._calculate_required_part_size(total_size)
        hash_chunks = [None] * total_parts
        worker_queue = Queue()
        result_queue = Queue()
        response = self._api.initiate_multipart_upload(self._vault_name,
                                                       part_size,
                                                       description)
        upload_id = response['UploadId']
        # The basic idea is to add the chunks (the offsets, not the actual
        # contents) to a work queue, start up a thread pool, let the threads
        # crank through the items in the work queue, and then place their
        # results in a result queue which we use to complete the multipart
        # upload.
        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
        self._start_upload_threads(result_queue, upload_id,
                                   worker_queue, filename)
        try:
            self._wait_for_upload_threads(hash_chunks, result_queue,
                                          total_parts)
        except UploadArchiveError as e:
            log.debug("An error occurred while uploading an archive, "
                      "aborting multipart upload.")
            self._api.abort_multipart_upload(self._vault_name, upload_id)
            raise e
        log.debug("Completing upload.")
        response = self._api.complete_multipart_upload(
            self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
            total_size)
        log.debug("Upload finished.")
        return response['ArchiveId']
Example #15
File: concurrent.py Project: CashStar/boto
    def _upload_chunk(self, work):
        part_number, part_size = work
        start_byte = part_number * part_size
        self._fileobj.seek(start_byte)
        contents = self._fileobj.read(part_size)
        linear_hash = hashlib.sha256(contents).hexdigest()
        tree_hash_bytes = tree_hash(chunk_hashes(contents))
        byte_range = (start_byte, start_byte + len(contents) - 1)
        log.debug("Uploading chunk %s of size %s", part_number, part_size)
        response = self._api.upload_part(self._vault_name, self._upload_id,
                                         linear_hash,
                                         bytes_to_hex(tree_hash_bytes),
                                         byte_range, contents)
        # Reading the response allows the connection to be reused.
        response.read()
        return (part_number, tree_hash_bytes)
Example #16
    def _upload_chunk(self, work):
        part_number, part_size = work
        start_byte = part_number * part_size
        self._fileobj.seek(start_byte)
        contents = self._fileobj.read(part_size)
        linear_hash = hashlib.sha256(contents).hexdigest()
        tree_hash_bytes = tree_hash(chunk_hashes(contents))
        byte_range = (start_byte, start_byte + len(contents) - 1)
        log.debug("Uploading chunk %s of size %s", part_number, part_size)
        response = self._api.upload_part(self._vault_name, self._upload_id,
                                         linear_hash,
                                         bytes_to_hex(tree_hash_bytes),
                                         byte_range, contents)
        # Reading the response allows the connection to be reused.
        response.read()
        return (part_number, tree_hash_bytes)
Example #17
def calculate_mock_vault_calls(data, part_size, chunk_size):
    upload_part_calls = []
    data_tree_hashes = []
    for i, data_part in enumerate(partify(data, part_size)):
        start = i * part_size
        end = start + len(data_part)
        data_part_tree_hash_blob = tree_hash(
            chunk_hashes(data_part, chunk_size))
        data_part_tree_hash = bytes_to_hex(data_part_tree_hash_blob)
        data_part_linear_hash = sha256(data_part).hexdigest()
        upload_part_calls.append(
            call.layer1.upload_part(sentinel.vault_name, sentinel.upload_id,
                                    data_part_linear_hash, data_part_tree_hash,
                                    (start, end - 1), data_part))
        data_tree_hashes.append(data_part_tree_hash_blob)

    return upload_part_calls, data_tree_hashes
Example #18
File: test_writer.py Project: 0t3dWCE/boto
def calculate_mock_vault_calls(data, part_size, chunk_size):
    upload_part_calls = []
    data_tree_hashes = []
    for i, data_part in enumerate(partify(data, part_size)):
        start = i * part_size
        end = start + len(data_part)
        data_part_tree_hash_blob = tree_hash(
            chunk_hashes(data_part, chunk_size))
        data_part_tree_hash = bytes_to_hex(data_part_tree_hash_blob)
        data_part_linear_hash = sha256(data_part).hexdigest()
        upload_part_calls.append(
            call.layer1.upload_part(
                sentinel.vault_name, sentinel.upload_id,
                data_part_linear_hash, data_part_tree_hash,
                (start, end - 1), data_part))
        data_tree_hashes.append(data_part_tree_hash_blob)

    return upload_part_calls, data_tree_hashes
Example #19
File: concurrent.py Project: CashStar/boto
    def _download_chunk(self, work):
        """
        Downloads a chunk of archive from Glacier. Saves the data to a temp file
        Returns the part number and temp file location

        :param work:
        """
        part_number, part_size = work
        start_byte = part_number * part_size
        byte_range = (start_byte, start_byte + part_size - 1)
        log.debug("Downloading chunk %s of size %s", part_number, part_size)
        response = self._job.get_output(byte_range)
        data = response.read()
        actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
        if response['TreeHash'] != actual_hash:
            raise TreeHashDoesNotMatchError(
                "Tree hash for part number %s does not match, "
                "expected: %s, got: %s" % (part_number, response['TreeHash'],
                                           actual_hash))
        return (part_number, part_size, binascii.unhexlify(actual_hash), data)
Example #20
    def _download_chunk(self, work):
        """
        Downloads a chunk of archive from Glacier. Saves the data to a temp file
        Returns the part number and temp file location

        :param work:
        """
        part_number, part_size = work
        start_byte = part_number * part_size
        byte_range = (start_byte, start_byte + part_size - 1)
        log.debug("Downloading chunk %s of size %s", part_number, part_size)
        response = self._job.get_output(byte_range)
        data = response.read()
        actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
        if response['TreeHash'] != actual_hash:
            raise TreeHashDoesNotMatchError(
                "Tree hash for part number %s does not match, "
                "expected: %s, got: %s" %
                (part_number, response['TreeHash'], actual_hash))
        return (part_number, part_size, binascii.unhexlify(actual_hash), data)
Example #21
    def check_no_resume(self, data, resume_set=set()):
        fobj = StringIO(data)
        part_hash_map = {}
        for part_index in resume_set:
            start = self.part_size * part_index
            end = start + self.part_size
            part_data = data[start:end]
            part_hash_map[part_index] = tree_hash(
                chunk_hashes(part_data, self.chunk_size))

        resume_file_upload(self.vault, sentinel.upload_id, self.part_size,
                           fobj, part_hash_map, self.chunk_size)

        upload_part_calls, data_tree_hashes = calculate_mock_vault_calls(
            data, self.part_size, self.chunk_size)
        resume_upload_part_calls = [
            call for part_index, call in enumerate(upload_part_calls)
            if part_index not in resume_set
        ]
        check_mock_vault_calls(self.vault, resume_upload_part_calls,
                               data_tree_hashes, len(data))
Example #22
File: test_writer.py Project: 0t3dWCE/boto
    def check_no_resume(self, data, resume_set=set()):
        fobj = StringIO(data)
        part_hash_map = {}
        for part_index in resume_set:
            start = self.part_size * part_index
            end = start + self.part_size
            part_data = data[start:end]
            part_hash_map[part_index] = tree_hash(
                chunk_hashes(part_data, self.chunk_size))

        resume_file_upload(
            self.vault, sentinel.upload_id, self.part_size, fobj,
            part_hash_map, self.chunk_size)

        upload_part_calls, data_tree_hashes = calculate_mock_vault_calls(
            data, self.part_size, self.chunk_size)
        resume_upload_part_calls = [
            call for part_index, call in enumerate(upload_part_calls)
                    if part_index not in resume_set]
        check_mock_vault_calls(
            self.vault, resume_upload_part_calls, data_tree_hashes, len(data))
Example #23
    def _wait_for_download_threads(self, filename, result_queue, total_parts):
        """
        Waits until the result_queue is filled with all the downloaded parts
        This indicates that all part downloads have completed

        Saves downloaded parts into filename

        :param filename:
        :param result_queue:
        :param total_parts:
        """
        hash_chunks = [None] * total_parts
        with open(filename, "wb") as f:
            for _ in range(total_parts):
                result = result_queue.get()
                if isinstance(result, Exception):
                    log.debug(
                        "An error was found in the result queue, "
                        "terminating threads: %s", result)
                    self._shutdown_threads()
                    raise DownloadArchiveError(
                        "An error occurred while uploading "
                        "an archive: %s" % result)
                part_number, part_size, actual_hash, data = result
                hash_chunks[part_number] = actual_hash
                start_byte = part_number * part_size
                f.seek(start_byte)
                f.write(data)
                f.flush()
        final_hash = bytes_to_hex(tree_hash(hash_chunks))
        log.debug(
            "Verifying final tree hash of archive, expecting: %s, "
            "actual: %s", self._job.sha256_treehash, final_hash)
        if self._job.sha256_treehash != final_hash:
            self._shutdown_threads()
            raise TreeHashDoesNotMatchError(
                "Tree hash for entire archive does not match, "
                "expected: %s, got: %s" %
                (self._job.sha256_treehash, final_hash))
        self._shutdown_threads()
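
The final verification works because per-part tree hashes compose: reducing the list of part-level hashes with tree_hash gives the same root as hashing every 1 MiB chunk of the whole archive, provided the part size is a power-of-two number of mebibytes, which Glacier requires. A small self-contained check with arbitrary sizes:

from boto.glacier.utils import chunk_hashes, tree_hash

_ONE_MEGABYTE = 1024 * 1024
data = b'a' * (3 * _ONE_MEGABYTE + 17)  # arbitrary payload, not a multiple of the part size
part_size = 2 * _ONE_MEGABYTE           # power-of-two number of MiB, as Glacier requires

part_hashes = [tree_hash(chunk_hashes(data[i:i + part_size]))
               for i in range(0, len(data), part_size)]
assert tree_hash(part_hashes) == tree_hash(chunk_hashes(data))
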
Example #24
File: concurrent.py Project: CashStar/boto
    def _wait_for_download_threads(self, filename, result_queue, total_parts):
        """
        Waits until the result_queue is filled with all the downloaded parts
        This indicates that all part downloads have completed

        Saves downloaded parts into filename

        :param filename:
        :param result_queue:
        :param total_parts:
        """
        hash_chunks = [None] * total_parts
        with open(filename, "wb") as f:
            for _ in range(total_parts):
                result = result_queue.get()
                if isinstance(result, Exception):
                    log.debug("An error was found in the result queue, "
                              "terminating threads: %s", result)
                    self._shutdown_threads()
                    raise DownloadArchiveError(
                        "An error occurred while uploading "
                        "an archive: %s" % result)
                part_number, part_size, actual_hash, data = result
                hash_chunks[part_number] = actual_hash
                start_byte = part_number * part_size
                f.seek(start_byte)
                f.write(data)
                f.flush()
        final_hash = bytes_to_hex(tree_hash(hash_chunks))
        log.debug("Verifying final tree hash of archive, expecting: %s, "
                  "actual: %s", self._job.sha256_treehash, final_hash)
        if self._job.sha256_treehash != final_hash:
            self._shutdown_threads()
            raise TreeHashDoesNotMatchError(
                "Tree hash for entire archive does not match, "
                "expected: %s, got: %s" % (self._job.sha256_treehash,
                                           final_hash))
        self._shutdown_threads()