import os
import logging

from boto.glacier.exceptions import UploadArchiveError
from boto.glacier.utils import minimum_part_size

log = logging.getLogger(__name__)


def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
    for _ in range(total_parts):
        result = result_queue.get()
        if isinstance(result, Exception):
            log.debug("An error was found in the result queue, terminating "
                      "threads: %s", result)
            self._shutdown_threads()
            raise UploadArchiveError("An error occurred while uploading "
                                     "an archive: %s" % result)
        # Each unit of work returns the tree hash for the given part
        # number, which we use at the end to compute the tree hash of
        # the entire archive.
        part_number, tree_sha256 = result
        hash_chunks[part_number] = tree_sha256
    self._shutdown_threads()
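
# Illustrative sketch (not part of boto): the loop above consumes whatever
# the worker threads produce, so each worker must put either the exception
# it hit or a (part_number, tree_hash) tuple onto result_queue. A minimal
# producer, assuming a hypothetical upload_part(part_number, data) helper
# that uploads one part and returns its tree hash, could look like this:

def _example_worker(work_queue, result_queue, upload_part):
    # Pull (part_number, data) pairs until the process exits; boto-style
    # workers run as daemon threads, so the loop never needs to return.
    while True:
        part_number, data = work_queue.get()
        try:
            tree_sha256 = upload_part(part_number, data)
            result_queue.put((part_number, tree_sha256))
        except Exception as e:
            # Surface the failure to _wait_for_upload_threads, which
            # raises UploadArchiveError and shuts the threads down.
            result_queue.put(e)
        finally:
            work_queue.task_done()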
def create_archive_from_file(self, filename=None, file_obj=None,
                             description=None, upload_id_callback=None):
    """
    Create a new archive and upload the data from the given file
    or file-like object.

    :type filename: str
    :param filename: A filename to upload

    :type file_obj: file
    :param file_obj: A file-like object to upload

    :type description: str
    :param description: An optional description for the archive.

    :type upload_id_callback: function
    :param upload_id_callback: if set, call with the upload_id as the
        only parameter when it becomes known, to enable future calls
        to resume_archive_from_file in case resume is needed.

    :rtype: str
    :return: The archive id of the newly created archive
    """
    part_size = self.DefaultPartSize
    if not file_obj:
        file_size = os.path.getsize(filename)
        try:
            part_size = minimum_part_size(file_size, part_size)
        except ValueError:
            raise UploadArchiveError("File size of %s bytes exceeds "
                                     "40,000 GB archive limit of Glacier."
                                     % file_size)
        file_obj = open(filename, "rb")
    writer = self.create_archive_writer(
        description=description,
        part_size=part_size)
    if upload_id_callback:
        upload_id_callback(writer.upload_id)
    while True:
        data = file_obj.read(part_size)
        if not data:
            break
        writer.write(data)
    writer.close()
    return writer.get_archive_id()
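
# Usage sketch (illustrative; assumes an already-connected vault object,
# e.g. one returned by Layer2.get_vault): the callback records the
# multipart upload id as soon as it is known, so an interrupted transfer
# can later be resumed with resume_archive_from_file.

def _remember_upload_id(upload_id):
    with open("upload_id.txt", "w") as f:
        f.write(upload_id)

archive_id = vault.create_archive_from_file(
    filename="backup.tar.gz",
    description="nightly backup",
    upload_id_callback=_remember_upload_id)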