def __init__(self, client, initial_response, deserialization_callback, polling_method):
    # type: (Any, Any, Callable, PollingMethod[PollingReturnType]) -> None
    self._callbacks = []  # type: List[Callable]
    self._polling_method = polling_method

    # This implicit test avoids bringing in an explicit dependency on Model directly
    try:
        deserialization_callback = deserialization_callback.deserialize  # type: ignore
    except AttributeError:
        pass

    # Might raise a CloudError
    self._polling_method.initialize(client, initial_response, deserialization_callback)

    # Prepare thread execution
    self._thread = None
    self._done = None
    self._exception = None
    if not self._polling_method.finished():
        self._done = threading.Event()
        self._thread = threading.Thread(
            target=with_current_context(self._start),
            name="LROPoller({})".format(uuid.uuid4()))
        self._thread.daemon = True
        self._thread.start()
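# The constructor above hands with_current_context(self._start) to the worker thread so
# the polling loop runs under the caller's tracing context. A minimal, self-contained
# sketch of that idea using only the stdlib (with_current_context_sketch is a
# hypothetical stand-in, not the azure.core implementation):
import contextvars
import threading

def with_current_context_sketch(func):
    # Snapshot the caller's context now; run `func` inside it later, on any thread.
    ctx = contextvars.copy_context()
    def wrapper(*args, **kwargs):
        return ctx.run(func, *args, **kwargs)
    return wrapper

request_id = contextvars.ContextVar("request_id", default="unset")
request_id.set("abc-123")

def worker():
    print(request_id.get())  # prints "abc-123", not "unset"

thread = threading.Thread(target=with_current_context_sketch(worker))
thread.start()
thread.join()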
def wait(self, timeout=None):
    # type: (Optional[int]) -> None
    """Wait on the long running operation for a number of seconds.

    You can check whether this call timed out using the "done()" method.

    :param int timeout: Period of time to wait for the long running
        operation to complete (in seconds).
    :raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
    """
    if not self._polling_method.finished():
        self._done = threading.Event()
        self._thread = threading.Thread(
            target=with_current_context(self._start),
            name="KeyVaultOperationPoller({})".format(uuid.uuid4()))
        self._thread.daemon = True
        self._thread.start()
    if self._thread is None:
        return
    self._thread.join(timeout=timeout)
    try:
        # Let's handle possible None in forgiveness here
        raise self._exception  # type: ignore
    except TypeError:  # Was None
        pass
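# Hypothetical usage, assuming `poller` is an instance of the poller class above:
poller.wait(timeout=30)
if not poller.done():
    print("operation still in progress after 30 seconds")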
def upload_substream_blocks(service=None, uploader_class=None, total_size=None,
                            chunk_size=None, max_concurrency=None, stream=None, **kwargs):
    parallel = max_concurrency > 1
    if parallel and 'modified_access_conditions' in kwargs:
        # Access conditions do not work with parallelism
        kwargs['modified_access_conditions'] = None
    uploader = uploader_class(
        service=service,
        total_size=total_size,
        chunk_size=chunk_size,
        stream=stream,
        parallel=parallel,
        **kwargs)

    if parallel:
        # Use a context manager so the executor's threads are cleaned up on exit
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            upload_tasks = uploader.get_substream_blocks()
            running_futures = [
                executor.submit(with_current_context(uploader.process_substream_block), u)
                for u in islice(upload_tasks, 0, max_concurrency)
            ]
            range_ids = _parallel_uploads(executor, uploader.process_substream_block,
                                          upload_tasks, running_futures)
    else:
        range_ids = [
            uploader.process_substream_block(b)
            for b in uploader.get_substream_blocks()
        ]
    return sorted(range_ids)
def readinto(self, stream):
    """Download the contents of this file to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :returns: The number of bytes read.
    :rtype: int
    """
    # The stream must be seekable if parallel download is required
    parallel = self._max_concurrency > 1
    if parallel:
        error_message = "Target stream handle must be seekable."
        if sys.version_info >= (3,) and not stream.seekable():
            raise ValueError(error_message)

        try:
            stream.seek(stream.tell())
        except (NotImplementedError, AttributeError):
            raise ValueError(error_message)

    # Write the content to the user stream
    stream.write(self._current_content)
    if self._download_complete:
        return self.size

    data_end = self._file_size
    if self._end_range is not None:
        # Use the length unless it is over the end of the file
        data_end = min(self._file_size, self._end_range + 1)

    downloader = _ChunkDownloader(
        client=self._client,
        total_size=self.size,
        chunk_size=self._config.max_chunk_get_size,
        current_progress=self._first_get_size,
        start_range=self._initial_range[1] + 1,  # Start where the first download ended
        end_range=data_end,
        stream=stream,
        parallel=parallel,
        validate_content=self._validate_content,
        encryption_options=self._encryption_options,
        use_location=self._location_mode,
        etag=self._etag,
        **self._request_options)

    if parallel:
        import concurrent.futures
        with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
            list(executor.map(
                with_current_context(downloader.process_chunk),
                downloader.get_chunk_offsets()))
    else:
        for chunk in downloader.get_chunk_offsets():
            downloader.process_chunk(chunk)
    return self.size
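# Hypothetical usage, assuming `downloader` was returned by a download call on a
# storage client (e.g. blob_client.download_blob(max_concurrency=4)):
import io

buffer = io.BytesIO()  # in-memory streams are seekable, so a parallel download is safe
bytes_read = downloader.readinto(buffer)
assert bytes_read == len(buffer.getvalue())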
def upload_data_chunks(
        service=None,
        uploader_class=None,
        total_size=None,
        chunk_size=None,
        max_concurrency=None,
        stream=None,
        validate_content=None,
        encryption_options=None,
        **kwargs):

    if encryption_options:
        encryptor, padder = get_blob_encryptor_and_padder(
            encryption_options.get('cek'),
            encryption_options.get('vector'),
            uploader_class is not PageBlobChunkUploader)
        kwargs['encryptor'] = encryptor
        kwargs['padder'] = padder

    parallel = max_concurrency > 1
    if parallel and 'modified_access_conditions' in kwargs:
        # Access conditions do not work with parallelism
        kwargs['modified_access_conditions'] = None

    uploader = uploader_class(
        service=service,
        total_size=total_size,
        chunk_size=chunk_size,
        stream=stream,
        parallel=parallel,
        validate_content=validate_content,
        **kwargs)

    if parallel:
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            upload_tasks = uploader.get_chunk_streams()
            running_futures = [
                executor.submit(with_current_context(uploader.process_chunk), u)
                for u in islice(upload_tasks, 0, max_concurrency)
            ]
            range_ids = _parallel_uploads(executor, uploader.process_chunk,
                                          upload_tasks, running_futures)
    else:
        range_ids = [
            uploader.process_chunk(result)
            for result in uploader.get_chunk_streams()
        ]

    if any(range_ids):
        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
    return uploader.response_headers
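# Hypothetical invocation, assuming BlockBlobChunkUploader and a block blob service
# client (both names assumed here, not established by this listing); with
# max_concurrency > 1 the chunk uploads are fanned out across worker threads:
import os

with open("input.dat", "rb") as data:
    block_ids = upload_data_chunks(
        service=blob_client,                  # assumed storage service client
        uploader_class=BlockBlobChunkUploader,
        total_size=os.path.getsize("input.dat"),
        chunk_size=4 * 1024 * 1024,           # 4 MiB chunks
        max_concurrency=4,
        stream=data)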
def __init__(self, polling_generator, result_callback):
    self._callbacks = []
    self._polling_generator = polling_generator  # generator
    self._polling_method = None
    self._result_callback = result_callback

    # Prepare thread execution
    self._exception = None
    self._done = threading.Event()
    self._thread = threading.Thread(
        target=with_current_context(self._start),
        name="AAZLROPoller({})".format(uuid.uuid4()))
    self._thread.daemon = True
    self._thread.start()
def _parallel_uploads(executor, uploader, pending, running):
    range_ids = []
    while True:
        # Wait for some upload to finish before adding a new one
        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
        range_ids.extend([chunk.result() for chunk in done])
        try:
            next_chunk = next(pending)
        except StopIteration:
            break
        else:
            running.add(executor.submit(with_current_context(uploader), next_chunk))

    # Wait for the remaining uploads to finish
    done, _running = futures.wait(running)
    range_ids.extend([chunk.result() for chunk in done])
    return range_ids
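# Self-contained illustration of the sliding-window pattern above, using a stand-in
# work function instead of a real upload. It still needs with_current_context in scope
# (e.g. from azure.core.tracing.common), since _parallel_uploads wraps each submission:
from concurrent import futures
from itertools import islice

def process(chunk):
    return chunk * 2  # stand-in for uploading one chunk

chunks = iter(range(10))
max_concurrency = 3
with futures.ThreadPoolExecutor(max_concurrency) as executor:
    # Prime the window with the first max_concurrency submissions, as the callers above do
    running = [executor.submit(process, c) for c in islice(chunks, max_concurrency)]
    results = _parallel_uploads(executor, process, chunks, running)
print(sorted(results))  # [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]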
def download_to_stream(self, stream, max_concurrency=1):
    """Download the contents of this file to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :returns: The properties of the downloaded file.
    :rtype: Any
    """
    # The stream must be seekable if parallel download is required
    if max_concurrency > 1:
        error_message = "Target stream handle must be seekable."
        if sys.version_info >= (3,) and not stream.seekable():
            raise ValueError(error_message)

        try:
            stream.seek(stream.tell())
        except (NotImplementedError, AttributeError):
            raise ValueError(error_message)

    if self.download_size == 0:
        content = b""
    else:
        content = process_content(
            self.response,
            self.initial_offset[0],
            self.initial_offset[1],
            self.encryption_options)

    # Write the content to the user stream
    if content is not None:
        stream.write(content)
    if self._download_complete:
        return self.properties

    data_end = self.file_size
    if self.length is not None:
        # Use the length unless it is over the end of the file
        data_end = min(self.file_size, self.length + 1)

    downloader_class = ParallelChunkDownloader if max_concurrency > 1 else SequentialChunkDownloader
    downloader = downloader_class(
        client=self.client,
        total_size=self.download_size,
        chunk_size=self.config.max_chunk_get_size,
        current_progress=self.first_get_size,
        start_range=self.initial_range[1] + 1,  # Start where the first download ended
        end_range=data_end,
        stream=stream,
        validate_content=self.validate_content,
        encryption_options=self.encryption_options,
        use_location=self.location_mode,
        **self.request_options)

    if max_concurrency > 1:
        import concurrent.futures
        # Use a context manager so the executor's threads are cleaned up on exit
        with concurrent.futures.ThreadPoolExecutor(max_concurrency) as executor:
            list(executor.map(
                with_current_context(downloader.process_chunk),
                downloader.get_chunk_offsets()))
    else:
        for chunk in downloader.get_chunk_offsets():
            downloader.process_chunk(chunk)
    return self.properties
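# Hypothetical usage, assuming `downloader` is an instance of the class defining the
# method above: download into a local file with four parallel connections.
with open("output.dat", "wb") as handle:  # regular file handles are seekable
    props = downloader.download_to_stream(handle, max_concurrency=4)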