def test_retries_in_middle_of_streaming(self):
    """A retryable error mid-stream restarts the download; bytes read
    before the failure are still written once at their original offset."""
    # First response: the body raises a retryable socket error after one
    # successful read.
    failing_body = StreamWithError(
        copy.deepcopy(self.stream), SOCKET_ERROR, 1)
    self.stubber.add_response(
        'get_object',
        service_response={'Body': failing_body},
        expected_params={'Bucket': self.bucket, 'Key': self.key},
    )
    # Second response: the retry succeeds with the full stream.
    self.stubber.add_response(
        'get_object',
        service_response={'Body': self.stream},
        expected_params={'Bucket': self.bucket, 'Key': self.key},
    )

    task = self.get_download_task(io_chunksize=1)
    task()
    self.stubber.assert_no_pending_responses()

    # The byte initially read in before the retry hit on the second read().
    expected_contents = [(0, bytes(self.content[0:1]))]
    # After the retry the whole content is streamed again, partitioned into
    # one-byte chunks — so the second element duplicates the first, since a
    # retryable exception happened in between.
    expected_contents.extend(
        (offset, bytes(self.content[offset:offset + 1]))
        for offset in range(len(self.content))
    )
    self.assert_io_writes(expected_contents)
def add_n_retryable_get_object_responses(self, n, num_reads=0):
    """Queue ``n`` stubbed GetObject responses whose bodies raise a
    retryable socket error after ``num_reads`` successful reads."""
    for _ in range(n):
        body_with_error = StreamWithError(
            copy.deepcopy(self.stream), SOCKET_ERROR, num_reads)
        self.stubber.add_response(
            method='get_object',
            service_response={'Body': body_with_error},
        )
def test_run_does_retries_for_get_object(self):
    """A read timeout on the first GetObject is retried and the download
    still completes with the expected contents."""
    self.add_get_object_job()
    self.add_shutdown()
    # First attempt: body times out while being read.
    timing_out_body = StreamWithError(
        self.stream, ReadTimeoutError(endpoint_url=''))
    self.add_stubbed_get_object_response(body=timing_out_body)
    # Second attempt: succeeds.
    self.add_stubbed_get_object_response()

    self.worker.run()

    self.stubber.assert_no_pending_responses()
    self.assert_contents(self.temp_filename, self.remote_contents)
def test_run_can_exhaust_retries_for_get_object(self):
    """When every GetObject attempt times out, the worker records a
    RetriesExceededError for the transfer instead of raising."""
    self.add_get_object_job()
    self.add_shutdown()
    # 5 is the current setting for the max number of GetObject attempts;
    # stub a failure for each one so retries are exhausted.
    max_get_object_attempts = 5
    for _ in range(max_get_object_attempts):
        self.add_stubbed_get_object_response(
            body=StreamWithError(
                self.stream, ReadTimeoutError(endpoint_url='')))

    self.worker.run()

    self.stubber.assert_no_pending_responses()
    self.assertIsInstance(
        self.transfer_monitor.get_exception(self.transfer_id),
        RetriesExceededError)
def test_retries_failure(self):
    """If every attempt raises a retryable error, the transfer result
    surfaces a RetriesExceededError."""
    # Stub a retryable failure for each allowed attempt.
    for _ in range(self.max_attempts):
        self.stubber.add_response(
            'get_object',
            service_response={
                'Body': StreamWithError(self.stream, SOCKET_ERROR),
            },
            expected_params={'Bucket': self.bucket, 'Key': self.key},
        )

    task = self.get_download_task()
    task()
    self.transfer_coordinator.announce_done()

    # The transfer should have failed out on a RetriesExceededError.
    with self.assertRaises(RetriesExceededError):
        self.transfer_coordinator.result()
    self.stubber.assert_no_pending_responses()
def test_retries_succeeds(self):
    """A single retryable error followed by a successful retry must not
    affect the bytes placed into the io queue."""
    # First attempt fails with a retryable socket error...
    self.stubber.add_response(
        'get_object',
        service_response={
            'Body': StreamWithError(self.stream, SOCKET_ERROR),
        },
        expected_params={'Bucket': self.bucket, 'Key': self.key},
    )
    # ...and the retry returns the full body.
    self.stubber.add_response(
        'get_object',
        service_response={'Body': self.stream},
        expected_params={'Bucket': self.bucket, 'Key': self.key},
    )

    task = self.get_download_task()
    task()

    self.stubber.assert_no_pending_responses()
    self.assert_io_writes([(0, self.content)])