def setUp(self):
    self.leaky_bucket = mock.Mock(LeakyBucket)
    self.time_utils = mock.Mock(TimeUtils)
    self.tempdir = tempfile.mkdtemp()
    self.content = b'a' * 1024 * 1024
    self.filename = os.path.join(self.tempdir, 'myfile')
    with open(self.filename, 'wb') as f:
        f.write(self.content)
    self.coordinator = TransferCoordinator()
def test_cancel(self):
    transfer_coordinator = TransferCoordinator()
    # Add the transfer coordinator
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)
    # Cancel with the canceler
    self.coordinator_controller.cancel()
    # Check that the coordinator got cancelled
    self.assert_coordinator_is_cancelled(transfer_coordinator)
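# A minimal sketch of the assertion helper used above, assuming it lives on
# the same test class and that a cancelled TransferCoordinator reports the
# 'cancelled' status. This is a hypothetical reconstruction, not the
# verified helper.
def assert_coordinator_is_cancelled(self, transfer_coordinator):
    self.assertEqual(transfer_coordinator.status, 'cancelled')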
def test_add_transfer_coordinator(self):
    transfer_coordinator = TransferCoordinator()
    # Add the transfer coordinator
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)
    # Ensure that it is tracked.
    self.assertEqual(
        self.coordinator_controller.tracked_transfer_coordinators,
        set([transfer_coordinator]))
def _get_future_with_components(self, call_args):
    transfer_id = self._id_counter
    # Creates a new transfer future along with its components
    transfer_coordinator = TransferCoordinator(transfer_id=transfer_id)
    # Track the transfer coordinator for transfers to manage.
    self._coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)
    # Also make sure that the transfer coordinator is removed once
    # the transfer completes so it does not stick around in memory.
    transfer_coordinator.add_done_callback(
        self._coordinator_controller.remove_transfer_coordinator,
        transfer_coordinator)
    components = {
        'meta': TransferMeta(call_args, transfer_id=transfer_id),
        'coordinator': transfer_coordinator
    }
    transfer_future = TransferFuture(**components)
    return transfer_future, components
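# A hedged usage sketch (not part of the source): how a future and its
# components relate once built, assuming s3transfer's TransferCoordinator,
# TransferMeta, TransferFuture, and CallArgs interfaces.
from s3transfer.futures import (
    TransferCoordinator, TransferFuture, TransferMeta)
from s3transfer.utils import CallArgs

coordinator = TransferCoordinator(transfer_id=0)
meta = TransferMeta(CallArgs(bucket='mybucket', key='mykey'), transfer_id=0)
future = TransferFuture(meta=meta, coordinator=coordinator)

coordinator.set_result('done')    # normally done by the final task
coordinator.announce_done()       # unblocks anyone waiting on the future
assert future.result() == 'done'  # the future proxies its coordinator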
def test_remove_transfer_coordinator(self):
    transfer_coordinator = TransferCoordinator()
    # Add the coordinator
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)
    # Now remove the coordinator
    self.coordinator_controller.remove_transfer_coordinator(
        transfer_coordinator)
    # Make sure that it is no longer getting tracked.
    self.assertEqual(
        self.coordinator_controller.tracked_transfer_coordinators, set())
def test_wait_does_not_propagate_exceptions_from_result(self):
    transfer_coordinator = TransferCoordinator()
    transfer_coordinator.set_exception(FutureResultException())
    transfer_coordinator.announce_done()
    try:
        self.coordinator_controller.wait()
    except FutureResultException as e:
        self.fail('%s should not have been raised.' % e)
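# FutureResultException is referenced above but not defined in this section;
# a plausible minimal sketch is a plain test-only exception (an assumption,
# not the verified definition).
class FutureResultException(Exception):
    """Simulates an exception stored as a transfer's result."""
    pass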
def test_cancel_with_message(self):
    message = 'my cancel message'
    transfer_coordinator = TransferCoordinator()
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)

    self.coordinator_controller.cancel(message)
    transfer_coordinator.announce_done()
    with self.assertRaisesRegexp(CancelledError, message):
        transfer_coordinator.result()
def test_cancel_with_provided_exception(self):
    message = 'my cancel message'
    transfer_coordinator = TransferCoordinator()
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)

    self.coordinator_controller.cancel(message, exc_type=FatalError)
    transfer_coordinator.announce_done()
    with self.assertRaisesRegexp(FatalError, message):
        transfer_coordinator.result()
def test_wait_for_done_transfer_coordinators(self):
    # Create a coordinator and add it to the canceler
    transfer_coordinator = TransferCoordinator()
    self.coordinator_controller.add_transfer_coordinator(
        transfer_coordinator)

    sleep_time = 0.02
    with ThreadPoolExecutor(max_workers=1) as executor:
        # In a separate thread sleep and then set the transfer coordinator
        # to done after sleeping.
        start_time = time.time()
        executor.submit(
            self.sleep_then_announce_done, transfer_coordinator, sleep_time)
        # Now call wait to wait for the transfer coordinator to be done.
        self.coordinator_controller.wait()
        end_time = time.time()
        wait_time = end_time - start_time
    # The time waited should not be less than the time it took to sleep in
    # the separate thread because the wait ending should be dependent on
    # the sleeping thread announcing that the transfer coordinator is done.
    self.assertTrue(sleep_time <= wait_time)
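# A minimal sketch of the helper the test above submits to the executor
# (an assumption about its body): sleep for the requested time, then record
# a result and announce the coordinator as done so the controller's wait()
# can return.
def sleep_then_announce_done(self, transfer_coordinator, sleep_time):
    time.sleep(sleep_time)
    transfer_coordinator.set_result('done')
    transfer_coordinator.announce_done()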
def setUp(self):
    self.transfer_coordinator = TransferCoordinator()
def setUp(self):
    self.transfer_id = 1
    self.transfer_coordinator = TransferCoordinator(
        transfer_id=self.transfer_id)
class TestTask(unittest.TestCase):
    # SuccessTask, FailureTask, and ReturnKwargsTask are small helper tasks;
    # a sketch of them follows this class.
    def setUp(self):
        self.transfer_id = 1
        self.transfer_coordinator = TransferCoordinator(
            transfer_id=self.transfer_id)

    def test_repr(self):
        main_kwargs = {'bucket': 'mybucket', 'param_to_not_include': 'foo'}
        task = ReturnKwargsTask(
            self.transfer_coordinator, main_kwargs=main_kwargs)
        # The repr should not include the other parameter because it is not
        # a desired parameter to include.
        self.assertEqual(
            repr(task),
            'ReturnKwargsTask(transfer_id=%s, %s)' % (
                self.transfer_id, {'bucket': 'mybucket'}))

    def test_transfer_id(self):
        task = SuccessTask(self.transfer_coordinator)
        # Make sure that the task's id matches the id associated with the
        # transfer coordinator.
        self.assertEqual(task.transfer_id, self.transfer_id)

    def test_context_status_transitioning_success(self):
        # The status should be set to running.
        self.transfer_coordinator.set_status_to_running()
        self.assertEqual(self.transfer_coordinator.status, 'running')

        # If a task is called, the status should still be running.
        SuccessTask(self.transfer_coordinator)()
        self.assertEqual(self.transfer_coordinator.status, 'running')

        # Once the final task is called, the status should be set to
        # success.
        SuccessTask(self.transfer_coordinator, is_final=True)()
        self.assertEqual(self.transfer_coordinator.status, 'success')

    def test_context_status_transitioning_failed(self):
        self.transfer_coordinator.set_status_to_running()

        SuccessTask(self.transfer_coordinator)()
        self.assertEqual(self.transfer_coordinator.status, 'running')

        # A failure task should result in the failed status.
        FailureTask(self.transfer_coordinator)()
        self.assertEqual(self.transfer_coordinator.status, 'failed')

        # Even if the final task comes in and succeeds, it should stay
        # failed.
        SuccessTask(self.transfer_coordinator, is_final=True)()
        self.assertEqual(self.transfer_coordinator.status, 'failed')

    def test_result_setting_for_success(self):
        override_return = 'foo'
        SuccessTask(self.transfer_coordinator)()
        SuccessTask(
            self.transfer_coordinator,
            main_kwargs={'return_value': override_return},
            is_final=True)()

        # The return value for the transfer future should be that of the
        # final task.
        self.assertEqual(self.transfer_coordinator.result(), override_return)

    def test_result_setting_for_error(self):
        FailureTask(self.transfer_coordinator)()

        # If another failure comes in, the result should still throw the
        # original exception when result() is eventually called.
        FailureTask(
            self.transfer_coordinator,
            main_kwargs={'exception': Exception})()

        # Even if a success task comes along, the result of the future
        # should be the original exception.
        SuccessTask(self.transfer_coordinator, is_final=True)()
        with self.assertRaises(TaskFailureException):
            self.transfer_coordinator.result()

    def test_done_callbacks_success(self):
        callback_results = []
        SuccessTask(self.transfer_coordinator, done_callbacks=[
            partial(callback_results.append, 'first'),
            partial(callback_results.append, 'second')
        ])()
        # For successful tasks, the done callbacks should get called.
        self.assertEqual(callback_results, ['first', 'second'])

    def test_done_callbacks_failure(self):
        callback_results = []
        FailureTask(self.transfer_coordinator, done_callbacks=[
            partial(callback_results.append, 'first'),
            partial(callback_results.append, 'second')
        ])()
        # Even for failed tasks, the done callbacks should get called.
        self.assertEqual(callback_results, ['first', 'second'])

        # Callbacks should continue to be called even after a related
        # failure.
        SuccessTask(self.transfer_coordinator, done_callbacks=[
            partial(callback_results.append, 'third'),
            partial(callback_results.append, 'fourth')
        ])()
        self.assertEqual(
            callback_results, ['first', 'second', 'third', 'fourth'])

    def test_failure_cleanups_on_failure(self):
        callback_results = []
        self.transfer_coordinator.add_failure_cleanup(
            callback_results.append, 'first')
        self.transfer_coordinator.add_failure_cleanup(
            callback_results.append, 'second')
        FailureTask(self.transfer_coordinator)()
        # The failure callbacks should not have been called yet because it
        # is not the last task.
        self.assertEqual(callback_results, [])

        # Now the failure callbacks should get called.
        SuccessTask(self.transfer_coordinator, is_final=True)()
        self.assertEqual(callback_results, ['first', 'second'])

    def test_no_failure_cleanups_on_success(self):
        callback_results = []
        self.transfer_coordinator.add_failure_cleanup(
            callback_results.append, 'first')
        self.transfer_coordinator.add_failure_cleanup(
            callback_results.append, 'second')
        SuccessTask(self.transfer_coordinator, is_final=True)()
        # The failure cleanups should not have been called because no task
        # failed for the transfer context.
        self.assertEqual(callback_results, [])

    def test_passing_main_kwargs(self):
        main_kwargs = {'foo': 'bar', 'baz': 'biz'}
        ReturnKwargsTask(
            self.transfer_coordinator, main_kwargs=main_kwargs,
            is_final=True)()
        # The kwargs should have been passed to main().
        self.assertEqual(self.transfer_coordinator.result(), main_kwargs)

    def test_passing_pending_kwargs_single_futures(self):
        pending_kwargs = {}
        ref_main_kwargs = {'foo': 'bar', 'baz': 'biz'}

        # Pass some tasks to an executor.
        with futures.ThreadPoolExecutor(1) as executor:
            pending_kwargs['foo'] = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': ref_main_kwargs['foo']}))
            pending_kwargs['baz'] = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': ref_main_kwargs['baz']}))

        # Create a task that depends on the tasks passed to the executor.
        ReturnKwargsTask(
            self.transfer_coordinator, pending_main_kwargs=pending_kwargs,
            is_final=True)()
        # The result should have the pending keyword arg values flushed
        # out.
        self.assertEqual(self.transfer_coordinator.result(), ref_main_kwargs)

    def test_passing_pending_kwargs_list_of_futures(self):
        pending_kwargs = {}
        ref_main_kwargs = {'foo': ['first', 'second']}

        # Pass some tasks to an executor.
        with futures.ThreadPoolExecutor(1) as executor:
            first_future = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': ref_main_kwargs['foo'][0]}))
            second_future = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': ref_main_kwargs['foo'][1]}))
            # Make the pending keyword arg value a list.
            pending_kwargs['foo'] = [first_future, second_future]

        # Create a task that depends on the tasks passed to the executor.
        ReturnKwargsTask(
            self.transfer_coordinator, pending_main_kwargs=pending_kwargs,
            is_final=True)()
        # The result should have the pending keyword arg values flushed
        # out in the expected order.
        self.assertEqual(self.transfer_coordinator.result(), ref_main_kwargs)

    def test_passing_pending_and_non_pending_kwargs(self):
        main_kwargs = {'nonpending_value': 'foo'}
        pending_kwargs = {}
        ref_main_kwargs = {
            'nonpending_value': 'foo',
            'pending_value': 'bar',
            'pending_list': ['first', 'second']
        }

        # Create the pending tasks
        with futures.ThreadPoolExecutor(1) as executor:
            pending_kwargs['pending_value'] = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={
                        'return_value': ref_main_kwargs['pending_value']}))

            first_future = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={
                        'return_value': ref_main_kwargs['pending_list'][0]}))
            second_future = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={
                        'return_value': ref_main_kwargs['pending_list'][1]}))
            # Make the pending keyword arg value a list
            pending_kwargs['pending_list'] = [first_future, second_future]

        # Create a task that depends on the tasks passed to the executor
        # and just regular nonpending kwargs.
        ReturnKwargsTask(
            self.transfer_coordinator, main_kwargs=main_kwargs,
            pending_main_kwargs=pending_kwargs, is_final=True)()
        # The result should have all of the kwargs (both pending and
        # nonpending).
        self.assertEqual(self.transfer_coordinator.result(), ref_main_kwargs)

    def test_single_failed_pending_future(self):
        pending_kwargs = {}
        # Pass some tasks to an executor. Make one successful and the other
        # a failure.
        with futures.ThreadPoolExecutor(1) as executor:
            pending_kwargs['foo'] = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': 'bar'}))
            pending_kwargs['baz'] = executor.submit(
                FailureTask(self.transfer_coordinator))

        # Create a task that depends on the tasks passed to the executor.
        ReturnKwargsTask(
            self.transfer_coordinator, pending_main_kwargs=pending_kwargs,
            is_final=True)()
        # The end result should raise the exception from the initial
        # pending future value.
        with self.assertRaises(TaskFailureException):
            self.transfer_coordinator.result()

    def test_single_failed_pending_future_in_list(self):
        pending_kwargs = {}
        # Pass some tasks to an executor. Make one successful and the other
        # a failure.
        with futures.ThreadPoolExecutor(1) as executor:
            first_future = executor.submit(
                SuccessTask(
                    self.transfer_coordinator,
                    main_kwargs={'return_value': 'bar'}))
            second_future = executor.submit(
                FailureTask(self.transfer_coordinator))
            pending_kwargs['pending_list'] = [first_future, second_future]

        # Create a task that depends on the tasks passed to the executor.
        ReturnKwargsTask(
            self.transfer_coordinator, pending_main_kwargs=pending_kwargs,
            is_final=True)()
        # The end result should raise the exception from the initial
        # pending future value in the list.
        with self.assertRaises(TaskFailureException):
            self.transfer_coordinator.result()
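# A hedged sketch of the helper tasks exercised by TestTask above, assuming
# they subclass s3transfer.tasks.Task and only override _main(). The exact
# bodies are an illustration, not the verified test fixtures.
from s3transfer.tasks import Task


class TaskFailureException(Exception):
    """Raised by FailureTask to drive the failure code paths."""


class SuccessTask(Task):
    def _main(self, return_value='success'):
        # Return a value so the coordinator can record a success.
        return return_value


class FailureTask(Task):
    def _main(self, exception=TaskFailureException):
        # Raise the configured exception type to simulate a failed task.
        raise exception()


class ReturnKwargsTask(Task):
    def _main(self, **kwargs):
        # Echo back whatever keyword arguments were resolved for main().
        return kwargs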
def setUp(self):
    self.meta = TransferMeta()
    self.coordinator = TransferCoordinator()
    self.future = self._get_transfer_future()
def setUp(self):
    self.coordinator = TransferCoordinator()
    self.tag_semaphores = {}
    self.executor = self.get_executor()
def test_repr(self):
    transfer_coordinator = TransferCoordinator(transfer_id=1)
    self.assertEqual(
        repr(transfer_coordinator), 'TransferCoordinator(transfer_id=1)')
def test_transfer_id(self):
    transfer_coordinator = TransferCoordinator(transfer_id=1)
    self.assertEqual(transfer_coordinator.transfer_id, 1)
def setUp(self):
    super(BaseTaskTest, self).setUp()
    self.transfer_coordinator = TransferCoordinator()