def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    _logger.debug("Task failed : [%s]" % task_id)
    if not self.request.called_directly:
        now = datetime.now(dateutils.utc_tz())
        finish_time = dateutils.format_iso8601_datetime(now)
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        if not isinstance(exc, PulpException):
            exc = PulpException(str(exc))
        task_status['error'] = exc.to_dict()
        task_status.save()

def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        _logger.info(_('Task failed : [%s]') % task_id)
        # celery will log the traceback
    if not self.request.called_directly:
        now = datetime.now(dateutils.utc_tz())
        finish_time = dateutils.format_iso8601_datetime(now)
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        if not isinstance(exc, PulpException):
            exc = PulpException(str(exc))
        task_status['error'] = exc.to_dict()
        task_status.save()
    common_utils.delete_working_directory()

def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        _logger.info(_('Task failed : [%s]') % task_id)
        # celery will log the traceback
    if kwargs.get('scheduled_call_id') is not None:
        utils.increment_failure_count(kwargs['scheduled_call_id'])
    if not self.request.called_directly:
        now = datetime.now(dateutils.utc_tz())
        finish_time = dateutils.format_iso8601_datetime(now)
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        if not isinstance(exc, PulpException):
            exc = PulpException(str(exc))
        task_status['error'] = exc.to_dict()
        task_status.save()
    self._handle_cProfile(task_id)
    common_utils.delete_working_directory()

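# The on_success tests further down exercise the analogous success handler; a failure-path
# test is not shown in these snippets. A minimal sketch of one, reusing the same TaskStatus
# and mock_request fixtures, might look like the following. The mock ExceptionInfo stub and
# the asserted values are assumptions for illustration, not taken from the source.
def test_on_failure_handler_updates_task_status(self, mock_request):
    task_id = str(uuid.uuid4())
    mock_request.called_directly = False
    TaskStatus(task_id).save()
    task = tasks.Task()
    try:
        raise PulpException('error-foo')
    except PulpException as exc:
        # celery normally supplies an ExceptionInfo; a simple stub exposing a
        # traceback attribute is enough for the handler above
        einfo = mock.Mock(traceback='stub traceback')
        task.on_failure(exc, task_id, [], {}, einfo)
    updated = TaskStatus.objects(task_id=task_id).first()
    self.assertEqual(updated['state'], constants.CALL_ERROR_STATE)
    self.assertEqual(updated['error']['description'], 'error-foo')
    self.assertEqual(updated['traceback'], 'stub traceback')
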
def test_to_dict(self):
    test_exception = PulpException("foo_msg")
    test_exception.error_data = {"foo": "bar"}

    result = test_exception.to_dict()

    compare_dict(result, {'code': test_exception.error_code.code,
                          'description': str(test_exception),
                          'data': {"foo": "bar"},
                          'sub_errors': []})

def test_serialize(self):
    async_result = AsyncResult('foo')
    test_exception = PulpException('foo')
    result = tasks.TaskResult('foo', test_exception,
                              [{'task_id': 'baz'}, async_result, "qux"])
    serialized = result.serialize()
    self.assertEquals(serialized.get('result'), 'foo')
    compare_dict(test_exception.to_dict(), serialized.get('error'))
    self.assertEquals(serialized.get('spawned_tasks'),
                      [{'task_id': 'baz'}, {'task_id': 'foo'}, {'task_id': 'qux'}])

def test_to_dict_nested_pulp_exception(self):
    test_exception = PulpException("foo_msg")
    test_exception.error_data = {"foo": "bar"}
    test_exception.add_child_exception(PulpCodedException(error_codes.PLP0001))

    result = test_exception.to_dict()

    child_exception = result['sub_errors'][0]
    compare_dict(child_exception, {'code': error_codes.PLP0001.code,
                                   'description': error_codes.PLP0001.message,
                                   'data': {},
                                   'sub_errors': []})

def _handle_on_failure_cleanup(self, task_id, exc, einfo):
    now = datetime.now(dateutils.utc_tz())
    finish_time = dateutils.format_iso8601_datetime(now)
    task_status = TaskStatus.objects.get(task_id=task_id)
    task_status['state'] = constants.CALL_ERROR_STATE
    task_status['finish_time'] = finish_time
    task_status['traceback'] = einfo.traceback
    if not isinstance(exc, PulpException):
        exc = PulpException(str(exc))
    task_status['error'] = exc.to_dict()
    task_status.save()
    self._handle_cProfile(task_id)
    common_utils.delete_working_directory()

def test_serialize(self):
    async_result = AsyncResult('foo')
    test_exception = PulpException('foo')
    task_status = TaskStatus(task_id='quux')
    result = tasks.TaskResult('foo', test_exception,
                              [{'task_id': 'baz'}, async_result, "qux", task_status])
    serialized = result.serialize()
    self.assertEquals(serialized.get('result'), 'foo')
    compare_dict(test_exception.to_dict(), serialized.get('error'))
    self.assertEquals(serialized.get('spawned_tasks'),
                      [{'task_id': 'baz'}, {'task_id': 'foo'},
                       {'task_id': 'qux'}, {'task_id': 'quux'}])

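# The two test_serialize variants above assert the same normalization for every element of
# spawned_tasks. Reconstructed solely from those assertions (the real TaskResult.serialize
# may well be implemented differently), each element appears to be reduced to a
# {'task_id': ...} dict roughly like this:
def _serialize_spawned_task(item):
    if isinstance(item, dict):
        return {'task_id': item['task_id']}
    if isinstance(item, AsyncResult):
        return {'task_id': item.id}          # AsyncResult('foo') -> {'task_id': 'foo'}
    if isinstance(item, TaskStatus):
        return {'task_id': item['task_id']}  # TaskStatus(task_id='quux') -> {'task_id': 'quux'}
    return {'task_id': str(item)}            # plain strings such as "qux"
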
def decode_admin_user(self, encoded_string):
    '''
    Decodes the single line admin user identification produced by encode_admin_user
    into all of the parts that make up that identification.

    @param encoded_string: string representation of the user provided by encode_admin_user
    @type  encoded_string: string

    @return: tuple of information describing the admin user; (username, id)
    @rtype:  (string, string)
    '''
    # Strip off the leading "admin:" prefix
    encoded_string = encoded_string[len(ADMIN_PREFIX):]

    # Find where to split
    parsed = encoded_string.split(ADMIN_SPLITTER)

    if len(parsed) != 2:
        raise PulpException('Invalid encoded admin user information [%s]' % encoded_string)

    username = parsed[0]
    id = parsed[1]

    return username, id

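# Illustrative round trip, assuming ADMIN_PREFIX = 'admin:' and ADMIN_SPLITTER = ':'
# (both values are assumptions; the real constants are defined elsewhere in the module):
#
#   encoded = 'admin:jdoe:12345'            # as produced by encode_admin_user
#   username, user_id = decode_admin_user(encoded)
#   # username == 'jdoe', user_id == '12345'
#
# Anything that does not split into exactly two parts after the prefix is rejected with a
# PulpException, per the len(parsed) != 2 check above.
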
def test_spawned_task_status(self, mock_request):
    async_result = AsyncResult('foo-id')
    retval = tasks.TaskResult(error=PulpException('error-foo'), result='bar')
    retval.spawned_tasks = [async_result]
    task_id = str(uuid.uuid4())
    args = [1, 'b', 'iii']
    kwargs = {'1': 'for the money', 'tags': ['test_tags'], 'routing_key': WORKER_2}
    mock_request.called_directly = False
    task_status = TaskStatus(task_id).save()
    self.assertEqual(task_status['state'], 'waiting')
    self.assertEqual(task_status['finish_time'], None)
    task = tasks.Task()
    task.on_success(retval, task_id, args, kwargs)
    new_task_status = TaskStatus.objects(task_id=task_id).first()
    self.assertEqual(new_task_status['state'], 'finished')
    self.assertEqual(new_task_status['result'], 'bar')
    self.assertEqual(new_task_status['error']['description'], 'error-foo')
    self.assertFalse(new_task_status['finish_time'] is None)
    # Make sure that parse_iso8601_datetime is able to parse the finish_time without errors
    dateutils.parse_iso8601_datetime(new_task_status['finish_time'])
    self.assertEqual(new_task_status['spawned_tasks'], ['foo-id'])

def test_distributor_update_with_agent_errors(self, mock_dist_manager, mock_bind_manager,
                                              mock_bind):
    generated_distributor = {'foo': 'bar'}
    mock_dist_manager.return_value.update_distributor_config.return_value = \
        generated_distributor
    mock_bind_manager.return_value.find_by_distributor.return_value = [
        {'consumer_id': 'foo', 'repo_id': 'repo-foo', 'distributor_id': 'dist-id',
         'notify_agent': True, 'binding_config': {'conf': 'baz'}}]
    side_effect_exception = PulpException('foo')
    mock_bind.side_effect = side_effect_exception

    result = repository.distributor_update('foo-id', 'bar-id', {}, None)
    self.assertTrue(isinstance(result.error, PulpException))
    self.assertEquals(result.error.error_code, error_codes.PLP0002)
    self.assertEquals(result.error.child_exceptions[0], side_effect_exception)

def test_distributor_delete_with_agent_errors(self, mock_dist_manager, mock_bind_manager,
                                              mock_unbind):
    mock_bind_manager.return_value.find_by_repo.return_value = [
        {'consumer_id': 'foo', 'repo_id': 'repo-foo', 'distributor_id': 'dist-id'}]
    mock_bind_manager.return_value.find_by_distributor.return_value = [
        {'consumer_id': 'foo', 'repo_id': 'repo-foo', 'distributor_id': 'dist-id'}]
    side_effect_exception = PulpException('foo')
    mock_unbind.side_effect = side_effect_exception

    result = repository.distributor_delete('foo-id', 'bar-id')
    mock_unbind.assert_called_once_with('foo', 'repo-foo', 'dist-id', ANY)
    self.assertTrue(isinstance(result.error, PulpException))
    self.assertEquals(result.error.error_code, error_codes.PLP0003)
    self.assertEquals(result.error.child_exceptions[0], side_effect_exception)

def test_on_success_handler_spawned_task_status(self, mock_request):
    """
    Make sure that overridden on_success handler updates task status correctly
    """
    async_result = AsyncResult('foo-id')
    retval = tasks.TaskResult(error=PulpException('error-foo'), result='bar')
    retval.spawned_tasks = [async_result]
    task_id = str(uuid.uuid4())
    args = [1, 'b', 'iii']
    kwargs = {'1': 'for the money', 'tags': ['test_tags'], 'queue': WORKER_2_QUEUE}
    mock_request.called_directly = False
    task_status = TaskStatusManager.create_task_status(task_id, 'some_queue')
    self.assertEqual(task_status['state'], 'waiting')
    self.assertEqual(task_status['finish_time'], None)
    task = tasks.Task()
    task.on_success(retval, task_id, args, kwargs)
    new_task_status = TaskStatusManager.find_by_task_id(task_id)
    self.assertEqual(new_task_status['state'], 'finished')
    self.assertEqual(new_task_status['result'], 'bar')
    self.assertEqual(new_task_status['error']['description'], 'error-foo')
    self.assertFalse(new_task_status['finish_time'] is None)
    # Make sure that parse_iso8601_datetime is able to parse the finish_time without errors
    dateutils.parse_iso8601_datetime(new_task_status['finish_time'])
    self.assertEqual(new_task_status['spawned_tasks'], ['foo-id'])

def test_to_dict_nested_general_exception(self):
    test_exception = PulpException("foo_msg")
    test_exception.error_data = {"foo": "bar"}
    test_exception.add_child_exception(Exception("Foo Message"))
    test_exception.add_child_exception(Exception("Bar Message"))

    result = test_exception.to_dict()

    child_exception = result['sub_errors'][0]
    compare_dict(child_exception, {'code': error_codes.PLP0000.code,
                                   'description': "Foo Message",
                                   'data': {},
                                   'sub_errors': []})
    child_exception = result['sub_errors'][1]
    compare_dict(child_exception, {'code': error_codes.PLP0000.code,
                                   'description': "Bar Message",
                                   'data': {},
                                   'sub_errors': []})

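# Taken together, the to_dict tests above imply a serialized error of this shape
# (reconstructed from the assertions, not copied from the PulpException implementation):
#
#   {
#       'code': 'PLP0000',           # exception.error_code.code; PLP0000 appears to be the
#                                    # generic code used when wrapping plain Exceptions
#       'description': 'foo_msg',    # str(exception)
#       'data': {'foo': 'bar'},      # exception.error_data
#       'sub_errors': [ ... ],       # child exceptions serialized with the same structure
#   }
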
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    logger.debug("Task failed : [%s]" % task_id)
    if not self.request.called_directly:
        delta = {'state': dispatch_constants.CALL_ERROR_STATE,
                 'finish_time': dateutils.now_utc_timestamp(),
                 'traceback': einfo.traceback}
        if not isinstance(exc, PulpException):
            exc = PulpException(str(exc))
        delta['error'] = exc.to_dict()
        TaskStatusManager.update_task_status(task_id=task_id, delta=delta)

def test_delete_with_bindings_errors(self, mock_repo_manager, mock_bind_manager, mock_unbind):
    mock_bind_manager.return_value.find_by_repo.return_value = [
        {'consumer_id': 'foo', 'repo_id': 'repo-foo', 'distributor_id': 'dist-id'}]
    side_effect_exception = PulpException('foo')
    mock_unbind.side_effect = side_effect_exception

    result = repository.delete('foo-repo')
    mock_unbind.assert_called_once_with('foo', 'repo-foo', 'dist-id', ANY)
    self.assertTrue(isinstance(result.error, PulpException))
    self.assertEquals(result.error.error_code, error_codes.PLP0007)
    error_dict = result.error.to_dict()
    self.assertTrue("Error occurred while cascading delete of repository"
                    in error_dict['description'])
    self.assertEquals(result.error.child_exceptions[0], side_effect_exception)
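
# Pattern shared by the three repository task tests above, inferred from their assertions
# only: when an agent-side bind/unbind call raises, the task does not fail outright. It
# returns a TaskResult whose .error is a coded PulpException (PLP0002 for distributor
# update, PLP0003 for distributor delete, PLP0007 for cascading repository delete) with the
# original exception preserved in error.child_exceptions.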