def test_delete_working_directory_existing(self, mock_pulp_config_get, mock_rmtree,
                                           mock_path_exists, mock_celery_current_task):
    """The per-task working directory is removed when it exists on disk."""
    working_dir_root = '/var/cache/pulp'
    task_id = 'mock-task-id'
    host = 'mock-host'
    mock_pulp_config_get.return_value = working_dir_root
    mock_celery_current_task.request = mock.Mock(id=task_id, hostname=host)

    delete_working_directory()

    # The root is looked up from server config, then <root>/<hostname>/<task_id> is removed.
    mock_pulp_config_get.assert_called_with('server', 'working_directory')
    mock_rmtree.assert_called_with('%s/%s/%s' % (working_dir_root, host, task_id))
def test_delete_working_directory_non_existing(self, mock_pulp_config_get, mock_rmtree,
                                               mock_path_exists, mock_celery_current_task):
    """No removal is attempted when the working directory does not exist."""
    mock_pulp_config_get.return_value = '/var/cache/pulp'
    mock_celery_current_task.request = mock.Mock(id='mock-task-id', hostname='mock-host')

    delete_working_directory()

    # The config lookup still happens, but rmtree must never be invoked.
    mock_pulp_config_get.assert_called_with('server', 'working_directory')
    self.assertFalse(mock_rmtree.called, "Nothing should be removed.")
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    Error handler run by the worker when the task fails.

    For asynchronous tasks it records the error state, finish time and
    traceback on the matching TaskStatus document; synchronous
    (called-directly) invocations skip the status update. The task's
    working directory is always cleaned up at the end.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        # celery will log the traceback
        _logger.info(_('Task failed : [%s]') % task_id)
    if not self.request.called_directly:
        finish_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        # Non-Pulp exceptions are wrapped so the error can be serialized uniformly.
        error = exc if isinstance(exc, PulpException) else PulpException(str(exc))
        task_status['error'] = error.to_dict()
        task_status.save()
    common_utils.delete_working_directory()
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    Error handler run by the worker when the task fails.

    Bumps the consecutive-failure count for scheduled calls, and for
    asynchronous tasks records the error state, finish time and traceback
    on the matching TaskStatus document; synchronous (called-directly)
    invocations skip the status update. Profiling output and the task's
    working directory are handled at the end.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        # celery will log the traceback
        _logger.info(_('Task failed : [%s]') % task_id)
    schedule_id = kwargs.get('scheduled_call_id')
    if schedule_id is not None:
        utils.increment_failure_count(schedule_id)
    if not self.request.called_directly:
        finish_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        # Non-Pulp exceptions are wrapped so the error can be serialized uniformly.
        error = exc if isinstance(exc, PulpException) else PulpException(str(exc))
        task_status['error'] = error.to_dict()
        task_status.save()
    self._handle_cProfile(task_id)
    common_utils.delete_working_directory()
def on_success(self, retval, task_id, args, kwargs):
    """
    Success handler run by the worker when the task executes successfully.

    For asynchronous tasks it records the result, finish time and final
    state on the matching TaskStatus document; synchronous
    (called-directly) invocations skip the status update. Scheduled calls
    get their consecutive-failure counter reset, profiling data is dumped
    when enabled, and the task's working directory is cleaned up.

    :param retval:  The return value of the task.
    :param task_id: Unique id of the executed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    """
    _logger.debug("Task successful : [%s]" % task_id)
    schedule_id = kwargs.get('scheduled_call_id')
    if schedule_id is not None:
        # An AsyncResult means the real work was dispatched elsewhere, so the
        # schedule's failure count is left alone here.
        if not isinstance(retval, AsyncResult):
            _logger.info(
                _('resetting consecutive failure count for schedule %(id)s'
                  ) % {'id': schedule_id})
            utils.reset_failure_count(schedule_id)
    if not self.request.called_directly:
        finish_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['finish_time'] = finish_time
        task_status['result'] = retval
        # Only set the state to finished if it's not already in a complete state.
        # This is important for when the task has been canceled, so we don't move
        # the task from canceled to finished.
        if task_status['state'] not in constants.CALL_COMPLETE_STATES:
            task_status['state'] = constants.CALL_FINISHED_STATE
        if isinstance(retval, TaskResult):
            task_status['result'] = retval.return_value
            if retval.error:
                task_status['error'] = retval.error.to_dict()
            if retval.spawned_tasks:
                # Children arrive either as AsyncResults or plain dicts;
                # anything else is silently skipped.
                task_status['spawned_tasks'] = [
                    child.task_id if isinstance(child, AsyncResult)
                    else child['task_id']
                    for child in retval.spawned_tasks
                    if isinstance(child, (AsyncResult, dict))]
        if isinstance(retval, AsyncResult):
            task_status['spawned_tasks'] = [retval.task_id, ]
            task_status['result'] = None
        task_status.save()
    # NOTE(review): identity check — a truthy non-bool (e.g. the string 'true')
    # would be ignored here; confirm config.get returns a real bool.
    if config.get('profiling', 'enabled') is True:
        profile_directory = config.get('profiling', 'directory')
        self.pr.disable()
        self.pr.dump_stats("%s/%s" % (profile_directory, task_id))
    common_utils.delete_working_directory()
def on_success(self, retval, task_id, args, kwargs):
    """
    Success handler run by the worker when the task executes successfully.

    It updates the result, finish time and final state of the relevant
    TaskStatus for asynchronous tasks, skipping the update for synchronous
    (called-directly) tasks; it also resets the consecutive-failure count
    for scheduled calls, dumps profiling stats when enabled, and removes
    the task's working directory.

    :param retval:  The return value of the task.
    :param task_id: Unique id of the executed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    """
    _logger.debug("Task successful : [%s]" % task_id)
    scheduled_call_id = kwargs.get('scheduled_call_id')
    if scheduled_call_id is not None and not isinstance(retval, AsyncResult):
        _logger.info(_('resetting consecutive failure count for schedule %(id)s') %
                     {'id': scheduled_call_id})
        utils.reset_failure_count(scheduled_call_id)
    if not self.request.called_directly:
        completed_at = datetime.now(dateutils.utc_tz())
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['finish_time'] = dateutils.format_iso8601_datetime(completed_at)
        task_status['result'] = retval
        # Only set the state to finished if it's not already in a complete state.
        # This is important for when the task has been canceled, so we don't move
        # the task from canceled to finished.
        if task_status['state'] not in constants.CALL_COMPLETE_STATES:
            task_status['state'] = constants.CALL_FINISHED_STATE
        if isinstance(retval, TaskResult):
            task_status['result'] = retval.return_value
            if retval.error:
                task_status['error'] = retval.error.to_dict()
            if retval.spawned_tasks:
                spawned_ids = []
                for child in retval.spawned_tasks:
                    # Children arrive either as AsyncResults or plain dicts;
                    # anything else is silently skipped.
                    if isinstance(child, AsyncResult):
                        spawned_ids.append(child.task_id)
                    elif isinstance(child, dict):
                        spawned_ids.append(child['task_id'])
                task_status['spawned_tasks'] = spawned_ids
        if isinstance(retval, AsyncResult):
            task_status['spawned_tasks'] = [retval.task_id, ]
            task_status['result'] = None
        task_status.save()
    # NOTE(review): identity check — a truthy non-bool (e.g. the string 'true')
    # would be ignored here; confirm config.get returns a real bool.
    if config.get('profiling', 'enabled') is True:
        profile_directory = config.get('profiling', 'directory')
        self.pr.disable()
        self.pr.dump_stats("%s/%s" % (profile_directory, task_id))
    common_utils.delete_working_directory()
def _handle_on_failure_cleanup(self, task_id, exc, einfo):
    """
    Record a failed task's final state on its TaskStatus and clean up.

    Sets the error state, finish time, traceback and serialized error on
    the TaskStatus, then dumps profiling data and removes the task's
    working directory.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    completed_at = datetime.now(dateutils.utc_tz())
    task_status = TaskStatus.objects.get(task_id=task_id)
    task_status['state'] = constants.CALL_ERROR_STATE
    task_status['finish_time'] = dateutils.format_iso8601_datetime(completed_at)
    task_status['traceback'] = einfo.traceback
    # Non-Pulp exceptions are wrapped so the error can be serialized uniformly.
    if not isinstance(exc, PulpException):
        exc = PulpException(str(exc))
    task_status['error'] = exc.to_dict()
    task_status.save()
    self._handle_cProfile(task_id)
    common_utils.delete_working_directory()