def test_gets_correct_collection(self, mock_get_collection):
    """
    Verify that resetting the failure count fetches the schedule collection.
    """
    utils.reset_failure_count(self.schedule_id)

    # the collection accessor takes no arguments and must be hit exactly once
    mock_get_collection.assert_called_once_with()
def handle_succeeded_task(self, event):
    """
    Celery event handler for succeeded tasks.

    If the succeeded task is one we are watching on behalf of a schedule,
    either begin watching the child task it spawned (when the result is
    itself an AsyncResult), or reset the schedule's consecutive failure
    count to 0 if it had any recorded failures.

    :param event: dictionary of poorly-documented data about a celery task.
                  At a minimum, this method depends on the key 'uuid' being
                  present and representing the task's ID.
    :type event: dict
    """
    event_id = event['uuid']
    schedule_id, has_failure = self.pop(event_id)
    # not a task we were watching; nothing to do
    if not schedule_id:
        return

    child = AsyncResult(event_id, app=app).result
    if isinstance(child, AsyncResult):
        # the task merely dispatched another task; watch that one instead
        _logger.debug(_('watching child event %(id)s for failure') % {'id': child.id})
        self.add(child.id, schedule_id, has_failure)
    elif has_failure:
        _logger.info(
            _('resetting consecutive failure count for schedule %(id)s') % {'id': schedule_id})
        utils.reset_failure_count(schedule_id)
def on_success(self, retval, task_id, args, kwargs):
    """
    Success handler run by the worker when the task executes successfully.

    For scheduled calls, resets the schedule's consecutive failure count
    (unless the "result" is really a dispatched child task). For tasks not
    called directly, it records finish_time, result, spawned tasks and final
    state on the task's TaskStatus document, then optionally dumps profiling
    stats. Skips status updates for synchronous (directly-called) tasks.

    :param retval: The return value of the task.
    :param task_id: Unique id of the executed task.
    :param args: Original arguments for the executed task.
    :param kwargs: Original keyword arguments for the executed task.
    """
    _logger.debug("Task successful : [%s]" % task_id)
    schedule_id = kwargs.get('scheduled_call_id')
    if schedule_id is not None and not isinstance(retval, AsyncResult):
        _logger.info(
            _('resetting consecutive failure count for schedule %(id)s') % {'id': schedule_id})
        utils.reset_failure_count(schedule_id)

    if not self.request.called_directly:
        completed_at = dateutils.format_iso8601_datetime(datetime.now(dateutils.utc_tz()))
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['finish_time'] = completed_at
        task_status['result'] = retval
        # Leave already-complete states (e.g. canceled) untouched so a canceled
        # task is never flipped to finished.
        if task_status['state'] not in constants.CALL_COMPLETE_STATES:
            task_status['state'] = constants.CALL_FINISHED_STATE

        if isinstance(retval, TaskResult):
            task_status['result'] = retval.return_value
            if retval.error:
                task_status['error'] = retval.error.to_dict()
            if retval.spawned_tasks:
                spawned_ids = []
                for child in retval.spawned_tasks:
                    if isinstance(child, AsyncResult):
                        spawned_ids.append(child.task_id)
                    elif isinstance(child, dict):
                        spawned_ids.append(child['task_id'])
                task_status['spawned_tasks'] = spawned_ids
        if isinstance(retval, AsyncResult):
            # the return value is a dispatched child task, not a real result
            task_status['spawned_tasks'] = [retval.task_id]
            task_status['result'] = None

        task_status.save()

        if config.get('profiling', 'enabled') is True:
            self.pr.disable()
            self.pr.dump_stats("%s/%s" % (config.get('profiling', 'directory'), task_id))

    # NOTE(review): placed at method level so cleanup runs even for
    # directly-called tasks — confirm against the original indentation.
    common_utils.delete_working_directory()
def on_success(self, retval, task_id, args, kwargs):
    """
    Success handler run by the worker when the task executes successfully.

    Resets a schedule's consecutive failure count when the task belonged to
    a scheduled call and did not just dispatch a child task. For
    asynchronous tasks it updates the TaskStatus document (finish_time,
    result, state, spawned tasks), then optionally dumps profiling stats.
    Status updates are skipped for synchronous tasks.

    :param retval: The return value of the task.
    :param task_id: Unique id of the executed task.
    :param args: Original arguments for the executed task.
    :param kwargs: Original keyword arguments for the executed task.
    """
    _logger.debug("Task successful : [%s]" % task_id)
    if kwargs.get('scheduled_call_id') is not None:
        if not isinstance(retval, AsyncResult):
            _logger.info(_('resetting consecutive failure count for schedule %(id)s')
                         % {'id': kwargs['scheduled_call_id']})
            utils.reset_failure_count(kwargs['scheduled_call_id'])

    if not self.request.called_directly:
        status = TaskStatus.objects.get(task_id=task_id)
        status['finish_time'] = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        status['result'] = retval
        # Don't overwrite a complete state (e.g. canceled) with finished.
        if status['state'] not in constants.CALL_COMPLETE_STATES:
            status['state'] = constants.CALL_FINISHED_STATE

        if isinstance(retval, TaskResult):
            status['result'] = retval.return_value
            if retval.error:
                status['error'] = retval.error.to_dict()
            if retval.spawned_tasks:
                children = []
                for item in retval.spawned_tasks:
                    if isinstance(item, AsyncResult):
                        children.append(item.task_id)
                    elif isinstance(item, dict):
                        children.append(item['task_id'])
                status['spawned_tasks'] = children
        if isinstance(retval, AsyncResult):
            # the task only spawned a child; there is no real result yet
            status['spawned_tasks'] = [retval.task_id]
            status['result'] = None

        status.save()

        if config.get('profiling', 'enabled') is True:
            profile_directory = config.get('profiling', 'directory')
            self.pr.disable()
            self.pr.dump_stats("%s/%s" % (profile_directory, task_id))

    # NOTE(review): cleanup appears unconditional here — confirm against the
    # original indentation.
    common_utils.delete_working_directory()
def test_reset(self, mock_get_collection):
    """
    Verify reset_failure_count issues exactly one keyword-only update with
    the expected spec and $set document.
    """
    update_mock = mock_get_collection.return_value.update

    utils.reset_failure_count(self.schedule_id)

    self.assertEqual(update_mock.call_count, 1)
    positional, keyword = update_mock.call_args
    # all arguments must have been passed by keyword
    self.assertEqual(len(positional), 0)
    self.assertEqual(keyword['spec'], {'_id': ObjectId(self.schedule_id)})
    changes = keyword['document']['$set']
    self.assertEqual(changes['consecutive_failures'], 0)
    # make sure the last_updated value is within the last tenth of a second
    self.assertTrue(time.time() - changes['last_updated'] < .1)
def test_reset(self, mock_get_collection):
    """
    Verify the counter is zeroed and last_updated stamped via a single
    update call made entirely with keyword arguments.
    """
    update = mock_get_collection.return_value.update

    utils.reset_failure_count(self.schedule_id)

    self.assertEqual(update.call_count, 1)
    args, kwargs = update.call_args
    self.assertEqual(len(args), 0)
    self.assertEqual(kwargs["spec"], {"_id": ObjectId(self.schedule_id)})
    updated_fields = kwargs["document"]["$set"]
    self.assertEqual(updated_fields["consecutive_failures"], 0)
    # the timestamp must be within the last tenth of a second
    self.assertTrue(time.time() - updated_fields["last_updated"] < 0.1)
def handle_succeeded_task(self, event):
    """
    Celery event handler for succeeded tasks.

    Checks whether the succeeded task is being watched for a schedule. If
    its result is a child AsyncResult, the watch is transferred to that
    child; otherwise, a schedule with recorded failures gets its
    consecutive failure count reset to 0.

    :param event: dictionary of poorly-documented data about a celery task.
                  At a minimum, this method depends on the key 'uuid' being
                  present and representing the task's ID.
    :type event: dict
    """
    task_id = event['uuid']
    schedule_id, had_failure = self.pop(task_id)
    if schedule_id:
        result = AsyncResult(task_id, app=app).result
        if isinstance(result, AsyncResult):
            # success only dispatched a child task; track that child instead
            msg = _('watching child event %(id)s for failure') % {'id': result.id}
            _logger.debug(msg)
            self.add(result.id, schedule_id, had_failure)
        elif had_failure:
            _logger.info(_('resetting consecutive failure count for schedule %(id)s')
                         % {'id': schedule_id})
            utils.reset_failure_count(schedule_id)