def setUp(self):
    """Attach the DB log handler and prepare scoped loggers for the fixtures.

    Builds a workflow/job/task trio on ``self``, then creates loggers
    bound to each record, plus loggers bound to neighbouring ids that
    belong to no fixture record, so tests can verify messages land only
    on the intended rows.
    """
    logger.addHandler(YabiDBHandler())
    create_workflow_with_job_and_a_task(self)

    # Loggers pointed at the fixture's own records.
    self.wfl_logger = create_workflow_logger(logger, self.workflow.pk)
    self.job_logger = create_job_logger(logger, self.job.pk)
    self.task_logger = create_task_logger(logger, self.task.pk)

    # Loggers pointed at ids that do NOT match any fixture record.
    self.other_wfl_logger = create_workflow_logger(logger, self.workflow.pk + 1)
    self.other_job_logger = create_job_logger(logger, self.job.pk + 1)
    self.other_task_logger = create_task_logger(logger, self.task.pk + 1)
def mark_job_as_error(job_id):
    """Mark the given job as errored and propagate the failure upward.

    Loads the job, flips its status to error, asks the parent workflow
    to recompute its own status, and records the failure in the
    workflow-scoped log.
    """
    job = Job.objects.get(pk=job_id)
    workflow_logger = create_workflow_logger(logger, job.workflow.pk)
    job.status = STATUS_ERROR
    job.save()
    job.workflow.update_status()
    workflow_logger.info("Workflow {0} encountered an error.".format(job.workflow.pk))
def create_jobs(self):
    """Create an EngineJob for every job described in ``original_json``.

    Parses the stored workflow JSON, determines and saves the stageout
    directory, creates the jobs inside a single transaction, and marks
    the workflow ready. On any failure the workflow is marked errored
    and the exception re-raised.

    Raises:
        Exception: anything raised while parsing or creating jobs; the
            workflow status is set to error before re-raising.
    """
    wfl_logger = create_workflow_logger(logger, self.pk)
    logger.debug('----- Creating jobs for workflow id %d -----' % self.pk)
    try:
        with transaction.atomic():
            workflow_dict = json.loads(self.original_json)
            self.stageout = self._determine_stageout_dir(workflow_dict)
            self.save()
            job_dicts = workflow_dict["jobs"]
            for i, job_dict in enumerate(job_dicts):
                job = EngineJob(workflow=self, order=i,
                                start_time=datetime.datetime.now())
                job.add_job(job_dict)
            # BUG FIX: previously logged the last loop index `i`, which
            # under-reports the count by one and raises NameError when
            # the workflow has zero jobs; log the actual count instead.
            wfl_logger.info("Created %d jobs for workflow %d",
                            len(job_dicts), self.pk)
            self.status = const.STATUS_READY
            self.save()
    except Exception:
        wfl_logger.exception(
            "Exception during creating jobs for workflow {0}".format(
                self.pk))
        self.status = const.STATUS_ERROR
        self.save()
        raise
def create_jobs(self):
    """Create an EngineJob for every job described in ``original_json``.

    Parses the stored workflow JSON, determines and saves the stageout
    directory, creates the jobs inside a single transaction, and marks
    the workflow ready. On any failure the workflow is marked errored
    and the exception re-raised.

    Raises:
        Exception: anything raised while parsing or creating jobs; the
            workflow status is set to error before re-raising.
    """
    wfl_logger = create_workflow_logger(logger, self.pk)
    logger.debug('----- Creating jobs for workflow id %d -----' % self.pk)
    try:
        with transaction.atomic():
            workflow_dict = json.loads(self.original_json)
            self.stageout = self._determine_stageout_dir(workflow_dict)
            self.save()
            job_dicts = workflow_dict["jobs"]
            for i, job_dict in enumerate(job_dicts):
                job = EngineJob(workflow=self, order=i,
                                start_time=datetime.datetime.now())
                job.add_job(job_dict)
            # BUG FIX: previously logged the last loop index `i`, which
            # under-reports the count by one and raises NameError when
            # the workflow has zero jobs; log the actual count instead.
            wfl_logger.info("Created %d jobs for workflow %d",
                            len(job_dicts), self.pk)
            self.status = const.STATUS_READY
            self.save()
    except Exception:
        wfl_logger.exception("Exception during creating jobs for workflow {0}".format(self.pk))
        self.status = const.STATUS_ERROR
        self.save()
        raise
def mark_job_as_error(job_id):
    """Set the job's status to error and update its workflow accordingly."""
    errored_job = Job.objects.get(pk=job_id)
    wfl_log = create_workflow_logger(logger, errored_job.workflow.pk)

    errored_job.status = STATUS_ERROR
    errored_job.save()

    # Let the parent workflow recompute its aggregate status, then
    # record the failure against the workflow's syslog entries.
    errored_job.workflow.update_status()
    wfl_log.info("Workflow {0} encountered an error.".format(
        errored_job.workflow.pk))
def test_logging_with_workflow_logger(self):
    """A workflow-scoped logger should syslog messages against the workflow row."""
    logger = logging.getLogger('yabi.backend.celerytasks')
    wfl_logger = create_workflow_logger(logger, WORKFLOW_ID)
    wfl_logger.debug(MSG)
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling. Message grammar fixed too.
    self.assertEqual(1, Syslog.objects.filter(message=MSG).count(),
                     "Message should have been syslogged")
    syslog = Syslog.objects.get(message=MSG)
    self.assertEqual('workflow', syslog.table_name)
    self.assertEqual(WORKFLOW_ID, syslog.table_id)
def abort_workflow(workflow_id):
    """Queue abort requests for every currently-executing task of a workflow.

    No-op when the workflow is already aborted; otherwise an
    ``abort_task`` celery call is queued per running task.
    """
    wfl_logger = create_workflow_logger(logger, workflow_id)
    workflow = EngineWorkflow.objects.get(pk=workflow_id)
    if workflow.status == STATUS_ABORTED:
        return

    # Tasks whose job hasn't already been aborted ...
    candidates = EngineTask.objects.filter(
        job__workflow__id=workflow.pk).exclude(job__status=STATUS_ABORTED)
    # ... narrowed down to the ones actually executing right now.
    running_tasks = [t for t in candidates if t.status == STATUS_EXEC]

    wfl_logger.info("Found %s running tasks", len(running_tasks))
    for task in running_tasks:
        abort_task.apply_async((task.pk,))
def test_exception_info_is_logged(self):
    """``logger.exception`` should capture exception type and value in syslog."""
    logger = logging.getLogger('yabi')
    wfl_logger = create_workflow_logger(logger, WORKFLOW_ID)
    try:
        raise MyVerySpecificException("my error message")
    except MyVerySpecificException:
        wfl_logger.exception("Exception caught")
    syslog = Syslog.objects.get(table_name='workflow', table_id=WORKFLOW_ID,
                                message__startswith="Exception caught")
    # assertIn gives a clearer failure report than assertTrue(x in y).
    self.assertIn('MyVerySpecificException', syslog.message,
                  "Information about the exception should be logged")
    # Typo fixed in the failure message ("excpetions" -> "exception's").
    self.assertIn('my error message', syslog.message,
                  "The exception's value should be logged")
def abort_workflow(workflow_id):
    """Abort all running tasks belonging to the given workflow.

    Returns immediately when the workflow has already been aborted;
    otherwise queues one ``abort_task`` per executing task.
    """
    log = create_workflow_logger(logger, workflow_id)
    workflow = EngineWorkflow.objects.get(pk=workflow_id)
    if workflow.status == STATUS_ABORTED:
        return

    # Every task of this workflow except those on already-aborted jobs,
    # filtered down in Python to the ones still executing.
    not_aborted = EngineTask.objects.filter(
        job__workflow__id=workflow.pk).exclude(job__status=STATUS_ABORTED)
    executing = [task for task in not_aborted if task.status == STATUS_EXEC]

    log.info("Found %s running tasks", len(executing))
    for running in executing:
        abort_task.apply_async((running.pk,))