def test_abort_running(self, data_builder_mock, signaller_mock):
    """Aborting a workflow signals only its RUNNING instances."""
    mock_client = mock.Mock()
    mock_store = mock.Mock()
    builder = mock.Mock()
    data_builder_mock.return_value = builder
    # One finished and one running instance; only the running one
    # should receive an ABORT signal.
    builder.get_instances.return_value = [
        WorkflowInstanceData('some_workflow', '123',
                             status=Status.FAILURE),
        WorkflowInstanceData('some_workflow', '12345',
                             status=Status.RUNNING),
    ]
    signaller = mock.Mock()
    signaller_mock.return_value = signaller

    schedule = WorkflowSchedule(workflow='some_workflow')
    schedule.abort_running(mock_client, mock_store)

    # Exactly one signaller was created, targeting the running instance.
    signaller_mock.assert_called_once_with(mock_client,
                                           workflow='some_workflow',
                                           instance='12345')
    signaller.set_action.assert_called_once_with(Signal.ABORT)
def test_workflow_data_from_instances_data4(self):
    """Workflow summary is driven by the most recently finished instance.

    With an aborted, a succeeded, and a failed-but-open (end time
    sys.maxint) instance present, the summary reports the SUCCESS
    instance as the last one and counts no running instances.
    """
    instances = [
        WorkflowInstanceData('wf', '22346', Status.ABORTED, 12345, 12392),
        WorkflowInstanceData('wf', '22347', Status.SUCCESS, 12346, 12393),
        WorkflowInstanceData('wf', '22345', Status.FAILURE, 12391,
                             sys.maxint),
    ]

    summary = DataBuilder._workflow_data_from_instances_data(instances)

    self.assertEquals(summary.workflow, 'wf')
    self.assertEquals(summary.status, Status.SUCCESS)
    self.assertEquals(summary.last_instance, '22347')
    self.assertEquals(summary.last_start_time, 12346)
    self.assertEquals(summary.last_end_time, 12393)
    self.assertEquals(summary.running_instance_number, 0)
def test_workflow_data_from_instances_data2(self):
    """A running instance dominates the workflow summary.

    When any instance is RUNNING, the summary reports RUNNING status,
    points at that instance, leaves the end time unset, and counts one
    running instance.
    """
    instances = [
        WorkflowInstanceData('wf', '22346', Status.ABORTED, 12355,
                             sys.maxint),
        WorkflowInstanceData('wf', '22347', Status.SUCCESS, 12365, 12390),
        WorkflowInstanceData('wf', '22345', Status.RUNNING, 12345, None),
    ]

    summary = DataBuilder._workflow_data_from_instances_data(instances)

    self.assertEquals(summary.workflow, 'wf')
    self.assertEquals(summary.status, Status.RUNNING)
    self.assertEquals(summary.last_instance, '22345')
    self.assertEquals(summary.last_start_time, 12345)
    self.assertEquals(summary.last_end_time, None)
    self.assertEquals(summary.running_instance_number, 1)
def test_send_instance_end_message(self, smtplib_mock):
    """The instance-end email carries the right content and job order."""
    emailer = Emailer('some_host', '8080')
    instance = WorkflowInstanceData(workflow='some_workflow',
                                    instance='123',
                                    status=Status.FAILURE,
                                    start_time=10,
                                    end_time=100)
    parent_job = JobData('some_workflow', '123', 'parent', 'ShellJob',
                         False, 'some_command', ['workflow_input'],
                         ['parent_output'], ['*****@*****.**'], 1, 2,
                         100, 200, 1, Status.SUCCESS, 10, 50)
    child_job = JobData('some_workflow', '123', 'child', 'ShellJob',
                        False, 'some_command', ['parent_output'], [],
                        ['*****@*****.**'], 1, 2, 100, 200, 2,
                        Status.FAILURE, 50, 100)
    smtp = mock.Mock()
    smtplib_mock.SMTP.return_value = smtp
    sendmail = mock.Mock()
    smtp.sendmail = sendmail

    # Pass jobs in reverse end-time order; the emailer must reorder them.
    emailer.send_instance_end_message(
        ['*****@*****.**', '*****@*****.**'],
        instance,
        [child_job, parent_job])

    sender = sendmail.call_args[0][0]
    recipients = sendmail.call_args[0][1]
    msg = sendmail.call_args[0][2]
    self.assertEqual(PinballConfig.DEFAULT_EMAIL, sender)
    self.assertEqual(['*****@*****.**', '*****@*****.**'], recipients)
    for fragment in ('FAILURE for workflow some_workflow', '123',
                     'parent', 'child', 'SUCCESS'):
        self.assertTrue(fragment in msg)
    # Make sure that jobs are ordered on the end time: parent ended
    # first, so it must appear before child in the message body.
    self.assertLess(msg.find('parent'), msg.find('child'))
def _instance_data_from_job_tokens(self, job_tokens):
    """Extract instance data from job tokens in that instance.

    Folds the execution history of every job token into a single
    WorkflowInstanceData: the earliest recorded start time, the latest
    recorded end time, and a status derived from job exit codes and any
    archive/abort signals found in the store.

    Args:
        job_tokens: Job tokens that belong to a single workflow
            instance.  Must be non-empty.
    Returns:
        Workflow data describing the workflow instance identified by
        the input job tokens.
    """
    assert job_tokens
    # Sentinels: start_time shrinks from "now", end_time grows from 0.
    # end_time stays 0 only if no job has any history at all.
    start_time = time.time()
    end_time = 0
    failed = False
    for job_token in job_tokens:
        # NOTE(review): token payloads come from the local store, but
        # pickle.loads on externally sourced data would be unsafe.
        job = pickle.loads(job_token.data)
        if job.history:
            first_execution_record = job.history[0]
            if (first_execution_record.start_time and
                    first_execution_record.start_time < start_time):
                start_time = first_execution_record.start_time
            last_execution_record = job.history[-1]
            if not last_execution_record.end_time:
                # A job still executing: mark the instance end as
                # "open" with the maxint sentinel.
                end_time = sys.maxint
            else:
                if last_execution_record.end_time > end_time:
                    end_time = last_execution_record.end_time
            # Disabled jobs do not count toward instance failure.
            if (not job.disabled and
                    last_execution_record.exit_code != 0):
                failed = True
    if not job_tokens:
        # NOTE(review): unreachable — the assert above guarantees
        # job_tokens is non-empty, so is_active always starts True.
        is_active = False
    else:
        # The instance is active unless an archived token with the same
        # name exists, i.e. the instance has already been archived.
        is_active = True
        job_name = job_tokens[0].name
        archived_tokens = self._store.read_archived_tokens(
            name_prefix=job_name)
        for token in archived_tokens:
            if token.name == job_name:
                is_active = False
                break
    name = Name.from_job_token_name(job_tokens[0].name)
    is_scheduled_for_archive = False
    abort_signal = None
    if is_active:
        # An ARCHIVE signal with a timestamp means the instance has
        # finished and is merely waiting to be archived.
        archive_signal = self._get_signal(name.workflow,
                                          name.instance,
                                          Signal.ARCHIVE,
                                          True)
        is_scheduled_for_archive = (archive_signal and
                                    Signal.TIMESTAMP_ATTR in
                                    archive_signal.attributes)
    else:
        abort_signal = self._get_signal(name.workflow,
                                        name.instance,
                                        Signal.ABORT,
                                        False)
    if abort_signal:
        status = Status.ABORTED
        if end_time == 0:
            # This can happen only if all jobs have an empty history.
            # Fall back to the abort signal's timestamp for both
            # endpoints of the instance.
            timestamp = abort_signal.attributes.get(Signal.TIMESTAMP_ATTR)
            start_time = timestamp
            end_time = timestamp
    elif (end_time == 0 or end_time == sys.maxint or
          (is_active and not is_scheduled_for_archive)):
        # No history, a still-running job, or an active instance not
        # yet queued for archiving => the instance is RUNNING.
        status = Status.RUNNING
        end_time = None
    elif failed:
        status = Status.FAILURE
    else:
        status = Status.SUCCESS
    return WorkflowInstanceData(name.workflow,
                                name.instance,
                                status,
                                start_time,
                                end_time)