def test_show_running_executions(self):
    """A freshly started execution is listed with status RUNNING."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(2)
    data = self.session.get_workflow_executions(self.project, 'foo')
    active = [entry for entry in data['executions'] if entry['status'] == 'RUNNING']
    eq_(len(active), 1)
    eq_(active[0]['execId'], execution.exec_id)
def test_show_executions(self):
    """A completed execution appears in the workflow's history as SUCCEEDED."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(5)  # long enough for the short 'foo' flow to finish
    data = self.session.get_workflow_executions(self.project, 'foo')
    ok_(len(data['executions']))
    matching = [entry for entry in data['executions'] if entry['execId'] == execution.exec_id]
    eq_(len(matching), 1)
    eq_(matching[0]['status'], 'SUCCEEDED')
def _run_workflow(self, flow, **kwargs):
    """Build and upload the project, start `flow`, and wait for launch.

    Any keyword arguments are forwarded as execution properties. Polls the
    execution status once per second, at most 5 times, until it leaves the
    'PREPARING' state; an AzkabanError while polling (execution not yet
    registered server-side) is treated as "not launched yet" and retried.
    Returns the started Execution.
    """
    with temppath() as path:
        self.project.build(path)
        self.session.upload_project(self.project, path)
    execution = Execution.start(self.session, self.project, flow, properties=kwargs)
    for _ in range(5):  # wait until workflow is launched
        sleep(1)
        try:
            status = execution.status
        except AzkabanError:
            continue  # status not available yet; poll again
        if status['status'] != 'PREPARING':
            break
    return execution
def test_running(self):
    """The running-workflows endpoint reports exactly the execution we started."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(2)
    data = self.session.get_running_workflows(self.project, 'foo')
    eq_(data, {'execIds': [execution.exec_id]})
def test_execution_logs(self):
    """Execution logs contain the job submission message."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(1)
    combined = '\n'.join(execution.logs())
    ok_('Submitting job \'foo\' to run.' in combined)
def test_execution_cancel(self):
    """Cancelling a running execution moves its status to KILLED."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(1)
    execution.cancel()
    sleep(1)  # give the server time to register the cancellation
    eq_(execution.status['status'], 'KILLED')
def test_execution_start(self):
    """A started execution is RUNNING shortly after launch, then SUCCEEDED."""
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(2)
    eq_(execution.status['status'], 'RUNNING')
    sleep(4)  # wait for the short 'foo' flow to complete
    eq_(execution.status['status'], 'SUCCEEDED')
def test_execution_cancel(self):
    """Cancel a running execution and verify it ends up KILLED.

    NOTE(review): this duplicates an earlier ``test_execution_cancel`` in
    this source; Python keeps only the later definition at class-creation
    time, silently shadowing the first — confirm intent and remove one.
    """
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(1)
    execution.cancel()
    sleep(1)  # give the server time to register the cancellation
    eq_(execution.status['status'], 'KILLED')
def test_execution_start(self):
    """Start an execution and check it runs and then succeeds.

    NOTE(review): this duplicates an earlier ``test_execution_start`` in
    this source; Python keeps only the later definition at class-creation
    time, silently shadowing the first — confirm intent and remove one.
    """
    execution = Execution.start(self.session, self.project, 'foo')
    sleep(2)
    eq_(execution.status['status'], 'RUNNING')
    sleep(4)  # wait for the short 'foo' flow to complete
    eq_(execution.status['status'], 'SUCCEEDED')