async def test_workflow_state_changes(tmp_path, active_before, active_after):
    """It correctly identifies workflow state changes from the filesystem."""
    tmp_path /= str(random())
    tmp_path.mkdir()

    # mock the results of the previous scan
    wfm = WorkflowsManager(None, LOG, context=None, run_dir=tmp_path)
    wid = Tokens(user=wfm.owner, workflow='a').id
    ret = (
        {wid} if active_before == 'active' else set(),
        {wid} if active_before == 'inactive' else set(),
    )
    wfm.get_workflows = lambda: ret

    # mock the filesystem in the new state
    if active_after == 'active':
        mk_flow(tmp_path, 'a', active=True)
    if active_after == 'inactive':
        mk_flow(tmp_path, 'a', active=False)

    # see what state changes the workflow manager detects
    changes = []
    async for change in wfm._workflow_state_changes():
        changes.append(change)

    # compare those changes to expectations
    if active_before == active_after:
        assert changes == []
    else:
        assert len(changes) == 1
        assert (wid, active_before, active_after) == changes[0][:3]
async def test_workflow_state_changes(tmp_path, active_before, active_after):
    """It correctly identifies workflow state changes from the filesystem."""
    tmp_path /= str(random())
    tmp_path.mkdir()

    # mock the results of the previous scan
    wfm = WorkflowsManager(None, LOG, context=None, run_dir=tmp_path)
    wid = f'{wfm.owner}{ID_DELIM}a'
    if active_before == 'active':
        wfm.active[wid] = {CFF.API: API, CFF.UUID: '42'}
    elif active_before == 'inactive':
        wfm.inactive.add(wid)

    # mock the filesystem in the new state
    if active_after == 'active':
        mk_flow(tmp_path, 'a', active=True)
    if active_after == 'inactive':
        mk_flow(tmp_path, 'a', active=False)

    # see what state changes the workflow manager detects
    changes = []
    async for change in wfm._workflow_state_changes():
        changes.append(change)

    # compare those changes to expectations
    if active_before == active_after:
        assert changes == []
    else:
        assert len(changes) == 1
        assert (wid, active_before, active_after) == changes[0][:3]
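# The tests above fake workflows on disk via an `mk_flow` helper which is not
# included in this excerpt. The sketch below is an assumed implementation for
# illustration only: the real helper, its contact-file contents and its
# database location may differ. The idea it illustrates is that an "active"
# workflow is one with a contact file in its service directory, and that the
# UUID written there is the '42' the tests expect to pick up.
def mk_flow(path, name, active=True, database=True):
    """Create a dummy workflow run directory under ``path`` (hypothetical)."""
    run_dir = path / name
    srv_dir = run_dir / '.service'
    srv_dir.mkdir(parents=True)
    # a flow file marks this directory as a workflow installation
    (run_dir / 'flow.cylc').touch()
    if database:
        # the presence of a database marks this as a workflow that has run
        (srv_dir / 'db').touch()
    if active:
        # a contact file marks the workflow as running ("active")
        (srv_dir / 'contact').write_text(
            f'{CFF.API}={API}\n'
            f'{CFF.UUID}=42\n'
        )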
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.workflows_mgr = WorkflowsManager(self, log=self.log)
    self.data_store_mgr = DataStoreMgr(self.workflows_mgr, self.log)
    self.resolvers = Resolvers(
        self.data_store_mgr,
        log=self.log,
        workflows_mgr=self.workflows_mgr,
    )
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.executor = ProcessPoolExecutor(max_workers=self.max_workers)
    self.workflows_mgr = WorkflowsManager(self, log=self.log)
    self.data_store_mgr = DataStoreMgr(self.workflows_mgr, self.log)
    self.resolvers = Resolvers(
        self.data_store_mgr,
        log=self.log,
        executor=self.executor,
        workflows_mgr=self.workflows_mgr,
    )
async def test_workflow_state_change_restart(tmp_path):
    """It identifies workflows which have restarted between scans."""
    # mock the result of the previous scan
    wfm = WorkflowsManager(None, LOG, context=None, run_dir=tmp_path)
    wid = f'{wfm.owner}{ID_DELIM}a'
    wfm.active[wid] = {CFF.API: API, CFF.UUID: '41'}

    # create a new workflow with the same name but a different UUID
    mk_flow(tmp_path, 'a', active=True)

    # see what state changes the workflow manager detects
    changes = []
    async for change in wfm._workflow_state_changes():
        changes.append(change)

    # the flow should be marked as becoming inactive then active again
    assert [change[:3] for change in changes] == [
        (wid, 'active', 'inactive'),
        (wid, 'inactive', 'active'),
    ]

    # it should have picked up the new uuid too
    assert changes[1][3][CFF.UUID] == '42'
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.workflows_mgr = WorkflowsManager(self, log=self.log)
    self.data_store_mgr = DataStoreMgr(self.workflows_mgr, self.log)
    self.resolvers = Resolvers(
        self.data_store_mgr,
        log=self.log,
        workflows_mgr=self.workflows_mgr,
    )
    self.subscription_server = TornadoSubscriptionServer(
        schema,
        backend=CylcGraphQLBackend(),
        middleware=[IgnoreFieldMiddleware],
    )
    ioloop.IOLoop.current().add_callback(self.workflows_mgr.update)
async def test_workflow_state_change_uuid(tmp_path, active_before, active_after):
    """It identifies discontinuities in workflow runs.

    A workflow run is defined by its UUID. If this changes the workflow
    manager must detect the change and re-register the workflow because any
    data known about the workflow is now out of date.

    This can happen because:

    * A workflow was deleted and installed between scans.
    * The user deleted the workflow database.

    """
    # mock the result of the previous scan
    wfm = WorkflowsManager(None, LOG, context=None, run_dir=tmp_path)
    wid = Tokens(user=wfm.owner, workflow='a').id
    wfm.workflows[wid] = {CFF.API: API, CFF.UUID: '41'}
    if active_before:
        wfm.get_workflows = lambda: ({wid}, set())
    else:
        wfm.get_workflows = lambda: (set(), {wid})

    if active_after:
        # create a workflow with the same name but a different UUID
        # (simulates a new workflow run being started)
        mk_flow(tmp_path, 'a', active=True)
    else:
        # create a workflow without a database
        # (simulates the database being removed or workflow re-created)
        mk_flow(tmp_path, 'a', active=False, database=False)

    # see what state changes the workflow manager detects
    changes = []
    async for change in wfm._workflow_state_changes():
        changes.append(change)

    # the manager should report a single state change for the workflow
    assert [change[:3] for change in changes] == [(
        wid,
        ('active' if active_before else 'inactive'),
        ('active' if active_after else 'inactive'),
    )]

    # it should have picked up the new uuid too
    if active_after:
        assert changes[0][3][CFF.UUID] == '42'
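# The `active_before` / `active_after` arguments taken by the state-change
# tests in this file are presumably supplied by pytest parametrization, which
# has been stripped from this excerpt. A sketch of what such a decorator could
# look like; the parameter sets are assumptions (the UUID test above uses
# booleans, while the earlier tests likely use values such as 'active',
# 'inactive' and None), and the stub name is hypothetical:
from itertools import product

import pytest


@pytest.mark.parametrize(
    'active_before, active_after',
    list(product([True, False], repeat=2)),
)
async def test_state_change_example(tmp_path, active_before, active_after):
    """Hypothetical stand-in showing where the parameters come from."""
    ...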
def dummy_uis():
    calls = []

    async def async_capture(func, *_):
        nonlocal calls
        calls.append(func)

    def sync_capture(func, *_):
        nonlocal calls
        calls.append(func)

    data_store_mgr = SimpleNamespace(
        register_workflow=partial(async_capture, 'register'),
        unregister_workflow=partial(async_capture, 'unregister'),
        connect_workflow=partial(async_capture, 'connect'),
        disconnect_workflow=partial(sync_capture, 'disconnect'),
        calls=calls,
    )
    uis = SimpleNamespace(data_store_mgr=data_store_mgr)
    wfm = WorkflowsManager(uis, logging.getLogger())
    uis.workflows_mgr = wfm
    return uis
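# A minimal sketch of how the `dummy_uis` fixture above might be used: the
# data-store methods are stand-ins that record the name of each call, so a
# test can drive the workflows manager and then assert on the sequence of
# 'register' / 'connect' / 'disconnect' / 'unregister' entries. The test name,
# the `run_dir` attribute and the awaiting of `update()` are assumptions for
# illustration; the real tests may drive the manager differently.
async def test_scan_records_data_store_calls(dummy_uis, tmp_path):
    # point the manager at an empty run directory (assumed attribute)
    dummy_uis.workflows_mgr.run_dir = tmp_path
    await dummy_uis.workflows_mgr.update()
    # nothing on disk, so no data-store activity is expected
    assert dummy_uis.data_store_mgr.calls == []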
def workflows_manager() -> WorkflowsManager:
    # pass a logger, as the other WorkflowsManager constructions in this file do
    return WorkflowsManager(None, log=logging.getLogger())
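# A sketch of a test using the `workflows_manager` fixture above. The
# assertion is illustrative only and assumes a freshly constructed manager
# starts with an empty `workflows` mapping (the attribute the UUID test
# earlier in this file populates by hand):
def test_starts_with_no_known_workflows(workflows_manager):
    assert not workflows_manager.workflows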