def setUp(self) -> None:
    super(TestSuiteRuntimeServer, self).setUp()
    self.scheduler.ws_data_mgr = DataStoreMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert 0 == warnings
    self.task_pool.release_runahead_tasks()
    self.scheduler.ws_data_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.ws_data_mgr.workflow_id
    create_auth_files(self.suite_name)  # auth keys are required for comms
    barrier = Barrier(2, timeout=10)
    self.server = SuiteRuntimeServer(
        self.scheduler,
        context=SERVER_CONTEXT,
        threaded=True,
        barrier=barrier,
        daemon=True
    )
    self.server.public_priv = Priv.CONTROL
    self.server.start(*PORT_RANGE)
    # barrier.wait() doesn't seem to work properly here
    # so this workaround will do
    while barrier.n_waiting < 1:
        sleep(0.2)
    barrier.wait()
    sleep(0.5)
def setUp(self) -> None:
    super(TestWorkflowSubscriber, self).setUp()
    self.scheduler.ws_data_mgr = DataStoreMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert warnings == 0
    self.task_pool.release_runahead_tasks()
    self.scheduler.ws_data_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.ws_data_mgr.workflow_id
    self.publisher = WorkflowPublisher(
        self.suite_name, threaded=False, daemon=True)
    self.publisher.start(*PORT_RANGE)
    self.subscriber = WorkflowSubscriber(
        self.suite_name,
        host=self.scheduler.host,
        port=self.publisher.port,
        topics=[b'workflow'])
    # delay to allow the subscriber to connect,
    # otherwise it misses the first message
    sleep(1.0)
    self.topic = None
    self.data = None
def setUp(self) -> None:
    super(TestDataStoreMgr, self).setUp()
    self.ws_data_mgr = DataStoreMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert 0 == warnings
    self.task_pool.release_runahead_tasks()
    self.data = self.ws_data_mgr.data[self.ws_data_mgr.workflow_id]
def setUp(self) -> None:
    super(TestSuiteRuntimeServer, self).setUp()
    self.scheduler.ws_data_mgr = WsDataMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert 0 == warnings
    self.task_pool.release_runahead_tasks()
    self.scheduler.ws_data_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.ws_data_mgr.workflow_id
    self.server = SuiteRuntimeServer(self.scheduler)
    self.server.public_priv = Priv.CONTROL
def setUp(self) -> None:
    super(TestWorkflowPublisher, self).setUp()
    self.scheduler.ws_data_mgr = DataStoreMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert 0 == warnings
    self.task_pool.release_runahead_tasks()
    self.scheduler.ws_data_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.ws_data_mgr.workflow_id
    self.publisher = WorkflowPublisher(
        self.suite_name, threaded=False, daemon=True)
    self.pub_data = self.scheduler.ws_data_mgr.get_publish_deltas()
def setUp(self) -> None:
    super(TestResolvers, self).setUp()
    self.scheduler.ws_data_mgr = WsDataMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            no_check=False
        )
        assert 0 == warnings
    self.task_pool.release_runahead_tasks()
    self.scheduler.ws_data_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.ws_data_mgr.workflow_id
    self.data = self.scheduler.ws_data_mgr.data[self.workflow_id]
    self.node_ids = [
        node.id for node in self.data[TASK_PROXIES].values()]
    self.edge_ids = [
        edge.id for edge in self.data[EDGES].values()]
    self.resolvers = Resolvers(
        self.scheduler.ws_data_mgr.data, schd=self.scheduler)
def setUp(self) -> None:
    super(TestSuiteRuntimeClient, self).setUp()
    self.scheduler.data_store_mgr = DataStoreMgr(self.scheduler)
    for name in self.scheduler.config.taskdefs:
        task_proxy = create_task_proxy(
            task_name=name,
            suite_config=self.suite_config,
            is_startup=True
        )
        warnings = self.task_pool.insert_tasks(
            items=[task_proxy.identity],
            stopcp=None,
            check_point=True
        )
        assert warnings == 0
    self.task_pool.release_runahead_tasks()
    self.scheduler.data_store_mgr.initiate_data_model()
    self.workflow_id = self.scheduler.data_store_mgr.workflow_id
    create_auth_files(self.suite_name)  # auth keys are required for comms
    barrier = Barrier(2, timeout=20)
    self.server = SuiteRuntimeServer(
        self.scheduler,
        context=SERVER_CONTEXT,
        threaded=True,
        barrier=barrier,
        daemon=True)
    port_range = glbl_cfg().get(['suite servers', 'run ports'])
    self.server.start(port_range[0], port_range[-1])
    # barrier.wait() doesn't seem to work properly here
    # so this workaround will do
    while barrier.n_waiting < 1:
        sleep(0.2)
    barrier.wait()
    sleep(0.5)
    self.client = SuiteRuntimeClient(
        self.scheduler.suite,
        host=self.scheduler.host,
        port=self.server.port)
    sleep(0.5)