def initialize(self):
    """Prepare everything needed to execute the pipeline.

    First extends PYTHONPATH so the pipeline specification can be
    imported, then loads the pipeline and builds its design-time
    dataflow graph, and finally creates the runtime context and the
    graph traverser that will drive execution.
    """
    # The pipeline script imports from the package repository and the
    # pipeline directory, so the path must be extended before loading.
    add_to_path([self.pkgRepository, self.pipelineDir])

    # Parse the pipeline specification and derive the design-time
    # dataflow graph from it.
    self.pipeline = load_pipeline_from_file(self.path_to_script)
    self.dataflow = build_graph(self.pipeline)

    # Runtime context shared with the nodes during execution.
    self.data[CONTEXT] = context.create_context(self)

    # The traverser walks the graph, delegating scheduling and task
    # submission to the node callbacks.
    self.callbacks = NodeCallbacks(self.config, self.credentials,
                                   self.pkgRepository)
    self.traverser = Traverser(self.callbacks.schedule_refinement,
                               self.callbacks.submit_task)
def setUp(self):
    """Build the fixtures: config, package repos, graph, task and inputs.

    Creates a test configuration and package repositories, constructs
    the node callbacks and test graph, defines a simple exec task, and
    assembles the runtime-context dict plus the dummy input mapping
    that individual tests consume.
    """
    self.config = _testconfig()
    _prepare_testpkgrepos(self.config.pkgRepository)
    self.node_callbacks = NodeCallbacks(self.config,
                                        self.config.credentials,
                                        self.config.pkgRepository)
    self.g = _create_graph(self.config.pkgRepository)

    execname = 'test_exec'
    # Task consuming inputs "a"/"b" and producing output "c".
    self.task = ExecTask(execname,
                         PackageSource("testpkg", "pkgdefs"),
                         ("a", "b"),
                         ("c", ))

    # NOTE: this local was previously named `context`, shadowing the
    # `context` module used elsewhere in this file; renamed to avoid
    # confusion. The dict contents are unchanged.
    workdir = os.path.join(self.config.wsConfig.workspaceRoot, "testrun")
    run_context = {
        CHECKSTATUS_TIMEOUT: self.config.drmConfig.statusCheckTimeout,
        CHECKSTATUS_TIME: self.config.drmConfig.statusCheckPollTime,
        WORKDIR: workdir,
        LOGDIR: os.path.join(workdir, "logs"),
    }
    self.inputs = {
        "a": "dummypath_a",
        "b": "dummypath_b",
        CONTEXT: run_context,
    }