async def test_dataflow_usage_example(self):
    """
    Exercise the ``dataflow merge`` CLI subcommand: merge shouldi's dataflow
    with a freshly built one and confirm the merged output round-trips
    through ``DataFlow._fromdict``.
    """
    # Serialize shouldi's dataflow (the fixture from setUp) to a temp JSON file
    shouldi_path = self.mktempfile() + ".json"
    pathlib.Path(shouldi_path).write_text(json.dumps(self.DATAFLOW.export()))
    # Resolve the operations to merge in: mapping creation plus two
    # loaded from the feature/git plugin directory
    transform_to_repo = Operation.load("dffml.mapping.create")
    (
        lines_of_code_by_language,
        lines_of_code_to_comments,
    ) = load(
        "dffml_feature_git.feature.operations:lines_of_code_by_language",
        "dffml_feature_git.feature.operations:lines_of_code_to_comments",
        relative=relative_path("..", "..", "feature", "git"),
    )
    # Compose the three operations into the dataflow we will merge on top
    merge_candidate = DataFlow.auto(
        transform_to_repo,
        lines_of_code_by_language,
        lines_of_code_to_comments,
    )
    # TODO Modify and compare against yaml in docs example
    # Serialize the new dataflow to a second temp JSON file
    override_path = self.mktempfile() + ".json"
    pathlib.Path(override_path).write_text(json.dumps(merge_candidate.export()))
    # Run the merge subcommand, capturing stdout, then verify the printed
    # JSON parses back into a DataFlow without raising
    with contextlib.redirect_stdout(self.stdout):
        await CLI.cli("dataflow", "merge", shouldi_path, override_path)
    DataFlow._fromdict(**json.loads(self.stdout.getvalue()))
async def test_run(self):
    """
    Build a dataflow combining ``get_single`` with ``model_predict``, seed it
    so get_single extracts model_predict's outputs, and dump it as YAML.
    """
    self.required_plugins("dffml-config-yaml", "dffml-model-scratch")
    # Resolve the two operations we are wiring together
    get_single = Operation.load("get_single")
    loaded = list(load("dffml.operation.model:model_predict"))
    model_predict = loaded[0]
    # Compose them into a new dataflow
    dataflow = DataFlow.auto(get_single, model_predict)
    # Seed input: tell get_single which definitions to pull out of the
    # network — the names of everything model_predict produces
    output_names = [
        definition.name for definition in model_predict.op.outputs.values()
    ]
    dataflow.seed.append(
        Input(value=output_names, definition=get_single.inputs["spec"])
    )
    # Dump the dataflow (with linked definitions) to a temporary YAML file
    dataflow_yaml = pathlib.Path(self.mktempfile() + ".yaml")
    async with BaseConfigLoader.load("yaml").withconfig({}) as configloader:
        async with configloader() as loader:
            dataflow_yaml.write_bytes(
                await loader.dumpb(dataflow.export(linked=True))
            )
    # TODO Figure out how nested model config options will work
    # print(dataflow_yaml.read_text())
    return
async def setUp(self):
    """Run the parent setUp, then load shouldi's dataflow as the test fixture."""
    await super().setUp()
    # shouldi ships a complete dataflow; these tests reuse it as input data
    entries = list(load("shouldi.cli:DATAFLOW"))
    self.DATAFLOW = entries[0]