async def test_run(self):
    passwords = [str(random.random()) for _ in range(0, 20)]

    # Orchestrate the running of these operations
    async with MemoryOrchestrator.basic_config(*OPIMPS) as orchestrator:

        definitions = Operation.definitions(*OPERATIONS)

        passwords = [
            Input(
                value=password,
                definition=definitions["UnhashedPassword"],
                parents=None,
            )
            for password in passwords
        ]

        output_spec = Input(
            value=["ScryptPassword"],
            definition=definitions["get_single_spec"],
            parents=None,
        )

        async with orchestrator() as octx:
            # Add our inputs to the input network with the context being
            # the URL
            for password in passwords:
                await octx.ictx.add(
                    MemoryInputSet(
                        MemoryInputSetConfig(
                            ctx=StringInputSetContext(password.value),
                            inputs=[password, output_spec],
                        )
                    )
                )
            try:
                async for _ctx, results in octx.run_operations(strict=True):
                    self.assertTrue(results)
            except AttributeError as error:
                # hashlib.scrypt only exists when Python is linked against
                # OpenSSL 1.1+; skip the test gracefully when it's missing
                if "module 'hashlib' has no attribute 'scrypt'" in str(error):
                    return
                raise
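
# A minimal sketch (not the actual operation under test) of the kind of
# operation OPIMPS would contain: hashing the UnhashedPassword definition
# into the ScryptPassword definition. The import paths, definition names,
# and scrypt parameters here are assumptions based on the test above.
import hashlib
import os

from dffml.df.base import op
from dffml.df.types import Definition

UnhashedPassword = Definition(name="UnhashedPassword", primitive="string")
ScryptPassword = Definition(name="ScryptPassword", primitive="string")


@op(
    inputs={"password": UnhashedPassword},
    outputs={"password": ScryptPassword},
)
async def scrypt_password(password: str) -> dict:
    # hashlib.scrypt is only available when Python is built against
    # OpenSSL 1.1+, which is why the test above tolerates AttributeError
    salt = os.urandom(16)
    hashed = hashlib.scrypt(password.encode(), salt=salt, n=2 ** 14, r=8, p=1)
    return {"password": hashed.hex()}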
async def multicomm_dataflow(self, config, request):
    # Seed the network with inputs given by caller
    # TODO(p0,security) allowlist of valid definitions to seed (set
    # Input.origin to something other than seed)
    inputs = []
    # If data was sent add those inputs
    if request.method == "POST":
        # Accept a list of input data
        # TODO validate that input data is dict of list of inputs each item
        # has definition and value properties
        for ctx, client_inputs in (await request.json()).items():
            for input_data in client_inputs:
                if input_data["definition"] not in config.dataflow.definitions:
                    return web.json_response(
                        {
                            "error": f"Missing definition for {input_data['definition']} in dataflow"
                        },
                        status=HTTPStatus.NOT_FOUND,
                    )
            inputs.append(
                MemoryInputSet(
                    MemoryInputSetConfig(
                        ctx=StringInputSetContext(ctx),
                        inputs=[
                            Input(
                                value=input_data["value"],
                                definition=config.dataflow.definitions[
                                    input_data["definition"]
                                ],
                            )
                            for input_data in client_inputs
                        ],
                    )
                )
            )
    # Run the operation in an orchestrator
    # TODO(dfass) Create the orchestrator on startup of the HTTP API itself
    async with MemoryOrchestrator.basic_config() as orchestrator:
        # TODO(dfass) Create octx on dataflow registration
        async with orchestrator(config.dataflow) as octx:
            results = {
                str(ctx): result async for ctx, result in octx.run(*inputs)
            }
            # TODO Implement input and presentation stages?
            """
            if config.presentation == "blob":
                return web.Response(body=results)
            elif config.presentation == "text":
                return web.Response(text=results)
            else:
            """
            return web.json_response(results)
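
# A hypothetical client for the handler above (the URL, context string, and
# definition names are illustrative only). The POST body maps each context
# string to a list of {"definition": ..., "value": ...} objects, and the
# response maps each context string back to its dataflow results.
import aiohttp


async def post_dataflow_inputs():
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "http://localhost:8080/multicomm/dataflow",
            json={
                "add 40 and 2": [
                    {"definition": "calc_string", "value": "add 40 and 2"},
                    {"definition": "get_single_spec", "value": ["result"]},
                ]
            },
        ) as resp:
            return await resp.json()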
async def test_run(self):
    calc_strings_check = {"add 40 and 2": 42, "multiply 42 and 10": 420}
    # TODO(p0) Implement and test asyncgenerator
    callstyles_no_expand = [
        "asyncgenerator",
        "dict",
        "dict_custom_input_set_context",
    ]
    callstyles = {
        "dict": {
            to_calc: [
                Input(value=to_calc, definition=parse_line.op.inputs["line"]),
                Input(
                    value=[add.op.outputs["sum"].name],
                    definition=GetSingle.op.inputs["spec"],
                ),
            ]
            for to_calc in calc_strings_check.keys()
        },
        "dict_custom_input_set_context": {
            CustomInputSetContext(to_calc): [
                Input(value=to_calc, definition=parse_line.op.inputs["line"]),
                Input(
                    value=[add.op.outputs["sum"].name],
                    definition=GetSingle.op.inputs["spec"],
                ),
            ]
            for to_calc in calc_strings_check.keys()
        },
        "list_input_sets": [
            MemoryInputSet(
                MemoryInputSetConfig(
                    ctx=StringInputSetContext(to_calc),
                    inputs=[
                        Input(
                            value=to_calc,
                            definition=parse_line.op.inputs["line"],
                        ),
                        Input(
                            value=[add.op.outputs["sum"].name],
                            definition=GetSingle.op.inputs["spec"],
                        ),
                    ],
                )
            )
            for to_calc in calc_strings_check.keys()
        ],
        "uctx": [
            [
                Input(value=to_calc, definition=parse_line.op.inputs["line"]),
                Input(
                    value=[add.op.outputs["sum"].name],
                    definition=GetSingle.op.inputs["spec"],
                ),
            ]
            for to_calc in calc_strings_check.keys()
        ],
    }
    async with self.create_octx() as octx:
        for callstyle, inputs in callstyles.items():
            with self.subTest(callstyle=callstyle):
                if callstyle in callstyles_no_expand:
                    run_coro = self.run_dataflow(octx, inputs)
                else:
                    run_coro = self.run_dataflow(octx, *inputs)
                async for ctx, results in run_coro:
                    ctx_str = (await ctx.handle()).as_string()
                    if callstyle == "uctx":
                        self.assertIn(
                            results[add.op.outputs["sum"].name],
                            dict(
                                zip(
                                    calc_strings_check.values(),
                                    calc_strings_check.keys(),
                                )
                            ),
                        )
                    else:
                        if callstyle == "dict_custom_input_set_context":
                            self.assertIsInstance(ctx, CustomInputSetContext)
                        self.assertEqual(
                            calc_strings_check[ctx_str],
                            results[add.op.outputs["sum"].name],
                        )
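
# The CustomInputSetContext the test checks with assertIsInstance() could be
# as simple as a tagging subclass. This sketch is an assumption, not the
# test suite's actual definition: it keeps StringInputSetContext's
# handle()/as_string() behavior while giving callers a distinct type.
class CustomInputSetContext(StringInputSetContext):
    pass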
async def _multicomm_dataflow(self, config, request):
    # Seed the network with inputs given by caller
    # TODO(p0,security) allowlist of valid definitions to seed (set
    # Input.origin to something other than seed)
    inputs = []
    # If data was sent add those inputs
    if request.method == "POST":
        # Accept a list of input data according to config.input_mode
        if config.input_mode == "default":
            # TODO validate that input data is dict of list of inputs each
            # item has definition and value properties
            for ctx, client_inputs in (await request.json()).items():
                for input_data in client_inputs:
                    if (
                        input_data["definition"]
                        not in config.dataflow.definitions
                    ):
                        return web.json_response(
                            {
                                "error": f"Missing definition for {input_data['definition']} in dataflow"
                            },
                            status=HTTPStatus.NOT_FOUND,
                        )
                inputs.append(
                    MemoryInputSet(
                        MemoryInputSetConfig(
                            ctx=StringInputSetContext(ctx),
                            inputs=[
                                Input(
                                    value=input_data["value"],
                                    definition=config.dataflow.definitions[
                                        input_data["definition"]
                                    ],
                                )
                                for input_data in client_inputs
                            ]
                            + (
                                [
                                    Input(
                                        value=request.headers,
                                        definition=config.dataflow.definitions[
                                            config.forward_headers
                                        ],
                                    )
                                ]
                                if config.forward_headers
                                else []
                            ),
                        )
                    )
                )
        elif ":" in config.input_mode:
            preprocess_mode, *input_def = config.input_mode.split(":")
            input_def = ":".join(input_def)
            if input_def not in config.dataflow.definitions:
                return web.json_response(
                    {
                        "error": f"Missing definition for {input_def} in dataflow"
                    },
                    status=HTTPStatus.NOT_FOUND,
                )
            if preprocess_mode == "json":
                value = await request.json()
            elif preprocess_mode == "text":
                value = await request.text()
            elif preprocess_mode == "bytes":
                value = await request.read()
            elif preprocess_mode == "stream":
                value = request.content
            else:
                return web.json_response(
                    {
                        "error": f"preprocess tag must be one of {self.IO_MODES}, got {preprocess_mode}"
                    },
                    status=HTTPStatus.NOT_FOUND,
                )
            inputs.append(
                MemoryInputSet(
                    MemoryInputSetConfig(
                        ctx=StringInputSetContext("post_input"),
                        inputs=[
                            Input(
                                value=value,
                                definition=config.dataflow.definitions[
                                    input_def
                                ],
                            )
                        ]
                        + (
                            [
                                Input(
                                    value=request.headers,
                                    definition=config.dataflow.definitions[
                                        config.forward_headers
                                    ],
                                )
                            ]
                            if config.forward_headers
                            else []
                        ),
                    )
                )
            )
        else:
            raise NotImplementedError(
                "Input modes other than default and preprocess:definition_name not yet implemented"
            )
    # Run the operation in an orchestrator
    # TODO(dfass) Create the orchestrator on startup of the HTTP API itself
    async with MemoryOrchestrator() as orchestrator:
        # TODO(dfass) Create octx on dataflow registration
        async with orchestrator(config.dataflow) as octx:
            results = {
                str(ctx): result async for ctx, result in octx.run(*inputs)
            }
            if config.output_mode == "json":
                return web.json_response(results)
            # content_info is a List[str]: [content_type, output_keys] in
            # the stream and bytes cases, [output_keys] in the text case
            postprocess_mode, *content_info = config.output_mode.split(":")
            if postprocess_mode == "stream":
                # stream:text/plain:get_single.beef
                raise NotImplementedError("output mode not yet implemented")
            elif postprocess_mode == "bytes":
                content_type, output_keys = content_info
                output_data = traverse_get(results, output_keys)
                return web.Response(body=output_data)
            elif postprocess_mode == "text":
                output_data = traverse_get(results, content_info[0])
                return web.Response(text=output_data)
            else:
                return web.json_response(
                    {"error": "output mode not valid"},
                    status=HTTPStatus.NOT_FOUND,
                )
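
# How the mode strings above are interpreted (the definition and output key
# names in these examples are illustrative, not from a real dataflow):
#
#   input_mode  = "default"                POST body is {ctx: [inputs...]}
#   input_mode  = "json:parsed_body"       await request.json() seeded as
#                                          the "parsed_body" definition
#   input_mode  = "bytes:image_blob"       await request.read() seeded as
#                                          the "image_blob" definition
#
#   output_mode = "json"                   results dict as a JSON response
#   output_mode = "text:get_single.answer" traverse_get(results, ...)
#                                          returned as a text response
#   output_mode = "bytes:image/png:get_single.image"
#                                          content type plus output keys,
#                                          returned as a body response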
async def test_run(self):
    calc_strings_check = {"add 40 and 2": 42, "multiply 42 and 10": 420}
    dataflow = DataFlow.auto(*OPIMPS)
    # TODO(p0) Implement and test asyncgenerator
    callstyles_no_expand = ["asyncgenerator", "dict"]
    callstyles = {
        "dict": {
            to_calc: [
                Input(value=to_calc, definition=parse_line.op.inputs["line"]),
                Input(
                    value=[add.op.outputs["sum"].name],
                    definition=GetSingle.op.inputs["spec"],
                ),
            ]
            for to_calc in calc_strings_check.keys()
        },
        "list_input_sets": [
            MemoryInputSet(
                MemoryInputSetConfig(
                    ctx=StringInputSetContext(to_calc),
                    inputs=[
                        Input(
                            value=to_calc,
                            definition=parse_line.op.inputs["line"],
                        ),
                        Input(
                            value=[add.op.outputs["sum"].name],
                            definition=GetSingle.op.inputs["spec"],
                        ),
                    ],
                )
            )
            for to_calc in calc_strings_check.keys()
        ],
        "uctx": [
            [
                Input(value=to_calc, definition=parse_line.op.inputs["line"]),
                Input(
                    value=[add.op.outputs["sum"].name],
                    definition=GetSingle.op.inputs["spec"],
                ),
            ]
            for to_calc in calc_strings_check.keys()
        ],
    }
    async with MemoryOrchestrator.withconfig({}) as orchestrator:
        async with orchestrator(dataflow) as octx:
            for callstyle, inputs in callstyles.items():
                with self.subTest(callstyle=callstyle):
                    if callstyle in callstyles_no_expand:
                        run_coro = octx.run(inputs)
                    else:
                        run_coro = octx.run(*inputs)
                    async for ctx, results in run_coro:
                        ctx_str = (await ctx.handle()).as_string()
                        if callstyle == "uctx":
                            self.assertIn(
                                results[add.op.outputs["sum"].name],
                                dict(
                                    zip(
                                        calc_strings_check.values(),
                                        calc_strings_check.keys(),
                                    )
                                ),
                            )
                        else:
                            self.assertEqual(
                                calc_strings_check[ctx_str],
                                results[add.op.outputs["sum"].name],
                            )
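
# Note on the expansion logic above: the "dict" callstyle is a single mapping
# of context strings to Input lists and is handed to octx.run() as one
# argument, while "list_input_sets" and "uctx" are lists whose elements
# (InputSet objects or bare Input lists) are unpacked with * so that each
# element seeds its own context. This reading of run()'s semantics is an
# inference from the test's assertions, not from documented behavior.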