def test_traverse_get(self):
    """Verify ``traverse_get`` for every supported path spelling.

    Covers: separate key arguments, a dotted-path string, and dotted keys
    escaped with double or single quotes.
    """
    cases = [
        # (mapping, path arguments, expected value)
        ({"who": {"am": "i"}}, ("who", "am"), "i"),
        ({"who": {"am": "i"}}, ("who.am",), "i"),
        ({"who.am": {"i": "u"}}, ('"who.am".i',), "u"),
        ({"who.am": {"i": "u"}}, ("'who.am'.i",), "u"),
    ]
    for mapping, path_args, expected in cases:
        self.assertEqual(traverse_get(mapping, *path_args), expected)
async def _multicomm_dataflow(self, config, request):
    """Run ``config.dataflow`` against inputs parsed from an HTTP request.

    Request bodies are decoded according to ``config.input_mode`` and the
    dataflow results are serialized according to ``config.output_mode``.
    Returns an ``aiohttp.web`` response; configuration/input mismatches
    produce a 404 JSON error response rather than raising.

    NOTE(review): ``config``/``request`` shapes are project types
    (``aiohttp.web.Request`` presumably) — confirmed only by usage here.
    """
    # Seed the network with inputs given by caller
    # TODO(p0,security) allowlist of valid definitions to seed (set
    # Input.origin to something other than seed)
    inputs = []
    # If data was sent add those inputs
    if request.method == "POST":
        # Accept a list of input data according to config.input_mode
        if config.input_mode == "default":
            # TODO validate that input data is dict of list of inputs each item
            # has definition and value properties
            for ctx, client_inputs in (await request.json()).items():
                # Validate every definition name before constructing anything
                for input_data in client_inputs:
                    # Fixed idiom: "x not in y" instead of "not x in y"
                    if input_data["definition"] not in config.dataflow.definitions:
                        return web.json_response(
                            {
                                "error": f"Missing definition for {input_data['definition']} in dataflow"
                            },
                            status=HTTPStatus.NOT_FOUND,
                        )
                # One input set per client-supplied context
                inputs.append(
                    MemoryInputSet(
                        MemoryInputSetConfig(
                            ctx=StringInputSetContext(ctx),
                            inputs=[
                                Input(
                                    value=input_data["value"],
                                    definition=config.dataflow.definitions[
                                        input_data["definition"]
                                    ],
                                )
                                for input_data in client_inputs
                            ]
                            # Optionally forward the request headers as an
                            # extra input
                            + (
                                [
                                    Input(
                                        value=request.headers,
                                        definition=config.dataflow.definitions[
                                            config.forward_headers
                                        ],
                                    )
                                ]
                                if config.forward_headers
                                else []
                            ),
                        )
                    )
                )
        elif ":" in config.input_mode:
            # input_mode is "<preprocess tag>:<definition name>"; the
            # definition name may itself contain ":" so re-join the remainder
            preprocess_mode, *input_def = config.input_mode.split(":")
            input_def = ":".join(input_def)
            if input_def not in config.dataflow.definitions:
                return web.json_response(
                    {"error": f"Missing definition for {input_def} in dataflow"},
                    status=HTTPStatus.NOT_FOUND,
                )
            # Decode the request body per the preprocess tag
            if preprocess_mode == "json":
                value = await request.json()
            elif preprocess_mode == "text":
                value = await request.text()
            elif preprocess_mode == "bytes":
                value = await request.read()
            elif preprocess_mode == "stream":
                value = request.content
            else:
                return web.json_response(
                    {
                        "error": f"preprocess tag must be one of {self.IO_MODES}, got {preprocess_mode}"
                    },
                    status=HTTPStatus.NOT_FOUND,
                )
            inputs.append(
                MemoryInputSet(
                    MemoryInputSetConfig(
                        ctx=StringInputSetContext("post_input"),
                        inputs=[
                            Input(
                                value=value,
                                definition=config.dataflow.definitions[input_def],
                            )
                        ]
                        # Optionally forward the request headers as an extra
                        # input
                        + (
                            [
                                Input(
                                    value=request.headers,
                                    definition=config.dataflow.definitions[
                                        config.forward_headers
                                    ],
                                )
                            ]
                            if config.forward_headers
                            else []
                        ),
                    )
                )
            )
        else:
            raise NotImplementedError(
                "Input modes other than default,preprocess:definition_name not yet implemented"
            )

    # Run the operation in an orchestrator
    # TODO(dfass) Create the orchestrator on startup of the HTTP API itself
    async with MemoryOrchestrator() as orchestrator:
        # TODO(dfass) Create octx on dataflow registration
        async with orchestrator(config.dataflow) as octx:
            results = {
                str(ctx): result async for ctx, result in octx.run(*inputs)
            }
            if config.output_mode == "json":
                return web.json_response(results)

            # content_info is a List[str] ([content_type,output_keys])
            # in case of stream,bytes and string in others
            postprocess_mode, *content_info = config.output_mode.split(":")

            if postprocess_mode == "stream":
                # stream:text/plain:get_single.beef
                raise NotImplementedError("output mode not yet implemented")
            elif postprocess_mode == "bytes":
                content_type, output_keys = content_info
                output_data = traverse_get(results, output_keys)
                return web.Response(body=output_data)
            elif postprocess_mode == "text":
                output_data = traverse_get(results, content_info[0])
                return web.Response(text=output_data)
            else:
                # Fixed: removed extraneous f-prefix on a string with no
                # placeholders (ruff F541)
                return web.json_response(
                    {"error": "output mode not valid"},
                    status=HTTPStatus.NOT_FOUND,
                )