def test_no_input_ports_to_json(tmpdir):
    """Test the edge case of exporting a model to JSON when it has a node with no input ports"""
    model = Model(id="ABCD")
    graph = Graph(id="abcd_example")
    model.graphs.append(graph)

    # A single node with only a parameter and an output port — no input ports.
    node = Node(id="input0")
    node.parameters.append(Parameter(id="input_level", value=10.0))
    output_port = OutputPort(id="out_port")
    output_port.value = "input_level"
    node.output_ports.append(output_port)
    graph.nodes.append(node)

    json_path = f"{tmpdir}/test.json"
    model.to_json_file(json_path)

    # FIXME: Doesn't seem like we have any methods for deserialization. Just do
    # some quick and dirty checks.
    # This should really be something like: assert graph == deserialized_graph
    import json

    with open(json_path) as fp:
        contents = json.load(fp)
    print(contents)

    node_dict = contents["ABCD"]["graphs"]["abcd_example"]["nodes"]["input0"]
    assert node_dict["parameters"]["input_level"]["value"] == 10.0

    # Round-trip through the loader as well; load_mdf returns a Model.
    reloaded = load_mdf(json_path)
    print(reloaded)
    assert reloaded.graphs[0].nodes[0].parameters[0].value == 10.0
def test_graph_inputs():
    r"""Test whether we can retrieve graph input node\ports via the inputs property."""
    mod = Model(id="ABCD")
    mod_graph = Graph(id="abcd_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input0")
    input_node.parameters.append(Parameter(id="input_level", value=10.0))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    # Previously this test built the model but asserted nothing, so it could
    # never fail. The node above has no input ports at all, so the graph
    # should report no (node, input_port) pairs via its ``inputs`` property.
    assert len(mod_graph.inputs) == 0
def simple_model_mdf():
    """
    A simple MDF model with two nodes. Input node has an input port with no
    receiving edge but it is not used because the output port uses a parameter
    instead.
    """
    model = Model(id="Simple")
    graph = Graph(id="simple_example")
    model.graphs.append(graph)

    # Source node: a constant parameter exposed through an output port.
    source = Node(id="input_node")
    source.parameters.append(Parameter(id="input_level", value=0.5))
    source_out = OutputPort(id="out_port")
    source_out.value = "input_level"
    source.output_ports.append(source_out)
    graph.nodes.append(source)

    # Processing node: a linear transform feeding a logistic squashing.
    proc = Node(id="processing_node")
    graph.nodes.append(proc)
    for param_id, param_value in (
        ("lin_slope", 0.5),
        ("lin_intercept", 0),
        ("log_gain", 3),
    ):
        proc.parameters.append(Parameter(id=param_id, value=param_value))

    proc_in = InputPort(id="input_port1")
    proc.input_ports.append(proc_in)

    linear = Parameter(
        id="linear_1",
        function="linear",
        args={
            "variable0": proc_in.id,
            "slope": "lin_slope",
            "intercept": "lin_intercept",
        },
    )
    logistic = Parameter(
        id="logistic_1",
        function="logistic",
        args={"variable0": linear.id, "gain": "log_gain", "bias": 0, "offset": 0},
    )
    proc.parameters.append(linear)
    proc.parameters.append(logistic)
    proc.output_ports.append(OutputPort(id="output_1", value="logistic_1"))

    # Wire source -> processing with a weighted edge.
    graph.edges.append(
        Edge(
            id="input_edge",
            parameters={"weight": 0.55},
            sender=source.id,
            sender_port=source_out.id,
            receiver=proc.id,
            receiver_port=proc_in.id,
        )
    )
    return model
def test_include_metadata_to_json(tmpdir):
    """Test for serialization"""
    model = Model(id="ABCD", metadata={"info": "model_test"})
    graph = Graph(
        id="abcd_example",
        metadata={"info": {"graph_test": {"environment_x": "xyz"}}},
    )
    model.graphs.append(graph)

    node = Node(id="input0", metadata={"color": ".8 0 .8"})
    node.parameters.append(Parameter(id="input_level", value=10.0))
    port = OutputPort(id="out_port", metadata={"info": "value at OutputPort"})
    port.value = "input_level"
    node.output_ports.append(port)
    graph.nodes.append(node)

    json_path = f"{tmpdir}/test.json"
    # The graph (not the whole model) is serialized here, so the top-level
    # JSON key is the graph id.
    graph.to_json_file(json_path)

    # FIXME: Doesn't seem like we have any methods for deserialization. Just do
    # some quick and dirty checks.
    # This should really be something like: assert graph == deserialized_graph
    import json

    with open(json_path) as fp:
        data = json.load(fp)

    graph_dict = data["abcd_example"]
    assert graph_dict["metadata"] == {"info": {"graph_test": {"environment_x": "xyz"}}}

    node_dict = graph_dict["nodes"]["input0"]
    assert node_dict["metadata"] == {"color": ".8 0 .8"}
    assert node_dict["output_ports"]["out_port"]["metadata"] == {
        "info": "value at OutputPort"
    }
def main():
    """Build the 'Simple' example model, serialize it, and optionally run it.

    Command-line flags (read from ``sys.argv``):
      -run    evaluate the graph with the execution engine
      -tf     use the TensorFlow array format when evaluating (with -run)
      -graph  render the model to PNG images via graphviz
    """
    mod = Model(id="Simple")
    mod_graph = Graph(id="simple_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input_node")
    input_node.parameters.append(Parameter(id="input_level", value=0.5))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    processing_node = Node(id="processing_node")
    mod_graph.nodes.append(processing_node)

    processing_node.parameters.append(Parameter(id="lin_slope", value=0.5))
    processing_node.parameters.append(Parameter(id="lin_intercept", value=0))
    processing_node.parameters.append(Parameter(id="log_gain", value=3))

    ip1 = InputPort(id="input_port1")
    processing_node.input_ports.append(ip1)

    f1 = Parameter(
        id="linear_1",
        function="linear",
        args={"variable0": ip1.id, "slope": "lin_slope", "intercept": "lin_intercept"},
    )
    f2 = Parameter(
        id="logistic_1",
        function="logistic",
        args={"variable0": f1.id, "gain": "log_gain", "bias": 0, "offset": 0},
    )
    processing_node.parameters.append(f1)
    processing_node.parameters.append(f2)
    processing_node.output_ports.append(OutputPort(id="output_1", value="logistic_1"))

    e1 = Edge(
        id="input_edge",
        parameters={"weight": 0.55},
        sender=input_node.id,
        sender_port=op1.id,
        receiver=processing_node.id,
        receiver_port=ip1.id,
    )
    mod_graph.edges.append(e1)

    print(mod)
    print("------------------")
    print(mod.to_json())

    # Serialize the model in both supported formats. (The return values were
    # previously bound to an unused local; they are not needed here.)
    mod.to_json_file(f"{mod.id}.json")
    mod.to_yaml_file(f"{mod.id}.yaml")

    if "-run" in sys.argv:
        verbose = True

        from modeci_mdf.execution_engine import EvaluableGraph
        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=verbose)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=1,
            filename_root="simple",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="simple_3",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
def main():
    """Build the abc_conditions example model, serialize it, and optionally run it."""
    mod = Model(id="abc_conditions")
    mod_graph = Graph(id="abc_conditions_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input0")
    input_node.parameters.append(Parameter(id="input_level", value=0.0))
    input_node.parameters.append(Parameter(id="count_0", value="count_0 + 1"))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    def _make_node(graph, node_id, sender=None):
        # Pass-through node: one input port wired straight to one output port,
        # optionally connected downstream of ``sender``.
        node = Node(id=node_id)
        graph.nodes.append(node)
        in_port = InputPort(id="input_port1", shape="(1,)")
        node.input_ports.append(in_port)
        node.output_ports.append(OutputPort(id="output_1", value=in_port.id))
        if sender is not None:
            simple_connect(sender, node, graph)
        return node

    a = _make_node(mod_graph, "A", input_node)
    a.parameters.append(Parameter(id="count_A", value="count_A + 1"))
    b = _make_node(mod_graph, "B", a)
    b.parameters.append(Parameter(id="count_B", value="count_B + 1"))
    c = _make_node(mod_graph, "C", a)
    c.parameters.append(Parameter(id="count_C", value="count_C+ 1"))

    # Per-node scheduling conditions plus a compound termination condition.
    cond_i = Condition(type="BeforeNCalls", dependencies=input_node.id, n=1)
    cond_a = Condition(type="Always")
    cond_b = Condition(type="EveryNCalls", dependencies=a.id, n=2)
    cond_c = Condition(type="EveryNCalls", dependencies=a.id, n=3)
    cond_term = Condition(
        type="And",
        dependencies=[
            Condition(type="AfterNCalls", dependencies=c.id, n=2),
            Condition(type="JustRan", dependencies=a.id),
        ],
    )
    mod_graph.conditions = ConditionSet(
        node_specific={
            input_node.id: cond_i,
            a.id: cond_a,
            b.id: cond_b,
            c.id: cond_c,
        },
        termination={"environment_state_update": cond_term},
    )

    example_dir = os.path.dirname(__file__)
    mod.to_json_file(os.path.join(example_dir, "%s.json" % mod.id))
    mod.to_yaml_file(os.path.join(example_dir, "%s.yaml" % mod.id))

    print_summary(mod_graph)

    import sys

    if "-run" in sys.argv:
        verbose = True
        from modeci_mdf.execution_engine import EvaluableGraph
        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=verbose)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="abc_conditions",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )