Example #1
def main():

    file_path = "Simple.json"
    data = convert_states_to_stateful_parameters("../" + file_path)
    # print(data)
    with open("Translated_" + file_path, "w") as fp:
        json.dump(data, fp, indent=4)

    if "-run" in sys.argv:

        verbose = True

        mod_graph = load_mdf("Translated_%s" % file_path).graphs[0]
        eg = EvaluableGraph(mod_graph, verbose)

        mod_graph_old = load_mdf("../" + file_path).graphs[0]
        eg_old = EvaluableGraph(mod_graph_old, verbose)

        format = FORMAT_NUMPY

        eg.evaluate(array_format=format)

        eg_old.evaluate(array_format=format)

        print(
            "New file output value>>>",
            eg.enodes["processing_node"].evaluable_outputs["output_1"].curr_value,
        )

        print(
            "Old file output value>>>",
            eg_old.enodes["processing_node"].evaluable_outputs["output_1"].curr_value,
        )
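The example above only prints the two output values. A small helper such as the hypothetical outputs_match below (not part of the original script) could make the comparison explicit; it assumes the translated and original graphs expose the same node and output-port ids.

def outputs_match(eg_new, eg_old, node="processing_node", port="output_1"):
    """Compare the current value of the same node/port in two evaluated graphs."""
    import numpy as np

    new_val = eg_new.enodes[node].evaluable_outputs[port].curr_value
    old_val = eg_old.enodes[node].evaluable_outputs[port].curr_value
    # Values come back as scalars or NumPy arrays; allclose handles both.
    return bool(np.allclose(new_val, old_val))

Calling outputs_match(eg, eg_old) right after the two evaluate() calls would turn the printed comparison into an assertable check.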
Example #2
def test_abc():
    base_path = Path(__file__).parent

    filename = "examples/ONNX/abc_basic-mdf.json"
    file_path = (base_path / "../../.." / filename).resolve()

    # Load the MDF model
    mdf_model = load_mdf(str(file_path))

    # Test input
    test_input = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float32)

    # Get the result of MDF execution
    mdf_executable = EvaluableGraph(mdf_model.graphs[0], verbose=False)
    mdf_executable.evaluate(initializer={"input": test_input})
    mdf_output = mdf_executable.enodes["Cos_2"].evaluable_outputs[
        "_3"].curr_value

    # Get the translated ONNX model
    onnx_models = mdf_to_onnx(mdf_model)

    # Tell ONNX the model's opset is 13 when it is actually 15. This is needed for older onnxruntime
    # installations to run this model. See https://github.com/onnx/onnx/issues/3205
    onnx_models[0].opset_import[0].version = 13

    # Get the result of running the ONNX model
    session = backend.prepare(onnx_models[0])
    onnx_output = session.run(test_input)  # run returns a list with the actual result and type
    onnx_res_output = np.array(onnx_output[0])

    assert np.array_equal(onnx_res_output, mdf_output)
Example #3
def test_inception(inception_model_pytorch):
    """Test the InceptionBlocks model that WebGME folks provided us."""

    galaxy_images_output = torch.zeros((1, 5, 64, 64))
    ebv_output = torch.zeros((1,))
    # Run the model once to get some ground truth output (from PyTorch)
    output = inception_model_pytorch(galaxy_images_output, ebv_output).detach().numpy()

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=inception_model_pytorch,
        args=(galaxy_images_output, ebv_output),
        example_outputs=output,
        trace=True,
    )

    # Get the graph
    mdf_graph = mdf_model.graphs[0]

    # Add inputs to the parameters dict so we can feed this to the EvaluableGraph for initialization of all
    # graph inputs.
    params_dict["input1"] = galaxy_images_output.numpy()
    params_dict["input2"] = ebv_output.numpy()

    eg = EvaluableGraph(graph=mdf_graph, verbose=False)

    eg.evaluate(initializer=params_dict)

    assert np.allclose(
        output,
        eg.enodes["Add_381"].evaluable_outputs["_381"].curr_value,
    )
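test_inception receives its model through the inception_model_pytorch fixture, which is defined elsewhere in the test suite. A plausible sketch of such a fixture, modelled on the setup shown in Example #9 (the import path, seeding and eval() call are assumptions, not taken from the real fixture), would be:

import pytest
import torch

from inception import InceptionBlocks  # assumed location of the model class; adjust to the real module


@pytest.fixture
def inception_model_pytorch():
    # Seed so weight initialization is deterministic across runs (mirrors Example #9)
    torch.manual_seed(0)
    model = InceptionBlocks()
    # eval() disables randomness from Dropout/BatchNorm during the forward pass
    model.eval()
    return model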
Example #4
def test_ab():
    base_path = Path(__file__).parent

    filename = "examples/ONNX/ab.json"
    file_path = (base_path / "../../.." / filename).resolve()

    # Load the MDF model
    mdf_model = load_mdf(str(file_path))

    # Test input
    test_input = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float32)

    # Get the result of MDF execution
    mdf_executable = EvaluableGraph(mdf_model.graphs[0], verbose=False)
    # TODO: the int type cast is necessary for now because the nodes' parameters are constants and inputs must
    #  have the same type
    mdf_executable.evaluate(initializer={"input": test_input.astype(int)})
    mdf_output = mdf_executable.enodes["Mul_3"].evaluable_outputs[
        "_4"].curr_value

    # Get the translated ONNX model
    onnx_models = mdf_to_onnx(mdf_model)

    # Tell ONNX the model's opset is 13 when it is actually 15. This is needed for older onnxruntime
    # installations to run this model. See https://github.com/onnx/onnx/issues/3205
    onnx_models[0].opset_import[0].version = 13

    # Get the result of running the ONNX model
    session = backend.prepare(onnx_models[0])
    onnx_output = session.run(test_input)  # run returns a list with the actual result and type
    onnx_res_output = np.array(onnx_output[0])
    # print(f"Output calculated by onnxruntime: {onnx_res_output} and MDF: {mdf_output.astype(float)}")

    assert np.array_equal(onnx_res_output, mdf_output)
Example #5
def main():
    """Takes count.lisp, converts to MDF, and runs using the scheduler."""
    file_name = os.path.dirname(os.path.realpath(__file__)) + "/count.lisp"
    print(file_name)
    mod = actr_to_mdf(file_name)
    mdf_graph = load_mdf(file_name[:-5] + ".json").graphs[0]
    eg = EvaluableGraph(graph=mdf_graph, verbose=False)
    term = False
    goal = {}
    retrieval = {}
    while not term:
        eg.evaluate(initializer={"goal_input": goal, "dm_input": retrieval})
        term = (eg.enodes["check_termination"].
                evaluable_outputs["check_output"].curr_value)
        goal = (eg.enodes["fire_production"].
                evaluable_outputs["fire_prod_output_to_goal"].curr_value)
        retrieval = (
            eg.enodes["fire_production"].
            evaluable_outputs["fire_prod_output_to_retrieval"].curr_value)
    print("Final Goal:")
    print(eg.enodes["goal_buffer"].evaluable_outputs["goal_output"].curr_value)
Example #6
def main():
    mod_graph = generate_test_model("small_test", save_to_file=True)

    scale = 2
    mod_graph = generate_test_model(
        "medium_test",
        input_shape=(scale, scale),
        hidden_shape=(scale, scale),
        hidden_layers=5,
        save_to_file=True,
    )

    if "-run" in sys.argv:

        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        print("------------------")
        eg = EvaluableGraph(mod_graph, verbose=False)
        eg.evaluate(array_format=format)

        print("Finished evaluating graph using array format %s" % format)
Example #7
def main():

    dt = 0.01
    file_path = "States.json"
    data = convert_states_to_stateful_parameters("../" + file_path, dt)

    with open("Translated_" + file_path, "w") as fp:
        json.dump(data, fp, indent=4)

    if "-run" in sys.argv:

        verbose = True

        mod_graph = load_mdf("Translated_%s" % file_path).graphs[0]
        eg = EvaluableGraph(mod_graph, verbose)

        mod_graph_old = load_mdf("../" + file_path).graphs[0]
        eg_old = EvaluableGraph(mod_graph_old, verbose)

        duration = 2
        t = 0
        recorded = {}
        times = []
        s = []
        s_old = []
        while t <= duration:

            print("======   Evaluating at t = %s  ======" % (t))

            # levels.append(eg.enodes['sine_node'].evaluable_stateful_parameters['level'].curr_value)
            # t+=args.dt

            # print("time first>>>",type(t))
            t = float(eg.enodes["sine_node"].evaluable_parameters["time"].curr_value)

            # times.append(float(eg.enodes['sine_node'].evaluable_parameters['time'].curr_value))

            times.append(t)

            if t == 0:
                eg_old.evaluate()  # replace with initialize?
            else:
                eg_old.evaluate(time_increment=dt)
            s_old.append(eg_old.enodes["sine_node"].
                         evaluable_outputs["out_port"].curr_value)
            eg.evaluate()

            s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].
                     curr_value)
            # t+=dt

        print(s_old[:10], s[:10], times[:10])
        import matplotlib.pyplot as plt

        plt.plot(times, s)

        # Save the figure before showing it, so the written image is not blank
        plt.savefig("translated_levelratesineplot.jpg")
        plt.show()
Example #8
def main():
    mod = Model(id="Simple")
    mod_graph = Graph(id="simple_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input_node")
    input_node.parameters.append(Parameter(id="input_level", value=0.5))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    processing_node = Node(id="processing_node")
    mod_graph.nodes.append(processing_node)

    processing_node.parameters.append(Parameter(id="lin_slope", value=0.5))
    processing_node.parameters.append(Parameter(id="lin_intercept", value=0))
    processing_node.parameters.append(Parameter(id="log_gain", value=3))
    ip1 = InputPort(id="input_port1")
    processing_node.input_ports.append(ip1)

    f1 = Parameter(
        id="linear_1",
        function="linear",
        args={"variable0": ip1.id, "slope": "lin_slope", "intercept": "lin_intercept"},
    )
    f2 = Parameter(
        id="logistic_1",
        function="logistic",
        args={"variable0": f1.id, "gain": "log_gain", "bias": 0, "offset": 0},
    )
    processing_node.parameters.append(f1)
    processing_node.parameters.append(f2)

    processing_node.output_ports.append(OutputPort(id="output_1", value="logistic_1"))

    e1 = Edge(
        id="input_edge",
        parameters={"weight": 0.55},
        sender=input_node.id,
        sender_port=op1.id,
        receiver=processing_node.id,
        receiver_port=ip1.id,
    )

    mod_graph.edges.append(e1)

    print(mod)

    print("------------------")
    print(mod.to_json())

    new_file = mod.to_json_file("%s.json" % mod.id)
    new_file = mod.to_yaml_file("%s.yaml" % mod.id)

    if "-run" in sys.argv:
        verbose = True
        # verbose = False
        from modeci_mdf.execution_engine import EvaluableGraph

        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=verbose)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=1,
            filename_root="simple",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="simple_3",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
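For a rough sanity check of what evaluating this graph should print, the arithmetic can be followed by hand. The sketch below is only an approximation: it assumes the edge weight scales the sent value and that the functions take the common forms linear(x) = slope * x + intercept and logistic(x) = 1 / (1 + exp(-gain * (x + bias))) + offset; the authoritative definitions are the ones in MDF's standard function library.

import math

x = 0.5 * 0.55                                 # input_level scaled by the edge weight
lin = 0.5 * x + 0                              # lin_slope * x + lin_intercept
out = 1 / (1 + math.exp(-3 * (lin + 0))) + 0   # log_gain = 3, bias = 0, offset = 0
print(out)                                     # roughly 0.60 under these assumed function forms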
Example #9
def main():
    # changed import call
    from modeci_mdf.execution_engine import EvaluableGraph

    # Create some test inputs for the model
    galaxy_images_output = torch.zeros((1, 5, 64, 64))
    ebv_output = torch.zeros((1, ))

    # Seed the random number generator to get deterministic behavior for weight initialization
    torch.manual_seed(0)

    model = InceptionBlocks()

    # Turn on eval mode for model to get rid of any randomization due to things like BatchNorm or Dropout
    model.eval()

    # Run the model once to get some ground truth output (from PyTorch)
    output = model(galaxy_images_output, ebv_output).detach().numpy()

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=model,
        args=(galaxy_images_output, ebv_output),
        example_outputs=output,
        trace=True,
    )

    # Get the graph
    mdf_graph = mdf_model.graphs[0]

    # Add inputs to the parameters dict so we can feed this to the EvaluableGraph for initialization of all
    # graph inputs.
    params_dict["input1"] = galaxy_images_output.numpy()
    params_dict["input2"] = ebv_output.numpy()

    # Evaluate the model via the MDF scheduler
    eg = EvaluableGraph(graph=mdf_graph, verbose=False)
    eg.evaluate(initializer=params_dict)

    # Make sure the results are the same between PyTorch and MDF
    assert np.allclose(
        output,
        eg.enodes["Add_381"].evaluable_outputs["_381"].curr_value,
    )
    print("Passed all comparison tests!")

    # Output the model to JSON
    mdf_model.to_json_file("inception.json")

    import sys

    if "-graph" in sys.argv:
        mdf_model.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=1,
            filename_root="inception",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
Example #10
def main():

    verbose = True
    dt = 5e-05
    file_path = "mlp_pure_mdf.json"
    data = convert_states_to_stateful_parameters(file_path, dt)
    # print(data)
    with open("Translated_" + file_path, "w") as fp:
        json.dump(data, fp, indent=4)

    test_all = "-test" in sys.argv

    # Keep a handle on the full Model so it can be rendered to an image below
    mdf_model = load_mdf("Translated_%s" % file_path)
    mod_graph = mdf_model.graphs[0]

    # mdf_to_graphviz(mod_graph,view_on_render=not test_all, level=3)

    from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

    format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY

    eg = EvaluableGraph(mod_graph, verbose=False)
    eg.evaluate(array_format=format)

    print("Finished evaluating graph using array format %s" % format)

    for n in [
        "mlp_input_layer",
        "mlp_relu_1",
        "mlp_hidden_layer_with_relu",
        "mlp_output_layer",
    ]:
        out = eg.enodes[n].evaluable_outputs["out_port"].curr_value
        print(f"Final output value of node {n}: {out}, shape: {out.shape}")

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=2,
            filename_root="mlp_pure_mdf",
            only_warn_on_fail=
            True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )

    if test_all:
        # Iterate on training data, feed forward and log accuracy
        imgs = np.load("example_data/imgs.npy")
        labels = np.load("example_data/labels.npy")

        import torch.nn

        matches = 0
        imgs_to_test = imgs[:300]

        start = time.time()
        for i in range(len(imgs_to_test)):
            ii = imgs[i, :, :]
            target = labels[i]
            img = torch.Tensor(ii).view(-1, 14 * 14).numpy()
            # plot_img(img, 'Post_%i (%s)'%(i, img.shape))
            print("***********\nTesting image %i (label: %s): %s\n%s" %
                  (i, target, np.array2string(img, threshold=5,
                                              edgeitems=2), img.shape))
            # print(mod_graph.nodes[0].parameters['input'])
            mod_graph.nodes[0].get_parameter("input").value = img
            eg = EvaluableGraph(mod_graph, verbose=False)
            eg.evaluate(array_format=format)
            for n in ["mlp_output_layer"]:
                out = eg.enodes[n].evaluable_outputs["out_port"].curr_value
                print("Output of evaluated graph: %s %s (%s)" %
                      (out, out.shape, type(out).__name__))
                prediction = np.argmax(out)

            match = target == int(prediction)
            if match:
                matches += 1
            print(
                f"Target: {target}, prediction: {prediction}, match: {match}")
        t = time.time() - start
        print(
            "Matches: %i/%i, accuracy: %s%%. Total time: %.4f sec (%.4fs per run)"
            % (
                matches,
                len(imgs_to_test),
                (100.0 * matches) / len(imgs_to_test),
                t,
                t / len(imgs_to_test),
            ))
Example #11
File: FNrun.py Project: kmantel/MDF
def execute(multi=False):

    mdf_model = load_mdf("FN.mdf.yaml")
    mod_graph = mdf_model.graphs[0]

    dt = 0.00005
    duration = 0.1

    if not multi:

        fn_node = mod_graph.nodes[0]
        fn_node.get_parameter("initial_v").value = [-1.0]
        fn_node.get_parameter("initial_w").value = [0.0]
        input = np.array([0])

    else:
        size = 15
        max_amp = 0.5
        input = np.array([max_amp * (-1 + 2 * i / size) for i in range(size + 1)])
        # input = [-0.4,-0.2, 0.,0.2,0.4]
        input_node = Node(id="input_node", parameters={"input_level": input})

        op1 = OutputPort(id="out_port", value="input_level")
        input_node.output_ports.append(op1)
        mod_graph.nodes.append(input_node)

        fn_node = mod_graph.nodes[0]
        fn_node.get_parameter("initial_v").value = np.array([1.0] * len(input))
        fn_node.get_parameter("initial_w").value = np.array([0.0] * len(input))

        print(fn_node)

        e1 = Edge(
            id="input_edge",
            sender=input_node.id,
            sender_port=op1.id,
            receiver="FNpop_0",
            receiver_port="INPUT",
        )

        mod_graph.edges.append(e1)

        mdf_model.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="FNmulti",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )

        duration = 0.1

    eg = EvaluableGraph(mod_graph, verbose=False)
    # duration= 2
    t = 0

    times = []
    vv = {}
    ww = {}

    format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY

    while t < duration + 0.00005:
        times.append(t)
        print("======   Evaluating at t = %s  ======" % (t))
        if t == 0:
            eg.evaluate(array_format=format)  # replace with initialize?
        else:
            eg.evaluate(array_format=format, time_increment=dt)

        for i in range(len(eg.enodes["FNpop_0"].evaluable_parameters["V"].curr_value)):
            if i not in vv:
                vv[i] = []
                ww[i] = []
            v = eg.enodes["FNpop_0"].evaluable_parameters["V"].curr_value[i]
            w = eg.enodes["FNpop_0"].evaluable_parameters["W"].curr_value[i]
            vv[i].append(v)
            ww[i].append(w)
            if i == 0:
                print(f"    Value at {t}: v={v}, w={w}")
        t += dt

    import matplotlib.pyplot as plt

    for vi in vv:
        plt.plot(times, vv[vi], label="V %.3f" % input[vi])
        plt.plot(times, ww[vi], label="W %.3f" % input[vi])
    plt.legend()

    if not multi:
        plt.savefig("MDFFNrun.png", bbox_inches="tight")

    if not "-nogui" in sys.argv:
        plt.show()
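execute() is shown without the code that calls it. A minimal driver along the following lines would exercise both branches; the "-multi" flag name is an assumption for illustration, not taken from the original FNrun.py.

if __name__ == "__main__":
    import sys

    # Run the multi-population sweep when "-multi" is passed, otherwise a single FN population
    execute(multi="-multi" in sys.argv)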
Example #12
def main():

    model = AB()
    dummy_input = torch.zeros(2, 3)

    output = model(dummy_input)

    print(f"PyTorch model: {model} created. Evaluates {dummy_input} as {output}")

    torch.onnx.export(
        model,
        (dummy_input),
        "ab.onnx",
        verbose=True,
        input_names=["input"],
        opset_version=9,
    )

    # Load it back in using ONNX package
    onnx_model = onnx.load("ab.onnx")
    onnx.checker.check_model(onnx_model)

    import onnxruntime as rt

    sess = rt.InferenceSession("ab.onnx")

    res = sess.run([sess.get_outputs()[0].name],
                   {sess.get_inputs()[0].name: dummy_input.numpy()})
    print(f"Output calculated by onnxruntime (input: {dummy_input}):  {res}")

    mdf_model = onnx_to_mdf(onnx_model)
    mdf_model.to_json_file("ab.json")
    mdf_model.to_yaml_file("ab.yaml")

    mdf_model.to_graph_image(
        engine="dot",
        output_format="png",
        view_on_render=False,
        level=3,
        filename_root="ab",
        only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
    )
    if "-run" in sys.argv:
        verbose = True
        verbose = False

        from modeci_mdf.execution_engine import EvaluableGraph

        eg = EvaluableGraph(mdf_model.graphs[0], verbose=verbose)

        print("Evaluating graph...")
        test_values = [0, 1, [1, 2], dummy_input.numpy()]
        test_values = [0, 1, [1, 2]]

        for t in test_values:
            print("===================\nEvaluating MDF model with input: %s" %
                  t)
            eg.evaluate(initializer={"input": t})
            print("Output: %s" %
                  eg.enodes["Mul_3"].evaluable_outputs["_4"].curr_value)
Example #13
def main():
    mod = Model(id="abc_conditions")
    mod_graph = Graph(id="abc_conditions_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input0")
    input_node.parameters.append(Parameter(id="input_level", value=0.0))
    input_node.parameters.append(Parameter(id="count_0", value="count_0 + 1"))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    def create_simple_node(graph, id_, sender=None):
        n = Node(id=id_)
        graph.nodes.append(n)

        ip1 = InputPort(id="input_port1", shape="(1,)")
        n.input_ports.append(ip1)

        n.output_ports.append(OutputPort(id="output_1", value=ip1.id))

        if sender is not None:
            simple_connect(sender, n, graph)

        return n

    a = create_simple_node(mod_graph, "A", input_node)
    a.parameters.append(Parameter(id="count_A", value="count_A + 1"))

    b = create_simple_node(mod_graph, "B", a)
    b.parameters.append(Parameter(id="count_B", value="count_B + 1"))

    c = create_simple_node(mod_graph, "C", a)

    c.parameters.append(Parameter(id="count_C", value="count_C+ 1"))

    cond_i = Condition(type="BeforeNCalls", dependencies=input_node.id, n=1)
    cond_a = Condition(type="Always")
    cond_b = Condition(type="EveryNCalls", dependencies=a.id, n=2)
    cond_c = Condition(type="EveryNCalls", dependencies=a.id, n=3)
    cond_term = Condition(
        type="And",
        dependencies=[
            Condition(type="AfterNCalls", dependencies=c.id, n=2),
            Condition(type="JustRan", dependencies=a.id),
        ],
    )

    mod_graph.conditions = ConditionSet(
        node_specific={
            input_node.id: cond_i,
            a.id: cond_a,
            b.id: cond_b,
            c.id: cond_c
        },
        termination={"environment_state_update": cond_term},
    )

    mod.to_json_file(
        os.path.join(os.path.dirname(__file__), "%s.json" % mod.id))
    mod.to_yaml_file(
        os.path.join(os.path.dirname(__file__), "%s.yaml" % mod.id))

    print_summary(mod_graph)

    import sys

    if "-run" in sys.argv:
        verbose = True
        # verbose = False
        from modeci_mdf.execution_engine import EvaluableGraph
        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=verbose)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="abc_conditions",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
Example #14
def main():
    mod = Model(id="ABCD")
    mod_graph = Graph(id="abcd_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input0", metadata={"color": ".8 .8 .8"})
    input_node.parameters.append(Parameter(id="input_level", value=0.0))
    op1 = OutputPort(id="out_port")
    op1.value = "input_level"
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    print(input_node)
    print(input_node.output_ports)

    # a = create_example_node('A', mod_graph)
    a = Node(id="A", metadata={"color": ".8 0 0"})
    mod_graph.nodes.append(a)
    ip1 = InputPort(id="input_port1")
    a.input_ports.append(ip1)

    a.parameters.append(Parameter(id="slope", value=abcd.A_slope))
    a.parameters.append(Parameter(id="intercept", value=abcd.A_intercept))

    f1 = Parameter(
        id="linear_func",
        function="linear",
        args={
            "variable0": ip1.id,
            "slope": "slope",
            "intercept": "intercept"
        },
    )
    a.parameters.append(f1)
    a.output_ports.append(OutputPort(id="output_1", value="linear_func"))

    e1 = simple_connect(input_node, a, mod_graph)

    b = Node(id="B", metadata={"color": "0 .8 0"})
    mod_graph.nodes.append(b)
    ip1 = InputPort(id="input_port1")
    b.input_ports.append(ip1)

    b.parameters.append(Parameter(id="gain", value=abcd.B_gain))
    b.parameters.append(Parameter(id="bias", value=abcd.B_bias))
    b.parameters.append(Parameter(id="offset", value=abcd.B_offset))

    f1 = Parameter(
        id="logistic_func",
        function="logistic",
        args={
            "variable0": ip1.id,
            "gain": "gain",
            "bias": "bias",
            "offset": "offset"
        },
    )
    b.parameters.append(f1)
    b.output_ports.append(OutputPort(id="output_1", value="logistic_func"))

    simple_connect(a, b, mod_graph)

    c = Node(id="C", metadata={"color": "0 0 .8"})
    mod_graph.nodes.append(c)
    ip1 = InputPort(id="input_port1", shape="(1,)")
    c.input_ports.append(ip1)

    c.parameters.append(Parameter(id="scale", value=abcd.C_scale))
    c.parameters.append(Parameter(id="rate", value=abcd.C_rate))
    c.parameters.append(Parameter(id="bias", value=abcd.C_bias))
    c.parameters.append(Parameter(id="offset", value=abcd.C_offset))

    f1 = Parameter(
        id="exponential_func",
        function="exponential",
        args={
            "variable0": ip1.id,
            "scale": "scale",
            "rate": "rate",
            "bias": "bias",
            "offset": "offset",
        },
    )
    c.parameters.append(f1)
    c.output_ports.append(OutputPort(id="output_1", value="exponential_func"))

    simple_connect(b, c, mod_graph)

    d = Node(id="D", metadata={"color": ".8 0 .8"})
    mod_graph.nodes.append(d)

    ip1 = InputPort(id="input_port1", shape="(1,)")
    d.input_ports.append(ip1)
    d.parameters.append(Parameter(id="scale", value=abcd.D_scale))

    f1 = Parameter(id="sin_func",
                   function="sin",
                   args={
                       "variable0": ip1.id,
                       "scale": "scale"
                   })
    d.parameters.append(f1)
    d.output_ports.append(OutputPort(id="output_1", value="sin_func"))

    simple_connect(c, d, mod_graph)

    print(mod)

    print("------------------")
    # print(mod.to_json())
    new_file = mod.to_json_file("%s.json" % mod.id)
    new_file = mod.to_yaml_file("%s.yaml" % mod.id)

    print_summary(mod_graph)

    import sys

    if "-run" in sys.argv:
        verbose = True
        # verbose = False
        from modeci_mdf.execution_engine import EvaluableGraph
        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=verbose)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=1,
            filename_root="abcd",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="abcd_3",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
Example #15
def main():
    mod = Model(id="Arrays")
    mod_graph = Graph(id="array_example")
    mod.graphs.append(mod_graph)

    input_node = Node(id="input_node")

    input_node.parameters.append(
        Parameter(id="input_level", value=[[1, 2.0], [3, 4]]))

    op1 = OutputPort(id="out_port", value="input_level")
    input_node.output_ports.append(op1)
    mod_graph.nodes.append(input_node)

    middle_node = Node(id="middle_node")
    middle_node.parameters.append(Parameter(id="slope", value=0.5))
    middle_node.parameters.append(
        Parameter(id="intercept", value=np.array([[0, 1.0], [2, 2]])))

    ip1 = InputPort(id="input_port1")
    middle_node.input_ports.append(ip1)
    mod_graph.nodes.append(middle_node)

    f1 = Parameter(
        id="linear_1",
        function="linear",
        args={
            "variable0": ip1.id,
            "slope": "slope",
            "intercept": "intercept"
        },
    )
    middle_node.parameters.append(f1)

    middle_node.output_ports.append(OutputPort(id="output_1",
                                               value="linear_1"))

    e1 = Edge(
        id="input_edge",
        parameters={"weight": [[1, 0], [0, 1]]},
        sender=input_node.id,
        sender_port=op1.id,
        receiver=middle_node.id,
        receiver_port=ip1.id,
    )

    mod_graph.edges.append(e1)

    new_file = mod.to_json_file("%s.json" % mod.id)
    new_file = mod.to_yaml_file("%s.yaml" % mod.id)

    if "-run" in sys.argv:
        verbose = True
        # verbose = False
        from modeci_mdf.execution_engine import EvaluableGraph

        from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW

        format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
        eg = EvaluableGraph(mod_graph, verbose=True)
        eg.evaluate(array_format=format)

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="arrays",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )
Example #16
def main():

    dt = 5e-05
    file_path = "FN.mdf.json"
    data = convert_states_to_stateful_parameters("../" + file_path, dt)
    # print(data)
    with open("Translated_" + file_path, "w") as fp:
        json.dump(data, fp, indent=4)

    if "-run" in sys.argv:

        verbose = True

        mod_graph = load_mdf("Translated_%s" % file_path).graphs[0]
        eg = EvaluableGraph(mod_graph, verbose)

        mod_graph_old = load_mdf("../" + file_path).graphs[0]
        eg_old = EvaluableGraph(mod_graph_old, verbose)

        duration = 0.1
        t = 0
        recorded = {}
        times = []
        s = []
        vv = []
        ww = []

        vv_old = []
        ww_old = []

        while t <= duration + dt:
            print("======   Evaluating at t = %s  ======" % (t))

            vv.append(float(eg.enodes["FNpop_0"].evaluable_parameters["V"].curr_value))
            ww.append(float(eg.enodes["FNpop_0"].evaluable_parameters["W"].curr_value))

            # levels.append(eg.enodes['sine_node'].evaluable_stateful_parameters['level'].curr_value)

            # print("time first>>>",type(t))
            t = float(eg.enodes["FNpop_0"].evaluable_parameters["time"].curr_value)
            times.append(t)

            if t == 0:
                eg_old.evaluate()  # replace with initialize?
            else:
                eg_old.evaluate(time_increment=dt)

            vv_old.append(eg_old.enodes["FNpop_0"].evaluable_parameters["V"].curr_value)
            ww_old.append(eg_old.enodes["FNpop_0"].evaluable_parameters["W"].curr_value)

            eg.evaluate()

        print("Times>>>", times[:10])
        print("Translated file W and V>>>", ww[:10], vv[:10])

        print("Old file W and V>>>", ww_old[:10], vv_old[:10])

        import matplotlib.pyplot as plt

        plt.plot(times, vv, label="V")
        plt.plot(times, ww, label="W")
        plt.legend()
        plt.savefig("translated_FN_stateful_vw_plot.jpg")
        plt.show()
Example #17
def main():
    mod = Model(id="States")
    mod_graph = Graph(id="state_example")
    mod.graphs.append(mod_graph)

    ## Counter node
    counter_node = Node(id="counter_node")

    p1 = Parameter(id="increment", value=1)
    counter_node.parameters.append(p1)

    p2 = Parameter(id="count", value="count + increment")
    counter_node.parameters.append(p2)

    op1 = OutputPort(id="out_port", value=p2.id)
    counter_node.output_ports.append(op1)

    mod_graph.nodes.append(counter_node)

    ## Sine node...
    sine_node = Node(id="sine_node")

    sine_node.parameters.append(Parameter(id="amp", value=3))
    sine_node.parameters.append(Parameter(id="period", value=0.4))

    s1 = Parameter(id="level",
                   default_initial_value=0,
                   time_derivative="6.283185 * rate / period")
    sine_node.parameters.append(s1)

    s2 = Parameter(
        id="rate",
        default_initial_value=1,
        time_derivative="-1 * 6.283185 * level / period",
    )
    sine_node.parameters.append(s2)

    op1 = OutputPort(id="out_port", value="amp * level")
    sine_node.output_ports.append(op1)

    mod_graph.nodes.append(sine_node)

    new_file = mod.to_json_file("%s.json" % mod.id)
    new_file = mod.to_yaml_file("%s.yaml" % mod.id)

    if "-run" in sys.argv:
        verbose = True
        # verbose = False
        from modeci_mdf.utils import load_mdf, print_summary

        from modeci_mdf.execution_engine import EvaluableGraph

        eg = EvaluableGraph(mod_graph, verbose)
        dt = 0.01

        duration = 2
        t = 0
        recorded = {}
        times = []
        s = []
        while t <= duration:
            times.append(t)
            print("======   Evaluating at t = %s  ======" % (t))
            if t == 0:
                eg.evaluate()  # replace with initialize?
            else:
                eg.evaluate(time_increment=dt)

            s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
            t += dt

        if "-nogui" not in sys.argv:
            import matplotlib.pyplot as plt

            plt.plot(times, s)
            plt.show()

    if "-graph" in sys.argv:
        mod.to_graph_image(
            engine="dot",
            output_format="png",
            view_on_render=False,
            level=3,
            filename_root="states",
            only_warn_on_fail=True,  # Makes sure test of this doesn't fail on Windows on GitHub Actions
        )

    return mod_graph
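The two stateful parameters in sine_node form a coupled pair of ODEs: d(level)/dt = 2*pi*rate/period and d(rate)/dt = -2*pi*level/period (6.283185 is 2*pi), with level(0) = 0 and rate(0) = 1. Their closed-form solution is level(t) = sin(2*pi*t/period) and rate(t) = cos(2*pi*t/period), so out_port should trace amp * sin(2*pi*t/period). A small NumPy sketch of that analytic reference curve, useful for comparing against the plotted values but not part of the original script:

import numpy as np

period, amp = 0.4, 3
t = np.linspace(0, 2, 201)
level = np.sin(2 * np.pi * t / period)  # solves d(level)/dt =  2*pi*rate/period with level(0) = 0
rate = np.cos(2 * np.pi * t / period)   # solves d(rate)/dt  = -2*pi*level/period with rate(0) = 1
out_port = amp * level                  # what sine_node's out_port should trace over time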