Example #1
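Note: the snippets in this collection are excerpts from NeMo 0.x tests and tutorial scripts, so their import statements are omitted; self.nf in the test methods appears to refer to a NeuralModuleFactory instance created in the test fixture. Below is a minimal sketch of the imports the snippets seem to assume. The module paths are assumptions based on NeMo 0.x and may differ between releases; further helpers used below (the on_epoch_start/on_step_end/on_batch_end decorators, SimpleLogger, TensorboardLogger, SummaryWriter, GraphOutputs, NonTrainableNM, NeuralPortNmTensorMismatchError, the logging wrapper, and efi) also come from NeMo, PyTorch, and TensorBoard but are left out here because their exact locations vary by version.

# Assumed imports - module paths are a best guess for NeMo 0.x, not taken from the original snippets.
from io import StringIO

import pytest

from nemo.backends.pytorch.actions import PtActions
from nemo.backends.pytorch.tutorials import MSELoss, RealFunctionDataLayer, TaylorNet
from nemo.core import DeviceType, NeuralGraph, NeuralModuleFactory, OperationMode
from nemo.core.neural_types import ChannelType, NeuralType, NeuralTypeComparisonResult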
    def test_epoch_decorators(self, clean_up):
        data_source = RealFunctionDataLayer(n=24, batch_size=12)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        epoch_start_counter = [0]
        epoch_end_counter = [0]

        @on_epoch_start
        def count_epoch_starts(state, counter=epoch_start_counter):
            counter[0] += 1

        @on_epoch_end
        def count_epoch_ends(state, counter=epoch_end_counter):
            counter[0] -= 1

        callbacks = [count_epoch_starts, count_epoch_ends]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

        assert epoch_start_counter[0] == 2
        assert epoch_end_counter[0] == -2
Example #2
    def test_nm_tensors_producer_args(self):
        """
            Tests whether nmTensors are correct - producers and their args.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        # check producers' bookkeeping
        assert loss_tensor.producer_name == loss.name
        assert loss_tensor.producer_args == {
            "predictions": y_pred,
            "target": y
        }
        assert y_pred.producer_name == trainable_module.name
        assert y_pred.producer_args == {"x": x}
        assert y.producer_name == data_source.name
        assert y.producer_args == {}
        assert x.producer_name == data_source.name
        assert x.producer_args == {}
Example #3
    def test_graph_serialization_7_arbitrary_graph_with_loops(self):
        """ 
            Tests whether serialization works for an arbitrary graph containing a loop,
            i.e. a graph in which the same module (TaylorNet) is called twice.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="dl")
        tn = TaylorNet(dim=4, name="tn")
        loss = MSELoss(name="loss")

        # Build a graph with a loop.
        with NeuralGraph(name="graph") as graph:
            # Add modules to graph.
            x, t = dl()
            # First call to TN.
            p1 = tn(x=x)
            # Second call to TN.
            p2 = tn(x=p1)
            # Take output of second, pass it to loss.
            lss = loss(predictions=p2, target=t)

        # Make sure all connections are there!
        assert len(graph.tensor_list) == 5
        # 4 would mean that we have overwritten the "p1" (tn->y_pred) tensor!

        # Serialize the graph.
        serialized_graph = graph.serialize()

        # Create the second graph - deserialize with "module reusing".
        graph2 = NeuralGraph.deserialize(serialized_graph,
                                         reuse_existing_modules=True)
        serialized_graph2 = graph2.serialize()

        # Must be the same.
        assert serialized_graph == serialized_graph2
    def test_graph_serialization_2_simple_graph_output_binding(self):
        """ 
            Tests whether serialization of a simple graph with output binding works.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs2_dl")
        tn = TaylorNet(dim=4, name="tgs2_tn")
        loss = MSELoss(name="tgs2_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.evaluation) as g1:
            x, t = dl()
            prediction1 = tn(x=x)
            _ = loss(predictions=prediction1, target=t)
        # Manually bind the selected outputs.
        g1.outputs["ix"] = x
        g1.outputs["te"] = t
        g1.outputs["prediction"] = prediction1

        # Serialize graph
        serialized_g1 = g1.serialize()

        # Create the second graph - deserialize with reusing.
        g2 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=True)
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2
    def test_graph_simple_import_export(self, tmpdir):
        """
            Tests whether a Neural Graph can be exported to and imported back from a configuration file.

            Args:
                tmpdir: Fixture which will provide a temporary directory.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgio1_dl")
        tn = TaylorNet(dim=4, name="tgio1_tn")
        loss = MSELoss(name="tgio1_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.training) as g1:
            x, t = dl()
            p = tn(x=x)
            _ = loss(predictions=p, target=t)

        # Serialize graph
        serialized_g1 = g1.serialize()

        # Generate filename in the temporary directory.
        tmp_file_name = str(tmpdir.mkdir("export").join("simple_graph.yml"))

        # Export graph to file.
        g1.export_to_config(tmp_file_name)

        # Create the second graph - import!
        g2 = NeuralGraph.import_from_config(tmp_file_name,
                                            reuse_existing_modules=True)
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2
Example #7
    def test_SimpleLogger(self, clean_up):
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        # Mock up both std and stderr streams.
        with logging.patch_stdout_handler(StringIO()) as std_out:
            self.nf.train(
                tensors_to_optimize=[loss_tensor],
                callbacks=[SimpleLogger(step_freq=1)],
                optimization_params={
                    "max_steps": 4,
                    "lr": 0.01
                },
                optimizer="sgd",
            )

        output_lines = std_out.getvalue().splitlines()
        assert len(output_lines) == 4
        for line in output_lines:
            assert "loss" in line
Example #8
    def test_explicit_graph_with_activation(self):
        """ 
            Tests initialization of an `explicit` graph and decoupling of graph creation from its activation. 
            Also tests module access.
        """
        # Create modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1, name="dl")
        fx = TaylorNet(dim=4, name="fx")
        loss = MSELoss(name="loss")

        # Create the g0 graph.
        g0 = NeuralGraph()

        # Activate the "g0 graph context" - all operations will be recorded to g0.
        with g0:
            x, t = dl()
            p = fx(x=x)
            lss = loss(predictions=p, target=t)

        # Assert that there are 3 modules in the graph.
        assert len(g0) == 3

        # Test access modules.
        assert g0["dl"] is dl
        assert g0["fx"] is fx
        assert g0["loss"] is loss

        with pytest.raises(KeyError):
            g0["other_module"]
Example #9
    def test_nm_tensors_producer_consumers(self):
        """
            Tests whether nmTensors are correct - checking producers and consumers.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=10, batch_size=1, name="source")
        trainable_module = TaylorNet(dim=4, name="tm")
        loss = MSELoss(name="loss")
        loss2 = MSELoss(name="loss2")

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        lss = loss(predictions=y_pred, target=y)
        lss2 = loss2(predictions=y_pred, target=y)

        # Check tensor x producer and consumers.
        p = x.producer_step_module_port
        cs = x.consumers
        assert p.module_name == "source"
        assert p.port_name == "x"
        assert len(cs) == 1
        assert cs[0].module_name == "tm"
        assert cs[0].port_name == "x"

        # Check tensor y producer and consumers.
        p = y.producer_step_module_port
        cs = y.consumers
        assert p.module_name == "source"
        assert p.port_name == "y"
        assert len(cs) == 2
        assert cs[0].module_name == "loss"
        assert cs[0].port_name == "target"
        assert cs[1].module_name == "loss2"
        assert cs[1].port_name == "target"

        # Check tensor y_pred producer and consumers.
        p = y_pred.producer_step_module_port
        cs = y_pred.consumers
        assert p.module_name == "tm"
        assert p.port_name == "y_pred"
        assert len(cs) == 2
        assert cs[0].module_name == "loss"
        assert cs[0].port_name == "predictions"
        assert cs[1].module_name == "loss2"
        assert cs[1].port_name == "predictions"
    def test_graph_serialization_6_graph_after_nesting_with_manual_binding(
            self):
        """ 
            Tests whether serialization works in the case when we serialize a graph after a different graph
            was nested in it, with additionally bound inputs and outputs (manual port names).
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs6_dl")
        tn = TaylorNet(dim=4, name="tgs6_tn")
        loss = MSELoss(name="tgs6_loss")

        # Create "model".
        with NeuralGraph(operation_mode=OperationMode.both,
                         name="tgs6_model") as model:
            # Manually bind input port: "input" -> "x"
            model.inputs["input"] = tn.input_ports["x"]
            # Add the module to the graph and bind its input port 'x'.
            y = tn(x=model.inputs["input"])
            # Manual output bind.
            model.outputs["output"] = y

        # Serialize "model".
        serialized_model = model.serialize()

        # Delete model-related stuff.
        del model
        del tn

        # Deserialize the "model copy".
        model_copy = NeuralGraph.deserialize(serialized_model,
                                             name="tgs6_model_copy")

        # Build the "training graph" - using the model copy.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgs6_training") as training:
            # Add modules to graph.
            x, t = dl()
            # Incorporate modules from the existing "model" graph.
            p = model_copy(
                input=x
            )  # Note: this output should actually be named "output", not "y_pred"!
            lss = loss(predictions=p, target=t)

        # Serialize the "training graph".
        serialized_training = training.serialize()

        # Delete everything.
        del dl
        del loss
        del model_copy
        del training

        # Create the second graph - deserialize without "module reusing".
        training2 = NeuralGraph.deserialize(serialized_training)
        serialized_training2 = training2.serialize()

        # Must be the same.
        assert serialized_training == serialized_training2
Example #11
    def test_graph_outputs_binding1(self):
        # Create modules.
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        tn = TaylorNet(dim=4)
        loss = MSELoss()

        with NeuralGraph() as g:
            # Create the graph by connecting the modules.
            x, y = data_source()
            y_pred = tn(x=x)
            lss = loss(predictions=y_pred, target=y)

        # Test default binding.
        bound_outputs = GraphOutputs(g.tensors)

        bound_outputs.bind([x, y])
        bound_outputs.bind([y_pred])
        bound_outputs.bind([lss])

        # Delete not allowed.
        with pytest.raises(TypeError):
            del bound_outputs["loss"]

        assert len(bound_outputs) == 4
        assert len(bound_outputs.tensors) == 4
        assert len(bound_outputs.tensor_list) == 4

        defs = bound_outputs.definitions
        assert defs["x"].compare(
            data_source.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert defs["y"].compare(
            data_source.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert defs["y_pred"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert defs["loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        with pytest.raises(KeyError):
            _ = defs["lss"]

        # Bound manually.
        bound_outputs["my_prediction"] = y_pred
        bound_outputs["my_loss"] = lss

        # Delete not allowed.
        with pytest.raises(TypeError):
            del bound_outputs["my_prediction"]

        assert len(bound_outputs) == 2
        defs = bound_outputs.definitions
        assert defs["my_prediction"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert defs["my_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        with pytest.raises(KeyError):
            _ = defs["x"]
Example #12
    def test_step_batch_decorators(self, clean_up):
        """Showcase the difference between step and batch"""
        data_source = RealFunctionDataLayer(n=24, batch_size=12)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        epoch_step_counter = [0]
        epoch_batch_counter = [0]

        @on_step_end
        def count_steps(state, counter=epoch_step_counter):
            counter[0] += 1

        @on_batch_end
        def count_batches(state, counter=epoch_batch_counter):
            counter[0] += 1

        callbacks = [count_steps, count_batches]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

        # When grad accumulation steps (aka iter_per_step or batches_per_step) == 1, num_steps == num_batches.
        assert epoch_step_counter[0] == 4
        assert epoch_batch_counter[0] == 4

        epoch_step_counter[0] = 0
        epoch_batch_counter[0] = 0

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
            reset=True,
            batches_per_step=2,
        )

        # When grad accumulation steps != 1, num_steps != num_batches.
        assert epoch_step_counter[0] == 4
        assert epoch_batch_counter[0] == 8
Example #13
    def test_graph_nesting8_topology_copy_two_modules(self):
        """
            Tests whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs in the inner graph.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn8_ds")
        tn = TaylorNet(dim=4, name="tgn8_tn")
        loss = MSELoss(name="tgn8_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn8_g1") as g1:
            # Create input port definitions.
            g1.inputs["inner_x"] = tn.input_ports["x"]
            g1.inputs["inner_target"] = loss.input_ports["target"]

            # Connect modules to the bound inputs.
            y_pred1 = tn(x=g1.inputs["inner_x"])
            lss1 = loss(predictions=y_pred1, target=g1.inputs["inner_target"])

            # Manually bind the output ports.
            g1.outputs["inner_y_pred"] = y_pred1
            g1.outputs["inner_loss"] = lss1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn8_g2") as g2:
            x, y = ds()
            # Nest the inner graph.
            y_pred2, lss2 = g1(inner_x=x, inner_target=y)
            # Manually bind the output ports.
            g2.outputs["outer_y_pred"] = y_pred2
            g2.outputs["outer_loss"] = lss2

        # Check modules and steps.
        assert len(g2.steps) == 3
        assert len(g2) == 3

        # Check the output tensors.
        assert len(g2.output_tensors) == 2
        assert g2.output_tensors["outer_y_pred"] == y_pred2
        assert g2.output_tensors["outer_loss"] == lss2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert lss2 is not lss1
        assert g2.tensors[0]["x"] == x
        assert g2.tensors[0]["y"] == y
        # Internally the name "y_pred" is used, not the "bound output name": "inner_y_pred"!
        assert g2.tensors[1]["y_pred"] == y_pred2
        # Analogously with "loss".
        assert g2.tensors[2]["loss"] == lss2
Example #14
    def test_graph_nesting9_topology_copy_whole_graph(self):
        """
            Tests whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs in the inner graph. Manual binding of outer graph outputs.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn9_ds")
        tn = TaylorNet(dim=4, name="tgn9_tn")
        loss = MSELoss(name="tgn9_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn9_g1") as g1:
            # Connect modules.
            x, y = ds()
            y_pred1 = tn(x=x)
            lss1 = loss(predictions=y_pred1, target=y)

            # Manually bind the output ports.
            g1.outputs["inner_y_pred"] = y_pred1
            g1.outputs["inner_loss"] = lss1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn9_g2") as g2:
            y_pred2, lss2 = g1()
            # Manually bind the output ports.
            g2.outputs["outer_y_pred"] = y_pred2
            g2.outputs["outer_loss"] = lss2

        # Check modules and steps.
        assert len(g2.steps) == 3
        assert len(g2) == 3

        # Check the output tensors.
        assert len(g2.output_tensors) == 2
        assert g2.output_tensors["outer_y_pred"] == y_pred2
        assert g2.output_tensors["outer_loss"] == lss2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert lss2 is not lss1
        assert g2.tensors[0]["x"].ntype.compare(
            ds.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g2.tensors[0]["y"].ntype.compare(
            ds.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        # Internally the name "y_pred" is used, not the "bound output name": "inner_y_pred"!
        assert g2.tensors[1]["y_pred"].ntype.compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        # Analogously with "loss".
        assert g2.tensors[2]["loss"].ntype.compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
    def test_graph_serialization_1_simple_graph_no_binding(self):
        """ 
            Tests whether serialization of a simple graph works.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs1_dl")
        tn = TaylorNet(dim=4, name="tgs1_tn")
        loss = MSELoss(name="tgs1_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="g1") as g1:
            x, t = dl()
            prediction1 = tn(x=x)
            _ = loss(predictions=prediction1, target=t)

        # Serialize the graph.
        serialized_g1 = g1.serialize()

        # Create a second graph - deserialize with reusing.
        g2 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=True,
                                     name="g2")
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2

        # Delete modules.
        del dl
        del tn
        del loss
        # Delete graphs as they contain "hard" references to those modules.
        del g1
        del g2

        # Create a third graph - deserialize without reusing, should create new modules.
        g3 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=False,
                                     name="g3")
        serialized_g3 = g3.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g3

        # Deserialize the graph once more - modules with these names now exist, so deserializing without reusing is not allowed.
        with pytest.raises(KeyError):
            _ = NeuralGraph.deserialize(serialized_g1,
                                        reuse_existing_modules=False)
Example #16
    def test_graph_inputs_binding1_default(self):
        # Create modules.
        tn = TaylorNet(dim=4, name="tgi1_tn")
        loss = MSELoss(name="tgi1_loss")

        # Test default binding.
        with NeuralGraph() as g1:
            y_pred = tn(x=g1)
            lss = loss(predictions=y_pred, target=g1)

        assert len(g1.inputs) == 2
        assert g1.input_ports["x"].compare(
            tn.input_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g1.input_ports["target"].compare(
            loss.input_ports["target"]) == NeuralTypeComparisonResult.SAME
Example #17
    def test_graph_outputs_binding2(self):
        # Create modules.
        data_source = RealFunctionDataLayer(n=100,
                                            batch_size=1,
                                            name="tgo2_ds")
        tn = TaylorNet(dim=4, name="tgo2_tn")
        loss = MSELoss(name="tgo2_loss")

        # Test default binding.
        with NeuralGraph(operation_mode=OperationMode.training) as g1:
            # Create the graph by connecting the modules.
            x, y = data_source()
            y_pred = tn(x=x)
            lss = loss(predictions=y_pred, target=y)

        assert len(g1.outputs) == 4
        # Test ports.
        for (module, port, tensor) in [
            (data_source, "x", x),
            (data_source, "y", y),
            (tn, "y_pred", y_pred),
            (loss, "loss", lss),
        ]:
            # Compare definitions - from outputs.
            assert g1.outputs[port].ntype.compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Compare definitions - from output_ports.
            assert g1.output_ports[port].compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Compare definitions - from output_tensors.
            assert g1.output_tensors[port].compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Make sure that the tensor was bound, i.e. the bound output refers to the same object instance!
            assert g1.output_tensors[port] is tensor

        # Test manual binding.
        g1.outputs["my_prediction"] = y_pred
        g1.outputs["my_loss"] = lss

        assert len(g1.outputs) == 2
        assert g1.output_tensors["my_prediction"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert g1.output_tensors["my_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        # Finally, make sure that the user cannot "bind" "output_ports"!
        with pytest.raises(TypeError):
            g1.output_ports["my_prediction"] = y_pred
Example #18
    def test_dag(self):
        data_source = RealFunctionDataLayer(n=10000, batch_size=128)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()
        x, y = data_source()
        y_pred = trainable_module(x=x)
        _ = loss(predictions=y_pred, target=y)

        def wrong():
            data_source = RealFunctionDataLayer(n=10000, batch_size=128)
            trainable_module = TaylorNet(dim=4)
            loss = MSELoss()
            x, y = data_source()
            loss_tensor = loss(predictions=x, target=x)
            _ = trainable_module(x=loss_tensor)

        self.assertRaises(NeuralPortNmTensorMismatchError, wrong)
Example #19
    def test_default_output_ports(self):
        """ Tests automatic binding of default output ports. """
        dl = RealFunctionDataLayer(n=10, batch_size=1)
        m2 = TaylorNet(dim=4)
        loss = MSELoss()

        with NeuralGraph() as g1:
            x, t = dl()
            p = m2(x=x)

        # Tests output ports.
        assert len(g1.output_ports) == 3
        assert g1.output_ports["x"].compare(
            x) == NeuralTypeComparisonResult.SAME
        assert g1.output_ports["y"].compare(
            t) == NeuralTypeComparisonResult.SAME
        assert g1.output_ports["y_pred"].compare(
            p) == NeuralTypeComparisonResult.SAME
    def test_graph_serialization_4_graph_after_nesting_with_default_binding_reuse_modules(
            self):
        """ 
            Tests whether serialization works in the case when we serialize a graph after a different graph
            was nested in it, with additionally bound inputs and outputs (default port names).
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs4_dl")
        tn = TaylorNet(dim=4, name="tgs4_tn")
        loss = MSELoss(name="tgs4_loss")

        # Create "model".
        with NeuralGraph(operation_mode=OperationMode.both,
                         name="model") as model:
            # Add the module to the graph and bind its input port 'x'.
            y = tn(x=model)
            # NOTE: For some reason, after this call both the "tgs4_tn" and "model" objects
            # remain in the module/graph registries
            # (so somewhere down there a strong reference to the module or graph remains).
            # This happens ONLY when passing the graph as an argument!
            # (Check out the next test, which actually removes the module and graph.)
            # Still, that is not an issue, as we do not expect users
            # to delete and recreate modules in their "normal" applications.

        # Build the "training graph" - using the model copy.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgs4_training") as training:
            # Add modules to graph.
            x, t = dl()
            # Incorporate modules from the existing "model" graph.
            p = model(x=x)
            lss = loss(predictions=p, target=t)

        # Serialize the "training graph".
        serialized_training = training.serialize()

        # Create the second graph - deserialize with "module reusing".
        training2 = NeuralGraph.deserialize(serialized_training,
                                            reuse_existing_modules=True)
        serialized_training2 = training2.serialize()

        # Must be the same.
        assert serialized_training == serialized_training2
Example #21
    def test_explicit_graph(self):
        """
            Tests the integration of an `explicit` graph with actions API.
            In particular, checks whether the user can pass a NeuralGraph instance to train().
        """
        # Create modules.
        dl = RealFunctionDataLayer(n=100, batch_size=4)
        fx = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the g0 graph.
        g0 = NeuralGraph()

        # Activate the "g0 graph context" - all operations will be recorded to g0.
        with g0:
            x, t = dl()
            p = fx(x=x)
            lss = loss(predictions=p, target=t)
            # Bind the loss output.
            g0.outputs["loss"] = lss

        # Instantiate an optimizer to perform the `train` action.
        optimizer = PtActions()

        # Make sure user CANNOT pass training graph and tensors_to_optimize.
        with pytest.raises(ValueError):
            optimizer.train(
                tensors_to_optimize=lss,
                training_graph=g0,
                optimization_params={
                    "max_steps": 1,
                    "lr": 0.0003
                },
                optimizer="sgd",
            )

        # But user can invoke "train" action using graph only.
        optimizer.train(training_graph=g0,
                        optimization_params={
                            "max_steps": 1,
                            "lr": 0.0003
                        },
                        optimizer="sgd")
Example #22
    def test_nm_tensors_types(self):
        """
            Tests whether nmTensors are correct - checking type property.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=10, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        lss = loss(predictions=y_pred, target=y)

        # Check types.
        assert x.ntype.compare(
            data_source.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert y.ntype.compare(
            data_source.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert y_pred.ntype.compare(trainable_module.output_ports["y_pred"]
                                    ) == NeuralTypeComparisonResult.SAME
        assert lss.ntype.compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
Example #23
    def test_graph_inputs_binding2_manual(self):
        # Create modules.
        tn = TaylorNet(dim=4, name="tgi2_tn")
        loss = MSELoss(name="tgi2_loss")

        # Test "manual" binding.
        with NeuralGraph() as g1:
            # Bind the "x" input to tn.
            g1.inputs["i"] = tn.input_ports["x"]
            y_pred = tn(x=g1.inputs["i"])
            # Bing the "target" input to loss.
            g1.inputs["t"] = loss.input_ports["target"]
            lss = loss(predictions=y_pred, target=g1.inputs["t"])

        assert len(g1.inputs) == 2
        assert g1.input_ports["i"].compare(
            tn.input_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g1.input_ports["t"].compare(
            loss.input_ports["target"]) == NeuralTypeComparisonResult.SAME

        # Finally, make sure that the user cannot "bind" "input_ports"!
        with pytest.raises(TypeError):
            g1.input_ports["my_prediction"] = y_pred
Example #24
    def test_TensorboardLogger(self, clean_up, tmpdir):
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        logging_dir = tmpdir.mkdir("temp")

        writer = SummaryWriter(logging_dir)

        tb_logger = TensorboardLogger(writer, step_freq=1)
        callbacks = [tb_logger]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

        # efi.inspect("temp", tag="loss")
        inspection_units = efi.get_inspection_units(str(logging_dir), "",
                                                    "loss")

        # Make sure there is only 1 tensorboard file
        assert len(inspection_units) == 1

        # Assert that the loss scalar has been logged 4 times.
        assert len(inspection_units[0].field_to_obs['scalars']) == 4
Example #25
        # Return deserialized parameters.
        return deserialized_params


# Run on CPU.
nf = NeuralModuleFactory(placement=DeviceType.CPU)

# Instantiate RealFunctionDataLayer - here sampling f(x)=cos(x) for x in [-1, 1].
dl = RealFunctionDataLayer(n=100, f_name="cos", x_lo=-1, x_hi=1, batch_size=32)

# Instantiate a simple feed-forward, single layer neural network.
fx = CustomTaylorNet(dim=4, status=Status.error)

# Instantiate the loss.
mse_loss = MSELoss()

# Export the model configuration.
fx.export_to_config("/tmp/custom_taylor_net.yml")

# Create a second instance, using the parameters loaded from the previously created configuration.
# Please note that we are calling the overridden method from the CustomTaylorNet class.
fx2 = CustomTaylorNet.import_from_config("/tmp/custom_taylor_net.yml")

# Create a graph by connecting the outputs with inputs of modules.
x, y = dl()
# Please note that in the graph we are using the "second" instance.
p = fx2(x=x)
loss = mse_loss(predictions=p, target=y)

# SimpleLossLoggerCallback will print loss values to console.
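The script is truncated here. As a sketch only (assumed, not part of the original example), the remaining callback setup and training call in NeMo 0.x typically look like the following; the exact print_func and optimization_params shown are assumptions.

# Assumed continuation - a sketch of the standard NeMo 0.x training tail, not the original code.
callback = SimpleLossLoggerCallback(
    tensors=[loss],
    print_func=lambda x: print("Train loss: {:.4f}".format(x[0].item())),
)

# Invoke the "train" action on the factory instantiated above.
nf.train(
    tensors_to_optimize=[loss],
    callbacks=[callback],
    optimization_params={"num_epochs": 3, "lr": 0.0003},
    optimizer="sgd",
)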
Example #26
    def test_graph_nesting7_topology_copy_one_module_all_manual_connect(self):
        """
            Tests whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs; the inner graph connects to other modules.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn7_ds")
        tn = TaylorNet(dim=4, name="tgn7_tn")
        loss = MSELoss(name="tgn7_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn7_g1") as g1:
            # Copy the input type.
            g1.inputs["inner_x"] = tn.input_ports["x"]
            # Manually bind the input port.
            y_pred1 = tn(x=g1.inputs["inner_x"])
            # Manually bind the output port.
            g1.outputs["inner_y_pred"] = y_pred1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn7_g2") as g2:
            x, y = ds()
            y_pred2 = g1(inner_x=x)
            lss = loss(predictions=y_pred2, target=y)

        # Check steps.
        assert len(g2.steps) == 3
        assert g2.steps[1] == g1.steps[0]

        # Make sure that the modules are the same.
        assert len(g2) == 3
        assert g2["tgn7_tn"] is g1["tgn7_tn"]

        # Make sure that inputs are ok.
        assert len(g2.inputs) == 0

        # Check outputs.
        assert len(g2.outputs) == 4
        assert g2.output_ports["x"].compare(
            ds.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_ports["y"].compare(
            ds.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_ports["loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
        # The manually bound name!
        assert g2.output_ports["inner_y_pred"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME

        # Check the output tensors.
        assert len(g2.output_tensors) == 4
        assert g2.output_tensors["x"] == x
        assert g2.output_tensors["y"] == y
        assert g2.output_tensors["loss"] == lss
        # The manually bound name!
        assert g2.output_tensors["inner_y_pred"] == y_pred2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert g2.tensors[0]["x"] == x
        assert g2.tensors[0]["y"] == y
        assert g2.tensors[2]["loss"] == lss
        # Internally the name "y_pred" is used, not the "bound output name": "inner_y_pred"!
        assert g2.tensors[1]["y_pred"] == y_pred2

        # Update g2: manually bind only one output.
        with g2:
            g2.outputs["outer_loss"] = lss

        # Make sure that outputs are ok.
        assert len(g2.outputs) == 1
        assert g2.output_ports["outer_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_tensors["outer_loss"] is lss
Example #27
    def test_rename_and_log(self, clean_up):
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        class DummyNM(NonTrainableNM):
            def __init__(self):
                super().__init__()

            @property
            def input_ports(self):
                """Returns definitions of module input ports.

                Returns:
                  A (dict) mapping the module's input port names to NeuralTypes.
                """
                return {"x": NeuralType(('B', 'D'), ChannelType())}

            @property
            def output_ports(self):
                """Returns definitions of module output ports.

                Returns:
                  A (dict) mapping the module's output port names to NeuralTypes.
                """
                return {"y_pred": NeuralType(('B', 'D'), ChannelType())}

            def forward(self, x):
                return x + 1

        test = DummyNM()
        extra_tensor = test(x=y_pred)

        y_pred.rename("y_pred")
        assert y_pred.name == "y_pred"

        # Mock up both std and stderr streams.
        with logging.patch_stdout_handler(StringIO()) as std_out:
            self.nf.train(
                tensors_to_optimize=[loss_tensor],
                callbacks=[
                    SimpleLogger(step_freq=1,
                                 tensors_to_log=['y_pred', extra_tensor])
                ],
                optimization_params={
                    "max_steps": 4,
                    "lr": 0.01
                },
                optimizer="sgd",
            )

        output_lines = std_out.getvalue().splitlines()
        assert len(output_lines) == 8
        for i, line in enumerate(output_lines):
            if i % 2 == 0:
                assert y_pred.name in line
            else:
                assert extra_tensor.name in line