Example #1
    def test_epoch_decorators(self, clean_up):
        data_source = RealFunctionDataLayer(n=24, batch_size=12)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        epoch_start_counter = [0]
        epoch_end_counter = [0]

        @on_epoch_start
        def count_epoch_starts(state, counter=epoch_start_counter):
            counter[0] += 1

        @on_epoch_end
        def count_epoch_ends(state, counter=epoch_end_counter):
            counter[0] -= 1

        callbacks = [count_epoch_starts, count_epoch_ends]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

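        # n=24 with batch_size=12 gives 2 batches per epoch, so max_steps=4 spans 2 epochs;
        # the epoch-end callback decrements its counter, hence the -2 below.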
        assert epoch_start_counter[0] == 2
        assert epoch_end_counter[0] == -2
Example #2
    def test_SimpleLogger(self, clean_up):
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        # Mock up the stdout stream to capture the logger output.
        with logging.patch_stdout_handler(StringIO()) as std_out:
            self.nf.train(
                tensors_to_optimize=[loss_tensor],
                callbacks=[SimpleLogger(step_freq=1)],
                optimization_params={
                    "max_steps": 4,
                    "lr": 0.01
                },
                optimizer="sgd",
            )

        output_lines = std_out.getvalue().splitlines()
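        # step_freq=1 with max_steps=4 means the logger should emit one loss line per step.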
        assert len(output_lines) == 4
        for line in output_lines:
            assert "loss" in line
Example #3
def wrong():
    data_source = RealFunctionDataLayer(n=10000, batch_size=128)
    trainable_module = TaylorNet(dim=4)
    loss = MSELoss()
    x, y = data_source()
    # Wire the ports incorrectly - this mismatch raises NeuralPortNmTensorMismatchError
    # (see the test_dag example below).
    loss_tensor = loss(predictions=x, target=x)
    _ = trainable_module(x=loss_tensor)
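
# A minimal usage sketch (an assumption, not part of the original snippet), with pytest and
# NeuralPortNmTensorMismatchError assumed to be imported as in the test_dag example below:
with pytest.raises(NeuralPortNmTensorMismatchError):
    wrong()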
Example #4
    def test_graph_save_load(self, tmpdir):
        """
            Tests graph saving and loading.
        
            Args:
                tmpdir: Fixture which will provide a temporary directory.
        """

        dl = RealFunctionDataLayer(n=10, batch_size=1)
        tn = TaylorNet(dim=4)
        # Get the "original" weights.
        weights1 = get_state_dict(tn)

        # Create a simple graph.
        with NeuralGraph() as g1:
            x, t = dl()
            p = tn(x=x)

        # Generate filename in the temporary directory.
        tmp_file_name = str(tmpdir.join("tgsl_g1.chkpt"))
        # Save graph.
        g1.save_to(tmp_file_name)

        # Load graph.
        g1.restore_from(tmp_file_name)

        # Get the "restored" weights.
        weights2 = get_state_dict(tn)

        # Compare state dicts.
        for key in weights1:
            assert array_equal(weights1[key].cpu().numpy(), weights2[key].cpu().numpy())
Example #5
    def test_graph_serialization_2_simple_graph_output_binding(self):
        """ 
            Tests whether serialization of a simple graph with output binding works.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs2_dl")
        tn = TaylorNet(dim=4, name="tgs2_tn")
        loss = MSELoss(name="tgs2_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.evaluation) as g1:
            x, t = dl()
            prediction1 = tn(x=x)
            _ = loss(predictions=prediction1, target=t)
        # Manually bind the selected outputs.
        g1.outputs["ix"] = x
        g1.outputs["te"] = t
        g1.outputs["prediction"] = prediction1

        # Serialize graph
        serialized_g1 = g1.serialize()

        # Create the second graph - deserialize with reusing.
        g2 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=True)
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2
Example #6
    def test_graph_serialization_7_arbitrary_graph_with_loops(self):
        """ 
            Tests whether serialization works for a graph containing a "loop",
            i.e. a graph in which the same module (TaylorNet) is called twice.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="dl")
        tn = TaylorNet(dim=4, name="tn")
        loss = MSELoss(name="loss")

        # Build a graph with a loop.
        with NeuralGraph(name="graph") as graph:
            # Add modules to graph.
            x, t = dl()
            # First call to TN.
            p1 = tn(x=x)
            # Second call to TN.
            p2 = tn(x=p1)
            # Take output of second, pass it to loss.
            lss = loss(predictions=p2, target=t)

        # Make sure all connections are there!
        assert len(graph.tensor_list) == 5
        # 4 would mean that we have overwritten the "p1" (tn->y_pred) tensor!

        # Serialize the graph.
        serialized_graph = graph.serialize()

        # Create the second graph - deserialize with "module reusing".
        graph2 = NeuralGraph.deserialize(serialized_graph,
                                         reuse_existing_modules=True)
        serialized_graph2 = graph2.serialize()

        # Must be the same.
        assert serialized_graph == serialized_graph2
Example #7
    def test_graph_simple_import_export(self, tmpdir):
        """
            Tests whether a Neural Graph can be exported to and imported from a configuration file.

            Args:
                tmpdir: Fixture which will provide a temporary directory.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgio1_dl")
        tn = TaylorNet(dim=4, name="tgio1_tn")
        loss = MSELoss(name="tgio1_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.training) as g1:
            x, t = dl()
            p = tn(x=x)
            _ = loss(predictions=p, target=t)

        # Serialize graph
        serialized_g1 = g1.serialize()

        # Generate filename in the temporary directory.
        tmp_file_name = str(tmpdir.mkdir("export").join("simple_graph.yml"))

        # Export graph to file.
        g1.export_to_config(tmp_file_name)

        # Create the second graph - import!
        g2 = NeuralGraph.import_from_config(tmp_file_name,
                                            reuse_existing_modules=True)
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2
Example #8
    def test_explicit_graph_with_activation(self):
        """ 
            Tests initialization of an `explicit` graph and decoupling of graph creation from its activation. 
            Also tests module access.
        """
        # Create modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1, name="dl")
        fx = TaylorNet(dim=4, name="fx")
        loss = MSELoss(name="loss")

        # Create the g0 graph.
        g0 = NeuralGraph()

        # Activate the "g0 graph context" - all operations will be recorded to g0.
        with g0:
            x, t = dl()
            p = fx(x=x)
            lss = loss(predictions=p, target=t)

        # Assert that there are 3 modules in the graph.
        assert len(g0) == 3

        # Test access modules.
        assert g0["dl"] is dl
        assert g0["fx"] is fx
        assert g0["loss"] is loss

        with pytest.raises(KeyError):
            g0["other_module"]
Example #9
    def test_nm_tensors_producer_args(self):
        """
            Tests whether nmTensors are correct - producers and their args.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        # check producers' bookkeeping
        assert loss_tensor.producer_name == loss.name
        assert loss_tensor.producer_args == {
            "predictions": y_pred,
            "target": y
        }
        assert y_pred.producer_name == trainable_module.name
        assert y_pred.producer_args == {"x": x}
        assert y.producer_name == data_source.name
        assert y.producer_args == {}
        assert x.producer_name == data_source.name
        assert x.producer_args == {}
Example #10
    def test_graph_serialization_6_graph_after_nesting_with_manual_binding(
            self):
        """ 
            Tests whether serialization works when we serialize a graph after a different graph
            was nested in it, with additionally bound inputs and outputs (manual port names).
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs6_dl")
        tn = TaylorNet(dim=4, name="tgs6_tn")
        loss = MSELoss(name="tgs6_loss")

        # Create "model".
        with NeuralGraph(operation_mode=OperationMode.both,
                         name="tgs6_model") as model:
            # Manually bind input port: "input" -> "x"
            model.inputs["input"] = tn.input_ports["x"]
            # Add the module to the graph and bind its input port 'x'.
            y = tn(x=model.inputs["input"])
            # Manual output bind.
            model.outputs["output"] = y

        # Serialize "model".
        serialized_model = model.serialize()

        # Delete model-related stuff.
        del model
        del tn

        # Deserialize the "model copy".
        model_copy = NeuralGraph.deserialize(serialized_model,
                                             name="tgs6_model_copy")

        # Build the "training graph" - using the model copy.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgs6_training") as training:
            # Add modules to graph.
            x, t = dl()
            # Incorporate modules from the existing "model" graph.
            p = model_copy(
                input=x
            )  # Note: this output should actually be named "output", not "y_pred"!
            lss = loss(predictions=p, target=t)

        # Serialize the "training graph".
        serialized_training = training.serialize()

        # Delete everything.
        del dl
        del loss
        del model_copy
        del training

        # Create the second graph - deserialize without "module reusing".
        training2 = NeuralGraph.deserialize(serialized_training)
        serialized_training2 = training2.serialize()

        # Must be the same.
        assert serialized_training == serialized_training2
Example #11
    def test_graph_outputs_binding1(self):
        # Create modules.
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        tn = TaylorNet(dim=4)
        loss = MSELoss()

        with NeuralGraph() as g:
            # Create the graph by connecting the modules.
            x, y = data_source()
            y_pred = tn(x=x)
            lss = loss(predictions=y_pred, target=y)

        # Test default binding.
        bound_outputs = GraphOutputs(g.tensors)

        bound_outputs.bind([x, y])
        bound_outputs.bind([y_pred])
        bound_outputs.bind([lss])

        # Delete not allowed.
        with pytest.raises(TypeError):
            del bound_outputs["loss"]

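        # Four tensors bound under their default port names: x, y, y_pred and loss.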
        assert len(bound_outputs) == 4
        assert len(bound_outputs.tensors) == 4
        assert len(bound_outputs.tensor_list) == 4

        defs = bound_outputs.definitions
        assert defs["x"].compare(
            data_source.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert defs["y"].compare(
            data_source.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert defs["y_pred"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert defs["loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        with pytest.raises(KeyError):
            _ = defs["lss"]

        # Bind outputs manually.
        bound_outputs["my_prediction"] = y_pred
        bound_outputs["my_loss"] = lss

        # Delete not allowed.
        with pytest.raises(TypeError):
            del bound_outputs["my_prediction"]

        assert len(bound_outputs) == 2
        defs = bound_outputs.definitions
        assert defs["my_prediction"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert defs["my_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        with pytest.raises(KeyError):
            _ = defs["x"]
Example #12
    def test_graph_nesting2_possible_operation_modes(self):
        """ 
            Tests whether invalid nesting (i.e. nesting of graphs with incompatible modes) throws exceptions.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1)

        with NeuralGraph(operation_mode=OperationMode.both) as both:
            _, _ = dl()

        with NeuralGraph(operation_mode=OperationMode.training) as training:
            _, _ = dl()

        with NeuralGraph(operation_mode=OperationMode.evaluation) as inference:
            _, _ = dl()

        # Allowed operations.
        # Can nest 'both' into 'training'.
        with NeuralGraph(operation_mode=OperationMode.training):
            _, _ = both()

        # Can nest 'both' into 'inference'.
        with NeuralGraph(operation_mode=OperationMode.evaluation):
            _, _ = both()

        # Can nest 'training' into 'training'.
        with NeuralGraph(operation_mode=OperationMode.training):
            _, _ = training()

        # Can nest 'inference' into 'inference'.
        with NeuralGraph(operation_mode=OperationMode.evaluation):
            _, _ = inference()

        # Can nest 'both' into 'both'.
        with NeuralGraph(operation_mode=OperationMode.both):
            _, _ = both()

        # Operations not allowed.
        # Cannot nest 'inference' into 'training'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.training):
                _, _ = inference()

        # Cannot nest 'training' into 'inference'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.evaluation):
                _, _ = training()

        # Cannot nest 'training' into 'both'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.both):
                _, _ = training()

        # Cannot nest 'inference' into 'both'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.both):
                _, _ = inference()
Example #13
    def test_step_batch_decorators(self, clean_up):
        """Showcase the difference between step and batch"""
        data_source = RealFunctionDataLayer(n=24, batch_size=12)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        epoch_step_counter = [0]
        epoch_batch_counter = [0]

        @on_step_end
        def count_steps(state, counter=epoch_step_counter):
            counter[0] += 1

        @on_batch_end
        def count_batches(state, counter=epoch_batch_counter):
            counter[0] += 1

        callbacks = [count_steps, count_batches]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

        # When grad accumulation steps (aka iter_per_step or batches_per_step) = 1, num_steps == num_batches.
        assert epoch_step_counter[0] == 4
        assert epoch_batch_counter[0] == 4

        epoch_step_counter[0] = 0
        epoch_batch_counter[0] = 0

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
            reset=True,
            batches_per_step=2,
        )

        # When grad accumulation steps != 1 (here batches_per_step=2), num_steps != num_batches: 4 steps consume 8 batches.
        assert epoch_step_counter[0] == 4
        assert epoch_batch_counter[0] == 8
Example #14
    def test_graph_nesting8_topology_copy_two_modules(self):
        """
            Test whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs in the inner graph.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn8_ds")
        tn = TaylorNet(dim=4, name="tgn8_tn")
        loss = MSELoss(name="tgn8_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn8_g1") as g1:
            # Create input port definitions.
            g1.inputs["inner_x"] = tn.input_ports["x"]
            g1.inputs["inner_target"] = loss.input_ports["target"]

            # Connect modules and bound inputs.
            y_pred1 = tn(x=g1.inputs["inner_x"])
            lss1 = loss(predictions=y_pred1, target=g1.inputs["inner_target"])

            # Manually bind the output ports.
            g1.outputs["inner_y_pred"] = y_pred1
            g1.outputs["inner_loss"] = lss1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn8_g2") as g2:
            x, y = ds()
            # Nest the inner graph.
            y_pred2, lss2 = g1(inner_x=x, inner_target=y)
            # Manually bind the output ports.
            g2.outputs["outer_y_pred"] = y_pred2
            g2.outputs["outer_loss"] = lss2

        # Check modules and steps.
        assert len(g2.steps) == 3
        assert len(g2) == 3

        # Check the output tensors.
        assert len(g2.output_tensors) == 2
        assert g2.output_tensors["outer_y_pred"] == y_pred2
        assert g2.output_tensors["outer_loss"] == lss2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert lss2 is not lss1
        assert g2.tensors[0]["x"] == x
        assert g2.tensors[0]["y"] == y
        # Internally the name "y_pred" is used, not the "bound output name": "inner_y_pred"!
        assert g2.tensors[1]["y_pred"] == y_pred2
        # Analogously for "loss".
        assert g2.tensors[2]["loss"] == lss2
Example #15
    def test_graph_nesting9_topology_copy_whole_graph(self):
        """
            Test whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs in the inner graph. Manual binding of outer graph outputs.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn9_ds")
        tn = TaylorNet(dim=4, name="tgn9_tn")
        loss = MSELoss(name="tgn9_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn9_g1") as g1:
            # Connect modules.
            x, y = ds()
            y_pred1 = tn(x=x)
            lss1 = loss(predictions=y_pred1, target=y)

            # Manually bind the output ports.
            g1.outputs["inner_y_pred"] = y_pred1
            g1.outputs["inner_loss"] = lss1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn9_g2") as g2:
            y_pred2, lss2 = g1()
            # Manually bind the output ports.
            g2.outputs["outer_y_pred"] = y_pred2
            g2.outputs["outer_loss"] = lss2

        # Check modules and steps.
        assert len(g2.steps) == 3
        assert len(g2) == 3

        # Check the output tensors.
        assert len(g2.output_tensors) == 2
        assert g2.output_tensors["outer_y_pred"] == y_pred2
        assert g2.output_tensors["outer_loss"] == lss2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert lss2 is not lss1
        assert g2.tensors[0]["x"].ntype.compare(
            ds.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g2.tensors[0]["y"].ntype.compare(
            ds.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        # Internally the name "y_pred" is used, not the "bound output name": "inner_y_pred"!
        assert g2.tensors[1]["y_pred"].ntype.compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        # Analogously for "loss".
        assert g2.tensors[2]["loss"].ntype.compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
Example #16
    def test_simple_train_named_output(self):
        """ Test named output """
        data_source = RealFunctionDataLayer(n=10, batch_size=1)
        # Get data
        data = data_source()

        # Check output class naming coherence.
        assert type(data).__name__ == 'RealFunctionDataLayerOutput'

        # Check types.
        assert data.x.compare(
            data_source.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert data.y.compare(
            data_source.output_ports["y"]) == NeuralTypeComparisonResult.SAME
Example #17
    def test_graph_serialization_1_simple_graph_no_binding(self):
        """ 
            Tests whether serialization of a simple graph works.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs1_dl")
        tn = TaylorNet(dim=4, name="tgs1_tn")
        loss = MSELoss(name="tgs1_loss")

        # Create the graph.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="g1") as g1:
            x, t = dl()
            prediction1 = tn(x=x)
            _ = loss(predictions=prediction1, target=t)

        # Serialize the graph.
        serialized_g1 = g1.serialize()

        # Create a second graph - deserialize with reusing.
        g2 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=True,
                                     name="g2")
        serialized_g2 = g2.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g2

        # Delete modules.
        del dl
        del tn
        del loss
        # Delete graphs as they contain "hard" references to those modules.
        del g1
        del g2

        # Create a third graph - deserialize without reusing, should create new modules.
        g3 = NeuralGraph.deserialize(serialized_g1,
                                     reuse_existing_modules=False,
                                     name="g3")
        serialized_g3 = g3.serialize()

        # Must be the same.
        assert serialized_g1 == serialized_g3

        # Deserializing the graph again without reusing modules is not allowed - modules with these names already exist.
        with pytest.raises(KeyError):
            _ = NeuralGraph.deserialize(serialized_g1,
                                        reuse_existing_modules=False)
Example #18
    def test_graph_outputs_binding2(self):
        # Create modules.
        data_source = RealFunctionDataLayer(n=100,
                                            batch_size=1,
                                            name="tgo2_ds")
        tn = TaylorNet(dim=4, name="tgo2_tn")
        loss = MSELoss(name="tgo2_loss")

        # Test default binding.
        with NeuralGraph(operation_mode=OperationMode.training) as g1:
            # Create the graph by connecting the modules.
            x, y = data_source()
            y_pred = tn(x=x)
            lss = loss(predictions=y_pred, target=y)

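        # Default binding: all four produced tensors (x, y, y_pred, loss) are bound as graph outputs.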
        assert len(g1.outputs) == 4
        # Test ports.
        for (module, port, tensor) in [
            (data_source, "x", x),
            (data_source, "y", y),
            (tn, "y_pred", y_pred),
            (loss, "loss", lss),
        ]:
            # Compare definitions - from outputs.
            assert g1.outputs[port].ntype.compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Compare definitions - from output_ports.
            assert g1.output_ports[port].compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Compare definitions - from output_tensors.
            assert g1.output_tensors[port].compare(
                module.output_ports[port]) == NeuralTypeComparisonResult.SAME
            # Make sure that the tensor was bound, i.e. the output refers to the same object instance!
            assert g1.output_tensors[port] is tensor

        # Test manual binding.
        g1.outputs["my_prediction"] = y_pred
        g1.outputs["my_loss"] = lss

        assert len(g1.outputs) == 2
        assert g1.output_tensors["my_prediction"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME
        assert g1.output_tensors["my_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME

        # Finally, make sure that the user cannot "bind" "output_ports"!
        with pytest.raises(TypeError):
            g1.output_ports["my_prediction"] = y_pred
Example #19
    def test_dag(self):
        data_source = RealFunctionDataLayer(n=10000, batch_size=128)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()
        x, y = data_source()
        y_pred = trainable_module(x=x)
        _ = loss(predictions=y_pred, target=y)

        def wrong():
            data_source = RealFunctionDataLayer(n=10000, batch_size=128)
            trainable_module = TaylorNet(dim=4)
            loss = MSELoss()
            x, y = data_source()
            loss_tensor = loss(predictions=x, target=x)
            _ = trainable_module(x=loss_tensor)

        self.assertRaises(NeuralPortNmTensorMismatchError, wrong)
Example #20
    def test_nm_tensors_producer_consumers(self):
        """
            Tests whether nmTensors are correct - checking producers and consumers.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=10, batch_size=1, name="source")
        trainable_module = TaylorNet(dim=4, name="tm")
        loss = MSELoss(name="loss")
        loss2 = MSELoss(name="loss2")

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        lss = loss(predictions=y_pred, target=y)
        lss2 = loss2(predictions=y_pred, target=y)

        # Check tensor x producer and consumers.
        p = x.producer_step_module_port
        cs = x.consumers
        assert p.module_name == "source"
        assert p.port_name == "x"
        assert len(cs) == 1
        assert cs[0].module_name == "tm"
        assert cs[0].port_name == "x"

        # Check tensor y producer and consumers.
        p = y.producer_step_module_port
        cs = y.consumers
        assert p.module_name == "source"
        assert p.port_name == "y"
        assert len(cs) == 2
        assert cs[0].module_name == "loss"
        assert cs[0].port_name == "target"
        assert cs[1].module_name == "loss2"
        assert cs[1].port_name == "target"

        # Check tensor y_pred producer and consumers.
        p = y_pred.producer_step_module_port
        cs = y_pred.consumers
        assert p.module_name == "tm"
        assert p.port_name == "y_pred"
        assert len(cs) == 2
        assert cs[0].module_name == "loss"
        assert cs[0].port_name == "predictions"
        assert cs[1].module_name == "loss2"
        assert cs[1].port_name == "predictions"
Example #21
    def test_graph_nesting4_1_topology_copy_one_module_manual_outputs_bound_only_in_inner(
            self):
        """
            Test whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: binding of outputs with manual port names - only in the inner graph.
            Testing whether the outputs of the outer graph keep the manually bound names.
        """

        dl = RealFunctionDataLayer(n=10, batch_size=1, name="tgn41_dl")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn41_g1") as g1:
            xg1, tg1 = dl()
            # Manually bind the output ports, using different (non-default) names.
            g1.outputs["inner_x"] = xg1
            g1.outputs["inner_t"] = tg1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn41_g2") as g2:
            # Get them as a tuple.
            outputs = g1()

        # Retrieve tensors from tuple.
        assert outputs._fields[0] == "inner_x"
        assert outputs._fields[1] == "inner_t"
        xg2 = outputs.inner_x
        tg2 = outputs.inner_t

        # Make sure that the outer graph has outputs with the same names.
        assert len(g1.outputs) == len(g2.outputs)
        for inter_port, outer_port in [("inner_x", "inner_x"),
                                       ("inner_t", "inner_t")]:
            # Definitions are the same: test two "paths" of accessing the type.
            assert g1.output_ports[inter_port].compare(
                g2.output_ports[outer_port]) == NeuralTypeComparisonResult.SAME
            assert (g1.outputs[inter_port].ntype.compare(
                g2.outputs[outer_port].ntype) ==
                    NeuralTypeComparisonResult.SAME)
            # At the same time - those have to be two different port objects!
            assert g1.outputs[inter_port] is not g2.outputs[outer_port]
            # And different tensors (as those are "internally produced tensors"!)
            assert g1.output_tensors[inter_port] is not g2.output_tensors[
                outer_port]
Example #22
    def test_default_output_ports(self):
        """ Tests automatic binding of default output ports. """
        dl = RealFunctionDataLayer(n=10, batch_size=1)
        m2 = TaylorNet(dim=4)
        loss = MSELoss()

        with NeuralGraph() as g1:
            x, t = dl()
            p = m2(x=x)

        # Tests output ports.
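        # Default binding exposes the data layer's "x" and "y" plus the TaylorNet prediction "y_pred".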
        assert len(g1.output_ports) == 3
        assert g1.output_ports["x"].compare(
            x) == NeuralTypeComparisonResult.SAME
        assert g1.output_ports["y"].compare(
            t) == NeuralTypeComparisonResult.SAME
        assert g1.output_ports["y_pred"].compare(
            p) == NeuralTypeComparisonResult.SAME
Example #23
    def test_module_nesting1_change_operation_modes(self):
        """ 
            Tests whether the module's operation mode follows the mode of the graph it is nested in.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1)

        with NeuralGraph(operation_mode=OperationMode.both):
            _, _ = dl()
            assert dl.operation_mode == OperationMode.both

        with NeuralGraph(operation_mode=OperationMode.training):
            _, _ = dl()
            assert dl.operation_mode == OperationMode.training

        with NeuralGraph(operation_mode=OperationMode.evaluation):
            _, _ = dl()
            assert dl.operation_mode == OperationMode.evaluation
Example #24
    def test_graph_nesting4_topology_copy_one_module_manual_outputs(self):
        """
            Test whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: binding of outputs, manual port names.
        """

        dl = RealFunctionDataLayer(n=10, batch_size=1, name="tgn4_dl")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn4_g1") as g1:
            xg1, tg1 = dl()
            # Manually bind only one output port, under a different name - changing both the name and the number of graph outputs.
            g1.outputs["inner_x"] = xg1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn4_g2") as g2:
            xg2 = g1()
            # Manually bind the output port, again using a different name.
            g2.outputs["outer_x"] = xg2

        # We expect that both graphs will have the same steps.
        assert len(g1.steps) == len(g2.steps)
        assert g1.steps[0] == g2.steps[0]

        # Make sure that the modules are the same.
        assert len(g1) == len(g2)
        assert g1["tgn4_dl"] is g2["tgn4_dl"]

        # Make sure that outputs are ok.
        assert len(g1.outputs) == len(g2.outputs)
        for inter_port, outer_port in [("inner_x", "outer_x")]:
            # Definitions are the same: test two "paths" of accessing the type.
            assert g1.output_ports[inter_port].compare(
                g2.output_ports[outer_port]) == NeuralTypeComparisonResult.SAME
            assert (g1.outputs[inter_port].ntype.compare(
                g2.outputs[outer_port].ntype) ==
                    NeuralTypeComparisonResult.SAME)
            # At the same time - those have to be two different port objects!
            assert g1.outputs[inter_port] is not g2.outputs[outer_port]
            # And different tensors (as those are "internally produced tensors"!)
            assert g1.output_tensors[inter_port] is not g2.output_tensors[
                outer_port]
Example #25
    def test_explicit_graph(self):
        """
            Tests the integration of an `explicit` graph with actions API.
            In particular, checks whether user can pass NG instance to train().
        """
        # Create modules.
        dl = RealFunctionDataLayer(n=100, batch_size=4)
        fx = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the g0 graph.
        g0 = NeuralGraph()

        # Activate the "g0 graph context" - all operations will be recorded to g0.
        with g0:
            x, t = dl()
            p = fx(x=x)
            lss = loss(predictions=p, target=t)
            # Bind the loss output.
            g0.outputs["loss"] = lss

        # Instantiate an optimizer to perform the `train` action.
        optimizer = PtActions()

        # Make sure the user CANNOT pass both a training graph and tensors_to_optimize.
        with pytest.raises(ValueError):
            optimizer.train(
                tensors_to_optimize=lss,
                training_graph=g0,
                optimization_params={
                    "max_steps": 1,
                    "lr": 0.0003
                },
                optimizer="sgd",
            )

        # But user can invoke "train" action using graph only.
        optimizer.train(training_graph=g0,
                        optimization_params={
                            "max_steps": 1,
                            "lr": 0.0003
                        },
                        optimizer="sgd")
Example #26
    def test_graph_serialization_4_graph_after_nesting_with_default_binding_reuse_modules(
            self):
        """ 
            Tests whether serialization works when we serialize a graph after a different graph
            was nested in it, with additionally bound inputs and outputs (default port names).
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgs4_dl")
        tn = TaylorNet(dim=4, name="tgs4_tn")
        loss = MSELoss(name="tgs4_loss")

        # Create "model".
        with NeuralGraph(operation_mode=OperationMode.both,
                         name="model") as model:
            # Add the module to the graph and bind its input port 'x'.
            y = tn(x=model)
            # NOTE: For some reason after this call both the "tgs4_tn" and "model" objects
            # remain in the module/graph registries.
            # (So somewhere down there remains a strong reference to module or graph).
            # This happens ONLY when passing graph as argument!
            # (Check out the next test which actually removes module and graph!).
            # Still, that is not an issue, as we do not expect the users
            # to delete and recreate modules in their "normal" applications.

        # Build the "training graph" - using the model copy.
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgs4_training") as training:
            # Add modules to graph.
            x, t = dl()
            # Incorporate modules from the existing "model" graph.
            p = model(x=x)
            lss = loss(predictions=p, target=t)

        # Serialize the "training graph".
        serialized_training = training.serialize()

        # Create the second graph - deserialize with "module reusing".
        training2 = NeuralGraph.deserialize(serialized_training,
                                            reuse_existing_modules=True)
        serialized_training2 = training2.serialize()

        # Must be the same.
        assert serialized_training == serialized_training2
Example #27
    def test_explicit_graph_manual_activation(self):
        """  Tests initialization of an `explicit` graph using `manual` activation. """
        # Create modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1)
        fx = TaylorNet(dim=4)

        # Create the g0 graph.
        g0 = NeuralGraph()

        # Activate the "g0 graph context" "manually" - all steps will be recorded to g0.
        g0.activate()

        # Define g0 - connections between the modules.
        x, t = dl()
        p = fx(x=x)

        # Deactivate the "g0 graph context".
        # Note that this is really optional, as long as there are no other steps to be recorded.
        g0.deactivate()

        # Assert that there are 2 modules in the graph.
        assert len(g0) == 2
Example #28
    def test_nm_tensors_types(self):
        """
            Tests whether nmTensors are correct - checking type property.
        """
        # Create modules.
        data_source = RealFunctionDataLayer(n=10, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        lss = loss(predictions=y_pred, target=y)

        # Check types.
        assert x.ntype.compare(
            data_source.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert y.ntype.compare(
            data_source.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert y_pred.ntype.compare(trainable_module.output_ports["y_pred"]
                                    ) == NeuralTypeComparisonResult.SAME
        assert lss.ntype.compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
Example #29
    def test_TensorboardLogger(self, clean_up, tmpdir):
        data_source = RealFunctionDataLayer(n=100, batch_size=1)
        trainable_module = TaylorNet(dim=4)
        loss = MSELoss()

        # Create the graph by connecting the modules.
        x, y = data_source()
        y_pred = trainable_module(x=x)
        loss_tensor = loss(predictions=y_pred, target=y)

        logging_dir = tmpdir.mkdir("temp")

        writer = SummaryWriter(logging_dir)

        tb_logger = TensorboardLogger(writer, step_freq=1)
        callbacks = [tb_logger]

        self.nf.train(
            tensors_to_optimize=[loss_tensor],
            callbacks=callbacks,
            optimization_params={
                "max_steps": 4,
                "lr": 0.01
            },
            optimizer="sgd",
        )

        # efi.inspect("temp", tag="loss")
        inspection_units = efi.get_inspection_units(str(logging_dir), "",
                                                    "loss")

        # Make sure there is only 1 tensorboard file
        assert len(inspection_units) == 1

        # Assert that the loss scalar has been logged 4 times.
        assert len(inspection_units[0].field_to_obs['scalars']) == 4
Example #30
        # Custom "deserialization" of the status.
        if init_params["status"] == 0:
            deserialized_params["status"] = Status.success
        else:
            deserialized_params["status"] = Status.error

        # Return deserialized parameters.
        return deserialized_params

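# A minimal sketch (an assumption - the definition is not shown in this excerpt) of the
# Status enum used by the custom (de)serialization above and by the CustomTaylorNet
# constructor below; the 0 / non-zero convention matches the "status" check above.
from enum import Enum


class Status(Enum):
    success = 0
    error = 1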

# Run on CPU.
nf = NeuralModuleFactory(placement=DeviceType.CPU)

# Instantiate RealFunctionDataLayer (defaults to f=torch.sin); here we use f_name="cos", sampling from x=[-1, 1].
dl = RealFunctionDataLayer(n=100, f_name="cos", x_lo=-1, x_hi=1, batch_size=32)

# Instantiate a simple feed-forward, single layer neural network.
fx = CustomTaylorNet(dim=4, status=Status.error)

# Instantiate the loss.
mse_loss = MSELoss()

# Export the model configuration.
fx.export_to_config("/tmp/custom_taylor_net.yml")

# Create a second instance, using the parameters loaded from the previously created configuration.
# Please note that we are calling the overridden method from the CustomTaylorNet class.
fx2 = CustomTaylorNet.import_from_config("/tmp/custom_taylor_net.yml")

# Create a graph by connecting the outputs with inputs of modules.