Example No. 1
    def test_grads_no_error_correct_pred(self):
        """
        Case in which the prediction is correct.
        All learnable parameters should have a gradient value
        that does not cause errors with an optimizer.

        Passes if no error occurs.
        """
        node = gen_node_at(ZERO)
        marble = gen_marble_at(ZERO)
        model = NenwinModel([node], [marble])
        model.make_timestep(1.0)
        optim = torch.optim.Adam(model.parameters())
        optim.zero_grad()

        # Third and fourth positional arguments are vel_weight and pos_weight
        # (cf. the keyword usage in the later examples).
        loss_fun = NenwinLossFunction([node], model, 0, 1)

        target_index = 0
        result = loss_fun(target_index)

        try:
            result.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")
Example No. 2
    def test_get_params_add_marbles(self):
        """
       Later added marbles should not be registered as submodules.
        """
        marbles = (generate_dummy_marble(), )
        nodes = (generate_dummy_node(), )
        model = NenwinModel(nodes)

        model.add_marbles(marbles)

        # 8 parameters per particle: init_pos, init_vel, init_acc, mass,
        # and the 4 stiffness / attraction params
        result = tuple(model.parameters())
        print(result)
        self.assertEqual(len(result), 8)
Example No. 3
    def test_get_params(self):
        """
        Nodes and Marbles should be registered as submodules
        of the model within the PyTorch framework,
        and hence all learnable parameters should be
        obtainable via model.parameters().
        """
        marbles = (generate_dummy_marble(), )
        nodes = (generate_dummy_node(), )
        model = NenwinModel(nodes, marbles)
        # 8 parameters per particle: init_pos, init_vel, init_acc, mass,
        # and the 4 stiffness / attraction params
        result = tuple(model.parameters())
        print(result)
        self.assertEqual(len(result), 16)
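
Both parameter-count assertions rely on each particle registering exactly 8 learnable parameters with PyTorch. A minimal sketch of how such registration could look follows; this is not the project's actual particle class, and the names of the four stiffness/attraction parameters are assumptions:

import torch
import torch.nn as nn

class ParticleSketch(nn.Module):
    """Minimal sketch, NOT the project's particle implementation: shows why
    each particle contributes 8 entries to model.parameters()."""

    def __init__(self, pos: torch.Tensor):
        super().__init__()
        self.init_pos = nn.Parameter(pos.clone())
        self.init_vel = nn.Parameter(torch.zeros_like(pos))
        self.init_acc = nn.Parameter(torch.zeros_like(pos))
        self.mass = nn.Parameter(torch.tensor(1.0))
        # The 4 stiffness/attraction parameters; these names are assumptions.
        self.marble_stiffness = nn.Parameter(torch.tensor(1.0))
        self.node_stiffness = nn.Parameter(torch.tensor(1.0))
        self.marble_attraction = nn.Parameter(torch.tensor(1.0))
        self.node_attraction = nn.Parameter(torch.tensor(1.0))

# One particle exposes 8 parameters; two particles would expose 16,
# matching the assertions above.
assert len(tuple(ParticleSketch(torch.zeros(2)).parameters())) == 8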
Example No. 4
def create_trainer(model: NenwinModel,
                   input_placer: InputPlacer,
                   output_nodes: Iterable[MarbleEaterNode],
                   dataset: BanknoteDataset,
                   loss_pos_weight: float,
                   loss_vel_weight: float,
                   architecture: ARCHITECTURES,
                   ) -> Tuple[NenwinTrainer, FilenameGenerator]:

    loss_funct = NenwinLossFunction(output_nodes, model, loss_vel_weight,
                                    loss_pos_weight)
    optim = torch.optim.Adam(model.parameters())
    arch_name = architecture.value
    filename_gen = FilenameGenerator(BANKNOTE_CHECKPOINT_DIR,
                                     f"BANKNOTE_{arch_name}_", ".txt")
    trainer = NenwinTrainer(model, loss_funct, optim, filename_gen,
                            input_placer, dataset)
    return trainer, filename_gen
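
A hypothetical call site could look as follows. build_model_and_placer and the way an ARCHITECTURES member is obtained are assumptions, not taken from the source; only create_trainer itself is. The 0.5/0.5 weights mirror the test cases below:

def run_banknote_training(architecture: ARCHITECTURES,
                          dataset: BanknoteDataset) -> NenwinTrainer:
    # build_model_and_placer is a hypothetical setup helper.
    model, input_placer, output_nodes = build_model_and_placer(architecture)
    trainer, filename_gen = create_trainer(model, input_placer, output_nodes,
                                           dataset,
                                           loss_pos_weight=0.5,
                                           loss_vel_weight=0.5,
                                           architecture=architecture)
    return trainer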
Example No. 5
class LossFunctionCallWrongPredTestCase(unittest.TestCase):
    """
    Testcases for NenwinLossFunction.__call__() for when another than the target
    output-MarbleEaterNodes has eaten a Marble.
    """

    def setUp(self):
        """

        Sketch:

         |
        ^|
        y|
         |
        0| N_1        <M        N_0
         |
         |
         +-------------------------------
           -10         0         10   x>


        Marble M starts out moving towards N_1, but should arrive at N_0.

        """
        self.pos_weight = 0.5
        self.vel_weight = 0.5

        self.nodes = (gen_node_at(torch.tensor([10.0, 0])),
                      gen_node_at(torch.tensor([-10.0, 0])))
        self.marble = gen_marble_at(ZERO,
                                    vel=torch.tensor([-3.0, 0]),
                                    datum="original")
        self.model = NenwinModel(self.nodes, [self.marble])

        for _ in range(50):  # Should be enough for the Marble to be eaten
            self.model.make_timestep(0.1)

        assert len(self.model.marbles) == 0, "Testcase badly designed."

        self.loss_fun = NenwinLossFunction(self.nodes, self.model,
                                           vel_weight=self.vel_weight,
                                           pos_weight=self.pos_weight)

        self.target_index = 0
        self.wrong_node_index = 1
        self.loss = self.loss_fun(self.target_index)

    def test_value_loss_wrong_pred_no_marble_left(self):
        """
        The loss should equal the velocity-weighted distance
        (sketched after this example) of the target Node to the nearest Marble,
        minus the reciprocal of the velocity-weighted distance
        of the wrong node (self.nodes[1]) to the eaten Marble.

        Case where no non-eaten Marble is available.
        """

        target_node = self.nodes[self.target_index]
        wrong_node = self.nodes[self.wrong_node_index]
        expected = velocity_weighted_distance(target_node, self.marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        expected -= 1/velocity_weighted_distance(wrong_node, self.marble,
                                                 pos_weight=self.pos_weight,
                                                 vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)

    def test_value_loss_wrong_pred_some_marble_left(self):
        """
        The loss should equal the velocity-weighted distance
        of the target Node to the nearest Marble,
        minus the reciprocal of the velocity-weighted distance
        of the wrong node (self.nodes[1]) to the eaten Marble.

        Case where another non-eaten Marble is still available
        at time of loss computation.
        """
        second_marble = gen_marble_at(ZERO, datum="second")
        self.model.add_marbles([second_marble])
        self.loss = self.loss_fun(self.target_index)

        target_node = self.nodes[self.target_index]
        wrong_node = self.nodes[self.wrong_node_index]
        expected = velocity_weighted_distance(target_node, second_marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        expected -= 1/velocity_weighted_distance(wrong_node, self.marble,
                                                 pos_weight=self.pos_weight,
                                                 vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)

    def test_grads_no_error_wrong_pred(self):
        """
        Case in which the wrong output-Node has eaten the Marble.
        All learnable parameters should have a gradient value
        that does not cause errors with an optimizer.

        Case where another non-eaten Marble is still available
        at time of loss computation.
        """
        second_marble = gen_marble_at(ZERO)
        self.model.add_marbles([second_marble])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        try:
            self.loss.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")

    def test_grads_value_wrong_pred(self):
        """
        Case in which the wrong output-Node has eaten the Marble.
        The learnable parameters of the Marble should receive gradients
        that change the values of the weights:
        the loss should be lower on the second run.
        """
        second_marble = gen_marble_at(ZERO)
        self.model.add_marbles([second_marble])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        self.loss.backward()

        self.assertIsNotNone(self.marble.init_pos.grad)
        self.assertIsNotNone(self.marble.init_vel.grad)
        self.assertIsNotNone(self.marble.mass.grad)

        optim.step()

        # Now verify the loss improved.
        self.model.reset()
        self.model.make_timestep(0.1)
        self.model.add_marbles([second_marble])
        new_loss = self.loss_fun(self.target_index)

        self.assertLess(new_loss.item(), self.loss.item())
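
For reference, here is a minimal sketch of what velocity_weighted_distance might compute, assuming it is a weighted Euclidean combination of the node-Marble position difference and the Marble's velocity; the .pos and .vel attribute names are also assumptions, and the project's actual helper may differ:

import torch

def velocity_weighted_distance_sketch(node, marble,
                                      pos_weight: float,
                                      vel_weight: float) -> torch.Tensor:
    # Minimal sketch, NOT the project's implementation: a weighted
    # combination of the node-marble position offset and the marble's
    # velocity magnitude.
    pos_term = pos_weight * torch.sum((node.pos - marble.pos) ** 2)
    vel_term = vel_weight * torch.sum(marble.vel ** 2)
    return torch.sqrt(pos_term + vel_term)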
Example No. 6
class LossFunctionCallNoPredTestCase(unittest.TestCase):
    """
    Testcases for NenwinLossFunction.__call__() in case none of the
    output-MarbleEaterNodes has eaten any Marble.
    """

    def setUp(self):

        self.pos_weight = 0.5
        self.vel_weight = 0.5

        self.node = gen_node_at(ZERO)
        self.marble = gen_marble_at(torch.tensor([10., 10.]))
        self.model = NenwinModel([self.node], [self.marble])
        self.model.make_timestep(0.1)  # too short to cross the distance

        self.loss_fun = NenwinLossFunction([self.node], self.model,
                                           vel_weight=self.vel_weight,
                                           pos_weight=self.pos_weight)

        assert self.node.num_marbles_eaten == 0, "Testcase badly designed."

        self.target_index = 0
        self.loss = self.loss_fun(self.target_index)

    def test_value_loss_no_output(self):
        """
        If no Marble has been eaten, the loss
        should equal the velocity-weighted distance
        of the target Node to the nearest Marble.
        """
        expected = velocity_weighted_distance(self.node, self.marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)

    def test_grads_no_error_no_output(self):
        """
        Case in which no prediction output is given.
        All learnable parameters should have a gradient value
        that does not cause errors with an optimizer.

        Passes if no error occurs.
        """
        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        try:
            self.loss.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")

    def test_grads_value_no_output(self):
        """
        Case in which no prediction output is given.
        The learnable parameters of the Marble should receive gradients
        that change the values of the weights:
        the loss should be lower on the second run.
        """
        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        self.loss.backward()

        self.assertIsNotNone(self.marble.init_pos.grad)
        self.assertIsNotNone(self.marble.init_vel.grad)
        self.assertIsNotNone(self.marble.mass.grad)

        optim.step()

        # Now verify the loss improved.
        self.model.reset()
        self.model.make_timestep(0.1)
        new_loss = self.loss_fun(self.target_index)

        self.assertLess(new_loss.item(), self.loss.item())
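
Putting the tested cases together, the behaviour of NenwinLossFunction.__call__ asserted above can be summarised in a hedged sketch. It reuses velocity_weighted_distance_sketch from earlier; the argument structure, in particular passing wrong nodes together with the Marbles they ate, is an assumption, not the project's actual code:

def nenwin_loss_sketch(target_node,
                       wrong_nodes_with_eaten_marbles,
                       candidate_marbles,
                       pos_weight: float,
                       vel_weight: float):
    # Attraction term: pull the nearest candidate Marble (the eaten one,
    # if no un-eaten Marble remains) towards the target node, as in
    # LossFunctionCallNoPredTestCase.
    loss = min(velocity_weighted_distance_sketch(target_node, m,
                                                 pos_weight, vel_weight)
               for m in candidate_marbles)
    # Repulsion term: push eaten Marbles away from wrongly-predicting nodes
    # via the negative reciprocal of the weighted distance, as in
    # LossFunctionCallWrongPredTestCase.
    for node, eaten in wrong_nodes_with_eaten_marbles:
        loss = loss - 1 / velocity_weighted_distance_sketch(
            node, eaten, pos_weight, vel_weight)
    return loss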