Example #1
    def test_reset(self):
        # Node and Marble have some arbitrary nonzero initial motion vars.
        node = Node(torch.tensor([1.]), torch.tensor([2.]), torch.tensor([3.]),
                    4, NewtonianGravity(), 1, 1, 1, 1)
        marble = Marble(torch.tensor([1.1]), torch.tensor([2.2]),
                        torch.tensor([3.3]), 4.4, NewtonianGravity(), None)
        model = NenwinModel([node], [marble])
        # Advance the simulation so the motion variables drift away
        # from their initial values.
        for _ in range(3):
            model.make_timestep(10)

        # Spot-check that some of the motion variables have changed.
        self.assertFalse(check_close(node.pos, node.init_pos))
        self.assertFalse(check_close(marble.vel, marble.init_vel))
        self.assertFalse(check_close(marble.acc, marble.init_acc))

        model.reset()

        # Now they should be the original values again.
        self.assertTrue(check_close(node.pos, node.init_pos))
        self.assertTrue(check_close(node.vel, node.init_vel))
        self.assertTrue(check_close(node.acc, node.init_acc))
        self.assertTrue(check_close(marble.pos, marble.init_pos))
        self.assertTrue(check_close(marble.vel, marble.init_vel))
        self.assertTrue(check_close(marble.acc, marble.init_acc))
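
A minimal sketch of the reset semantics this test relies on (hypothetical, reconstructed from the assertions above; the real NenwinModel presumably delegates to its particles):

def reset_particle(particle):
    # Hedged sketch: restore the motion variables from the values the
    # particle was constructed with, as test_reset() expects.
    particle.pos = particle.init_pos.clone()
    particle.vel = particle.init_vel.clone()
    particle.acc = particle.init_acc.clone()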
Example #2
    def test_reset_removed_added_marbles(self):
        """
        NenwinModel.reset() should remove all Marbles
        added with NenwinModel.add_marbles()
        """
        original_marble = Marble(ZERO,
                                 ZERO,
                                 ZERO,
                                 10,
                                 NewtonianGravity(),
                                 datum=None)
        added_marble = original_marble.copy()

        assert added_marble is not original_marble

        model = NenwinModel([], [original_marble])
        model.add_marbles([added_marble])

        model.reset()

        self.assertIn(original_marble, model.marbles)
        self.assertIn(original_marble, model._NenwinModel__all_particles)
        self.assertNotIn(added_marble, model.marbles)
        self.assertNotIn(added_marble, model._NenwinModel__all_particles)
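
A minimal sketch of the bookkeeping that would make this pass (assumed, not the actual NenwinModel source; the name-mangled `_NenwinModel__all_particles` access above points at a private `__all_particles` attribute):

class ModelResetSketch:
    """Hedged illustration: reset() restores the original Marble set and
    discards Marbles added later via add_marbles()."""

    def __init__(self, nodes, initial_marbles):
        self.__nodes = set(nodes)
        self.__original_marbles = set(initial_marbles)
        self.marbles = set(initial_marbles)
        self.__all_particles = self.__nodes | self.__original_marbles

    def add_marbles(self, new_marbles):
        self.marbles.update(new_marbles)
        self.__all_particles.update(new_marbles)

    def reset(self):
        # Forget everything added after construction.
        self.marbles = set(self.__original_marbles)
        self.__all_particles = self.__nodes | self.__original_marbles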
Example #3
class LossFunctionCallWrongPredTestCase(unittest.TestCase):
    """
    Testcases for NenwinLossFunction.__call__() for when an
    output-MarbleEaterNode other than the target has eaten a Marble.
    """

    def setUp(self):
        """

        Sketch:

         |
        ^|
        y|
         |
        0| N_1        <M        N_0
         |
         |
         +-------------------------------
           -10         0         10   x>


        Marble M starts out moving towards N_1, but should arrive at N_0.

        """
        self.pos_weight = 0.5
        self.vel_weight = 0.5

        self.nodes = (gen_node_at(torch.tensor([10.0, 0])),
                      gen_node_at(torch.tensor([-10.0, 0])))
        self.marble = gen_marble_at(ZERO,
                                    vel=torch.tensor([-3.0, 0]),
                                    datum="original")
        self.model = NenwinModel(self.nodes, [self.marble])

        for _ in range(50):  # Should be enough for the Marble to be eaten
            self.model.make_timestep(0.1)

        assert len(self.model.marbles) == 0, "Testcase badly designed."

        self.loss_fun = NenwinLossFunction(self.nodes, self.model,
                                           vel_weight=self.vel_weight,
                                           pos_weight=self.pos_weight)

        self.target_index = 0
        self.wrong_node_index = 1
        self.loss = self.loss_fun(self.target_index)

    def test_value_loss_wrong_pred_no_marble_left(self):
        """
        The loss should equal the velocity-weighted distance
        of the target Node to the nearest Marble,
        plus the *negative reciprocal* of the distance of the wrong node
        (self.nodes[1]) to the Marble.

        Case where no non-eaten Marble is available.
        """

        target_node = self.nodes[self.target_index]
        wrong_node = self.nodes[self.wrong_node_index]
        expected = velocity_weighted_distance(target_node, self.marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        expected -= 1/velocity_weighted_distance(wrong_node, self.marble,
                                                 pos_weight=self.pos_weight,
                                                 vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)

    def test_value_loss_wrong_pred_some_marble_left(self):
        """
        The loss should equal the velocity-weighted distance
        of the target Node to the nearest Marble,
        plus the *negative reciprocal* of the distance of the wrong node
        (self.nodes[1]) to the Marble.

        Case where another non-eaten Marble is still available
        at time of loss computation.
        """
        second_marble = gen_marble_at(ZERO, datum="second")
        self.model.add_marbles([second_marble])
        self.loss = self.loss_fun(self.target_index)

        target_node = self.nodes[self.target_index]
        wrong_node = self.nodes[self.wrong_node_index]
        expected = velocity_weighted_distance(target_node, second_marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        expected -= 1/velocity_weighted_distance(wrong_node, self.marble,
                                                 pos_weight=self.pos_weight,
                                                 vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)
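
    # Hedged reconstruction of the loss rule the two value tests above
    # encode (illustrative only, not the actual NenwinLossFunction source):
    # attraction of the nearest Marble to the target Node, minus the
    # reciprocal of the distance to the wrongly predicting Node.
    @staticmethod
    def _sketch_wrong_pred_loss(target_node, wrong_node, nearest_marble,
                                eaten_marble, pos_weight, vel_weight):
        attraction = velocity_weighted_distance(target_node, nearest_marble,
                                                pos_weight=pos_weight,
                                                vel_weight=vel_weight)
        repulsion = velocity_weighted_distance(wrong_node, eaten_marble,
                                               pos_weight=pos_weight,
                                               vel_weight=vel_weight)
        return attraction - 1 / repulsion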

    def test_grads_no_error_wrong_pred(self):
        """
        Case in which a wrong prediction output is given.
        All learnable parameters should have a gradient value
        that does not cause errors with an optimizer.

        Case where another non-eaten Marble is still available
        at time of loss computation.
        """
        second_marble = gen_marble_at(ZERO)
        self.model.add_marbles([second_marble])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        try:
            self.loss.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")

    def test_grads_value_wrong_pred(self):
        """
        Case in which a wrong prediction output is given.
        The learnable parameters of the Marble should have gradient values
        that actually change the weights when the optimizer steps.
        The loss should be lower on the second run.
        """
        second_marble = gen_marble_at(ZERO)
        self.model.add_marbles([second_marble])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        self.loss.backward()

        self.assertIsNotNone(self.marble.init_pos.grad)
        self.assertIsNotNone(self.marble.init_vel.grad)
        self.assertIsNotNone(self.marble.mass.grad)

        optim.step()

        # Now verify the loss improved.
        self.model.reset()
        self.model.make_timestep(0.1)
        self.model.add_marbles([second_marble])
        new_loss = self.loss_fun(self.target_index)

        self.assertLess(new_loss.item(), self.loss.item())
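
The gen_node_at and gen_marble_at helpers are not shown in these excerpts. Plausible sketches, assuming the constructor signatures seen in Example #1 (the mass, radius, and stiffness values here are made-up placeholders):

def gen_node_at(pos: torch.Tensor) -> MarbleEaterNode:
    # Zero initial velocity and acceleration; the trailing constants mirror
    # the stiffness/attraction arguments passed to Node in Example #1.
    return MarbleEaterNode(pos, ZERO, ZERO, 10,
                           NewtonianGravity(), 1, 1, 1, 1, radius=1)

def gen_marble_at(pos: torch.Tensor,
                  vel: torch.Tensor = ZERO,
                  datum: object = None) -> Marble:
    return Marble(pos, vel, ZERO, 10, NewtonianGravity(), datum=datum)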
Example #4
class LossFunctionCallNoPredTestCase(unittest.TestCase):
    """
    Testcases for NenwinLossFunction.__call__() in case none of the
    output-MarbleEaterNodes has eaten any Marble.
    """

    def setUp(self):
        self.pos_weight = 0.5
        self.vel_weight = 0.5

        self.node = gen_node_at(ZERO)
        self.marble = gen_marble_at(torch.tensor([10., 10.]))
        self.model = NenwinModel([self.node], [self.marble])
        self.model.make_timestep(0.1)  # too short to cross the distance

        self.loss_fun = NenwinLossFunction([self.node], self.model,
                                           vel_weight=self.vel_weight,
                                           pos_weight=self.pos_weight)

        assert self.node.num_marbles_eaten == 0, "Testcase badly designed."

        self.target_index = 0
        self.loss = self.loss_fun(self.target_index)

    def test_value_loss_no_output(self):
        """
        If no Marble has been eaten, the loss
        should equal the velocity-weighted distance
        of the target Node to the nearest Marble.
        """
        expected = velocity_weighted_distance(self.node, self.marble,
                                              pos_weight=self.pos_weight,
                                              vel_weight=self.vel_weight)
        torch.testing.assert_allclose(self.loss, expected)
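
    # Hedged guess at velocity_weighted_distance (assumed, not the real
    # helper): the norm of the weighted position and velocity differences
    # between a Node and a Marble.
    @staticmethod
    def _sketch_velocity_weighted_distance(node, marble,
                                           pos_weight, vel_weight):
        pos_diff = node.pos - marble.pos
        vel_diff = node.vel - marble.vel
        return torch.sqrt(pos_weight * torch.sum(pos_diff ** 2)
                          + vel_weight * torch.sum(vel_diff ** 2))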

    def test_grads_no_error_no_output(self):
        """
        Case in which no prediction output is given.
        All learnable parameters should have a gradient value
        that does not cause errors with an optimizer.

        Passes if no error occurs.
        """
        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        try:
            self.loss.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")

    def test_grads_value_no_output(self):
        """
        Case in which no prediction output is given.
        The learnable parameters of the Marble should have gradient values
        that actually change the weights when the optimizer steps.
        The loss should be lower on the second run.
        """
        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()

        self.loss.backward()

        self.assertIsNotNone(self.marble.init_pos.grad)
        self.assertIsNotNone(self.marble.init_vel.grad)
        self.assertIsNotNone(self.marble.mass.grad)

        optim.step()

        # Now verify the loss improved.
        self.model.reset()
        self.model.make_timestep(0.1)
        new_loss = self.loss_fun(self.target_index)

        self.assertLess(new_loss.item(), self.loss.item())
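
The two gradient tests above follow a pattern that generalizes into a minimal training step. A hedged sketch (names follow the tests; num_steps and the defaults are illustrative):

def train_step(model, loss_fun, target_index, optim,
               timestep=0.1, num_steps=1):
    # Re-run the simulation from the (learnable) initial state.
    model.reset()
    for _ in range(num_steps):
        model.make_timestep(timestep)
    loss = loss_fun(target_index)
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss.item()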