def test_add_marbles_empty(self): """ Corner case: add 0 new Marbles. """ marble = generate_dummy_marble() model = NenwinModel([], set([marble])) new_marbles = [] model.add_marbles(new_marbles) expected = set([marble]) self.assertSetEqual(expected, model.marbles)
def test_add_marbles(self): """ Base case: add two marbles, should appear in marbles() getter. Old marbles should remain as well. """ marble = generate_dummy_marble() model = NenwinModel([], set([marble])) new_marbles = (generate_dummy_marble(), generate_dummy_marble()) model.add_marbles(new_marbles) expected = set([marble, *new_marbles]) self.assertSetEqual(expected, model.marbles) self.assertSetEqual(expected, model._NenwinModel__all_particles)
def test_get_params_add_marbles(self): """ Later added marbles should not be registered as submodules. """ marbles = (generate_dummy_marble(), ) nodes = (generate_dummy_node(), ) model = NenwinModel(nodes) model.add_marbles(marbles) # 8 parameters per particle: init_pos, init_vel, init_acc, mass, # and the 4 stiffness / attraction params result = tuple(model.parameters()) print(result) self.assertEqual(len(result), 8)
def test_reset_removed_added_marbles(self): """ NenwinModel.reset() should remove all Marbles added with NenwinModel.add_marbles() """ original_marble = Marble(ZERO, ZERO, ZERO, 10, NewtonianGravity(), datum=None) added_marble = original_marble.copy() assert added_marble is not original_marble model = NenwinModel([], [original_marble]) model.add_marbles([added_marble]) model.reset() self.assertIn(original_marble, model.marbles) self.assertIn(original_marble, model._NenwinModel__all_particles) self.assertNotIn(added_marble, model.marbles) self.assertNotIn(added_marble, model._NenwinModel__all_particles)
class LossFunctionCallWrongPredTestCase(unittest.TestCase):
    """
    Testcases for NenwinLossFunction.__call__() for when another than
    the target output-MarbleEaterNodes has eaten a Marble.
    """

    def setUp(self):
        """
        Sketch:

             ^
          y  |
          0  |  N_1   <M          N_0
             |
             +-------------------------------
               -10     0          10      x>

        Marble M starts with moving towards N_1,
        but should arrive at N_0.
        """
        self.pos_weight = 0.5
        self.vel_weight = 0.5
        self.nodes = (gen_node_at(torch.tensor([10.0, 0])),
                      gen_node_at(torch.tensor([-10.0, 0])))
        self.marble = gen_marble_at(ZERO,
                                    vel=torch.tensor([-3.0, 0]),
                                    datum="original")
        self.model = NenwinModel(self.nodes, [self.marble])

        # Should be enough for the Marble to be eaten
        for _ in range(50):
            self.model.make_timestep(0.1)
        assert len(self.model.marbles) == 0, "Testcase badly designed."

        self.loss_fun = NenwinLossFunction(self.nodes, self.model,
                                           vel_weight=self.vel_weight,
                                           pos_weight=self.pos_weight)
        self.target_index = 0
        self.wrong_node_index = 1
        self.loss = self.loss_fun(self.target_index)

    def test_value_loss_wrong_pred_no_marble_left(self):
        """
        The loss should equal the velocity-weighted distance of the
        target Node to the nearest Marble, plus the *negative
        reciprocal* of the distance of the wrong node (self.nodes[1])
        to the Marble.

        Case where no non-eaten Marble is available.
        """
        target = self.nodes[self.target_index]
        offender = self.nodes[self.wrong_node_index]

        # Attraction term: pull the Marble towards the target Node.
        attraction = velocity_weighted_distance(
            target, self.marble,
            pos_weight=self.pos_weight, vel_weight=self.vel_weight)
        # Repulsion term: penalize proximity to the wrong Node.
        repulsion = 1 / velocity_weighted_distance(
            offender, self.marble,
            pos_weight=self.pos_weight, vel_weight=self.vel_weight)

        torch.testing.assert_allclose(self.loss, attraction - repulsion)

    def test_value_loss_wrong_pred_some_marble_left(self):
        """
        The loss should equal the velocity-weighted distance of the
        target Node to the nearest Marble, plus the *negative
        reciprocal* of the distance of the wrong node (self.nodes[1])
        to the Marble.

        Case where another non-eaten Marble is still available at time
        of loss computation.
        """
        second_marble = gen_marble_at(ZERO, datum="second")
        self.model.add_marbles([second_marble])
        self.loss = self.loss_fun(self.target_index)

        target = self.nodes[self.target_index]
        offender = self.nodes[self.wrong_node_index]

        # Attraction now refers to the still-free second Marble,
        # repulsion still to the originally eaten one.
        attraction = velocity_weighted_distance(
            target, second_marble,
            pos_weight=self.pos_weight, vel_weight=self.vel_weight)
        repulsion = 1 / velocity_weighted_distance(
            offender, self.marble,
            pos_weight=self.pos_weight, vel_weight=self.vel_weight)

        torch.testing.assert_allclose(self.loss, attraction - repulsion)

    def test_grads_no_error_wrong_pred(self):
        """
        Case in which no prediction output is given.

        All learnable parameters should have a gradient value that does
        not cause errors with an optimizer.

        Case where another non-eaten Marble is still available at time
        of loss computation.
        """
        self.model.add_marbles([gen_marble_at(ZERO)])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()
        try:
            self.loss.backward()
            optim.step()
        except RuntimeError as e:
            self.fail(f"Error occurred during backprop/optim step: {e}")

    def test_grads_value_wrong_pred(self):
        """
        Case in which no prediction output is given.

        The learnable parameters of the Marble should have a gradient
        value that does affect the values of the weights.
        The loss should be lower for the second run.
        """
        second_marble = gen_marble_at(ZERO)
        self.model.add_marbles([second_marble])

        optim = torch.optim.Adam(self.model.parameters())
        optim.zero_grad()
        self.loss.backward()

        # Gradients must actually have reached the Marble's parameters.
        self.assertIsNotNone(self.marble.init_pos.grad)
        self.assertIsNotNone(self.marble.init_vel.grad)
        self.assertIsNotNone(self.marble.mass.grad)

        optim.step()

        # Now verify the loss improved.
        self.model.reset()
        self.model.make_timestep(0.1)
        self.model.add_marbles([second_marble])
        new_loss = self.loss_fun(self.target_index)
        self.assertLess(new_loss.item(), self.loss.item())