def test_design_r(self):
    """R(t) should be a (3, 3), grad-tracking, symmetric covariance whose
    cholesky factor reproduces the design's cholesky parameters."""
    design = simple_mv_velocity_design(3)
    batch_design = design.for_batch(2, 1)
    cov = batch_design.R(0)[0]
    self.assertTupleEqual(cov.size(), (3, 3))
    self.assertTrue(cov.requires_grad)
    cholesky_log_diag = design.measure_covariance.param_dict()['cholesky_log_diag']
    cholesky_off_diag = design.measure_covariance.param_dict()['cholesky_off_diag']
    # `.detach()` rather than the deprecated `.data` attribute (consistent
    # with test_equations), so we never alias a tensor that autograd tracks:
    cov = cov.detach().numpy()
    self.assertTrue(np.isclose(cov, cov.T).all(),
                    msg="Covariance is not symmetric.")
    chol = cholesky(cov)
    # diagonal is parameterized on the log scale:
    for a, b in zip(torch.exp(cholesky_log_diag).tolist(), np.diag(chol).tolist()):
        self.assertAlmostEqual(a, b, places=4)
    # off-diagonal params correspond to the strictly-lower triangle:
    for a, b in zip(cholesky_off_diag.tolist(),
                    chol[np.tril_indices_from(chol, k=-1)].tolist()):
        self.assertAlmostEqual(a, b, places=4)
def test_design_f(self):
    """The transition matrix F should not participate in autograd."""
    batch_design = simple_mv_velocity_design().for_batch(num_groups=2, num_timesteps=1)
    F = batch_design.F(0)
    # F doesn't require grad:
    self.assertFalse(F.requires_grad)
def test_equations(self): data = Tensor([[-50., 50., 1.]])[:, :, None] # _design = simple_mv_velocity_design(dims=1) torch_kf = KalmanFilter(processes=_design.processes.values(), measures=_design.measures) batch_design = torch_kf.design.for_batch(1, 1) pred = torch_kf(data) # filter_kf = filterpy_KalmanFilter(dim_x=2, dim_z=1) filter_kf.x = batch_design.initial_mean.detach().numpy().T filter_kf.P = batch_design.initial_covariance.detach().numpy().squeeze( 0) filter_kf.F = batch_design.F(0)[0].detach().numpy() filter_kf.H = batch_design.H(0)[0].detach().numpy() filter_kf.R = batch_design.R(0)[0].detach().numpy() filter_kf.Q = batch_design.Q(0)[0].detach().numpy() filter_kf.states = [] for t in range(data.shape[1]): filter_kf.states.append(filter_kf.x) filter_kf.update(data[:, t, :]) filter_kf.predict() filterpy_states = np.stack(filter_kf.states).squeeze() kf_states = pred.means.detach().numpy().squeeze() for r, c in product(*[range(x) for x in kf_states.shape]): self.assertAlmostEqual(filterpy_states[r, c], kf_states[r, c], places=3)
def test_design_h(self):
    """Applying H to a latent state should pick out the observed components
    (indices 0 and 2 of the 4-dim state, per the expected value below)."""
    batch_design = simple_mv_velocity_design().for_batch(num_groups=1, num_timesteps=1)
    H = batch_design.H(0)
    state = Tensor([[[1.], [-.5], [-1.5], [0.]]])
    observed = H.bmm(state)
    expected = [[[1.0], [-1.5]]]
    self.assertListEqual(list1=observed.tolist(), list2=expected)
def test_design_q(self):
    """Q(t) should track gradients and be symmetric."""
    design = simple_mv_velocity_design()
    batch_design = design.for_batch(num_groups=2, num_timesteps=1)
    # Q requires grad:
    self.assertTrue(batch_design.Q(0).requires_grad)
    # symmetric; `.detach()` rather than the deprecated `.data` attribute
    # (consistent with test_equations):
    design_Q = batch_design.Q(0)[0].detach().numpy()
    self.assertTrue(np.isclose(design_Q, design_Q.T).all(),
                    msg="Covariance is not symmetric.")
def test_update(self):
    """A larger measurement covariance should make the filter trust an
    extreme observation less: the posterior mean moves less toward it."""
    batch_design = simple_mv_velocity_design(dims=1).for_batch(1, 1)

    # a single extreme observation:
    data = Tensor([1000.])[:, None, None]
    obs = data[:, 0, :]

    # baseline belief: zero mean, unit covariance, design's R
    belief_lo = Gaussian(means=torch.zeros((1, 2)), covs=torch.ones((1, 2, 2)))
    belief_lo.compute_measurement(H=batch_design.H(0), R=batch_design.R(0))
    posterior_lo = belief_lo.update(obs=obs)

    # same belief, but measurement variance doubled
    belief_hi = Gaussian(means=torch.zeros((1, 2)), covs=torch.ones((1, 2, 2)))
    belief_hi.compute_measurement(H=batch_design.H(0), R=2 * batch_design.R(0))
    posterior_hi = belief_hi.update(obs=obs)

    # higher R => smaller pull toward the large observation
    self.assertTrue((posterior_hi.means < posterior_lo.means).all())