import time

import numpy as np
import numpy.random as npr

from auto_ks import KalmanSmootherParameters, kalman_smoother


def profile_random_instance(n, p, T):
    """Time the forward and backward passes of the smoother on a random instance."""
    lam = 1e-6
    A = npr.randn(n, n)
    W_neg_sqrt = npr.randn(n, n)
    C = npr.randn(p, n)
    V_neg_sqrt = npr.randn(p, p)
    y = npr.randn(T, p)

    # mark a random 50% of the measurements as known
    K = np.zeros(T * p, dtype=int)
    K[:int(.5 * T * p)] = 1
    np.random.shuffle(K)
    K = K.astype(bool).reshape((T, p))

    # forward pass: solve the smoothing problem
    tic = time.time()
    params = KalmanSmootherParameters(A, W_neg_sqrt, C, V_neg_sqrt)
    xhat, yhat, DT = kalman_smoother(params, y, K, lam)
    toc = time.time()
    forward_time = toc - tic

    # backward pass: DT takes the sensitivities of a scalar function with
    # respect to xhat and yhat
    tic = time.time()
    DT(np.ones(xhat.shape), np.ones(yhat.shape))
    toc = time.time()
    backward_time = toc - tic

    return forward_time * 1000, backward_time * 1000  # milliseconds
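# A hypothetical driver for the profiler above (not part of the original
# script): a minimal sketch that sweeps a few assumed problem sizes and
# prints the timings returned by profile_random_instance.
if __name__ == "__main__":
    np.random.seed(0)
    for n, p, T in [(10, 5, 100), (50, 25, 500), (100, 50, 1000)]:
        fwd_ms, bwd_ms = profile_random_instance(n, p, T)
        print("n=%3d, p=%3d, T=%5d: forward %8.2f ms, backward %8.2f ms" %
              (n, p, T, fwd_ms, bwd_ms))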
def test_kalman_filter_derivative(self):
    np.random.seed(0)
    n = 5
    p = 10
    T = 100
    lam = 1e-10
    for _ in range(10):
        A = npr.randn(n, n)
        W_neg_sqrt = np.eye(n)
        C = npr.randn(p, n)
        V_neg_sqrt = npr.randn(p, p)
        y = npr.randn(T, p)

        # mark a random 70% of the measurements as known
        K = np.zeros(T * p, dtype=int)
        K[:int(.7 * T * p)] = 1
        np.random.shuffle(K)
        K = K.astype(bool).reshape((T, p))

        params = auto_ks.KalmanSmootherParameters(A, W_neg_sqrt, C, V_neg_sqrt)
        xhat, yhat, DT = auto_ks.kalman_smoother(params, y, K, lam)
        f = np.sum(xhat) + np.sum(yhat)

        # backward pass: gradient of f = sum(xhat) + sum(yhat) with respect
        # to the smoother parameters
        dxhat = np.ones(xhat.shape)
        dyhat = np.ones(yhat.shape)
        derivatives = DT(dxhat, dyhat)

        # finite-difference check: step along the gradient and compare the
        # actual increase in f to the first-order prediction eps * ||g||^2
        eps = 1e-6
        xhat_p, yhat_p, _ = auto_ks.kalman_smoother(
            params + eps * derivatives, y, K, lam)
        fp = np.sum(xhat_p) + np.sum(yhat_p)
        increase = fp - f

        dA = derivatives.DA
        dW_neg_sqrt = derivatives.DW_neg_sqrt
        dC = derivatives.DC
        dV_neg_sqrt = derivatives.DV_neg_sqrt
        predicted_increase = eps * (
            np.sum(dA * dA) + np.sum(dW_neg_sqrt * dW_neg_sqrt) +
            np.sum(dC * dC) + np.sum(dV_neg_sqrt * dV_neg_sqrt))
        np.testing.assert_allclose(predicted_increase, increase, atol=1e-5)
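# A sketch (not from the original tests) of how the DT returned by
# kalman_smoother can drive a gradient step on the same scalar objective the
# test uses; the helper name and step size are hypothetical.
def gradient_step(params, y, K, lam, step=1e-4):
    xhat, yhat, DT = auto_ks.kalman_smoother(params, y, K, lam)
    f = np.sum(xhat) + np.sum(yhat)
    # df/dxhat and df/dyhat are all-ones arrays for this objective
    derivatives = DT(np.ones(xhat.shape), np.ones(yhat.shape))
    # move against the gradient to decrease f; the parameter arithmetic
    # follows the params + eps * derivatives pattern exercised above
    return params + (-step) * derivatives, f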
def test_kalman_filter_vs_cvxpy(self):
    np.random.seed(0)
    n = 3
    p = 6
    T = 10
    lam = 1e-10
    for _ in range(5):
        A = npr.randn(n, n)
        W_neg_sqrt = npr.randn(n, n)
        C = npr.randn(p, n)
        V_neg_sqrt = npr.randn(p, p)
        y = npr.randn(T, p)

        # mark a random 70% of the measurements as known
        K = np.zeros(T * p, dtype=int)
        K[:int(.7 * T * p)] = 1
        np.random.shuffle(K)
        K = K.astype(bool).reshape((T, p))

        params = auto_ks.KalmanSmootherParameters(A, W_neg_sqrt, C, V_neg_sqrt)
        xhat, yhat, DT = auto_ks.kalman_smoother(params, y, K, lam)

        # reference solution: solve the same quadratic problem with CVXPY
        xs = cp.Variable((T, n))
        ys = cp.Variable((T, p))
        objective = 0.0
        for t in range(T):
            objective += cp.sum_squares(V_neg_sqrt @ (ys[t] - C @ xs[t]))
            if t < T - 1:
                objective += cp.sum_squares(
                    W_neg_sqrt @ (xs[t + 1] - A @ xs[t]))
        objective += cp.sum_squares(lam * xs) + cp.sum_squares(lam * ys)

        # pin the known measurements to their observed values
        constraints = []
        rows, cols = K.nonzero()
        for t, i in zip(rows, cols):
            constraints += [ys[t, i] == y[t, i]]

        prob = cp.Problem(cp.Minimize(objective), constraints)
        prob.solve(solver='OSQP')

        # OSQP solves to moderate accuracy, so compare with an absolute
        # tolerance rather than the assert_allclose default
        np.testing.assert_allclose(xhat, xs.value, atol=1e-4)
        np.testing.assert_allclose(yhat, ys.value, atol=1e-4)
def test_loss(params):
    # held-out loss: smooth with the test entries hidden, then measure the
    # mean squared error on those held-out entries
    xhat, yhat, _ = auto_ks.kalman_smoother(params, y, K & ~M_test, lam)
    test_loss = np.linalg.norm(yhat[M_test] - y[M_test])**2 / M_test.sum()
    return test_loss
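# `prox` below is defined elsewhere in this script. As a minimal sketch,
# assuming auto_ks.tune calls it as prox(params, t), a valid choice is the
# identity map, which imposes no constraints on the parameters:
def prox_identity(params, t):
    return params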
print("starting test loss:", test_loss(params)) tic = time.time() params, info = auto_ks.tune(params, prox, y, K & ~M_test, lam, M=M, lr=1e-4, verbose=True, niter=50) toc = time.time() print("time", toc - tic, "s") xhat, yhat, _ = auto_ks.kalman_smoother(params, y, K & ~M_test, lam) print("ending test_loss:", test_loss(params)) measurements = np.ma.masked_array(y) measurements[~(K & ~M_test)] = np.ma.masked kf = KalmanFilter(transition_matrices=A, observation_matrices=C, transition_covariance=np.linalg.inv(W_neg_sqrt @ W_neg_sqrt), observation_covariance=np.linalg.inv( V_neg_sqrt @ V_neg_sqrt)) kf = kf.em(measurements, n_iter=10) filtered_state_means, _ = kf.smooth(measurements) yhat_em = filtered_state_means @ C.T loss_em = np.linalg.norm(yhat_em[M_test] - y[M_test])**2 / M_test.sum() print("loss_em:", loss_em)
    return auto_ks.KalmanSmootherParameters(A, W_neg_sqrt, C, V_neg_sqrt), r


# choose missing measurements: hide 20% of the GPS measurements during tuning
gps_meas_idx = np.where(K_gps)[0]
np.random.shuffle(gps_meas_idx)
M = np.zeros(y.shape, dtype=bool)
M[gps_meas_idx[:int(gps_meas_idx.size * .2)], :3] = True

# choose test measurements: hold out the next 20% for evaluation
M_test = np.zeros(y.shape, dtype=bool)
M_test[gps_meas_idx[int(gps_meas_idx.size * .2):int(gps_meas_idx.size * .4)], :3] = True

# evaluate the initial parameters on the held-out test measurements
xhat_initial, yhat_initial, DT = auto_ks.kalman_smoother(
    params_initial, y, K & ~M_test, lam)
loss_initial = np.linalg.norm(yhat_initial[M_test] - y[M_test])**2 / M_test.sum()
print("initial test loss", loss_initial)

# run Kalman smoother auto-tuning
tic = time.time()
params, info = auto_ks.tune(params_initial, prox, y, K & ~M_test, lam, M=M,
                            lr=1e-2, verbose=True, niter=25)
toc = time.time()
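# A sketch of the evaluation that would naturally follow the tuning call
# above (assumed, mirroring the initial-loss computation): re-run the
# smoother with the tuned parameters and report the held-out test loss.
print("tuning time", toc - tic, "s")
xhat_tuned, yhat_tuned, _ = auto_ks.kalman_smoother(params, y, K & ~M_test, lam)
loss_tuned = np.linalg.norm(yhat_tuned[M_test] - y[M_test])**2 / M_test.sum()
print("tuned test loss", loss_tuned)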