def test_null_projectors(self):
    """Check that the left/right null projectors annihilate the state tensors."""
    mps = self.mps_0_4.right_canonicalise()
    for site in range(3):
        _, vR = mps.right_null_projector(site, get_vR=True, store_envs=True)
        _, vL = mps.left_null_projector(site, get_vL=True)
        # Contract the environment-dressed site tensor against its null-space
        # tensor: both overlaps must vanish.
        right_overlap = ncon([mps[site] @ ch(mps.r(site)), vR],
                             [[1, -1, 3], [1, -2, 3]])
        left_overlap = ncon([ch(mps.l(site - 1)) @ mps[site], vL],
                            [[1, 3, -1], [1, 3, -2]])
        self.assertTrue(allclose(right_overlap, 0))
        self.assertTrue(allclose(left_overlap, 0))
def right_null_projector(self, get_vR=False):
    """right_null_projector: projector onto the right null space of the
    state tensor, dressed with inv(sqrt(r)) on both sides (mirror of
    left_null_projector).

    :param get_vR: if True, also return the null-space tensor vR.

    Bug fix: the parameter was previously named ``get_vL`` while the body
    tested ``get_vR``, raising NameError on every call.
    """
    _, _, r = self.eigs()
    # Absorb the right environment, swap legs so the reshape flattens
    # (physical, right-virtual) together.
    R_ = sw(c(self[0]) @ r, 0, 1)
    R = R_.reshape(-1, self.d * R_.shape[-1])
    # Null space of R, restored to a (d, D, k) tensor.
    vR = sw(null(R).reshape(self.d, R_.shape[-1], -1), 1, 2)
    pr = ncon([inv(ch(r)), vR, c(vR), inv(ch(r))],
              [[-2, 2], [-1, 1, 2], [-3, 1, 4], [-4, 4]])
    self.vR = vR  # cache for later use (mirrors self.vL in left_null_projector)
    if get_vR:
        return pr, vR
    return pr
def ungauge(A, conj=False):
    """Strip the l/r gauge factors from A's tensor and project it onto the
    null-space tensor vL (optionally conjugating vL)."""
    def maybe_conj(x):
        # Conjugate only when requested.
        if conj:
            return c(x)
        return x

    tens = A.data[0]
    # Number of legs beyond the leading (virtual, physical, virtual) triple.
    n_extra = len(tens.shape[3:])
    out_links = [1, 2, -2] + list(range(-3, -3 - n_extra, -1))
    l, r, vL = A.l, A.r, A.vL
    # Absorb sqrt(r) on the right, then contract sqrt(l)·vL on the left.
    dressed = ncon([tens, ch(r)], [[-1, -2, 1], [1, -3]])
    return ncon([ch(l) @ maybe_conj(vL), dressed], [[1, 2, -1], out_links])
def left_null_projector(self, get_vL=False):
    """left_null_projector: projector onto the left null space,
    | - inv(sqrt(l)) - vL = vL - inv(sqrt(l)) - |, which replaces A in TDVP.

    :param get_vL: if True, also return the null-space tensor vL.
    """
    _, l, _ = self.eigs()
    # Absorb sqrt(l), swap legs so the reshape flattens (physical, left).
    swapped = sw(cT(self[0]) @ ch(l), 0, 1)
    flat = swapped.reshape(-1, self.d * swapped.shape[-1])
    # Null space of the flattened map, restored to (d, D, k).
    vL = null(flat).reshape((self.d, flat.shape[1] // self.d, -1))
    pr = ncon([inv(ch(l)) @ vL, inv(ch(l)) @ c(vL)],
              [[-1, -2, 1], [-3, -4, 1]])
    self.vL = vL  # cache for later use
    return (pr, vL) if get_vL else pr
def optimise(H, D, method='euler', max_iters=500, tol=1e-5):
    """Variationally optimise an iMPS ground state for Hamiltonian H.

    :param H: Hamiltonian, passed through to iMPS.energy / iMPS.dA_dt.
    :param D: bond dimension of the trial iMPS.
    :param method: 'euler' — imaginary-time TDVP with explicit Euler steps;
                   'unitary' — experimental/unfinished (always raises).
    :param max_iters: cap on the number of Euler iterations.
    :param tol: stop when the per-step energy change falls below this.
    :returns: (A, energy) when method='euler'.
    """
    if method == 'euler':
        # NOTE(review): `d` (physical dimension) is read from enclosing
        # scope, not a parameter — confirm it is defined at module level.
        A = iMPS().random(d, D).canonicalise('m')
        δt = -1e-1j  # imaginary-time step: flows towards the ground state
        e_ = A.energy(H)
        for _ in range(max_iters):
            # One explicit-Euler TDVP step, re-canonicalised to mixed gauge.
            A = (A + δt * A.dA_dt(H)).canonicalise('m')
            e = A.energy(H)
            ϵ = abs(e - e_)
            if ϵ < tol:
                break
            e_ = copy(e)
        return A, A.energy(H)
    elif method == 'unitary':
        # Experimental: embed the MPS tensor in a unitary and apply the
        # tangent-space update as a unitary rotation.
        A = iMPS().random(d, D).canonicalise('r')
        As = [transpose(a, [1, 0, 2]) for a in A.data]
        # Flatten each tensor to an isometry, then extend to a full unitary.
        isometries = [a.reshape(-1, prod(a.shape[1:])) for a in As]
        Us = [unitary_extension(Q, 4) for Q in isometries]
        U = Us[0]  # U = (A, vL)
        dA = A.dA_dt(H)
        l, r, vL = dA.l, dA.r, dA.vL  # assume right canonical
        Λl = ch(l)
        Λr = ch(r)
        # Tangent-space coefficients with the gauge factors stripped.
        dx = ungauge(dA)
        ϵ = 1e-1  # finite step size for the rotation
        dA *= ϵ
        dx *= ϵ
        # Hermitian generator with dx on the off-diagonal blocks.
        O1 = z([dx.shape[0]] * 2)
        O2 = z([dx.shape[1]] * 2)
        X = block([[O1, dx.conj().T], [dx, O2]])
        L = direct_sum(eye(D), inv(Λl))
        R = direct_sum(eye(D), inv(Λr))
        Q = expm(-1j * X)
        U = L @ U @ Q @ R  # (A, vL) -> (A+inv(l)-vL-dx-inv(r)-, v') (v' orthogonal)
        # NOTE(review): this branch is unfinished — it prints U U† as a
        # unitarity check and then raises unconditionally; it never returns.
        print(U @ U.conj().T)
        raise Exception
def lines4_16_wo_7(self):
    """Re-run the UKF measurement update (algorithm lines 4-16, skipping the
    motion-propagation line 7) against the landmark selected by
    self.current_landmark.

    Sigma points are regenerated about the current posterior; since the
    motion model was already applied this timestep, Chi_bar is taken
    directly as Chi_x.
    """
    # Generate augmented mean and covariance
    self.mu_a = np.hstack([self.mu.T, np.zeros([1, 4])])
    self.mu_a = np.reshape(self.mu_a, [7, 1])
    self.SIG_a = block_diag(self.SIG, self.M, self.Q)
    # Generate Sigma points
    self.Chi_a = np.hstack([
        self.mu_a,
        self.mu_a + self.gamma * ch(self.SIG_a),
        self.mu_a - self.gamma * ch(self.SIG_a)
    ])
    # Wrap the angle rows of the augmented state.
    # NOTE(review): assumes rows 2 (heading) and 6 (bearing noise) are the
    # angular components — confirm against the augmented-state layout.
    self.Chi_a[[2, 6], :] = wrapper(self.Chi_a[[2, 6], :])
    self.Chi_x = self.Chi_a[0:3, :]   # state sigma points
    self.Chi_u = self.Chi_a[3:5, :]   # control-noise sigma points
    self.Chi_z = self.Chi_a[5:, :]    # measurement-noise sigma points
    # "Line 7" (motion propagation) deliberately skipped: reuse state points.
    self.Chi_bar = self.Chi_x
    self.mu_bar = self.Chi_x @ self.w_m.T
    self.SIG_bar = np.multiply(
        self.w_c,
        (self.Chi_bar - self.mu_bar)) @ (self.Chi_bar - self.mu_bar).T
    # Predict observations at sigma points and compute Gaussian statistics
    self.Z_bar = self.h(self.Chi_bar,
                        self.landmarks[:, self.current_landmark]) + self.Chi_z
    self.z_hat = self.Z_bar @ self.w_m.T
    self.S = np.multiply(
        self.w_c, (self.Z_bar - self.z_hat)) @ (self.Z_bar - self.z_hat).T
    self.SIG_xz = np.multiply(
        self.w_c, (self.Chi_bar - self.mu_bar)) @ (self.Z_bar - self.z_hat).T
    # Update mean and covariance
    self.K = self.SIG_xz @ np.linalg.inv(self.S)
    # Innovation: range difference and wrapped bearing difference.
    z_diff = np.array([
        self.r[self.current_landmark] - self.z_hat[0],
        wrapper(self.ph[self.current_landmark] - self.z_hat[1])
    ])
    self.mu = self.mu_bar + self.K @ z_diff
    self.SIG = self.SIG_bar - self.K @ self.S @ self.K.T
def __init__(self, R2D2, World):
    """Unscented Kalman filter localizing R2D2 against known landmarks.

    Sets up the noise models and UKF weights, generates sigma points for the
    augmented state (pose ⊕ control noise ⊕ measurement noise), applies the
    motion model, and performs one measurement update against landmark 0.

    :param R2D2: robot providing the initial pose, noise parameters
        (alpha1-4, sigma_r, sigma_theta, ts) and measurements.
    :param World: world providing Number_Landmarks and Landmarks.
    """
    # Initial belief: known starting pose with a small diagonal covariance.
    self.mu = np.array([[R2D2.x0], [R2D2.y0], [R2D2.theta0]])
    self.SIG = np.diag([0.1, 0.1, 0.1])
    self.v = 0
    self.omega = 1
    meas = R2D2.calculate_measurements(World.Number_Landmarks, World.Landmarks)
    self.r = meas[0]   # ranges to each landmark
    self.ph = meas[1]  # bearings to each landmark
    self.u = np.array([[self.v], [self.omega]])
    self.current_landmark = 0
    self.landmarks = World.Landmarks
    self.ts = R2D2.ts
    self.alpha1 = R2D2.alpha1
    self.alpha2 = R2D2.alpha2
    self.alpha3 = R2D2.alpha3
    self.alpha4 = R2D2.alpha4
    # Control-noise covariance M (velocity motion model) and measurement-
    # noise covariance Q (range, bearing).
    self.M = np.array(
        [[self.alpha1 * self.v**2 + self.alpha2 * self.omega**2, 0],
         [0, self.alpha3 * self.v**2 + self.alpha4 * self.omega**2]])
    self.Q = np.array([[R2D2.sigma_r**2, 0], [0, R2D2.sigma_theta**2]])
    # Generate augmented mean and covariance.
    # (Fixed: the reshape was previously duplicated.)
    self.mu_a = np.hstack([self.mu.T, np.zeros([1, 4])])
    self.mu_a = np.reshape(self.mu_a, [7, 1])
    self.SIG_a = block_diag(self.SIG, self.M, self.Q)
    # UKF scaling parameters for the 7-dimensional augmented state.
    self.alpha = 0.4
    self.kappa = 4
    self.beta = 2
    self.n = 7
    self.lamb_duh = self.alpha**2 * (self.n + self.kappa) - self.n
    self.gamma = np.sqrt(self.n + self.lamb_duh)
    # Generate Sigma points
    self.Chi_a = np.hstack([
        self.mu_a,
        self.mu_a + self.gamma * ch(self.SIG_a),
        self.mu_a - self.gamma * ch(self.SIG_a)
    ])
    # Wrap the angle rows (heading, bearing noise) into [-pi, pi].
    self.Chi_a[[2, 6], :] = wrapper(self.Chi_a[[2, 6], :])
    self.Chi_x = self.Chi_a[0:3, :]   # state sigma points
    self.Chi_u = self.Chi_a[3:5, :]   # control-noise sigma points
    self.Chi_z = self.Chi_a[5:, :]    # measurement-noise sigma points
    # Pass sigma points through motion model and compute Gaussian statistics
    self.Chi_bar = self.g(self.u + self.Chi_u, self.Chi_x)
    # Mean/covariance weights for the 2n+1 = 15 sigma points (equivalent to
    # the original fill-then-overwrite loop, written directly).
    self.w_m = np.full([1, 15], 1 / (2 * (self.n + self.lamb_duh)))
    self.w_m[0, 0] = self.lamb_duh / (self.n + self.lamb_duh)
    self.w_c = self.w_m.copy()
    self.w_c[0, 0] += 1 - self.alpha**2 + self.beta
    # Predicted mean from the *propagated* sigma points.
    # (Bug fix: previously averaged the unpropagated Chi_x.)
    self.mu_bar = self.Chi_bar @ self.w_m.T
    self.SIG_bar = np.multiply(
        self.w_c,
        (self.Chi_bar - self.mu_bar)) @ (self.Chi_bar - self.mu_bar).T
    # Predict observations at sigma points and compute Gaussian statistics
    self.Z_bar = self.h(self.Chi_bar,
                        self.landmarks[:, self.current_landmark]) + self.Chi_z
    self.z_hat = self.Z_bar @ self.w_m.T
    self.S = np.multiply(
        self.w_c, (self.Z_bar - self.z_hat)) @ (self.Z_bar - self.z_hat).T
    self.SIG_xz = np.multiply(
        self.w_c, (self.Chi_bar - self.mu_bar)) @ (self.Z_bar - self.z_hat).T
    # Update mean and covariance
    self.K = self.SIG_xz @ np.linalg.inv(self.S)
def update(self, mu, SIG, v, omega, r, ph):
    """One full UKF timestep: motion (prediction) update, then sequential
    measurement updates against landmarks 0, 1 and 2.

    :param mu: prior mean (3x1 pose: x, y, theta).
    :param SIG: prior covariance (3x3).
    :param v: commanded linear velocity.
    :param omega: commanded angular velocity.
    :param r: measured range to each landmark.
    :param ph: measured bearing to each landmark.
    :returns: (mu, SIG) posterior mean and covariance.
    """
    self.mu = mu
    self.SIG = SIG
    self.v = v
    self.omega = omega
    self.r = r
    self.ph = ph
    self.u = np.array([[self.v], [self.omega]])
    self.current_landmark = 0
    # Control-noise covariance for the current command (velocity model).
    self.M = np.array(
        [[self.alpha1 * self.v**2 + self.alpha2 * self.omega**2, 0],
         [0, self.alpha3 * self.v**2 + self.alpha4 * self.omega**2]])
    # Generate augmented mean and covariance
    self.mu_a = np.hstack([self.mu.T, np.zeros([1, 4])])
    self.mu_a = np.reshape(self.mu_a, [7, 1])
    self.SIG_a = block_diag(self.SIG, self.M, self.Q)
    # UKF scaling parameters (constant; re-set each call as in the original).
    self.alpha = 0.4
    self.kappa = 4
    self.beta = 2
    self.n = 7
    self.lamb_duh = self.alpha**2 * (self.n + self.kappa) - self.n
    self.gamma = np.sqrt(self.n + self.lamb_duh)
    # Generate Sigma points
    self.Chi_a = np.hstack([
        self.mu_a,
        self.mu_a + self.gamma * ch(self.SIG_a),
        self.mu_a - self.gamma * ch(self.SIG_a)
    ])
    # Wrap the angle rows (heading, bearing noise) into [-pi, pi].
    self.Chi_a[[2, 6], :] = wrapper(self.Chi_a[[2, 6], :])
    self.Chi_x = self.Chi_a[0:3, :]
    self.Chi_u = self.Chi_a[3:5, :]
    self.Chi_z = self.Chi_a[5:, :]
    # Pass sigma points through motion model and compute Gaussian statistics
    self.Chi_bar = self.g(self.u + self.Chi_u, self.Chi_x)
    # Predicted mean from the *propagated* sigma points.
    # (Bug fix: previously averaged the unpropagated Chi_x.)
    self.mu_bar = self.Chi_bar @ self.w_m.T
    self.SIG_bar = np.multiply(
        self.w_c,
        (self.Chi_bar - self.mu_bar)) @ (self.Chi_bar - self.mu_bar).T
    # Predict observations at sigma points and compute Gaussian statistics
    self.Z_bar = self.h(self.Chi_bar,
                        self.landmarks[:, self.current_landmark]) + self.Chi_z
    self.z_hat = self.Z_bar @ self.w_m.T
    self.S = np.multiply(
        self.w_c, (self.Z_bar - self.z_hat)) @ (self.Z_bar - self.z_hat).T
    self.SIG_xz = np.multiply(
        self.w_c, (self.Chi_bar - self.mu_bar)) @ (self.Z_bar - self.z_hat).T
    # Update mean and covariance with landmark 0's range/bearing innovation.
    self.K = self.SIG_xz @ np.linalg.inv(self.S)
    z_diff = np.array([
        self.r[self.current_landmark] - self.z_hat[0],
        wrapper(self.ph[self.current_landmark] - self.z_hat[1])
    ])
    self.mu = self.mu_bar + self.K @ z_diff
    self.SIG = self.SIG_bar - self.K @ self.S @ self.K.T
    # Fold in the remaining landmarks sequentially (no extra motion step).
    self.current_landmark = 1
    self.lines4_16_wo_7()
    self.current_landmark = 2
    self.lines4_16_wo_7()
    return self.mu, self.SIG