def get_data(self):
    # initial condition is v=0 and x=0
    curr_state = np.array([[0], [0]])
    # inputs and measurements start at timestep 1 since the KF starts at timestep 1
    vtr = np.zeros((1, self.control_inputs.size))
    xtr = np.zeros((1, self.control_inputs.size))
    measurements = np.zeros((1, self.control_inputs.size))
    # updates begin at timestep 1 since timestep 0 is the initial state
    for i in range(1, self.control_inputs.size):
        c_input = self.control_inputs[0, i]
        c_input = np.reshape(c_input, (1, 1))
        # get the next state from the control input
        next_state = mm(self.A, curr_state) + \
            mm(self.B, c_input) + \
            self.make_process_noise()
        # save the ground truth state (v and x)
        vtr[0, i] = next_state[0, 0]
        xtr[0, i] = next_state[1, 0]
        # save the measurement
        measurements[0, i] = \
            mm(self.C, next_state) + self.make_measurement_noise()
        curr_state = next_state
    return vtr, xtr, measurements
def train_XCubed(W, dataset, patchshape, batch_size, iterations):
    for i in range(iterations):
        batch = get_batch(dataset, patchshape, batch_size=batch_size)
        W = normalize_columns(W)
        a = 0.5 * normalize_columns(mm(W.T, batch))**3
        W += mm((batch - mm(W, a)), a.T)
    return W
def get_G_t(v, w, angle, dt, F_x_matrix):
    ratio = v / w
    a = np.array([
        [0, 0, (-ratio * cos(angle)) + (ratio * cos(angle + (w * dt)))],
        [0, 0, (-ratio * sin(angle)) + (ratio * sin(angle + (w * dt)))],
        [0, 0, 0]
    ])
    a = mm(mm(np.transpose(F_x_matrix), a), F_x_matrix)
    return np.identity(a.shape[0]) + a
def calculate_w(reg, x, y):
    d = x.shape[1]
    covar = mm(tp(x), x)
    lambdai = np.diag(np.ones(d) * reg)
    addedmatrix = lambdai + covar
    inverse = inv(addedmatrix)
    rightside = mm(tp(x), y)
    w = mm(inverse, rightside)
    return w
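# Usage sketch (not from the original source): assuming the aliases mm = np.matmul,
# tp = np.transpose and inv = np.linalg.inv used above, calculate_w returns the
# closed-form ridge solution w = (reg*I + X^T X)^(-1) X^T y. The data below is
# synthetic and only illustrates the call.
import numpy as np
from numpy import matmul as mm, transpose as tp
from numpy.linalg import inv

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(50, 3))                 # 50 samples, 3 features
true_w = np.array([[1.0], [-2.0], [0.5]])
y_demo = mm(X_demo, true_w) + 0.1 * rng.normal(size=(50, 1))
w_hat = calculate_w(reg=0.1, x=X_demo, y=y_demo)  # close to true_w for small reg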
def get_batch(X, patchshape, batch_size=200):
    ix = randint(0, X.shape[1] - patchshape[0])
    iy = randint(0, X.shape[2] - patchshape[1])
    iz = randint(0, X.shape[3] - patchshape[2]) if patchshape[2] != X.shape[3] else 0
    B = X[randint(0, X.shape[0], size=batch_size),
          ix:ix + patchshape[0],
          iy:iy + patchshape[1],
          iz:iz + patchshape[2]].reshape(
              batch_size, patchshape[0] * patchshape[1] * patchshape[2])
    B = B.T
    B = B - np.mean(B, axis=0)
    U, S, V = svd(mm(B, B.T))
    ZCAMatrix = mm(U, mm(np.diag(1.0 / np.sqrt(S + 1e-5)), U.T))
    B = mm(ZCAMatrix, B)  # whiten along the feature dimension (columns are patches)
    return B
def animate(i):
    for j in range(len(lm_uncertanties)):
        x_lm = lm_pred_x[j, i]
        y_lm = lm_pred_y[j, i]
        if (x_lm == 0) and (y_lm == 0):
            # haven't seen landmark yet
            continue
        # http://anuncommonlab.com/articles/how-kalman-filters-work/part3.html#ellipses
        sigma = lm_pred_uncertainty[j][:, :, i]
        U, S, _ = np.linalg.svd(sigma)
        C = U * 2 * np.sqrt(S)
        theta = np.linspace(0, 2 * np.pi, 100)
        circle = np.array([cos(theta), sin(theta)])
        e = mm(C, circle)
        e[0, :] += x_lm
        e[1, :] += y_lm
        lm_uncertanties[j].set_data(e[0, :], e[1, :])
    actual_path.set_data(x_tr[:i + 1], y_tr[:i + 1])
    pred_path.set_data(mu_x[:i + 1], mu_y[:i + 1])
    heading.set_data([x_tr[i], x_tr[i] + radius * cos(th_tr[i])],
                     [y_tr[i], y_tr[i] + radius * sin(th_tr[i])])
    particles.set_data(pose_particles[0, :, i], pose_particles[1, :, i])
    robot.center = (x_tr[i], y_tr[i])
    vision_beam.set_center((x_tr[i], y_tr[i]))
    vision_beam.theta1 = np.rad2deg(th_tr[i] - fov_bound)
    vision_beam.theta2 = np.rad2deg(th_tr[i] + fov_bound)
    return (actual_path, pred_path, heading, particles, robot, vision_beam) \
        + tuple(lm_uncertanties)
def matmul(input1, input2):  # matrix product of the two input matrices
    # check that both inputs are 2x2 matrices
    if len(input1) == 2 and len(input2) == 2 and len(input1[0]) == 2 and len(input2[0]) == 2:
        mat_ver = [[cul for cul in v] for v in zip(*input2)]
        result = [[
            input1[n][0] * mat_ver[0][0] + input1[n][1] * mat_ver[0][1],
            input1[n][0] * mat_ver[1][0] + input1[n][1] * mat_ver[1][1]
        ] for n in [0, 1]]  # 2x2 matrix product code written for the week-8 assignment
        # product of the two 2x2 matrices computed with the week-8 function
        print('The product of the two 2x2 matrices is', result)
        return result
    else:
        print('This is not a product of two 2x2 matrices.')
        try:
            result = mm(input1, input2)  # matrix product
            print('The product of the two matrices is', '\n', result)  # print the result
        except Exception:  # exception case where no result can be computed
            print('MatrixValueError')
            print('Check the shapes of the two matrices being multiplied and try again.')
        else:
            return result  # return the product of the two matrices
def low_variance_sampler(chi):
    # don't need the weights on the new particles
    new_particles = np.zeros((chi.shape[0] - 1, chi.shape[1]))
    saved_particle_indices = []
    M = chi.shape[1]
    r = random.uniform(0, 1 / M)
    c = chi[-1, 0]  # the first weight
    i = 0
    for m in range(M):
        U = r + m * (1 / M)
        while U > c:
            i += 1
            c += chi[-1, i]
        new_particles[:, m] = chi[:-1, i]
        saved_particle_indices.append(i)
    # dealing with particle deprivation (not in the original algorithm)
    P = np.cov(chi[:-1, :])
    uniq = np.unique(saved_particle_indices).size  # num. of unique particles in resampling
    if (uniq / M) < .025:  # if we don't have much variety in our resampling
        Q = P / ((M * uniq)**(1 / new_particles.shape[0]))
        # np.random.randn takes dimensions as positional args, not a size keyword
        new_particles += mm(Q, randn(*new_particles.shape))
    return new_particles
def fitness(expander):
    correct = mm(
        np.arange(1, expander.shape[0] + 1).reshape(expander.shape[0], 1),
        np.ones((1, expander.shape[1])))
    sorted_map = np.sort(expander, axis=0)
    fit = sum(sum(np.abs(sorted_map - correct)))
    return fit
def get_mu_bar(prev_mu, v, w, angle, dt, F_x_matrix):
    ratio = v / w
    m = np.array([
        [(-ratio * sin(angle)) + (ratio * sin(angle + (w * dt)))],
        [(ratio * cos(angle)) - (ratio * cos(angle + (w * dt)))],
        [w * dt]
    ])
    return prev_mu + mm(np.transpose(F_x_matrix), m)
def sn(alpha, beta, x):
    d = x.shape[1]
    alphamat = np.diag(np.ones(d) * alpha)
    betamat = beta * mm(tp(x), x)
    add = alphamat + betamat
    sn = inv(add)
    return sn
def __fit_dff_elm(self, X, y, elm):
    assert elm.beta is not None
    assert elm.input_weight is not None
    assert elm.covariance_matrix is not None
    H = self.__sig_activation_function(X, elm)
    H_t = H.transpose()
    ep = y - mm(H, elm.beta)
    ks = mm(mm(H, elm.covariance_matrix), H_t)
    pp = (mm(elm.covariance_matrix, H_t) / (1 + ks)) * ep
    elm.beta += pp
    # Updating other parameters
    if ks > 0:
        eps = elm.l - (1 - elm.l) / ks
        elm.covariance_matrix -= mm(
            mm(mm(elm.covariance_matrix, H_t), H),
            elm.covariance_matrix) / (np.linalg.inv(eps) + ks)
    elm.la = elm.l * (elm.la + (ep * ep) / (1 + ks))
    elm.ny = elm.l * (elm.ny + 1)
    te = (ep * ep) / elm.la
    elm.l = 1 / (1 + (1 + elm.ro) *
                 (np.log(1 + ks) +
                  ((((elm.ny + 1) * te) / (1 + ks + te)) - 1) * (ks / (1 + ks))))
def calculate_mse(w, x, y):
    n = x.shape[0]
    postweights = mm(x, w)
    errorvector = postweights - y
    squarederror = np.square(errorvector)
    mse = np.mean(squarederror)
    return mse
def get_mu_and_sig_bar(dimensionality_bound, w_m, w_c, chi_b_x):
    new_bel = np.zeros((3, 1))
    for pt in range(dimensionality_bound):
        new_bel += w_m[0, pt] * np.reshape(chi_b_x[:, pt], new_bel.shape)
    new_sig = np.zeros((3, 3))
    for pt in range(dimensionality_bound):
        state_diff = np.reshape(chi_b_x[:, pt], new_bel.shape) - new_bel
        new_sig += w_c[0, pt] * mm(state_diff, np.transpose(state_diff))
    return new_bel, new_sig
def gaussian(x, mu, sigma):
    """
    Arguments:
    - x: (m, n)
    - mu: (n,)
    - sigma: (n, n)
    """
    from numpy.linalg import pinv, det
    from numpy import matmul as mm
    from numpy import pi, sqrt, power, exp
    m, n = x.shape
    # reshape for convenience: x -> (m, n, 1), mu -> (n, 1)
    x = x[:, :, None]
    mu = mu[:, None]
    x_T = x.transpose(0, 2, 1)
    mu_T = mu.T
    # (m, 1, 1) -> (m,)
    term = -0.5 * mm(x_T - mu_T, mm(pinv(sigma), x - mu))[:, 0, 0]
    return (1 / (power(2 * pi, n / 2) * sqrt(det(sigma))) * exp(term))
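# Quick check (illustrative, not from the original source): the batched density above
# should match scipy.stats.multivariate_normal evaluated per row of x.
import numpy as np
from scipy.stats import multivariate_normal

x_demo = np.random.randn(5, 2)
mu_demo = np.zeros(2)
sigma_demo = np.array([[1.0, 0.3], [0.3, 2.0]])
p_ours = gaussian(x_demo, mu_demo, sigma_demo)
p_ref = multivariate_normal(mean=mu_demo, cov=sigma_demo).pdf(x_demo)
assert np.allclose(p_ours, p_ref)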
def __init__(self, ts, M=None):
    self.data = ts
    if M is None:
        M = int(len(ts) / 4.0)
    self.M = M
    N = len(ts)
    self.N = N
    # create M dimensional phase spaces
    X = np.zeros([M, N - M + 1])
    X_norm = np.zeros([M, N - M + 1])
    for i in range(M):
        X[i, :] = ts[i:(N - M + 1 + i)]
        X_norm[i, :] = X[i, :] - np.mean(X[i, :])
    self.trajectory = X
    # AUTO COVARIANCE MATRIX
    self.R = mm(X_norm, tp(X_norm)) / (N - M + 1)
    self.cov = np.cov(X)
    # eigen stuff, Principal components
    self.evals, self.evecs = npla.eig(self.R)
    self.PC = mm(tp(self.evecs), X_norm)
    # Reconstructed components: convolve each projected series with its eigenvector
    # (anti-diagonal averaging), then scale to unbias it -- the convolution end points
    # are based on fewer additions.  The uncentered trajectory matrix is projected here
    # so that the reconstructed components sum back to the original series.
    A = mm(tp(self.evecs), X)
    RC = np.zeros([M, N])
    for comp in range(M):
        RCconv = np.convolve(A[comp, :], self.evecs[:, comp])  # length N
        for n in range(N):
            if n < M - 1:
                RC[comp, n] = RCconv[n] / float(n + 1)
            elif n < N - M + 1:
                RC[comp, n] = RCconv[n] / float(M)
            else:
                RC[comp, n] = RCconv[n] / float(N - n)
    self.RC = RC
    assert np.sum(np.abs(np.sum(RC, axis=0) - ts)) < 0.001, "Reconstruction failed"
def getUpdatedCovariance(I, K, H, P):
    res = mm(K, H)
    res = I - res
    res = mm(res, P)
    return res
def getUpdate(K, y): return mm(K, y)
def getInnovation(z, H, x): return z - mm(H, x)
def getKGain(P, H, S):
    res = mm(P, transpose(H))
    res = mm(res, la.pinv(S))
    return res
def getNewX(A, x): return mm(A, x)
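# Illustrative one-step predict/update cycle (not from the original source) wiring the
# KF helpers above together with getCovariance, which appears later in this collection;
# assumes mm = np.matmul, transpose = np.transpose and la = np.linalg, as the helpers do.
import numpy as np
from numpy import matmul as mm, transpose
import numpy.linalg as la

A = np.array([[1.0, 1.0], [0.0, 1.0]])   # constant-velocity model
H = np.array([[1.0, 0.0]])               # position-only measurement
Q = 0.01 * np.eye(2)                     # process noise covariance
R = np.array([[0.5]])                    # measurement noise covariance
x = np.zeros((2, 1))
P = np.eye(2)
z = np.array([[1.2]])                    # one measurement

x = getNewX(A, x)                              # predict state
P = getCovariance(A, P, Q)                     # predict covariance: A P A^T + Q
y = getInnovation(z, H, x)                     # innovation
S = getCovariance(H, P, R)                     # innovation covariance: H P H^T + R
K = getKGain(P, H, S)                          # Kalman gain
x = x + getUpdate(K, y)                        # state update
P = getUpdatedCovariance(np.eye(2), K, H, P)   # covariance update: (I - K H) P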
def normalize_columns(X):
    return mm(X, np.diag(1. / (np.sqrt(np.sum(X**2, axis=0)) + 1e-6)))
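# Sanity check (illustrative only): after normalize_columns, every column has
# approximately unit L2 norm (slightly below 1 because of the 1e-6 regularizer).
import numpy as np
from numpy import matmul as mm

W_demo = 5.0 * np.random.randn(4, 3)
W_unit = normalize_columns(W_demo)
print(np.linalg.norm(W_unit, axis=0))   # ~[1. 1. 1.]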
Batch_size = 64  # Batch size
Q = 1000  # Input size
S = 100  # Number of neurons
a = 10  # Network output size
# ----------------------------------------------------------------------------
a0 = torch.randn(Batch_size, Q, device=device, dtype=dtype)  # inputs
t = torch.randn(Batch_size, a, device=device, dtype=dtype)   # targets
# ----------------------------------------------------------------------------
w1 = torch.randn(Q, S, device=device, dtype=dtype)
w2 = torch.randn(S, a, device=device, dtype=dtype)
learning_rate = 1e-6
# ----------------------------------------------------------------------------
for index in range(10):
    # forward pass: linear layer, ReLU, then a linear (purelin) output layer
    n1 = a0.mm(w1)            # matmul
    a1 = n1.clamp(min=0)      # ReLU clamps everything to a minimum of 0
    n2 = a1.mm(w2)
    a2 = n2                   # purelin output
    loss = (a2 - t).pow(2).sum()
    print(index, loss)
    # backward pass, written out by hand
    grad_y_pred = 2.0 * (a2 - t)
    grad_w2 = a1.t().mm(grad_y_pred)   # .t() transposes a 2D tensor
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[n1 < 0] = 0
    grad_w1 = a0.t().mm(grad_h)
    # gradient descent step on the weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
def main():
    # Independent material properties for Scotchply 1002 in US units
    E11 = 5.6 * (10**6)  # psi
    E22 = 1.2 * (10**6)  # psi
    V12 = 0.26  # unit-less
    V21 = (V12 * E22) / E11  # unit-less
    G12 = 0.6 * (10**6)  # psi

    # Typical strengths of Scotchply 1002 in US units
    SLt = 154 * (10**3)  # psi
    SLc = 88.5 * (10**3)  # psi
    STt = 4.5 * (10**3)  # psi
    STc = 17.1 * (10**3)  # psi
    SLTs = 10.4 * (10**3)  # psi

    # Tsai-Wu Coefficients
    F11 = 1 / (SLt * SLc)
    F22 = 1 / (STt * STc)
    F12 = (-1 / 2) * math.sqrt(F11 * F22)
    F66 = 1 / (SLTs**2)
    F1 = (1 / SLt) - (1 / SLc)
    F2 = (1 / STt) - (1 / STc)

    # [Nxx, Nyy, Nxy, Mxx, Myy, Mxy] in lb/in & in-lb/in
    stress_resultant = np.array([[1000], [0], [0], [0], [0], [0]])

    # Enter the desired ply orientation angles in degrees here:
    angle_in_degrees = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, -45, 90, 90, 90,
        90, -45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    # angle_in_degrees = [0,0,0,0,0,0,0,0,0,0,0,0,45,-45,45,-45,45,-45,90,90,90,90,-45,45,-45,45,-45,45,0,0,0,0,0,0,0,0,0,0,0,0]
    # angle_in_degrees = [0,0,0,0,0,0,0,0,0,0,45,-45,45,-45,45,-45,45,-45,90,90,90,90,-45,45,-45,45,-45,45,-45,45,0,0,0,0,0,0,0,0,0,0]
    # angle_in_degrees = [0,0,0,0,0,0,0,0,45,-45,45,-45,45,-45,45,-45,45,-45,90,90,90,90,-45,45,-45,45,-45,45,-45,45,-45,45,0,0,0,0,0,0,0,0]
    # angle_in_degrees = [0,0,0,0,0,0,45,-45,45,-45,45,-45,45,-45,45,-45,90,90,90,90,90,90,90,90,-45,45,-45,45,-45,45,-45,45,-45,45,0,0,0,0,0,0]
    # angle_in_degrees = [0,0,45,-45,45,-45,45,-45,45,-45,45,-45,45,-45,45,-45,45,-45,90,90,90,90,-45,45,-45,45,-45,45,-45,45,-45,45,-45,45,-45,45,-45,45,0,0]

    N = len(angle_in_degrees)  # number of plies
    t_ply = 0.005  # ply thickness in inches
    h = t_ply * N

    # Number of plies at each angle
    n_0 = angle_in_degrees.count(0)
    n_45 = 2 * angle_in_degrees.count(45)  # Using symmetry to save on processing resources
    n_90 = angle_in_degrees.count(90)

    # Actual percentages of each ply group
    n_0_percent = n_0 / N
    n_45_percent = n_45 / N
    n_90_percent = n_90 / N

    # Distance from laminate mid-plane to outer surfaces of plies
    z0 = -h / 2
    z = [0] * N
    for i in range(N):
        z[i] = (-h / 2) + ((i + 1) * t_ply)

    # Distance from laminate mid-plane to mid-planes of plies
    z_mid_plane = [0] * N
    for i in range(N):
        z_mid_plane[i] = (-h / 2) - (t_ply / 2) + ((i + 1) * t_ply)

    # Ply orientation angle translated to radians to simplify equations below
    angle = [0] * N
    for i in range(N):
        angle[i] = math.radians(angle_in_degrees[i])

    # Stress Transformation (Global-to-Local), pg 112
    T = [0] * N
    for i in range(N):
        T[i] = np.array([[
            cos(angle[i])**2,
            sin(angle[i])**2,
            2 * sin(angle[i]) * cos(angle[i])
        ], [
            sin(angle[i])**2,
            cos(angle[i])**2,
            -2 * sin(angle[i]) * cos(angle[i])
        ], [
            -sin(angle[i]) * cos(angle[i]),
            sin(angle[i]) * cos(angle[i]),
            cos(angle[i])**2 - sin(angle[i])**2
        ]])

    # Strain Transformation (Global-to-Local), pg 113
    T_hat = [0] * N
    for i in range(N):
        T_hat[i] = np.array([[
            cos(angle[i])**2,
            sin(angle[i])**2,
            sin(angle[i]) * cos(angle[i])
        ], [
            sin(angle[i])**2,
            cos(angle[i])**2,
            -sin(angle[i]) * cos(angle[i])
        ], [
            -2 * sin(angle[i]) * cos(angle[i]),
            2 * sin(angle[i]) * cos(angle[i]),
            cos(angle[i])**2 - sin(angle[i])**2
        ]])

    # The local/lamina compliance matrix, pg 110
    S11 = 1 / E11
    S12 = -V21 / E22
    S21 = -V12 / E11
    S22 = 1 / E22
    S33 = 1 / G12
    S = np.array([[S11, S12, 0], [S21, S22, 0], [0, 0, S33]])

    # The local/lamina stiffness matrix, pg 107
    Q_array = lg.inv(S)  # The inverse of the S matrix

    # The global/laminate stiffness matrix, pg 114
    Q_bar_array = [0] * N
    for i in range(N):
        Q_bar_array[i] = mm(lg.inv(T[i]), mm(Q_array, T_hat[i]))

    # Laminate A, B and D matrices (accumulated as numpy arrays)
    A_array = np.zeros((3, 3))
    for i in range(N):
        A_array += Q_bar_array[i] * t_ply
    B_array = np.zeros((3, 3))
    for i in range(N):
        B_array += (1 / 2) * (Q_bar_array[i] * ((z[i]**2) - ((z[i] - t_ply)**2)))
    D_array = np.zeros((3, 3))
    for i in range(N):
        D_array += (1 / 3) * (Q_bar_array[i] * ((z[i]**3) - ((z[i] - t_ply)**3)))

    ABD_array = np.array([[
        A_array[0][0], A_array[0][1], A_array[0][2], B_array[0][0],
        B_array[0][1], B_array[0][2]
    ], [
        A_array[1][0], A_array[1][1], A_array[1][2], B_array[1][0],
        B_array[1][1], B_array[1][2]
    ], [
        A_array[2][0], A_array[2][1], A_array[2][2], B_array[2][0],
        B_array[2][1], B_array[2][2]
    ], [
        B_array[0][0], B_array[0][1], B_array[0][2], D_array[0][0],
        D_array[0][1], D_array[0][2]
    ], [
        B_array[1][0], B_array[1][1], B_array[1][2], D_array[1][0],
        D_array[1][1], D_array[1][2]
    ], [
        B_array[2][0], B_array[2][1], B_array[2][2], D_array[2][0],
        D_array[2][1], D_array[2][2]
    ]])
    ABD_inverse_array = lg.inv(ABD_array)

    # Calculating the mid-plane strains and curvatures
    mid_plane_strains_and_curvatures_array = mm(lg.inv(ABD_array), stress_resultant)

    # Transforming numpy arrays into lists for ease of formatting
    Q = Q_array.tolist()
    Q_bar = [0] * N
    for i in range(N):
        Q_bar[i] = Q_bar_array[i].tolist()
    A = A_array.tolist()
    B = B_array.tolist()
    D = D_array.tolist()
    ABD_inverse = ABD_inverse_array.tolist()
    mid_plane_strains_and_curvatures = mid_plane_strains_and_curvatures_array.tolist()

    # Parsing the mid-plane strains and curvatures apart
    mid_plane_strains = np.array([[mid_plane_strains_and_curvatures[0][0]],
                                  [mid_plane_strains_and_curvatures[1][0]],
                                  [mid_plane_strains_and_curvatures[2][0]]])
    curvatures = np.array([[mid_plane_strains_and_curvatures[3][0]],
                           [mid_plane_strains_and_curvatures[4][0]],
                           [mid_plane_strains_and_curvatures[5][0]]])

    # Global strains at the mid-plane of each ply
    global_strains = [[[0]] * 3] * N
    for i in range(N):
        global_strains[i] = mid_plane_strains + z_mid_plane[i] * curvatures

    # Global stresses at the mid-plane of each ply
    global_stresses = [[[0]] * 3] * N
    for i in range(N):
        global_stresses[i] = mm(Q_bar[i], global_strains[i])

    # Local strains
    local_strains = [[[0]] * 3] * N
    for i in range(N):
        local_strains[i] = mm(T_hat[i], global_strains[i])

    # Local stresses
    local_stresses = [[[0]] * 3] * N
    for i in range(N):
        local_stresses[i] = mm(Q, local_strains[i])

    # Define Tsai-Wu quadratic function coefficients (a*R^2 + b*R + cc = 0)
    a = [0] * N
    for i in range(N):
        a[i] = (F11 * (local_stresses[i][0]**2)) + (
            2 * F12 * local_stresses[i][0] * local_stresses[i][1]) + (
                F22 * (local_stresses[i][1]**2)) + (F66 * (local_stresses[i][2]**2))
    b = [0] * N
    for i in range(N):
        b[i] = (F1 * local_stresses[i][0]) + (F2 * local_stresses[i][1])
    cc = [-1] * N

    # Strength Ratios for Tsai-Wu Criteria
    R_1_array = [0] * N
    for i in range(N):
        R_1_array[i] = (-b[i] + math.sqrt((b[i]**2) - 4 * a[i] * cc[i])) / (2 * a[i])
    R_2 = [0] * N
    for i in range(N):
        R_2[i] = (-b[i] - math.sqrt((b[i]**2) - 4 * a[i] * cc[i])) / (2 * a[i])
    R_1 = [0] * N
    for i in range(N):
        R_1[i] = R_1_array[i].tolist()
    R_TW = min(R_1)

    # Tsai-Wu critical loads
    N_TW_xxc = float(R_TW * stress_resultant[0])

    # Calculating E_xx
    E_xx = (A[0][0] / h) * (1 - ((A[0][1]**2) / (A[0][0] * A[1][1])))

    # Calculating ε_xx and ε_xxc
    e_xx = float((stress_resultant[0]) / (E_xx * h))
    e_xxc = float(e_xx * R_TW[0])

    # Printing ply group percentages
    print('Percent n_0:' + format(n_0_percent, '>9.2f'))
    print('Percent n_45:' + format(n_45_percent, '>8.2f'))
    print('Percent n_90:' + format(n_90_percent, '>8.2f'))
    print("\n# of ply that fails first: " + str(R_1.index(min(R_1)) + 1))

    # Printing the calculated strain at first ply failure
    print("\nThis is the calculated strain for first ply failure under Tsai-Wu:")
    print("ε_xx = " + format(e_xx, '>8.5f'))

    # Printing the Strength Ratio for Tsai-Wu failure
    print("\nThis is the Strength Ratio for the first ply failure under Tsai-Wu Failure Criterion:")
    print("R_TW = " + str(np.round(R_TW[0], 3)))

    # Printing the critical strain
    print("\nThis is the critical strain for first ply failure under Tsai-Wu:")
    print("ε_xxc = " + format(e_xxc, '>8.5f'))
def solve(self):
    """ Solve the finite horizon problem """
    # Compute Pt[...] matrices
    self.Pt[self.N] = self.Qf
    for t in range(self.N, 0, -1):
        atpb = mm(mm(self.A.T, self.Pt[t]), self.B)
        mid = self.R + mm(mm(self.B.T, self.Pt[t]), self.B)
        self.Pt[t - 1] = self.Q + mm(mm(self.A.T, self.Pt[t]), self.A) - \
            mm(mm(atpb, inv(mid)), atpb.T)
    # Compute Kt[...] matrices
    for t in range(0, self.N):
        atpb = mm(mm(self.B.T, self.Pt[t + 1]), self.A)
        mid = self.R + mm(mm(self.B.T, self.Pt[t + 1]), self.B)
        self.Kt[t] = -mm(inv(mid), atpb)
    # Compute optimal input and trajectory
    for t in range(0, self.N):
        self.Ut[t] = mm(self.Kt[t], self.Xt[t])
        self.Xt[t + 1] = mm(self.A, self.Xt[t]) + mm(self.B, self.Ut[t])
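# Standalone sketch (not part of the original class) of the same backward Riccati
# recursion for a scalar system x_{t+1} = a*x_t + b*u_t, to make the update in solve()
# concrete; inv() degenerates to a scalar division here.
import numpy as np

a_s, b_s, q_s, r_s, qf_s, N_s = 1.0, 1.0, 1.0, 1.0, 1.0, 50
P = np.zeros(N_s + 1)
K = np.zeros(N_s)
P[N_s] = qf_s
for t in range(N_s, 0, -1):
    P[t - 1] = q_s + a_s * P[t] * a_s - (a_s * P[t] * b_s)**2 / (r_s + b_s * P[t] * b_s)
for t in range(N_s):
    K[t] = -(b_s * P[t + 1] * a_s) / (r_s + b_s * P[t + 1] * b_s)
# For this long horizon, K[0] approaches the infinite-horizon LQR gain (about -0.618 here).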
def simxfrm(A, B):
    return mm(transpose(B), mm(A, B))


def simxfrmt(A, B):
    return mm(B, mm(A, transpose(B)))
            seen_lm[lm_idx, k] = True
            # initialize mean
            r = z_tr[0, 0]
            bearing = z_tr[1, 0]
            lm_x_bar = bel_x + (r * cos(bearing + bel_theta))
            lm_y_bar = bel_y + (r * sin(bearing + bel_theta))
            lm_loc_estimates_x[lm_idx, k] = lm_x_bar
            lm_loc_estimates_y[lm_idx, k] = lm_y_bar
            # calculate jacobian
            diff_x = lm_x_bar - bel_x
            diff_y = lm_y_bar - bel_y
            H = np.array([[r * diff_x, r * diff_y], [-diff_y, diff_x]])
            H *= 1 / (r * r)
            # initialize covariance
            H_inv = mat_inv(H)
            sigma = mm(H_inv, mm(Q_t, np.transpose(H_inv)))
            lm_sig_i = 2 * lm_idx
            p_sig_i = 2 * k
            lm_uncertanties[lm_sig_i:lm_sig_i + 2, p_sig_i:p_sig_i + 2] = sigma
            # default importance weight
            weight *= p0
        else:
            # measurement prediction
            lm_x_bar = lm_loc_estimates_x[lm_idx, k]
            lm_y_bar = lm_loc_estimates_y[lm_idx, k]
            diff_x = lm_x_bar - bel_x
            diff_y = lm_y_bar - bel_y
            q = (diff_x * diff_x) + (diff_y * diff_y)
            r = np.sqrt(q)
            bearing = arctan2(diff_y, diff_x) - bel_theta
        bel_y = chi_bar_x[1, pt]
        bel_theta = chi_bar_x[2, pt]
        x_diff = lm_x[i] - bel_x
        y_diff = lm_y[i] - bel_y
        q = (x_diff * x_diff) + (y_diff * y_diff)
        Z_bar_t[0, pt] = np.sqrt(q)
        Z_bar_t[1, pt] = arctan2(y_diff, x_diff) - bel_theta
    Z_bar_t += chi_aug[-2:, :]
    z_hat = np.zeros((2, 1))
    for pt in range(two_L_bound):
        z_hat += weights_m[0, pt] * np.reshape(Z_bar_t[:, pt], z_hat.shape)
    S_t = np.zeros((2, 2))
    for pt in range(two_L_bound):
        meas_diff = np.reshape(Z_bar_t[:, pt], z_hat.shape) - z_hat
        S_t += weights_c[0, pt] * mm(meas_diff, np.transpose(meas_diff))
    sigma_t = np.zeros((3, 2))
    for pt in range(two_L_bound):
        state_diff = np.reshape(chi_bar_x[:, pt], mu_bar.shape) - mu_bar
        meas_diff = np.reshape(Z_bar_t[:, pt], z_hat.shape) - z_hat
        sigma_t += weights_c[0, pt] * mm(state_diff, np.transpose(meas_diff))
    # (get the true measurement for the given landmark)
    true_x = x_pos_true[0, t_step]
    true_y = y_pos_true[0, t_step]
    true_theta = theta_true[0, t_step]
    z_true = np.zeros(z_hat.shape)
    x_diff = lm_x[i] - true_x
    y_diff = lm_y[i] - true_y
def mmultiply(left, right): return mm(left, right)
def kalman_filter(self):
    # for plotting
    self.plotter = Plotter(self.times)
    self.plotter.save_iteration_data(self, 0, None)
    self.plotter.x_cov_times.append(0)
    self.plotter.x_cov_vals.append(self.sigma[1, 1])
    for timestep in range(1, self.control_inputs.size):
        c_input = self.control_inputs[0, timestep]
        c_input = np.reshape(c_input, (1, 1))
        z = self.measurements[0, timestep]
        # prediction
        mu_bar = mm(self.A, self.mu) + mm(self.B, c_input)
        sigma_bar = mm(self.A, mm(self.sigma, np.transpose(self.A))) + self.R
        self.plotter.x_cov_times.append(self.times[timestep])
        self.plotter.x_cov_vals.append(sigma_bar[1, 1])
        # correction
        c_transpose = np.transpose(self.C)
        matrix_one = mm(sigma_bar, c_transpose)
        matrix_two = mat_inv(mm(self.C, mm(sigma_bar, c_transpose)) + self.Q)
        k = mm(matrix_one, matrix_two)
        mu = mu_bar + mm(k, z - mm(self.C, mu_bar))
        sigma = mm(np.identity(k.shape[0]) - mm(k, self.C), sigma_bar)
        # update the model's belief for the next filter iteration
        self.mu = mu
        self.sigma = sigma
        self.plotter.save_iteration_data(self, timestep, k)
        self.plotter.x_cov_times.append(self.times[timestep])
        self.plotter.x_cov_vals.append(sigma[1, 1])
def PPCA(Y_mat, d=20):
    """
    Implements probabilistic PCA for data with missing values, using a factorizing
    distribution over hidden states and hidden observations.

    Args:
        Y_mat: (N by D) input numpy ndarray of data vectors
        d:     (int)    dimension of latent space

    Returns:
        Ye: (N by D) expected complete observations (differs from Y if data is missing)

    The fitted quantities are computed internally but not returned by this version:
        ss: (float)  isotropic variance outside subspace
        C:  (D by d) C*C' + I*ss is the covariance model, C has scaled principal directions as cols
        M:  (D by 1) data mean
        X:  (N by d) expected states

    Based on MATLAB code from J.J. VerBeek, 2006. http://lear.inrialpes.fr/~verbeek
    """
    Y = Y_mat.copy()
    N, D = shape(Y)  # N observations in D dimensions (D is number of features, N is samples)
    threshold = 1E-4  # minimal relative change in objective function to continue
    hidden = isnan(Y)
    missing = hidden.sum()

    if (missing > 0):
        M = nanmean(Y, axis=0)
    else:
        M = average(Y, axis=0)

    Ye = Y - repmat(M, N, 1)
    if (missing > 0):
        Ye[hidden] = 0

    # initialize
    C = normal(loc=0.0, scale=1.0, size=(D, d))
    CtC = mm(C.T, C)
    X = mm(mm(Ye, C), inv(CtC))
    recon = mm(X, C.T)
    recon[hidden] = 0
    ss = np.sum((recon - Ye)**2) / (N * D - missing)

    count = 1
    old = np.inf

    # EM Iterations
    while (count):
        Sx = inv(eye(d) + CtC / ss)  # E-step, covariances
        ss_old = ss
        if (missing > 0):
            proj = mm(X, C.T)
            Ye[hidden] = proj[hidden]
        X = mm(mm(Ye, C), Sx / ss)  # E-step: expected values
        SumXtX = mm(X.T, X)  # M-step
        C = mm(mm(mm(Ye.T, X), (SumXtX + N * Sx).T),
               inv(mm((SumXtX + N * Sx), (SumXtX + N * Sx).T)))
        CtC = mm(C.T, C)
        ss = (np.sum((mm(X, C.T) - Ye)**2) + N * np.sum(CtC * Sx) +
              missing * ss_old) / (N * D)
        # rescale the Sx determinant to avoid overflow/underflow in high dimensionality
        Sx_det = np.min(Sx).astype(np.float64)**shape(Sx)[0] * det(Sx / np.min(Sx))
        objective = N * D + N * (D * log(ss) + tr(Sx) - log(Sx_det)) + \
            tr(SumXtX) - missing * log(ss_old)
        rel_ch = np.abs(1 - objective / old)
        old = objective
        count = count + 1
        if (rel_ch < threshold and count > 5):
            count = 0
        # if (dia == True):
        #     print('Objective: %.2f, Relative Change %.5f' % (objective, rel_ch))

    # C = orth(C)
    # covM = cov(mm(Ye, C).T)
    # vals, vecs = eig(covM)
    # ordr = np.argsort(vals)[::-1]
    # vals = vals[ordr]
    # vecs = vecs[:, ordr]
    # C = mm(C, vecs)
    # X = mm(Ye, C)

    # add data mean to expected complete data
    Ye = Ye + repmat(M, N, 1)
    # return C, ss, M, X, Ye
    return Ye
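# Illustrative call (synthetic data, not from the original source): PPCA fills in the
# missing entries of Y while fitting a d-dimensional latent subspace; only the completed
# matrix Ye is returned by this version.
import numpy as np
from numpy import matmul as mm

rng = np.random.default_rng(2)
Z = rng.normal(size=(200, 5))
W_true = rng.normal(size=(5, 30))
Y_demo = mm(Z, W_true) + 0.1 * rng.normal(size=(200, 30))
Y_demo[rng.random(Y_demo.shape) < 0.1] = np.nan   # knock out ~10% of the entries
Y_completed = PPCA(Y_demo, d=5)
assert not np.isnan(Y_completed).any()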
def getCovariance(A, B, E):
    res = mm(A, B)
    res = mm(res, transpose(A))
    res = res + E
    return res
def geigh(A, B):
    X = canorth(B)
    E, V = eigh(simxfrm(A, X))
    return E, mm(X, V)
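# Illustrative check (assumes canorth(B), defined elsewhere, returns a matrix X with
# X^T B X = I, e.g. an inverse square root of B): geigh should then solve the
# generalized symmetric problem A v = e B v, matching scipy.linalg.eigh(A, B).
import numpy as np
import scipy.linalg
from numpy import matmul as mm

rng = np.random.default_rng(1)
M_ = rng.normal(size=(4, 4))
A_demo = M_ + M_.T + 8 * np.eye(4)       # symmetric
B_demo = mm(M_, M_.T) + 4 * np.eye(4)    # symmetric positive definite
E_demo, V_demo = geigh(A_demo, B_demo)
E_ref = scipy.linalg.eigh(A_demo, B_demo, eigvals_only=True)
assert np.allclose(np.sort(E_demo), np.sort(E_ref))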