def get_first_state(dt, c1_ts, c1_tfs, c2_ts, c2_tfs, hy_ts, hy_tfs):
    """
    Return the first state (mean, covar) to initialize the kalman filter with.
    Assumes that the time-stamps start at a common zero (are on the same time-scale).

    Returns a state b/w t=[0, dt]
    Gives priority to AR-markers. If no ar-markers are found in [0,dt],
    it returns hydra's estimate but with larger covariance.
    """
    ar1 = [c1_tfs[i] for i in xrange(len(c1_ts)) if c1_ts[i] <= dt]
    ar2 = [c2_tfs[i] for i in xrange(len(c2_ts)) if c2_ts[i] <= dt]
    hy = [hy_tfs[i] for i in xrange(len(hy_ts)) if hy_ts[i] <= dt]

    if ar1 != [] or ar2 != []:
        ar1.extend(ar2)
        x0 = state_from_tfms_no_velocity([avg_transform(ar1)])
        I3 = np.eye(3)
        S0 = scl.block_diag(1e-3*I3, 1e-2*I3, 1e-3*I3, 1e-3*I3)
    else:
        assert len(hy) != 0, colorize("No transforms found for KF initialization. Aborting.", "red", True)
        x0 = state_from_tfms_no_velocity([avg_transform(hy)])
        I3 = np.eye(3)
        S0 = scl.block_diag(1e-1*I3, 1e-1*I3, 1e-2*I3, 1e-2*I3)
    return (x0, S0)
def mps_add(*args):
    '''
    Add <KMPS>.

    Parameters
    ----------
    args:
        <MPS> instances to be added.
    '''
    if len(args) <= 1:
        raise ValueError('At least 2 args are required.')
    AL = []
    BL = []
    hndim = args[0].hndim
    na = len(args[0].AL)
    nb = len(args[0].BL)
    nsite = na + nb
    for i in xrange(na):
        if i == 0:
            ai = [concatenate([mps.AL[i][j] for mps in args], axis=1) for j in xrange(hndim)]
        elif i == nsite-1:
            ai = [concatenate([mps.AL[i][j] for mps in args], axis=0) for j in xrange(hndim)]
        else:
            ai = [block_diag(*[mps.AL[i][j] for mps in args]) for j in xrange(hndim)]
        AL.append(ai)
    for i in xrange(nb):
        if i+na == 0:
            bi = [concatenate([mps.BL[i][j] for mps in args], axis=1) for j in xrange(hndim)]
        elif i+na == nsite-1:
            bi = [concatenate([mps.BL[i][j] for mps in args], axis=0) for j in xrange(hndim)]
        else:
            bi = [block_diag(*[mps.BL[i][j] for mps in args]) for j in xrange(hndim)]
        BL.append(bi)
    S = concatenate([mps.S for mps in args])
    return args[0].__class__(AL=AL, BL=BL, S=S)
def compute_energy(self):
    S = np.matrix(la.block_diag(self.S, self.S))
    T = np.matrix(la.block_diag(self.T, self.T))
    V = np.matrix(la.block_diag(self.V, self.V))
    D = np.matrix(np.zeros((self.nsbf, self.nsbf)))
    X = np.matrix(la.inv(la.sqrtm(S)))
    h = T + V
    E0 = 0
    for count in range(self.maxiter):
        F = h + self.vu
        Ft = X * F * X
        e, Ct = la.eigh(Ft)
        C = X * np.matrix(Ct)
        OC = np.matrix(C[:, :self.nelec])
        D = OC * OC.T
        self.vu = np.einsum('upvq,qp->uv', self.G, D)
        E1 = np.sum((np.array(h) + 0.5 * np.array(self.vu)) * np.array(D.T)) + self.V_nuc
        psi4.print_out('Iteration {:<d}  {:.10f}  {:.10f}\n'.format(count, E1, E1-E0))
        if abs(E1 - E0) < self.e_convergence:
            psi4.print_out('\nFinal HF Energy: {:<5.10f}'.format(E1))
            self.C = C
            self.epsi = e
            self.ehf = E1
            break
        else:
            E0 = E1
    else:
        psi4.print_out('\n:(  Does not converge  :(')
def proj_affine(self, P):
    d = P.shape[1]
    N = P.shape[0]
    V = np.array([self.vertices[i].as_np_array() for i in range(self.dim+1)]).T
    Q_i = V.T.dot(V)
    Q = linalg.block_diag(*[Q_i for i in range(N)])
    q = -np.reshape(V.T.dot(P.T).T, (N * (self.dim + 1)))
    A_i = np.ones(self.dim + 1)
    A = linalg.block_diag(*[A_i for i in range(N)])

    ## Z * [alpha; lambda].T = c
    ## lhs of KKT
    n_vars = N * (self.dim + 1)
    n_cnts = N
    Z = np.zeros((n_vars + n_cnts, n_vars + n_cnts))
    Z[:n_vars, :n_vars] = Q
    Z[n_vars:, :n_vars] = A
    Z[:n_vars, n_vars:] = A.T

    ## rhs of KKT
    c = np.zeros(n_vars + n_cnts)
    c[:n_vars] = -q
    c[n_vars:] = np.ones(n_cnts)

    alpha = np.linalg.solve(Z, c)
    alpha = alpha[:n_vars].reshape(N, self.dim + 1)
    P_affine = alpha.dot(V.T)
    return alpha, P_affine
def test_scalar_and_1d_args(self):
    a = block_diag(1)
    assert_equal(a.shape, (1, 1))
    assert_array_equal(a, [[1]])

    a = block_diag([2, 3], 4)
    assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
def vRaman(x, omega=1.0, delta=0.0, epsilon=0.048, phi=4.0/3.0):
    x = np.array(x)
    s = 1
    v = np.outer(omega*np.exp(1.0j*2.0*phi*x), Fx(s)*np.sqrt(2.0)/2.0).reshape(x.size, 2*s+1, 2*s+1)
    v = sLA.block_diag(*[np.triu(v[i]) + np.conjugate(np.triu(v[i], 1)).transpose()
                         for i in range(x.size)])
    v += sLA.block_diag(*[np.diag([epsilon-delta, 0.0, epsilon+delta])]*x.size)
    return v
def compute_giwc_from_forces():
    A = calculate_local_forces_to_gi_matrix()
    CFC_all = block_diag(*[dot(CFC, block_diag(contact.R.T))
                           for contact in contacts for _ in xrange(4)])
    S = span_of_face(CFC_all)
    F = face_of_span(dot(A, S))
    return F
def generate_time_of_use_periods(self):
    """
    time of use periods will be described by NxM indicator matricies
    """
    N = const.DAILY_UNITS
    quarters = self.generate_quarter_hours()
    peak_indicator = [1 if ((t >= const.PEAK_TIME_RANGE[0]) and
                            (t < const.PEAK_TIME_RANGE[1])) else 0
                      for t in quarters]
    # group each window with parentheses: the original grouping parsed as
    # ((a and b) or c) and d, which mixed the two time windows together
    part_peak_indicator = [1 if (((t >= const.PART_PEAK_TIME_RANGE[0][0]) and
                                  (t < const.PART_PEAK_TIME_RANGE[0][1])) or
                                 ((t >= const.PART_PEAK_TIME_RANGE[1][0]) and
                                  (t < const.PART_PEAK_TIME_RANGE[1][1]))) else 0
                           for t in quarters]
    off_peak_indicator = [1 if (((t >= const.OFF_PEAK_TIME_RANGE[0][0]) and
                                 (t < const.OFF_PEAK_TIME_RANGE[0][1])) or
                                ((t >= const.OFF_PEAK_TIME_RANGE[1][0]) and
                                 (t < const.OFF_PEAK_TIME_RANGE[1][1]))) else 0
                          for t in quarters]

    peak_day = np.diag(peak_indicator)
    part_peak = np.diag(part_peak_indicator)
    off_peak_weekday = np.diag(off_peak_indicator)
    off_peak_weekend_off = np.zeros([N, N])  # used for peak, part_peak
    off_peak_weekend_on = np.diag([1]*N)     # used for off_peak

    # each of these will block_diag 5 week day indicators and 2 weekend indicators
    self.peak_mat = block_diag(peak_day, peak_day, peak_day, peak_day, peak_day,
                               off_peak_weekend_off, off_peak_weekend_off)
    self.part_peak_mat = block_diag(part_peak, part_peak, part_peak, part_peak, part_peak,
                                    off_peak_weekend_off, off_peak_weekend_off)
    self.off_peak_mat = block_diag(off_peak_weekday, off_peak_weekday, off_peak_weekday,
                                   off_peak_weekday, off_peak_weekday,
                                   off_peak_weekend_on, off_peak_weekend_on)
    self.all_peak_mat = np.eye(self.horizon)
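
# A minimal standalone sketch (plain NumPy/SciPy, with a hypothetical 4-slot
# day) of how block_diag stitches five weekday indicator matrices and two
# weekend blocks into one weekly matrix, as the method above does:
import numpy as np
from scipy.linalg import block_diag

peak_day_demo = np.diag([0, 1, 1, 0])   # hypothetical day: slots 1-2 are peak
weekend_off_demo = np.zeros((4, 4))

# 5 weekdays followed by 2 weekend days -> 28x28 weekly indicator
week_peak_demo = block_diag(*([peak_day_demo] * 5 + [weekend_off_demo] * 2))
print(week_peak_demo.shape)           # (28, 28)
print(week_peak_demo.diagonal()[:8])  # weekday pattern repeats, weekend is all zero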
def _update_z_dist2(self, g, beta, ifx, lam, alf, mu_ivp):
    gp = self.latentforces[0]
    Cz = [gp.kernel(self.ttc[:, None])]*self.dim.K
    Lz = []
    for c in Cz:
        c[np.diag_indices_from(c)] += 1e-5
        Lz.append(np.linalg.cholesky(c))
    Cz_inv = block_diag(*[cho_solve((L, True), np.eye(L.shape[0])) for L in Lz])

    K = self._K(g, beta, ifx)

    # parameters for the LDS update
    q = 0
    Sigma = np.eye(self.N_data[q]*self.dim.K) / alf
    y = self.y_train_[q]

    Gamma_inv = np.eye(self.dim.N*self.dim.K) * alf  # + Cz_inv / lam
    A = K  # Gamma.dot(K)

    C = np.zeros((self.N_data[q]*self.dim.K, self.dim.N*self.dim.K))
    inds = self.data_inds[q]
    inds = np.concatenate([inds + self.dim.N*k for k in range(self.dim.K)])
    for i in range(C.shape[0]):
        C[i, inds[i]] += 1

    Cz = block_diag(*Cz)
    Sigma = 0.01*C.dot(Cz.dot(C.T))
    Gamma = np.eye(self.dim.N*self.dim.K) / alf
    # Gamma = 0.01*np.linalg.inv(Cz_inv)  # np.linalg.inv(Gamma_inv)

    u1 = np.kron(mu_ivp[0, 0, :], np.ones(self.dim.N))
    u1 = np.zeros(self.dim.N*self.dim.K)
    V1 = np.ones((self.dim.N*self.dim.K, self.dim.N*self.dim.K))
    V1 = 100.*np.eye(V1.shape[0])

    P1 = A.dot(V1.dot(A.T)) + Gamma
    K2 = P1.dot(C.T.dot(np.linalg.inv(C.dot(P1.dot(C.T)) + Sigma)))
    u2 = A.dot(u1) + K2.dot(y - C.dot(A.dot(u1)))
    V2 = (np.eye(K2.shape[0]) - K2.dot(C)).dot(P1)

    J1 = V1.dot(A.T.dot(np.linalg.inv(P1)))
    u1h = u1 + J1.dot(u2 - A.dot(u1))
    V1h = V1 + J1.dot(V2 - P1).dot(J1.T)

    means = (u1h, u2)
    covs = (V1h, V2)
    pwcovs = (J1.dot(V2), )

    return means, covs, pwcovs
def incre_svd():
    """
    Incremental SVD generator, see Matthew Brand,
    Incremental singular value decomposition of uncertain data with missing values
    http://www.cs.wustl.edu/~zhang/teaching/cs517/Spring12/CourseProjects/incremental%20svd%20missing%20value.pdf
    """
    c = yield
    s = np.array([npl.norm(c.astype(float))])
    # s = npl.norm(c.astype(float), axis=1)
    U0 = c / s
    Up = 1.0
    V0 = 1.0
    Vp = 1.0
    Vpi = 1.0
    while True:
        r = len(s)
        U = np.dot(U0, Up)
        V = np.dot(V0, Vp)
        c = yield U, s, V
        if c is None:
            continue
        I = np.identity(r)
        O = np.zeros(r)
        l = np.dot(U.T, c)
        j = c - np.dot(U, l)
        k = npl.norm(j)
        j /= k
        print(k)
        # `trunc` is read from module scope: the truncation threshold below
        # which a new direction is considered negligible
        if k < trunc:
            k = 0
        Q = block_diag(np.diag(s), k)
        Q[:r, -1:] = l
        A, s, B = npl.svd(Q, full_matrices=False)
        B = B.T
        if k < trunc:
            s = s[:-1]
            Up = np.dot(Up, A[:-1, :-1])
            W, w = np.vsplit(B[:, :-1], [r])
            Wi = (I + np.dot(w.T, w) / (1 - np.dot(w, w.T))).dot(W.T)
            Vp = np.dot(Vp, W)
            Vpi = np.dot(Wi, Vpi)
            V0 = np.vstack((V0, np.dot(w, Vpi)))
        else:
            Up = block_diag(Up, 1).dot(A)
            U0 = np.hstack((U0, j))
            V0 = block_diag(V0, 1)
            Vp = np.dot(block_diag(Vp, 1), B)
            Vpi = np.dot(B.T, block_diag(Vpi, 1))
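
# A quick driving sketch for the generator protocol above, assuming it runs in
# the same module as incre_svd (which reads np, npl, block_diag and trunc at
# module scope) and that columns arrive as (n, 1) arrays. The threshold value
# here is an illustrative assumption:
import numpy as np
import numpy.linalg as npl
from scipy.linalg import block_diag

trunc = 1e-12  # assumed module-level truncation threshold used by incre_svd

rng = np.random.default_rng(0)
svd_gen = incre_svd()
next(svd_gen)                                    # prime up to the first yield
U, s, V = svd_gen.send(rng.normal(size=(6, 1)))  # first column
for _ in range(3):                               # feed more columns one at a time
    U, s, V = svd_gen.send(rng.normal(size=(6, 1)))
print(U.shape, s.shape)  # (6, 4) (4,): one singular triple per column so far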
def initialize_covariances(freq, demo_dir):
    """
    Initialize empirical estimates of covariances:

    -- Cameras and the hydra observe just the xyzrpy (no velocities).
    -- Motion covariance is for all 12 variables.
    """
    cam_types = get_cam_types(demo_dir)
    cam_tfms = get_cam_tfms(demo_dir)

    rgbd_cam_xyz_std = [0.005, 0.005, 0.005]  # 5mm
    rgb_cam_xyz_std = [0.2, 0.2, 0.4]         # 20cm in xy, 40cm in z
    hydra_xyz_std = [0.03, 0.03, 0.03]        # 3cm <-- use small std after tps-correction.

    rgbd_cam_rpy_std = np.deg2rad(30)
    rgb_cam_rpy_std = np.deg2rad(90)
    hydra_rpy_std = np.deg2rad(5)

    I3 = np.eye(3)
    rgbd_covar = scl.block_diag(np.diag(np.square(rgbd_cam_xyz_std)),
                                np.square(rgbd_cam_rpy_std)*I3)
    rgb_covar = scl.block_diag(np.diag(np.square(rgb_cam_xyz_std)),
                               np.square(rgb_cam_rpy_std)*I3)
    hydra_covar = scl.block_diag(np.diag(np.square(hydra_xyz_std)),
                                 np.square(hydra_rpy_std)*I3)

    cam_covars = {}
    for cam in cam_types:
        print cam
        if cam == 'camera1':
            if cam_types[cam] == 'rgb':
                cam_covars[cam] = rgb_covar
            else:
                cam_covars[cam] = rgbd_covar
        else:
            for i in xrange(len(cam_tfms)):
                tfm_info = cam_tfms[i]
                if tfm_info['parent'] == 'camera1_link' and tfm_info['child'] == '%s_link'%(cam):
                    # tfm_info is from camera link to camera link
                    tfm_rof1_rof2 = nlg.inv(tfm_link_rof).dot(tfm_info['tfm']).dot(tfm_link_rof)
                    R = scl.block_diag(tfm_rof1_rof2[:3,:3], I3)
                    #R = scl.block_diag(I3, I3)
                    if cam_types[cam] == 'rgb':
                        cam_covars[cam] = R.dot(rgb_covar).dot(R.transpose())
                    else:
                        cam_covars[cam] = R.dot(rgbd_covar).dot(R.transpose())
                    break

    motion_covar = initialize_motion_covariance(freq)
    return (motion_covar, cam_covars, hydra_covar)
def update_matrix_hogwild(J, partition, q):
    n = J.shape[0]
    A, Bs, Cs = split_hogwild(J, partition)

    BinvCs = [np.linalg.solve(B, C) for B, C in zip(Bs, Cs)]
    BinvCqs = [np.linalg.matrix_power(BinvC, q) for BinvC in BinvCs]

    BinvC = block_diag(*BinvCs)
    BinvCq = block_diag(*BinvCqs)
    BinvA = np.vstack([np.linalg.solve(B, A[indices, :])
                       for B, indices in zip(Bs, partition)])

    # TODO write this with (B-C)^{-1} A
    return BinvCq + (np.eye(n) - BinvCq).dot(np.linalg.solve(np.eye(n) - BinvC, BinvA))
def kepler_3d(params, t):
    """One-body Kepler problem in 3D.

    This function simply uses kepler_2d and rotates it into 3D.
    """
    a = params.a
    pb = params.pb
    eps1 = params.eps1
    eps2 = params.eps2
    i = params.i
    lan = params.lan

    p2 = Kepler2DParameters(a=a, pb=pb, eps1=eps1, eps2=eps2, t0=params.t0)
    xv, jac = kepler_2d(p2, t)
    xyv = np.zeros(6)
    xyv[:2] = xv[:2]
    xyv[3:5] = xv[2:]

    jac2 = np.zeros((6, 8))
    # scratch matrix (renamed from `t`, which shadowed the time argument)
    tmp = np.zeros((6, 6))
    tmp[:2] = jac[:2]
    tmp[3:5] = jac[2:]
    jac2[:, :4] = tmp[:, :4]
    jac2[:, -2:] = tmp[:, -2:]

    r_i = np.array([[1, 0, 0],
                    [0, np.cos(i), -np.sin(i)],
                    [0, np.sin(i), np.cos(i)]])
    d_r_i = np.array([[0, 0, 0],
                      [0, -np.sin(i), -np.cos(i)],
                      [0, np.cos(i), -np.sin(i)]])
    r_i_6 = block_diag(r_i, r_i)
    d_r_i_6 = block_diag(d_r_i, d_r_i)

    xyv3 = np.dot(r_i_6, xyv)
    jac3 = np.dot(r_i_6, jac2)
    jac3[:, 4] += np.dot(d_r_i_6, xyv)

    r_lan = np.array([[np.cos(lan), np.sin(lan), 0],
                      [-np.sin(lan), np.cos(lan), 0],
                      [0, 0, 1]])
    d_r_lan = np.array([[-np.sin(lan), np.cos(lan), 0],
                        [-np.cos(lan), -np.sin(lan), 0],
                        [0, 0, 0]])
    r_lan_6 = block_diag(r_lan, r_lan)
    d_r_lan_6 = block_diag(d_r_lan, d_r_lan)

    xyv4 = np.dot(r_lan_6, xyv3)
    jac4 = np.dot(r_lan_6, jac3)
    jac4[:, 5] += np.dot(d_r_lan_6, xyv3)
    return xyv4, jac4
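
# The block_diag(R, R) trick above applies one 3x3 rotation to both the
# position and velocity halves of a 6-vector in a single matrix product;
# a self-contained sketch:
import numpy as np
from scipy.linalg import block_diag

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0],
              [np.sin(theta),  np.cos(theta), 0],
              [0, 0, 1]])
state = np.array([1.0, 0.0, 0.0,   # position
                  0.0, 2.0, 0.0])  # velocity
rotated = block_diag(R, R) @ state
# identical to rotating each half separately
assert np.allclose(rotated[:3], R @ state[:3])
assert np.allclose(rotated[3:], R @ state[3:])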
def compute_giwc_from_wrenches(full=True):
    """
    Compute Gravito-Inertial Wrench Cone (GIWC) from Contact Wrench Cones
    (CWCs).
    """
    global CWC_all
    A = calculate_local_wrench_to_gi_matrix()
    # right vector of CWC_all is the stacked vector w_all of
    # contact wrenches in the *world* frame
    CWC_all = block_diag(*[
        dot(CWC, block_diag(contact.R.T, contact.R.T))
        for contact in contacts])
    S = span_of_face(CWC_all)
    F = face_of_span(dot(A, S))
    return F
def process_covar(self, x_target):
    _, _, w, wT, swT, cwT = self.transition_elements_helper(x_target)
    Q_pos_vel = np.zeros((4, 4))
    Q_pos_vel[self.pos_x, self.pos_x] = 2*(wT - swT)/(w**3)
    Q_pos_vel[self.pos_x, self.vel_x] = (1 - cwT)/(w**2)
    Q_pos_vel[self.pos_x, self.vel_y] = (wT - swT)/(w**2)
    Q_pos_vel[self.vel_x, self.pos_x] = Q_pos_vel[self.pos_x, self.vel_x]
    Q_pos_vel[self.vel_x, self.vel_x] = self.Ts
    Q_pos_vel[self.vel_x, self.pos_y] = -(wT - swT)/(w**2)
    Q_pos_vel[self.pos_y, self.vel_x] = Q_pos_vel[self.vel_x, self.pos_y]
    Q_pos_vel[self.pos_y, self.pos_y] = Q_pos_vel[self.pos_x, self.pos_x]
    Q_pos_vel[self.pos_y, self.vel_y] = Q_pos_vel[self.pos_x, self.vel_x]
    Q_pos_vel[self.vel_y, self.pos_x] = Q_pos_vel[self.pos_x, self.vel_y]
    Q_pos_vel[self.vel_y, self.pos_y] = Q_pos_vel[self.pos_y, self.vel_y]
    Q_pos_vel[self.vel_y, self.vel_y] = self.Ts
    Q_pos_vel *= self.sigma_a**2

    Q_w = (self.sigma_w**2)*self.Ts
    Q = sp_linalg.block_diag(Q_pos_vel, Q_w)
    return Q
def test_multivariate_penalty():
    alphas = [1, 2]
    weights = [1, 1]
    np.random.seed(1)
    x, y, pol = multivariate_sample_data()

    univ_pol1 = UnivariatePolynomialSmoother(x[:, 0], degree=pol.degrees[0])
    univ_pol2 = UnivariatePolynomialSmoother(x[:, 1], degree=pol.degrees[1])

    gp1 = UnivariateGamPenalty(alpha=alphas[0], univariate_smoother=univ_pol1)
    gp2 = UnivariateGamPenalty(alpha=alphas[1], univariate_smoother=univ_pol2)
    mgp = MultivariateGamPenalty(multivariate_smoother=pol, alpha=alphas,
                                 weights=weights)

    for i in range(10):
        params1 = np.random.randint(-3, 3, pol.smoothers[0].dim_basis)
        params2 = np.random.randint(-3, 3, pol.smoothers[1].dim_basis)
        params = np.concatenate([params1, params2])

        c1 = gp1.func(params1)
        c2 = gp2.func(params2)
        c = mgp.func(params)
        assert_allclose(c, c1 + c2, atol=1.e-10, rtol=1.e-10)

        d1 = gp1.deriv(params1)
        d2 = gp2.deriv(params2)
        d12 = np.concatenate([d1, d2])
        d = mgp.deriv(params)
        assert_allclose(d, d12)

        h1 = gp1.deriv2(params1)
        h2 = gp2.deriv2(params2)
        h12 = block_diag(h1, h2)
        h = mgp.deriv2(params)
        assert_allclose(h, h12)
def visualize(self):
    """Visualize the data.

    There are three groups -- a set of [Ca2+] recordings, set of Vm
    recordings for the same cells, and spike times for all cells. I'll
    display the Vm and [Ca2+] in side by side plots. At the same time
    display all the cells - organized in a grid grouped on the basis of
    depth and celltype.
    """
    # Raster plot for the spike trains: we are skipping the animation
    # for the time being.
    if self.spiketrain_dict:
        colorcount = 4
        colorvec = [[color % colorcount + 1.0]
                    for color in range(self.spike_matrix.shape[0])]
        colormatrix = block_diag(*colorvec) / colorcount
        self.image = self.spike_axes.imshow(
            np.dot(self.spike_matrix.transpose(), colormatrix).transpose(),
            interpolation='nearest')
        self.colorbar = self.figure.colorbar(self.image)
        print 'finished spike plot'
    count = 0
    # for key, value in self.vm_dict.items():
    #     self.vm_axes.plot(self.timepoints, value+count*0.2, label=key)
    #     count += 1
    # count = 0
    # for key, value in self.ca_dict.items():
    #     self.ca_axes.plot(self.timepoints, value+count*0.2, label=key)
    #     count += 1
    self.draw()
    self.figure.savefig(self.datafilename + '_.png')
def numericData_Karlgaard():
    dt = 0.1
    k_meas = 10

    w_omega = 1E-13  # [rad^2 / s]
    w_bias = 1E-15   # [rad^2 / s^3]
    v = 7.16*1E-5    # [rad^2]

    P0_sigma = 0.0122 * np.identity(3)
    P0_bias = (2.35*1E-9) * np.identity(3)

    W = la.block_diag(w_omega*np.identity(3), w_bias*np.identity(3))
    R = v * np.identity(3)
    P0 = la.block_diag(P0_sigma, P0_bias)
    X0 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

    return (X0, P0, R, W, dt, k_meas)
def compute_GI_face_forces(contacting_links):
    t0 = time.time()
    S0 = compute_Coulomb_span()
    print "Coulomb span shape:", S0.shape
    nb_links = len(contacting_links)
    nb_forces = 4 * nb_links
    H = block_diag(*([S0] * nb_forces))
    print "H.shape:", H.shape
    AGI = zeros((6, 3 * nb_forces))
    for i, link in enumerate(contacting_links):
        X, Y = get_link_dimensions(link)
        p, R = link.p, link.R
        a = [[+X, +Y, 0], [+X, -Y, 0], [-X, -Y, 0], [-X, +Y, 0]]
        for j in xrange(4):
            # index the corner with the inner loop variable j (the original
            # used a[i], which picks the wrong corner whenever i != j)
            pi = p + dot(R, a[j])
            AGI[:3, 12 * i + 3 * j:12 * i + 3 * (j + 1)] = -R
            AGI[3:, 12 * i + 3 * j:12 * i + 3 * (j + 1)] = -dot(crossmat(pi), R)
    print "AGI.shape:", AGI.shape
    M = dot(AGI, H)        # span for w_GI
    CGI = face_of_span(M)  # face for w_GI
    report("Compute CGI (%d contacts): %f ms" % (
        nb_forces, 1000 * (time.time() - t0)))
    report("CGI shape: %s" % str(CGI.shape))
    return CGI
def update(self):
    # Messages from parents
    m = self.parents[0].message_to_child()
    k = self.parents[1].message_to_child()
    ## m = self.parents[0].message_to_child()[0]
    ## k = self.parents[1].message_to_child()[0]

    if self.observed:
        # Observations of this node
        self.u = gp_posterior_moment_function(m, k, self.x, self.f)
    else:
        x = np.array([])
        y = np.array([])
        # start from an empty covariance so block_diag can grow it
        # (the original referenced V before assignment)
        V = np.empty((0, 0))
        # Messages from children
        for (child, index) in self.children:
            (msg, mask) = child.message_to_parent(index)
            # Ignoring masks and plates..
            # m[0] is the inputs
            x = np.concatenate((x, msg[0]), axis=-2)
            # m[1] is the observations
            y = np.concatenate((y, msg[1]))
            # m[2] is the covariance matrix
            V = linalg.block_diag(V, msg[2])
        self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
        self.x = x
        self.f = y
def prob6():
    """Solve the allocation model problem in 'ForestData.npy'.
    Note that the first three rows of the data correspond to the first
    analysis area, the second group of three rows correspond to the second
    analysis area, and so on.

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective']*-1000)
    """
    data = np.load("ForestData.npy")
    c = matrix(data[:, 3] * -1)
    A = la.block_diag(*[[1.0, 1.0, 1.0] for _ in xrange(7)])
    b = data[::3, 1].copy()
    G = np.vstack((-data[:, 4], -data[:, 5], -data[:, 6], -np.eye(21)))  # flip the inequality signs
    h = np.hstack(([-40000.0, -5.0, -70.0 * 788.0], np.zeros(21)))       # flip the inequality signs

    c = matrix(c)
    A = matrix(A)
    b = matrix(b)
    G = matrix(G)
    h = matrix(h)

    sol = solvers.lp(c, G, h, A, b)
    return np.ravel(sol["x"]), sol["primal objective"] * -1000.0
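
# The block_diag call above builds one "this area's three allocations sum to
# its acreage" equality row per analysis area. A toy version with two areas
# shows the constraint structure:
import numpy as np
from scipy.linalg import block_diag

A_demo = block_diag(*[[1.0, 1.0, 1.0] for _ in range(2)])
print(A_demo)
# [[1. 1. 1. 0. 0. 0.]
#  [0. 0. 0. 1. 1. 1.]]
x_demo = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print(A_demo @ x_demo)  # per-area sums: [ 6. 15.]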
def construct_const_matrix(x_dim, Q, D):
    # --------------------------
    #| 0   0
    #| 0   I
    #|        D-Q  0
    #|        0    D^{-1}
    #|                     I
    #|                       I
    #|                         0
    # --------------------------
    # Construct B1
    B1 = zeros((2*x_dim, 2*x_dim))
    B1[x_dim:, x_dim:] = eye(x_dim)

    # Construct B2
    B2 = zeros((2*x_dim, 2*x_dim))
    B2[:x_dim, :x_dim] = D - Q
    B2[x_dim:, x_dim:] = pinv(D)

    # Construct B3
    B3 = eye(x_dim)

    # Construct B4
    B4 = eye(x_dim)

    # Construct B5
    B5 = zeros((x_dim, x_dim))

    # Construct B6
    B6 = zeros((1, 1))

    # Construct Block matrix
    h = block_diag(B1, B2, B3, B4, B5, B6)
    return h
def calculate_expanded_base_transformation_matrix(src_base, dst_base, src_order, dst_order, use_eye=False):
    """
    constructs a transformation matrix from basis given by 'src_base' to
    basis given by 'dst_base' that also transforms all temporal derivatives
    of the given weights.

    :param src_base: the source basis, given by an array of BaseFractions
    :param dst_base: the destination basis, given by an array of BaseFractions
    :param src_order: temporal derivative order available in src
    :param dst_order: temporal derivative order needed in dst
    :param use_eye: use identity as base transformation matrix
    :return: transformation matrix as 2d np.ndarray
    """
    if src_order < dst_order:
        raise ValueError("higher derivative order needed than provided!")

    # build core transformation
    if use_eye:
        core_transformation = np.eye(src_base.size)
    else:
        core_transformation = calculate_base_transformation_matrix(src_base, dst_base)

    # build block matrix
    part_transformation = block_diag(*[core_transformation for i in range(dst_order + 1)])
    complete_transformation = np.hstack(
        [part_transformation] + [np.zeros((part_transformation.shape[0], src_base.size))
                                 for i in range(src_order - dst_order)])
    return complete_transformation
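
# A small numeric sketch of the block layout this produces: with a stand-in
# 2x2 core T, dst_order=1 and src_order=2, the result is block_diag(T, T)
# zero-padded for the one extra source derivative that is dropped:
import numpy as np
from scipy.linalg import block_diag

T_core = np.array([[1., 2.],
                   [3., 4.]])                        # stand-in core transformation
part = block_diag(T_core, T_core)                    # one copy per derivative kept (dst_order + 1 = 2)
full = np.hstack([part, np.zeros((4, 2))])           # zero block for the unused source derivative
print(full.shape)  # (4, 6)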
def penalty_matrix(self, alpha=None):
    """penalty matrix for generalized additive model

    Parameters
    ----------
    alpha : list of floats or None
        penalty weights

    Returns
    -------
    penalty matrix
        block diagonal, square penalty matrix for quadratic penalization.
        The number of rows and columns are equal to the number of
        parameters in the regression model ``k_params``.

    Notes
    -----
    statsmodels does not support backwards compatibility when keywords are
    used as positional arguments. The order of keywords might change.
    We might need to add a ``params`` keyword if the need arises.
    """
    if alpha is None:
        alpha = self.alpha

    s_all = [np.zeros((self.start_idx, self.start_idx))]
    for i in range(self.k_variables):
        s_all.append(self.gp[i].penalty_matrix(alpha=alpha[i]))

    return block_diag(*s_all)
def __init__(self, *arg):
    super(Frame2D, self).__init__(*arg)
    self.n_intps = arg[3] if len(arg) > 3 else 5
    self.dim = 2
    self.dof = 6
    self.local_dof = 3  # local degrees of freedom of the element
    self.strain = np.zeros(self.local_dof)
    # compute integration point locations and weight coefficients
    xi, w = p_roots(self.n_intps)
    xi = xi.real
    self.loc_intps, self.wf_intps = 0.5*(xi + 1.0), 0.5*w  # local coordinates of the integration points
    self.x = self.length*self.loc_intps
    # section at each integration point
    self.intps = []
    # strain-displacement transformation matrix at each integration point
    self.Bx = []
    # axial strain at each integration point
    self.epslnx = np.zeros(self.n_intps)
    # curvature at each integration point
    self.kappax = np.zeros(self.n_intps)
    # loop over all integration points
    for i in xrange(self.n_intps):
        self.intps.append(deepcopy(self.section))
        xi = self.loc_intps[i]
        self.Bx.append(spl.block_diag([1.], [6.*xi - 4., 6.*xi - 2]))
    # assemble the element stiffness
    self.get_stiffness()
    self.get_trans_matrix()
    self.get_stiffness_matrix()
def propagateRLHamiltonian(t, k, omega, delta, epsilon, U, n):
    t = np.array(t)
    psi0 = rampedOnPsi(k, omega, delta, epsilon, U, n)
    # Energy1, V1 = LA.eig(RamanLatHamiltonian(0.0, 0.0, 0.0, 0.0, U, n))
    # sort = np.argsort(Energy1)
    # V1sorted = V1[:, sort]
    # psi0 = V1sorted[:, 0]
    # psi0[np.divide(3*n, 2)] = 1.0 + 0.0*1j
    H = RamanLatHamiltonian(k, omega, delta, epsilon, U, n)
    Energy, V = LA.eig(H)

    V = V + 1j*0.0
    Vinv = np.conjugate(np.transpose(V))

    # np.outer(t, Energy).flatten() creates a matrix for all t
    U = np.diag(np.exp(-1j*np.outer(t, Energy).flatten()))

    a = np.dot(Vinv, psi0)
    # This repeats a so that the shape is consistent with U
    aa = np.outer(np.ones(t.size), a).flatten()

    # Have to add the transpose to make shapes match
    b = np.dot(U, aa)
    # Same block diagonal trick for eigenvector matrix
    VV = sLA.block_diag(*([V]*t.size))
    psi = np.dot(VV, b)

    pops = np.absolute(psi)**2.0
    # Since you want the first value, need to take every 3rd row
    # and extract the values you want from the diagonal
    latPops = np.sum(pops.reshape(t.size, n, 3)[:, np.divide(n, 2)-1:np.divide(n, 2)+2, :],
                     axis=2).flatten()
    # populations in the -2k_L, 0, and +2k_L lattice sites, summed over spin
    # sites, in time step blocks
    spinPops = np.sum(pops.reshape(t.size, n, 3), axis=1).flatten()
    # populations in each spin state, summed over lattice sites, in time step blocks
    return spinPops
def __init__(self, n_lanes, proc_noise_scale, meas_noise_scale, process_cov_parallel=0, proc_noise_type='white'):
    self.n_lanes = n_lanes
    self.meas_size = 4 * self.n_lanes
    self.state_size = self.meas_size * 2
    self.contr_size = 0

    self.kf = cv2.KalmanFilter(self.state_size, self.meas_size, self.contr_size)
    self.kf.transitionMatrix = np.eye(self.state_size, dtype=np.float32)
    self.kf.measurementMatrix = np.zeros((self.meas_size, self.state_size), np.float32)
    for i in range(self.meas_size):
        self.kf.measurementMatrix[i, i*2] = 1

    if proc_noise_type == 'white':
        block = np.matrix([[0.25, 0.5],
                           [0.5, 1.]], dtype=np.float32)
        self.kf.processNoiseCov = block_diag(*([block] * self.meas_size)) * proc_noise_scale
    if proc_noise_type == 'identity':
        self.kf.processNoiseCov = np.eye(self.state_size, dtype=np.float32) * proc_noise_scale
    for i in range(0, self.meas_size, 2):
        for j in range(1, self.n_lanes):
            self.kf.processNoiseCov[i, i+(j*8)] = process_cov_parallel
            self.kf.processNoiseCov[i+(j*8), i] = process_cov_parallel

    self.kf.measurementNoiseCov = np.eye(self.meas_size, dtype=np.float32) * meas_noise_scale

    self.kf.errorCovPre = np.eye(self.state_size)

    self.meas = np.zeros((self.meas_size, 1), np.float32)
    self.state = np.zeros((self.state_size, 1), np.float32)

    self.first_detected = False
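
# The 'white' branch above tiles one 2x2 position/velocity noise block per
# measured coordinate. A standalone sketch of the resulting structure
# (scale factor 0.1 is an illustrative assumption):
import numpy as np
from scipy.linalg import block_diag

noise_block = np.array([[0.25, 0.5],
                        [0.5,  1.0]], dtype=np.float32)
Q_demo = block_diag(*([noise_block] * 4)) * 0.1  # 4 coordinates -> 8x8
print(Q_demo.shape)    # (8, 8)
print(Q_demo[:2, :2])  # one scaled block; entries outside the blocks are zero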
def build_correlation_matrix(params):
    """
    Parameters
    ----------
    params : dict of dicts
        First set of keys - type of group interaction and strength
        Second set of keys - attributes for group
            strength : float
                strength of interaction
            func : function
                generator function
            dim : int
                number of individuals per interaction
            data : np.array
                count table
            truth : np.array
                adjacency matrix
            name : str
                name of the interaction

    Returns
    -------
    np.array
        The truth correlation matrix
    """
    return block_diag(*[params[k]['truth'] for k in params.keys()])
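
# A toy call of the function above, with two hypothetical groups whose
# 'truth' blocks are combined along the diagonal:
import numpy as np

params_demo = {
    'groupA': {'truth': np.ones((2, 2))},  # hypothetical group blocks
    'groupB': {'truth': np.eye(3)},
}
C_demo = build_correlation_matrix(params_demo)
print(C_demo.shape)  # (5, 5): blocks on the diagonal, zeros elsewhere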
def fit_res(line_fname, inames):
    line_names = load_pyfile(line_fname)
    print(inames)
    peaks, covs = zip(*(load_data(f, line_names) for f in inames))
    peaks = [p for p in peaks if p]
    l = len(peaks)
    for i, ps in enumerate(peaks):
        ary = zeros(l - 1)
        if i != 0:
            ary[i - 1] = 1
        for p in ps:
            p['coeff'] = r_[p['coeff'], ary]
    peaks = unpack_lists(peaks)
    pos, coeff = zip(*((p['pos_i'], p['coeff']) for p in peaks))
    pos = array(pos)
    coeff = array(coeff)
    cov = block_diag(*covs)
    res = fit_lin_comb(coeff, pos, cov)
    para_a = res.a
    para_s = res.s
    # print(a_pm_s([r, r_s]))
    print(a_pm_s([para_a[:7], para_s[:7]]))
    print(a_pm_s([pos, sqrt(diag(cov))]))
    print(a_pm_s([res.b_fit, abs(res.b_e)]))
def run_smoother():
    dt = 1/30.
    N = 3000
    Ts_bh, Ts_bg, T_gh, Ts_bg_gh, X_bh, X_bg_gh = load_data()
    Ts_bh = Ts_bh[10:N]
    X_bh = X_bh[:, 10:N]
    X_bg_gh = X_bg_gh[:, 10:N]

    ## initialize the kalman belief:
    x_init = X_bg_gh[:, 0]
    I3 = np.eye(3)
    S_init = scl.block_diag(1e-6*I3, 1e-3*I3, 1e-4*I3, 1e-1*I3)

    ## run the kalman filter:
    X_kf, S_kf = run_kalman(Ts_bh, x_init, S_init, 1./dt)

    ## load the noise covariance matrices:
    covar_mats = cPickle.load(open(hd_path + '/hd_track/data/old/pr2-hydra-kinect-covars-xyz-rpy.cpickle'))
    R = covar_mats['process']

    A, _ = kalman().get_motion_mats(dt)
    X_s, _ = smoother(A, 4e2*np.eye(12), X_kf, S_kf)
    X_s = np.array(X_s).T[0, :, :]

    X_kf = np.array(X_kf)
    X_kf = np.reshape(X_kf, (X_kf.shape[0], X_kf.shape[1])).T

    ## plot the results:
    print X_kf.shape, X_bh.shape, X_bg_gh.shape
    plot_smoother(X_s, X_kf[:, 1:], X_bh, X_bg_gh)
    plt.show()
def test_dtype(self):
    x = block_diag([[1.5]])
    assert_equal(x.dtype, float)

    x = block_diag([[True]])
    assert_equal(x.dtype, bool)
def test_mixed_dtypes(self):
    actual = block_diag([[1]], [[1j]])
    desired = np.array([[1, 0], [0, 1j]])
    assert_array_equal(actual, desired)
def controlled(self):
    return MatrixOperation(
        np.matrix(block_diag(np.eye(self.matrix.shape[0]), self.matrix)))
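
# This is the standard controlled-U construction: the identity block acts
# when the control qubit is |0>, and U when it is |1>. A quick check of the
# math behind it, using U = X, which should reproduce CNOT:
import numpy as np
from scipy.linalg import block_diag

X_gate = np.array([[0, 1],
                   [1, 0]])
CNOT_gate = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 0, 1],
                      [0, 0, 1, 0]])
assert np.array_equal(block_diag(np.eye(2, dtype=int), X_gate), CNOT_gate)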
x = np.vstack((K.T, dual_l))
y = L.T

# Get gradients and hessians for local NE computation PRIMAL
DK, DL, DKL = gradient(50, 250, A, B, C, Q, Ru, Rw, K, L, T)
Dlambda = Df_lambda(Lambda, L, Q, q, Rw, nx)
DfL = Df_L(Lambda, L, Q, q, Rw, nx)
DflL = Df_lambda_L(Lambda, L, Q, q, Rw, nx).reshape((nx**2, nx))

# Dx, Dy are all column vectors PRIMAL
Dx = np.vstack((DK.reshape((nx, 1)), Dlambda))
Dy = DL + DfL
Dy = Dy.T
Dxy = np.vstack((DKL, DflL))
Dyx = Dxy.T
hessian = LA.block_diag(np.eye(nx), D2P(Lambda, nx).reshape((nx**2, nx**2)))
hessian_inv = np.linalg.inv(hessian)

# Local NE PRIMAL
Jx = np.linalg.inv(1 / eta_x * hessian + eta_y * np.matmul(Dxy, Dyx))
Jy = np.linalg.inv(1 / eta_y * np.eye(nx) +
                   eta_x * np.matmul(np.matmul(Dyx, hessian_inv), Dxy))
del_x = -np.matmul(
    Jx, (Dx + eta_y * np.matmul(np.matmul(Dxy, np.eye(nx)), Dy)))
del_y = np.matmul(
    Jy, (Dy - eta_x * np.matmul(np.matmul(Dyx, hessian_inv), Dx)))

# Storing CMD iteration to lists before updating
Dlambda_list = np.hstack((Dlambda_list, Dlambda))
lambda_list = np.hstack((lambda_list, Lambda))
L_list = np.hstack((L_list, L.reshape((nx, nw))))
def setup(self):
    self._var_names = {}

    num_seg = self.options['num_segments']
    rk_data = rk_methods[self.options['method']]
    self._A = block_diag(rk_data['A'])
    self._num_stages = rk_data['num_stages']

    for name, options in iteritems(self.options['state_options']):
        shape = options['shape']
        units = options['units']

        self._var_names[name] = {}
        self._var_names[name]['initial'] = 'initial_states_per_seg:{0}'.format(name)
        self._var_names[name]['k'] = 'k:{0}'.format(name)
        self._var_names[name]['predicted'] = 'predicted_states:{0}'.format(name)

        self.add_input(
            self._var_names[name]['initial'],
            shape=(num_seg,) + shape,
            units=units,
            desc='The initial value of the state at the start of the segment.')

        self.add_input(
            self._var_names[name]['k'],
            shape=(num_seg, self._num_stages,) + shape,
            units=units,
            desc='RK multiplier k for each stage in the segment.')

        self.add_output(
            self._var_names[name]['predicted'],
            shape=(num_seg * self._num_stages,) + shape,
            units=units,
            desc='The predicted values of the state at the ODE evaluation points.')

        e = np.eye(np.prod(shape))
        p = np.kron(np.ones(self._num_stages), e).T
        p = block_diag(*num_seg * [p])
        r, c = np.nonzero(p)
        self.declare_partials(of=self._var_names[name]['predicted'],
                              wrt=self._var_names[name]['initial'],
                              rows=r, cols=c, val=1.0)

        size = np.prod(shape)
        p = block_diag(*num_seg * [np.kron(self._A, np.eye(size))])
        r, c = np.nonzero(p)
        self.declare_partials(of=self._var_names[name]['predicted'],
                              wrt=self._var_names[name]['k'],
                              rows=r, cols=c, val=p[r, c])
def block_diag(values):
    # NOTE: this wrapper shadows the name of the underlying routine; the
    # import must be aliased (e.g. `from scipy.linalg import block_diag as
    # _block_diag`, an assumption here) or the call below recurses into
    # this function.
    return _block_diag(*AutogradBox.unbox_list(values))
def Q_continuous_white_noise(dim, dt=1., spectral_density=1.,
                             block_size=1, order_by_dim=True):
    """
    Returns the Q matrix for the Discretized Continuous White Noise Model.
    dim may be either 2, 3, or 4; dt is the time step, and spectral_density
    is the spectral density of the noise.

    Parameters
    ----------
    dim : int (2 or 3 or 4)
        dimension for Q, where the final dimension is (dim x dim)
        2 is constant velocity, 3 is constant acceleration, 4 is
        constant jerk

    dt : float, default=1.0
        time step in whatever units your filter is using for time. i.e. the
        amount of time between innovations

    spectral_density : float, default=1.0
        spectral density for the continuous process

    block_size : int >= 1
        If your state variable contains more than one dimension, such as a
        3d constant velocity model [x x' y y' z z']^T, then Q must be a
        block diagonal matrix.

    order_by_dim : bool, default=True
        Defines ordering of variables in the state vector. `True` orders by
        keeping all derivatives of each dimensions)

        [x x' x'' y y' y'']

        whereas `False` interleaves the dimensions

        [x y z x' y' z' x'' y'' z'']

    Examples
    --------
    >>> # constant velocity model in a 3D world with a 10 Hz update rate
    >>> Q_continuous_white_noise(2, dt=0.1, block_size=3)
    array([[0.00033333, 0.005     , 0.        , 0.        , 0.        , 0.        ],
           [0.005     , 0.1       , 0.        , 0.        , 0.        , 0.        ],
           [0.        , 0.        , 0.00033333, 0.005     , 0.        , 0.        ],
           [0.        , 0.        , 0.005     , 0.1       , 0.        , 0.        ],
           [0.        , 0.        , 0.        , 0.        , 0.00033333, 0.005     ],
           [0.        , 0.        , 0.        , 0.        , 0.005     , 0.1       ]])
    """
    if not (dim == 2 or dim == 3 or dim == 4):
        raise ValueError("dim must be between 2 and 4")

    if dim == 2:
        Q = [[(dt**3)/3., (dt**2)/2.],
             [(dt**2)/2.,         dt]]
    elif dim == 3:
        Q = [[(dt**5)/20., (dt**4)/8., (dt**3)/6.],
             [ (dt**4)/8., (dt**3)/3., (dt**2)/2.],
             [ (dt**3)/6., (dt**2)/2.,         dt]]
    else:
        Q = [[(dt**7)/252., (dt**6)/72., (dt**5)/30., (dt**4)/24.],
             [ (dt**6)/72., (dt**5)/20., (dt**4)/8.,  (dt**3)/6.],
             [ (dt**5)/30., (dt**4)/8.,  (dt**3)/3.,  (dt**2)/2.],
             [ (dt**4)/24., (dt**3)/6.,  (dt**2)/2.,          dt]]

    if order_by_dim:
        return block_diag(*[Q]*block_size) * spectral_density

    return order_by_derivative(array(Q), dim, block_size) * spectral_density
def __set_costs(self, ocp):
    if ocp.nb_phases != 1:
        raise NotImplementedError("ACADOS with more than one phase is not implemented yet.")

    # costs handling in self.acados_ocp
    self.y_ref = []
    self.y_ref_end = []
    self.lagrange_costs = SX()
    self.mayer_costs = SX()
    self.W = np.zeros((0, 0))
    self.W_e = np.zeros((0, 0))

    if self.acados_ocp.cost.cost_type == "LINEAR_LS":
        raise RuntimeError("LINEAR_LS is not interfaced yet.")

    elif self.acados_ocp.cost.cost_type == "NONLINEAR_LS":
        for i in range(ocp.nb_phases):
            for j, J in enumerate(ocp.nlp[i].J):
                if J[0]["objective"].type.get_type() == ObjectiveFunction.LagrangeFunction:
                    self.lagrange_costs = vertcat(self.lagrange_costs, J[0]["val"].reshape((-1, 1)))
                    self.W = linalg.block_diag(self.W, np.diag([J[0]["objective"].weight] * J[0]["val"].numel()))
                    if J[0]["target"] is not None:
                        self.y_ref.append([J_tp["target"].T.reshape((-1, 1)) for J_tp in J])
                    else:
                        self.y_ref.append([np.zeros((J_tp["val"].numel(), 1)) for J_tp in J])

                elif J[0]["objective"].type.get_type() == ObjectiveFunction.MayerFunction:
                    mayer_func_tp = Function(f"cas_mayer_func_{i}_{j}", [ocp.nlp[i].X[-1]], [J[0]["val"]])
                    self.W_e = linalg.block_diag(
                        self.W_e, np.diag([J[0]["objective"].weight] * J[0]["val"].numel())
                    )
                    self.mayer_costs = vertcat(self.mayer_costs, mayer_func_tp(ocp.nlp[i].X[0]))
                    if J[0]["target"] is not None:
                        self.y_ref_end.append(
                            [J[0]["target"]] if isinstance(J[0]["target"], (int, float)) else J[0]["target"]
                        )
                    else:
                        self.y_ref_end.append([0] * (J[0]["val"].numel()))

                else:
                    raise RuntimeError("The objective function is not Lagrange nor Mayer.")

        # parameter as mayer function
        # IMPORTANT: it is considered that only parameters are stored in ocp.J, for now.
        if self.params:
            for j, J in enumerate(ocp.J):
                mayer_func_tp = Function(f"cas_J_mayer_func_{i}_{j}", [ocp.nlp[i].X[-1]], [J[0]["val"]])
                self.W_e = linalg.block_diag(
                    self.W_e, np.diag(([J[0]["objective"].weight] * J[0]["val"].numel()))
                )
                self.mayer_costs = vertcat(self.mayer_costs, mayer_func_tp(ocp.nlp[i].X[0]))
                if J[0]["target"] is not None:
                    self.y_ref_end.append(
                        [J[0]["target"]] if isinstance(J[0]["target"], (int, float)) else J[0]["target"]
                    )
                else:
                    self.y_ref_end.append([0] * (J[0]["val"].numel()))

        # Set costs
        self.acados_ocp.model.cost_y_expr = self.lagrange_costs if self.lagrange_costs.numel() else SX(1, 1)
        self.acados_ocp.model.cost_y_expr_e = self.mayer_costs if self.mayer_costs.numel() else SX(1, 1)

        # Set dimensions
        self.acados_ocp.dims.ny = self.acados_ocp.model.cost_y_expr.shape[0]
        self.acados_ocp.dims.ny_e = self.acados_ocp.model.cost_y_expr_e.shape[0]

        # Set weight
        self.acados_ocp.cost.W = np.zeros((1, 1)) if self.W.shape == (0, 0) else self.W
        self.acados_ocp.cost.W_e = np.zeros((1, 1)) if self.W_e.shape == (0, 0) else self.W_e

        # Set target shape
        self.acados_ocp.cost.yref = np.zeros((self.acados_ocp.cost.W.shape[0],))
        self.acados_ocp.cost.yref_e = np.zeros((self.acados_ocp.cost.W_e.shape[0],))

    elif self.acados_ocp.cost.cost_type == "EXTERNAL":
        raise RuntimeError("External is not interfaced yet, please use NONLINEAR_LS")

    else:
        raise RuntimeError("Available acados cost type: 'LINEAR_LS', 'NONLINEAR_LS' and 'EXTERNAL'.")
al_p_ = Function(Vp)
al_q_ = Function(Vq)

Hdes = 1.
h_eq_ = Function(Vq)
h_eq_.vector()[:] = Hdes

Hd = 0.5 * (1. / rho * al_q_ * dot(al_p_, al_p_) + rho * g * al_q_**2) * r * dx
Lyap = 0.5 * (1. / rho * al_q_ * dot(al_p_, al_p_) + rho * g * (al_q_ - Hdes)**2) * r * dx

e_p_ = derivative(Hd, al_p_)
e_q_ = derivative(Hd, al_q_)

M = la.block_diag(M_p, M_q)

J = np.zeros((n_V, n_V))
J[:n_Vp, n_Vp:n_V] = D_q
J[n_Vp:n_V, :n_Vp] = D_p

invM_q = la.inv(M_q)
invM_p = la.inv(M_p)
invM = la.inv(M)

Jtilde = invM @ J @ invM
# Jtilde = (Jtilde - Jtilde.T)/2.

# Stormer Verlet integrator
B = np.concatenate((np.zeros((n_Vp, )), B_r), axis=0).reshape((-1, 1))

z = 0.001
R = z * B @ B.T
def Solver(self):
    Bd = np.zeros((self.dsystem.A.shape[0], self.d_hat.shape[0]))
    Cd = np.array([[1.]])
    [nQ, mQ] = self.dsystem.A.shape
    [nR, mR] = self.dsystem.B.shape

    qmpc = np.eye(mQ, dtype="float") * self.qw
    #qmpc[0:1, 0:1] = 40.0
    #qmpc[9-len(self.del_list), 9-len(self.del_list)] = 1000.0
    #qmpc *= 5500.0
    rmpc = self.rw * np.eye(mR, dtype="float")  # 5.0
    (kl, ll, el) = bb_dlqr(self.dsystem.A, self.dsystem.B, qmpc, rmpc)
    q_fmpc = ll

    # Equality Constraints #
    a_til = np.zeros((self.N * mQ, self.N * (mR + mQ)), dtype="float")
    first_row = np.bmat([-self.dsystem.B, np.eye(mQ, dtype="float")])
    second_row = np.bmat([-self.dsystem.A, -self.dsystem.B, np.eye(mQ, dtype="float")])
    a_til[0:nR, 0:(mQ + mR)] = first_row
    for k in range(1, self.N):
        a_til[k * nR:(k + 1) * nR,
              k * mR + (k - 1) * mQ:(k + 1) * mR + (k + 1) * mQ] = second_row

    b_til = np.zeros((self.N * mQ, 1), dtype="float")
    b_til[0:mQ] = np.dot(Bd, self.d_hat) + np.dot(self.dsystem.A, self.x_hat)
    for k in range(1, self.N):
        b_til[k * nR:(k + 1) * nR] = np.dot(Bd, self.d_hat)

    # Inequality Constraints #
    hc = np.zeros((self.N * (mQ + mR), self.N * (mQ + mR)), dtype="float")
    for k in range(0, self.N):
        hc[k * mR + k * mQ:(k + 1) * mR + k * mQ,
           k * mR + k * mQ:(k + 1) * mR + k * mQ] = rmpc
        hc[(k + 1) * mR + k * mQ:(k + 1) * mR + (k + 1) * mQ,
           (k + 1) * mR + k * mQ:(k + 1) * mR + (k + 1) * mQ] = qmpc
        if k == self.N - 1:
            hc[(k + 1) * mR + k * mQ:(k + 1) * mR + (k + 1) * mQ,
               (k + 1) * mR + k * mQ:(k + 1) * mR + (k + 1) * mQ] = q_fmpc
    hc *= 2.

    # Construct G #
    first_col_g = np.bmat([[-np.eye(mR, dtype="float")], [np.eye(mR, dtype="float")]])
    second_col_g = np.bmat([[-np.eye(mQ, dtype="float")], [np.eye(mQ, dtype="float")]])
    g_block = block_diag(first_col_g, second_col_g)
    g = g_block
    for k in range(0, self.N - 1):
        g = block_diag(g_block, g)

    # Construct h #
    h = np.zeros((2 * self.N * (mQ + mR), 1), dtype="float")
    u_low = np.zeros((mR, 1), dtype="float")
    u_up = self.max_influx * np.ones((mR, 1), dtype="float")
    x_low = np.zeros((mQ, 1), dtype="float")
    x_low[3:4, :] = self.min_lung
    x_low[5:6, :] = self.min_skin
    x_low[7:8, :] = self.min_bladder
    x_low[9:10, :] = self.min_liver
    x_low[11:12, :] = self.min_residual
    x_low[13:14, :] = self.min_kidney
    x_low[25:26, :] = self.min_heart
    x_low[27:28, :] = self.min_muscle
    x_low[29:30, :] = self.min_spleen
    x_low[33:34, :] = self.min_placental
    x_up = np.ones((mQ, 1), dtype="float")
    x_up[3:4, :] = self.max_lung
    x_up[5:6, :] = self.max_skin
    x_up[7:8, :] = self.max_bladder
    x_up[9:10, :] = self.max_liver
    x_up[11:12, :] = self.max_residual
    x_up[13:14, :] = self.max_kidney
    x_up[25:26, :] = self.max_heart
    x_up[27:28, :] = self.max_muscle
    x_up[29:30, :] = self.max_spleen
    x_up[33:34, :] = self.max_placental
    for k in range(0, self.N):
        h[2 * k * mR + 2 * k * mQ:2 * k * mR + 2 * k * mQ + mR] = u_low
        h[2 * k * mR + 2 * k * mQ + mR:2 * (k + 1) * mR + 2 * k * mQ] = u_up
        h[2 * (k + 1) * mR + 2 * k * mQ:2 * (k + 1) * mR + 2 * k * mQ + mQ] = x_low
        h[2 * k * mR + 2 * k * mQ + 2 * mR + mQ:2 * (k + 1) * mR + 2 * (k + 1) * mQ] = x_up

    plin = np.zeros((1, self.N * (mQ + mR)), dtype="float")
    a_lin = np.dot(self.x_.T, qmpc)
    b_lin = np.dot(self.u_.T, rmpc)
    lin = np.bmat([[b_lin, a_lin]])
    for i in range(0, self.N * (mQ + mR), (mQ + mR)):
        plin[0, i:i + (mQ + mR)] = lin
    f = np.dot(self.x_.T, q_fmpc)
    flin = np.concatenate((b_lin, f), axis=1)
    plin[0, (self.N - 1) * (mQ + mR)::] = flin
    plin = -2. * plin.T

    solvers.options['abstol'] = 1.e-324
    sol = solvers.qp(matrix(hc), matrix(plin), matrix(g), matrix(h),
                     matrix(a_til), matrix(b_til))
    if sol['x'][0] <= 0:
        u = 0
    else:
        u = sol['x'][0]
    return u, sol['status']
def __new__(cls, *args, **kwargs):
    return AffineTransform(
        mean_in=vstack([transform.mean_in for transform in args]),
        pre_in=block_diag(*[transform.pre_in for transform in args]),
        **kwargs)
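
# A plain-NumPy sketch of what this composition does: the input means stack
# vertically while the per-transform matrices combine block-diagonally, so
# each transform still acts only on its own coordinates (pre_a/pre_b are
# stand-ins for transform.pre_in):
import numpy as np
from scipy.linalg import block_diag

pre_a = 2.0 * np.eye(2)
pre_b = np.array([[0.0, 1.0],
                  [1.0, 0.0]])
combined = block_diag(pre_a, pre_b)
v = np.array([1.0, 2.0, 3.0, 4.0])
# first two coordinates see pre_a, last two see pre_b
assert np.allclose(combined @ v, np.concatenate([pre_a @ v[:2], pre_b @ v[2:]]))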
def __simulate_for_one_metric(self, Ns_all_users, external_int_data_all_metrics,
                              MsPk_all_users, Wk_all_users, metric_name,
                              current_parameters):
    """
    This method is only called inside the _run_simulation method.

    This method has the common code that is executed for each metric
    inside the _run_simulation method.

    Parameters
    ----------
    Ns_all_users : np.ndarray
        Number of streams for each user. This variable controls how many
        data streams will be generated for each user of the K users.
        This is a 1D numpy array of size K.
    external_int_data_all_metrics : np.ndarray
        The data of the external interference sources (2D numpy array).
    MsPk_all_users : np.ndarray
        The precoders of all users returned by the block diagonalize
        method for the given metric. This is a 1D numpy array of 2D numpy
        arrays.
    Wk_all_users : np.ndarray
        The receive filter for all users (1D numpy array of 2D numpy
        arrays).
    metric_name : string
        Metric name. This string will be appended to each result name.

    Returns
    -------
    TODO: Write the rest of the docstring
    """
    # pylint: disable=R0914
    Ns_total = np.sum(Ns_all_users)
    self.data_RS = np.random.RandomState(self.data_gen_seed)
    input_data = self.data_RS.randint(
        0, current_parameters['M'],
        [Ns_total, current_parameters['NSymbs']])
    symbols = self.modulator.modulate(input_data)

    # Prepare the transmit data. That is, the precoded_data as well as
    # the external interference sources' data.
    precoded_data = np.dot(np.hstack(MsPk_all_users), symbols)
    # external_int_data_all_metrics = (
    #     np.sqrt(self.pe)
    #     * misc.randn_c_RS(
    #         self.ext_data_RS, self.ext_int_rank, self.NSymbs))
    all_data = np.vstack([precoded_data, external_int_data_all_metrics])

    # xxxxxxxxxx Pass the precoded data through the channel xxxxxxxxxxx
    self.multiuser_channel.set_noise_seed(self.noise_seed)
    received_signal = self.multiuser_channel.corrupt_concatenated_data(all_data)

    # xxxxxxxxxx Filter the received data xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # noinspection PyArgumentList
    Wk = block_diag(*Wk_all_users)
    received_symbols = np.dot(Wk, received_signal)

    # xxxxxxxxxx Demodulate the filtered symbols xxxxxxxxxxxxxxxxxxxxxx
    decoded_symbols = self.modulator.demodulate(received_symbols)

    # xxxxxxxxxx Calculates the Symbol Error Rate xxxxxxxxxxxxxxxxxxxxx
    num_symbol_errors = np.sum(decoded_symbols != input_data, 1)
    # num_symbol_errors = sum_user_data(num_symbol_errors,
    #                                   Ns_all_users)
    num_symbols = np.ones(Ns_total) * input_data.shape[1]

    # xxxxxxxxxx Calculates the Bit Error Rate xxxxxxxxxxxxxxxxxxxxxxxx
    num_bit_errors = misc.count_bit_errors(decoded_symbols, input_data, 1)
    # num_bit_errors = sum_user_data(num_bit_errors,
    #                                Ns_all_users)
    num_bits = num_symbols * np.log2(current_parameters['M'])

    # xxxxxxxxxx Calculates the Package Error Rate xxxxxxxxxxxxxxxxxxxx
    ber = num_bit_errors / num_bits
    per = 1. - ((1. - ber)**current_parameters['packet_length'])
    num_packages = num_bits / current_parameters['packet_length']
    num_package_errors = per * num_packages

    # xxxxxxxxxx Calculates the Spectral Efficiency xxxxxxxxxxxxxxxxxxx
    # nominal spectral Efficiency per stream
    nominal_spec_effic = self.modulator.K
    effective_spec_effic = (1 - per) * nominal_spec_effic

    # xxxxx Map the per stream metric to a global metric xxxxxxxxxxxxxx
    num_bit_errors = np.sum(num_bit_errors)
    num_bits = np.sum(num_bits)
    num_symbol_errors = np.sum(num_symbol_errors)
    num_symbols = np.sum(num_symbols)
    num_package_errors = np.sum(num_package_errors)
    num_packages = np.sum(num_packages)
    effective_spec_effic = np.sum(effective_spec_effic)

    # xxxxx Calculate the SINR xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    Uk_all_users = np.empty(Wk_all_users.size, dtype=np.ndarray)
    for ii in range(Wk_all_users.size):
        Uk_all_users[ii] = Wk_all_users[ii].conjugate().T
    SINR_all_k = self.multiuser_channel.calc_JP_SINR(MsPk_all_users, Uk_all_users)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # None metric
    ber_result = Result.create('ber_{0}'.format(metric_name),
                               Result.RATIOTYPE, num_bit_errors, num_bits)
    ser_result = Result.create('ser_{0}'.format(metric_name),
                               Result.RATIOTYPE, num_symbol_errors, num_symbols)
    per_result = Result.create('per_{0}'.format(metric_name),
                               Result.RATIOTYPE, num_package_errors, num_packages)
    spec_effic_result = Result.create('spec_effic_{0}'.format(metric_name),
                                      Result.RATIOTYPE, effective_spec_effic, 1)
    sinr_result = Result('sinr_{0}'.format(metric_name),
                         Result.RATIOTYPE, accumulate_values=True)
    for k in range(Wk_all_users.size):
        sinr_k = SINR_all_k[k]
        for value in sinr_k:
            sinr_result.update(value, 1)

    return (ber_result, ser_result, per_result, spec_effic_result, sinr_result)
def test_solve_continuous_are():
    mat6 = _load_data('carex_6_data.npz')
    mat15 = _load_data('carex_15_data.npz')
    mat18 = _load_data('carex_18_data.npz')
    mat19 = _load_data('carex_19_data.npz')
    mat20 = _load_data('carex_20_data.npz')
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
            [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
             [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
             [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
             [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
             [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
             [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
             [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
             [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
             [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
             np.diag([1e-6, 1, 1e6])).dot(
             np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) +
         np.rot90(block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
        ]

    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
                   None, 9, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)

        x = solve_continuous_are(a, b, q, r)
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
# Some useful global variables

# non-parametrized qubit gates
I = np.identity(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
S = np.diag([1, 1j])
T = np.diag([1, np.exp(1j * np.pi / 4)])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CZ = np.diag([1, 1, 1, -1])
toffoli = np.diag([1 for i in range(8)])
toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
CSWAP = block_diag(I, I, SWAP)

# parametrized qubit gates
phase_shift = lambda phi: np.array([[1, 0], [0, np.exp(1j * phi)]])
rx = lambda theta: np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * X
ry = lambda theta: np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y
rz = lambda theta: np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z
rot = lambda a, b, c: rz(c) @ (ry(b) @ rz(a))
crz = lambda theta: np.array(
    [
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, np.exp(-1j * theta / 2), 0],
        [0, 0, 0, np.exp(1j * theta / 2)],
    ]
)
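
# Sanity checks of the block-diagonal gate constructions above (assuming the
# definitions above are in scope): a controlled gate is the identity on the
# control-|0> block and the target unitary on the |1> block.
assert np.allclose(crz(0.7), block_diag(I, rz(0.7)))          # controlled-RZ
assert np.allclose(toffoli, block_diag(np.eye(6), X))         # Toffoli = CCX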
def test_basic(self):
    x = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]])
    assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 1, 2, 0, 0, 0],
                           [0, 0, 3, 4, 0, 0, 0],
                           [0, 0, 5, 6, 0, 0, 0],
                           [0, 0, 0, 0, 1, 2, 3]])
])
phi_scale = np.empty((12, 12))
phi_scale.fill(-0.0034)
phi_scale[np.diag_indices(12)] = 0.0367

seasonal_comp = FullEffectsFourier(12, harmonics=[1, 3, 4], discount=0.95)
# seasonal_comp = FullEffectsFourier(12, discount=0.95)
model = (Polynomial(2, discount=0.9) +
         Regression(index, discount=0.98) +
         seasonal_comp)

# transformation matrix
H = seasonal_comp.H
seasonal_prior_mean = np.dot(H, phi_effects)
seasonal_prior_scale = zero_out(chain_dot(H, phi_scale, H.T))

prior_mean = np.array(np.concatenate(([9.5, 0., -0.7], seasonal_prior_mean)))
prior_scale = block_diag(np.diag([0.09, 0.01, 0.01]), seasonal_prior_scale)
mean_prior = (prior_mean, prior_scale)

# k = model.F.shape[1]
dlm = DLM(sales, model.F, G=model.G, mean_prior=mean_prior,
          var_prior=var_prior, discount=model.discount)
from __future__ import division, print_function
from six.moves import xrange as range
from nose.tools import with_setup, assert_raises, assert_equal
import itertools
from scipy.linalg import block_diag
import numpy as np
where = np.flatnonzero

from lavaburst.core import algo
from lavaburst import scoring

A = 10 * block_diag(np.ones((4, 4)), np.ones((5, 5)), np.ones((4, 4))).astype(float)
A[np.diag_indices_from(A)] = 0


def test_sums_by_segment():
    n = len(A)
    Zseg = np.zeros((n + 1, n + 1))
    for i in range(n + 1):
        for j in range(i, n + 1):
            Zseg[i, j] = Zseg[j, i] = np.sum(A[i:j, i:j] / 2.0)
    assert np.allclose(scoring.sums_by_segment(A), Zseg)
    assert np.allclose(scoring.sums_by_segment(A, normalized=True),
                       Zseg / Zseg[0, n])


class BruteForceEnsemble(object):
    def __init__(self, scorer):
        self.n_nodes = len(scorer) - 1
#------- Generating graphs, covariances, multivariate normal observations -------
A_list = []
C_list = []
# first dim is time, second dim is group
X_list = []
# ml_glassocv = cov.GraphLassoCV(assume_centered=True)
# Theta_glassocv_list = []
for class_ix in range(len_class):
    A_c = []
    C_c = []
    X_c = []
    # Theta_t = []
    for time_ix in range(len_t):
        # print class_ix
        # print block_diag(*A1_list[:(len_class - class_ix)]), np.matrix(np.eye(class_ix*5))
        A = block_diag(A0_list[time_ix],
                       block_diag(*A1_list[:(len_class - class_ix)]),
                       np.matrix(np.eye(class_ix*p1)))
        C = block_diag(C0_list[time_ix],
                       block_diag(*C1_list[:(len_class - class_ix)]),
                       np.matrix(np.eye(class_ix*p1)))
        X = np.random.multivariate_normal(mean=np.zeros(p), cov=C, size=ni)
        # ml_glassocv.fit(X)
        # Theta = ml_glassocv.get_precision()
        A_c.append(A)
        C_c.append(C)
        X_c.append(X)
        # Theta_t.append(Theta)
    A_list.append(A_c)
    C_list.append(C_c)
    X_list.append(X_c)
    # Theta_glassocv_list.append(Theta_t)

# f = open("mydata.pkl", 'wb')
# pickle.dump(X_list, f)
def construct_polynomial_trajectory(self):
    """function to construct the trajectory"""
    t = time.time()
    r = self.r
    N = 1 + self.N  # because number of terms in a polynomial = degree+1

    QQ = []
    AA_inv = []

    for i in range(self.no_of_segments):
        q = self.construct_Q(N, r, self.T[i], self.T[i + 1])
        a = self.construct_A(N, r, self.T[i], self.T[i + 1])
        a_inv = scipy.linalg.pinv(a)
        QQ = block_diag(QQ, q)
        AA_inv = block_diag(AA_inv, a_inv)

    order = 2 * r * self.no_of_segments
    R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))

    bx = np.concatenate((self.x0, self.xT), axis=0)
    by = np.concatenate((self.y0, self.yT), axis=0)
    bz = np.concatenate((self.z0, self.zT), axis=0)

    m = Model("qp")
    order = 2 * r * self.no_of_segments
    dx = m.addVars(order, lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="dx")
    dy = m.addVars(order, lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="dy")
    dz = m.addVars(order, lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="dz")

    # using LinExpr for the second expression is significantly faster
    obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))
    obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))
    obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))
    obj = obj1 + obj2 + obj3

    j = 0
    addconstraint = m.addConstr
    for k in range(order):
        if k < r:
            addconstraint(dx[k] == bx[k])
            addconstraint(dy[k] == by[k])
            addconstraint(dz[k] == bz[k])
        elif k >= order - r:
            addconstraint(dx[k] == bx[r + j])
            addconstraint(dy[k] == by[r + j])
            addconstraint(dz[k] == bz[r + j])
            j += 1

    c = 1  # counter
    for n in range(r, order - 2 * r, 2 * r):
        #if c == 3:
        #    m.addConstr(dx[n] == self.x_wp[c])
        #    m.addConstr(dy[n] == self.y_wp[c])
        #    m.addConstr(dz[n] == self.z_wp[c])
        #else:
        m.addConstr(dx[n] <= self.x_wp[c] + 0.25)
        m.addConstr(dy[n] <= self.y_wp[c] + 0.25)
        m.addConstr(dz[n] <= self.z_wp[c] + 0.25)
        m.addConstr(dx[n] >= self.x_wp[c] - 0.25)
        m.addConstr(dy[n] >= self.y_wp[c] - 0.25)
        m.addConstr(dz[n] >= self.z_wp[c] - 0.25)
        c = c + 1
        for q in range(r):
            addconstraint(dx[n + q] == dx[n + q + r])
            addconstraint(dy[n + q] == dy[n + q + r])
            addconstraint(dz[n + q] == dz[n + q + r])
        #addconstraint(dx[n+1] == 0)
        #addconstraint(dy[n+1] == 0)
        #addconstraint(dz[n+1] == 0)
        #addconstraint(dx[n+2] == 0)
        #addconstraint(dy[n+2] == 0)
        #addconstraint(dz[n+2] == 0)
        #addconstraint(dx[n+3] == 0)
        #addconstraint(dy[n+3] == 0)
        #addconstraint(dz[n+3] == 0)

    m.setObjective(obj, GRB.MINIMIZE)
    #m.write('model.lp')
    m.setParam('OutputFlag', 0)
    m.setParam('PSDtol', 1e5)
    m.setParam('NumericFocus', 3)

    m.optimize()
    print 'The optimality status is (2: optimal, 13: suboptimal):', m.status
    #runtime = m.Runtime
    #optimal_objective = obj.getValue()
    #print 'optimal objective is:', optimal_objective

    x_coeff = [dx[i].X for i in range(order)]
    y_coeff = [dy[i].X for i in range(order)]
    z_coeff = [dz[i].X for i in range(order)]

    Dx = np.asarray(x_coeff)[np.newaxis].T
    Dy = np.asarray(y_coeff)[np.newaxis].T
    Dz = np.asarray(z_coeff)[np.newaxis].T
    pcx = np.dot(AA_inv, Dx)
    pcy = np.dot(AA_inv, Dy)
    pcz = np.dot(AA_inv, Dz)

    poly_coeff_x = pcx.T.ravel().tolist()
    poly_coeff_y = pcy.T.ravel().tolist()
    poly_coeff_z = pcz.T.ravel().tolist()

    return self.T, poly_coeff_x, poly_coeff_y, poly_coeff_z
def configure_io(self):
    """
    I/O creation is delayed until configure so we can determine variable shape and units.
    """
    self._var_names = {}

    num_segs = self.options['num_segments']
    rk_data = rk_methods[self.options['method']]
    num_stages = rk_data['num_stages']

    for name, options in self.options['state_options'].items():
        shape = options['shape']
        size = np.prod(shape)
        units = options['units']

        self._var_names[name] = {}
        self._var_names[name]['initial'] = 'initial_states_per_seg:{0}'.format(name)
        self._var_names[name]['k'] = 'k:{0}'.format(name)
        self._var_names[name]['final'] = 'final_states:{0}'.format(name)
        self._var_names[name]['integral'] = 'state_integrals:{0}'.format(name)

        self.add_input(
            self._var_names[name]['initial'],
            shape=(num_segs,) + shape,
            units=units,
            desc='The initial value of the state at the start of each segment.')

        self.add_input(
            self._var_names[name]['k'],
            shape=(num_segs, num_stages) + shape,
            units=units,
            desc='RK multiplier k for each stage in each segment.')

        self.add_output(
            self._var_names[name]['integral'],
            shape=(num_segs,) + shape,
            units=units,
            desc='The change in value of the state along each segment')

        self.add_output(
            self._var_names[name]['final'],
            shape=(num_segs,) + shape,
            units=units,
            desc='The final value of the state at the end of each segment.')

        e = np.eye(size * num_segs)
        r, c = np.nonzero(e)
        self.declare_partials(of=self._var_names[name]['final'],
                              wrt=self._var_names[name]['initial'],
                              rows=r, cols=c, val=1.0)

        p = np.kron(rk_data['b'], np.eye(size))
        p = block_diag(*num_segs * [p])
        r, c = p.nonzero()
        self.declare_partials(of=self._var_names[name]['final'],
                              wrt=self._var_names[name]['k'],
                              rows=r, cols=c,
                              val=np.tile(rk_data['b'], size * num_segs))

        self.declare_partials(of=self._var_names[name]['integral'],
                              wrt=self._var_names[name]['k'],
                              rows=r, cols=c,
                              val=np.tile(rk_data['b'], size * num_segs))
sess = tf.Session()

N = 100
h = 1.0 / (N + 1)
n = N**2

# Create the matrix A (2-D Laplacian): tridiagonal blocks J on the diagonal,
# -I on the +/-N off-diagonals
J = 4 * np.eye(N)
i, j = np.indices(J.shape)
J[j == i + 1] = -1
J[j == i - 1] = -1
I_upper = -1 * np.diag(np.ones(N * (N - 1)), N)
I_lower = -1 * np.diag(np.ones(N * (N - 1)), -N)
repeated_J = [J] * N
A = block_diag(*repeated_J)
A = A + I_lower + I_upper
A_mat = tf.convert_to_tensor(A, dtype=tf.float64, name='A_matrix')

# Create vector b and the weights (i.e. boundary values).
# Approach 4: create only 4N weights and add them to vector b with
# concatenation instead of reshaping.
b = (h**2) * 20 * np.ones((N**2, 1))
b_tf = tf.constant(b, dtype=tf.float64, name='b_vector')

left_boundary_weights = tf.Variable(np.zeros((N, 1)), dtype=tf.float64, name='left_boundary')
padding_vector_0 = tf.constant(np.zeros(((N - 1) * N, 1)), name='padding_1')
b_mat = b_tf + tf.concat([left_boundary_weights, padding_vector_0], 0)

# lower_boundary_weights = tf.Variable(np.zeros((N, 1)), dtype=tf.float64, name='lower_boundary')
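# Quick structural check (assumed small grid, separate names so the N = 100
# setup above is untouched) of the 5-point Laplacian built there: block_diag
# stacks the tridiagonal J blocks, and the +/-N diagonals couple vertically
# neighbouring grid points.
import numpy as np
from scipy.linalg import block_diag

N_small = 3
J_small = 4 * np.eye(N_small)
ii, jj = np.indices(J_small.shape)
J_small[jj == ii + 1] = -1
J_small[jj == ii - 1] = -1
A_small = block_diag(*[J_small] * N_small)
A_small += -1 * np.diag(np.ones(N_small * (N_small - 1)), N_small)
A_small += -1 * np.diag(np.ones(N_small * (N_small - 1)), -N_small)
assert A_small.shape == (N_small**2, N_small**2)
assert np.allclose(A_small, A_small.T)  # the discrete Laplacian is symmetric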
def exact_inference_with_ll(seqs, params, get_ll=True):
    """
    Extracts latent trajectories from neural data, given GPFA model parameters.

    Parameters
    ----------
    seqs : np.recarray
        Input data structure, whose n-th element (corresponding to the n-th
        experimental trial) has fields:
        y : np.ndarray of shape (#units, #bins)
            neural data
        T : int
            number of bins
    params : dict
        GPFA model parameters with the following fields:
        C : np.ndarray
            FA factor loadings matrix
        d : np.ndarray
            FA mean vector
        R : np.ndarray
            FA noise covariance matrix
        gamma : np.ndarray
            GP timescale
        eps : np.ndarray
            GP noise variance
    get_ll : bool, optional
        specifies whether to compute data log likelihood (default: True)

    Returns
    -------
    seqs_latent : np.recarray
        a copy of the input data structure, augmented with the new fields:
        latent_variable : (#latent_vars, #bins) np.ndarray
            posterior mean of latent variables at each time bin
        Vsm : (#latent_vars, #latent_vars, #bins) np.ndarray
            posterior covariance between latent variables at each timepoint
        VsmGP : (#bins, #bins, #latent_vars) np.ndarray
            posterior covariance over time for each latent variable
    ll : float
        data log likelihood; np.nan is returned when `get_ll` is set False
    """
    y_dim, x_dim = params['C'].shape

    # copy the contents of the input data structure to output structure
    dtype_out = [(x, seqs[x].dtype) for x in seqs.dtype.names]
    dtype_out.extend([('latent_variable', object), ('Vsm', object),
                      ('VsmGP', object)])
    seqs_latent = np.empty(len(seqs), dtype=dtype_out)
    for dtype_name in seqs.dtype.names:
        seqs_latent[dtype_name] = seqs[dtype_name]

    # Precomputations
    if params['notes']['RforceDiagonal']:
        rinv = np.diag(1.0 / np.diag(params['R']))
        logdet_r = (np.log(np.diag(params['R']))).sum()
    else:
        rinv = linalg.inv(params['R'])
        rinv = (rinv + rinv.T) / 2  # ensure symmetry
        logdet_r = gpfa_util.logdet(params['R'])

    c_rinv = params['C'].T.dot(rinv)
    c_rinv_c = c_rinv.dot(params['C'])

    t_all = seqs_latent['T']
    t_uniq = np.unique(t_all)
    ll = 0.

    # Overview:
    # - Outer loop on each element of Tu.
    # - For each element of Tu, find all trials with that length.
    # - Do inference and LL computation for all those trials together.
    for t in t_uniq:
        k_big, k_big_inv, logdet_k_big = gpfa_util.make_k_big(params, t)
        k_big = sparse.csr_matrix(k_big)

        c_rinv_c_big = linalg.block_diag(*[c_rinv_c] * t)  # (xDim*T) x (xDim*T)
        minv, logdet_m = gpfa_util.inv_persymm(k_big_inv + c_rinv_c_big,
                                               x_dim)

        # Note that posterior covariance does not depend on observations,
        # so can compute once for all trials with same T.
        # xDim x xDim posterior covariance for each timepoint
        vsm = np.full((x_dim, x_dim, t), np.nan)
        idx = np.arange(0, x_dim * t + 1, x_dim)
        for i in range(t):
            vsm[:, :, i] = minv[idx[i]:idx[i + 1], idx[i]:idx[i + 1]]

        # T x T posterior covariance for each GP
        vsm_gp = np.full((t, t, x_dim), np.nan)
        for i in range(x_dim):
            vsm_gp[:, :, i] = minv[i::x_dim, i::x_dim]

        # Process all trials with length T
        n_list = np.where(t_all == t)[0]
        # dif is yDim x sum(T)
        dif = np.hstack(seqs_latent[n_list]['y']) - params['d'][:, np.newaxis]
        # term1Mat is (xDim*T) x length(nList)
        term1_mat = c_rinv.dot(dif).reshape((x_dim * t, -1), order='F')

        # Compute blkProd = CRinvC_big * invM efficiently
        # blkProd is block persymmetric, so just compute top half
        t_half = int(np.ceil(t / 2.0))
        blk_prod = np.zeros((x_dim * t_half, x_dim * t))
        idx = range(0, x_dim * t_half + 1, x_dim)
        for i in range(t_half):
            blk_prod[idx[i]:idx[i + 1], :] = c_rinv_c.dot(
                minv[idx[i]:idx[i + 1], :])
        blk_prod = k_big[:x_dim * t_half, :].dot(
            gpfa_util.fill_persymm(np.eye(x_dim * t_half, x_dim * t) -
                                   blk_prod, x_dim, t))
        # latent_variableMat is (xDim*T) x length(nList)
        latent_variable_mat = gpfa_util.fill_persymm(
            blk_prod, x_dim, t).dot(term1_mat)

        for i, n in enumerate(n_list):
            seqs_latent[n]['latent_variable'] = \
                latent_variable_mat[:, i].reshape((x_dim, t), order='F')
            seqs_latent[n]['Vsm'] = vsm
            seqs_latent[n]['VsmGP'] = vsm_gp

        if get_ll:
            # Compute data likelihood
            val = -t * logdet_r - logdet_k_big - logdet_m \
                  - y_dim * t * np.log(2 * np.pi)
            ll = ll + len(n_list) * val - (rinv.dot(dif) * dif).sum() \
                + (term1_mat.T.dot(minv) * term1_mat.T).sum()

    if get_ll:
        ll /= 2
    else:
        ll = np.nan

    return seqs_latent, ll
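# Sketch of the slicing pattern above with toy sizes: minv covers all time
# bins at once as an (x_dim*T) x (x_dim*T) matrix; per-bin covariances are
# contiguous diagonal blocks, per-latent covariances are strided submatrices.
import numpy as np

x_dim, t = 2, 4
minv = np.arange((x_dim * t)**2, dtype=float).reshape(x_dim * t, x_dim * t)
idx = np.arange(0, x_dim * t + 1, x_dim)
vsm_bin0 = minv[idx[0]:idx[1], idx[0]:idx[1]]  # x_dim x x_dim, time bin 0
vsm_gp0 = minv[0::x_dim, 0::x_dim]             # t x t, latent variable 0
assert vsm_bin0.shape == (x_dim, x_dim)
assert vsm_gp0.shape == (t, t)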
# get real-space representations
analyzer = mg.symmetry.analyzer.SpacegroupAnalyzer(structure)
symops = analyzer.get_symmetry_operations(cartesian=False)
symops_cart = analyzer.get_symmetry_operations(cartesian=True)
rots = [x.rotation_matrix for x in symops]
taus = [x.translation_vector for x in symops]

# get corresponding representations in the Hamiltonian basis
reps = []
for n, (rot, tau) in enumerate(zip(rots, taus)):
    C = symops_cart[n].rotation_matrix
    tauc = symops_cart[n].translation_vector
    prep = C
    spinrep = spin_reps(C)
    R = np.kron(spinrep, la.block_diag(1.0, prep, prep))
    reps.append(R)

# set up the space group symmetries
symmetries = [
    sr.SymmetryOperation(
        # r-space and k-space matrices are related by transposing and inverting
        rotation_matrix=rot,
        repr_matrix=repr_mat,
        repr_has_cc=False,
    ) for rot, repr_mat in zip(rots, reps)
]
point_group = sr.SymmetryGroup(symmetries=symmetries, full_group=True)

sr.io.save([time_reversal, point_group], "results/symmetries.hdf5")
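# Toy illustration (identity stand-ins, not pymatgen output) of the
# representation shape built above: R = kron(spinrep, block_diag(1, C, C))
# acts on one s-like orbital plus two p-like triples, each doubled by spin.
import numpy as np
import scipy.linalg as la

C = np.eye(3)        # assumed Cartesian rotation matrix
spinrep = np.eye(2)  # stand-in for the SU(2) image of C
orb = la.block_diag(1.0, C, C)  # 7x7 orbital part
R = np.kron(spinrep, orb)       # 14x14 full representation
assert R.shape == (14, 14)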
def Q_discrete_white_noise(dim, dt=1., var=1., block_size=1, order_by_dim=True):
    """
    Returns the Q matrix for the Discrete Constant White Noise Model.
    dim may be either 2, 3, or 4; dt is the time step, and var is the
    variance in the noise.

    Q is computed as G * G^T * variance, where G is the process noise per
    time step. In other words, G = [[.5dt^2][dt]]^T for the constant
    velocity model.

    Parameters
    -----------
    dim : int (2, 3, or 4)
        dimension for Q, where the final dimension is (dim x dim)

    dt : float, default=1.0
        time step in whatever units your filter is using for time. i.e. the
        amount of time between innovations

    var : float, default=1.0
        variance in the noise

    block_size : int >= 1
        If your state variable contains more than one dimension, such as a 3d
        constant velocity model [x x' y y' z z']^T, then Q must be a block
        diagonal matrix.

    order_by_dim : bool, default=True
        Defines ordering of variables in the state vector. `True` orders by
        keeping all derivatives of each dimension together
        [x x' x'' y y' y''], whereas `False` interleaves the dimensions
        [x y z x' y' z' x'' y'' z'']

    Examples
    --------
    >>> # constant velocity model in a 3D world with a 10 Hz update rate
    >>> Q_discrete_white_noise(2, dt=0.1, var=1., block_size=3)
    array([[0.000025, 0.0005  , 0.      , 0.      , 0.      , 0.      ],
           [0.0005  , 0.01    , 0.      , 0.      , 0.      , 0.      ],
           [0.      , 0.      , 0.000025, 0.0005  , 0.      , 0.      ],
           [0.      , 0.      , 0.0005  , 0.01    , 0.      , 0.      ],
           [0.      , 0.      , 0.      , 0.      , 0.000025, 0.0005  ],
           [0.      , 0.      , 0.      , 0.      , 0.0005  , 0.01    ]])

    References
    ----------
    Bar-Shalom. "Estimation with Applications To Tracking and Navigation".
    John Wiley & Sons, 2001. Page 274.
    """
    if dim not in (2, 3, 4):
        raise ValueError("dim must be 2, 3, or 4")

    if dim == 2:
        Q = [[.25*dt**4, .5*dt**3],
             [ .5*dt**3,    dt**2]]
    elif dim == 3:
        Q = [[.25*dt**4, .5*dt**3, .5*dt**2],
             [ .5*dt**3,    dt**2,       dt],
             [ .5*dt**2,       dt,        1]]
    else:
        Q = [[(dt**6)/36, (dt**5)/12, (dt**4)/6, (dt**3)/6],
             [(dt**5)/12, (dt**4)/4,  (dt**3)/2, (dt**2)/2],
             [(dt**4)/6,  (dt**3)/2,   dt**2,     dt],
             [(dt**3)/6,  (dt**2)/2,   dt,        1.]]

    if order_by_dim:
        return block_diag(*[Q]*block_size) * var

    return order_by_derivative(array(Q), dim, block_size) * var
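# Usage sketch for Q_discrete_white_noise above: a 2-D constant-velocity
# state [x x' y y'] at a 10 Hz update rate needs dim=2 and block_size=2;
# the x and y blocks come out identical and uncoupled.
import numpy as np

Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.5, block_size=2)
assert Q.shape == (4, 4)
assert np.allclose(Q[:2, :2], Q[2:, 2:])
assert np.allclose(Q[:2, 2:], 0)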
Initial safe set (with plot) """ lyapunov.initial_safe_set = np.abs( lyapunov.discretization.all_points.squeeze()) < 0.05 lyapunov.update_safe_set() noisy_dynamics = lambda x, u, noise: true_dynamics(x, u) plotting.plot_lyapunov_1d(lyapunov, noisy_dynamics, legend=True) """ RL for mean dynamics """ # mean_dynamics = dynamics.to_mean_function() reward = safe_learning.QuadraticFunction(linalg.block_diag(-q, -r), name='reward_function') value_function = safe_learning.Triangulation(policy_disc, np.zeros(len(policy_disc)), project=True, name='value_function') rl = safe_learning.PolicyIteration(policy, dynamics, reward, value_function) """ Plot the dynamics Note that the initial policy is just all zeros! """ _STORAGE = {}
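# Sketch of the quadratic reward constructed above, with assumed scalar
# weights: stacking -q and -r with block_diag gives the LQR-style reward
# r(x, u) = -(q x^2 + r u^2) when evaluated on the stacked vector [x, u].
import numpy as np
from scipy import linalg

q = np.array([[1.0]])  # hypothetical state weight
r = np.array([[0.1]])  # hypothetical action weight
W = linalg.block_diag(-q, -r)
xu = np.array([[0.5, -0.2]])
reward = float(xu.dot(W).dot(xu.T))
assert np.isclose(reward, -(1.0 * 0.25 + 0.1 * 0.04))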
def solve(self, **params_kw):
    """Solve impurity model

    Arguments:
    n_cycles : number of Monte Carlo cycles
    n_warmup_cycles : number of warmup Monte Carlo cycles
    length_cycle : number of proposed moves per cycle
    h_int : interaction Hamiltonian as a quartic triqs operator
    h_0 : quadratic part of the local Hamiltonian (only required if
          delta_interface=true has been specified during construction)
    """

    n_cycles = params_kw.pop("n_cycles")
    n_warmup_cycles = params_kw.pop("n_warmup_cycles", 5000)  # default
    max_time = params_kw.pop("max_time", -1)
    worm = params_kw.pop("worm", False)
    percentageworminsert = params_kw.pop("PercentageWormInsert", 0.00)
    percentagewormreplace = params_kw.pop("PercentageWormReplace", 0.00)

    length_cycle = params_kw.pop("length_cycle", 50)
    h_int = params_kw.pop("h_int")
    self.last_solve_params = {
        "n_cycles": n_cycles,
        "n_warmup_cycles": n_warmup_cycles,
        "length_cycle": length_cycle,
        "h_int": h_int
    }
    if self.delta_interface:
        h_0 = params_kw.pop("h_0")
        self.last_solve_params["h_0"] = h_0

    random_seed = params_kw.pop("random_seed", 1)
    move_double = params_kw.pop("move_double", True)
    measure_G_l = params_kw.pop("measure_G_l", True)
    measure_pert_order = params_kw.pop("measure_pert_order", False)
    statesampling = params_kw.pop("statesampling", False)
    flavourchange_moves = params_kw.pop("flavourchange_moves", False)
    move_global_prob = params_kw.pop("flavomove_global_prob", 0.005)

    if isinstance(self.gf_struct, dict):
        print("WARNING: gf_struct should be a list of pairs [ [str,[int,...]], ...], not a dict")
        self.gf_struct = [[k, v] for k, v in self.gf_struct.items()]

    ### Andi: the definition of the U-matrix in w2dyn is
    ### 1/2 \sum_{ijkl} U_{ijkl} cdag_i cdag_j c_l c_k
    ### a factor of 2 is needed to compensate the 1/2, and a minus sign for
    ### the exchange of the annihilators; is this correct for any
    ### two-particle interaction term?
    U_ijkl = dict_to_matrix(extract_U_dict4(h_int), self.gf_struct)

    ### Make sure that the spin index is the fastest running variable
    norb = U_ijkl.shape[0] // 2
    U_ijkl = U_ijkl.reshape(2, norb, 2, norb, 2, norb, 2, norb)
    U_ijkl = U_ijkl.transpose(1, 0, 3, 2, 5, 4, 7, 6)
    U_ijkl = U_ijkl.reshape(norb * 2, norb * 2, norb * 2, norb * 2)

    if self.delta_interface:
        t_ij_matrix = dict_to_matrix(extract_h_dict(h_0), self.gf_struct)
    else:
        Delta_iw, t_ij_lst = extract_deltaiw_and_tij_from_G0(self.G0_iw, self.gf_struct)

        self.Delta_tau = BlockGf(mesh=self.tau_mesh, gf_struct=self.gf_struct)
        self.Delta_tau << Fourier(Delta_iw)

        assert len(t_ij_lst) in set([1, 2, 4]), \
            "For now t_ij_lst must not contain more than 4 blocks; generalize it!"
        t_ij_matrix = block_diag(*t_ij_lst)

    # in w2dyn Delta is a hole propagator
    for bl, Delta_bl in self.Delta_tau:
        Delta_bl.data[:] = -Delta_bl.data[::-1, ...]

    t_ij_matrix *= -1  # W2Dynamics sign convention

    ### transform t_ij from (f,f) to (o,s,o,s) format
    norb = int(t_ij_matrix.shape[0] / 2)
    t_ij_matrix = t_ij_matrix.reshape(2, norb, 2, norb)
    t_osos_tensor = t_ij_matrix.transpose(1, 0, 3, 2)

    ftau, _, __ = triqs_gf_to_w2dyn_ndarray_g_tosos_beta_ntau(self.Delta_tau)

    ### now comes w2dyn!
    # Make a temporary file with input parameters
    Parameters_in = """#asdf
[General]
[Atoms]
[[1]]
Nd = %i
Hamiltonian = Kanamori
[QMC]
TaudiffMax = -1.0""" % norb

    cfg_file = tempfile.NamedTemporaryFile(delete=False, mode="w")
    cfg_file.write(Parameters_in)
    cfg_file.close()

    ### read w2dyn parameter file; later we will replace this by a
    ### converter of triqs-parameters to w2dyn-parameters
    key_value_args = {}
    cfg = config.get_cfg(cfg_file.name, key_value_args, err=sys.stderr)

    ### check if Delta_tau is a diagonal matrix, and set the w2dyn
    ### parameter offdiag accordingly
    max_blocksize = 0
    for name, d in self.Delta_tau:
        blocksize = d.data.shape[-1]
        if blocksize > max_blocksize:
            max_blocksize = blocksize

    if max_blocksize == 1:
        cfg["QMC"]["offdiag"] = 0
    else:
        cfg["QMC"]["offdiag"] = 1

    ### complex worms do not exist yet
    if self.complex and worm:
        print('complex and worm together not yet implemented')
        exit()

    if self.complex:
        cfg["QMC"]["complex"] = 1
        cfg["QMC"]["use_phase"] = 1
        ### check if offdiag is set; complex makes no sense without offdiag
        assert cfg["QMC"]["offdiag"] != 0, \
            "Complex does not make sense for diagonal Delta_tau!"

    if move_double:
        cfg["QMC"]["Percentage4OperatorMove"] = 0.005
    else:
        cfg["QMC"]["Percentage4OperatorMove"] = 0

    cfg["QMC"]["PercentageGlobalMove"] = move_global_prob

    if flavourchange_moves:
        cfg["QMC"]["flavourchange_moves"] = 1
    else:
        cfg["QMC"]["flavourchange_moves"] = 0

    os.remove(cfg_file.name)  # remove temp file with input parameters

    ### I now write the triqs parameters into the cfg file;
    ### we may later do this with dictionaries in a more sophisticated way
    cfg["General"]["beta"] = self.beta
    cfg["QMC"]["Niw"] = self.n_iw
    cfg["QMC"]["Ntau"] = self.n_tau * 2  # use double-resolution bins & downsample to TRIQS later

    if measure_G_l:
        cfg["QMC"]["NLegMax"] = self.n_l
        cfg["QMC"]["NLegOrder"] = self.n_l
    else:
        cfg["QMC"]["NLegMax"] = 1
        cfg["QMC"]["NLegOrder"] = 1

    cfg["QMC"]["Nwarmups"] = length_cycle * n_warmup_cycles
    cfg["QMC"]["Nmeas"] = n_cycles
    cfg["QMC"]["measurement_time"] = max_time
    cfg["QMC"]["Ncorr"] = length_cycle

    if statesampling:
        cfg["QMC"]["statesampling"] = 1
    else:
        cfg["QMC"]["statesampling"] = 0

    if worm:
        cfg["QMC"]["WormMeasGiw"] = 1
        cfg["QMC"]["WormMeasGtau"] = 1
        cfg["QMC"]["WormSearchEta"] = 1

        ### set worm parameters to some default values if not set by user
        if percentageworminsert != 0.0:
            cfg["QMC"]["PercentageWormInsert"] = percentageworminsert
        else:
            cfg["QMC"]["PercentageWormInsert"] = 0.2
        if percentagewormreplace != 0.0:
            cfg["QMC"]["PercentageWormReplace"] = percentagewormreplace
        else:
            cfg["QMC"]["PercentageWormReplace"] = 0.2

    if mpi.rank == 0:
        print(' ')
        print('specifications for w2dyn:')
        print('cfg["QMC"]["offdiag"] ', cfg["QMC"]["offdiag"])
        print('cfg["QMC"]["Percentage4OperatorMove"] ', cfg["QMC"]["Percentage4OperatorMove"])
        print('cfg["QMC"]["flavourchange_moves"] ', cfg["QMC"]["flavourchange_moves"])
        print('cfg["QMC"]["statesampling"] ', cfg["QMC"]["statesampling"])

    ### initialize the solver; it needs the config-string
    Nseed = random_seed + mpi.rank
    use_mpi = False
    mpi_comm = mpi.world
    solver = impurity.CtHybSolver(cfg, Nseed, 0, 0, 0, False, mpi_comm)

    ### generate dummy input that we don't necessarily need
    niw = 2 * cfg["QMC"]["Niw"]
    g0inviw = np.zeros(shape=(2 * self.n_iw, norb, 2, norb, 2))
    fiw = np.zeros(shape=(2 * self.n_iw, norb, 2, norb, 2))
    fmom = np.zeros(shape=(2, norb, 2, norb, 2))
    symmetry_moves = ()
    paramag = False
    atom = config.atomlist_from_cfg(cfg, norb)[0]

    ### if the calculation is not complex, the solver must
    ### have only real arrays as input
    if self.complex:
        muimp = t_osos_tensor
    else:
        g0inviw = np.real(g0inviw)
        fiw = np.real(fiw)
        fmom = np.real(fmom)
        ftau = np.real(ftau)
        muimp = np.real(t_osos_tensor)
        U_ijkl = np.real(U_ijkl)

    ### here the properties of the impurity will be defined
    imp_problem = impurity.ImpurityProblem(self.beta, g0inviw, fiw, fmom,
                                           ftau, muimp, atom.dd_int, None,
                                           None, symmetry_moves, paramag)

    print("\n" + "." * 40)

    ### hardcode the set of conserved quantities to number of electrons
    ### and activate the automatic minimisation procedure of blocks
    ### ( QN "All" does this)
    imp_problem.interaction.quantum_numbers = ("Nt", "All")
    #imp_problem.interaction.quantum_numbers = ( "Nt", "Szt", "Qzt" )
    #imp_problem.interaction.quantum_numbers = ( "Nt", "Szt" )
    #imp_problem.interaction.quantum_numbers = ( "Nt" )

    ### feed impurity problem into solver
    solver.set_problem(imp_problem)

    ### solve impurity problem
    mccfgcontainer = []
    iter_no = 1
    if self.complex:
        solver.set_problem(imp_problem)
        solver.umatrix = U_ijkl
        result = solver.solve(mccfgcontainer)
        gtau = result.other["gtau-full"]
    else:
        if not worm:
            solver.set_problem(imp_problem)
            solver.umatrix = U_ijkl
            result = solver.solve(iter_no, mccfgcontainer)
            gtau = result.other["gtau-full"]
        else:
            gtau = np.zeros(shape=(1, norb, 2, norb, 2, 2 * self.n_tau))

            from auxiliaries.compoundIndex import index2component_general

            components = []
            for comp_ind in range(1, (2 * norb)**2 + 1):
                ### check if ftau is nonzero
                tmp = index2component_general(norb, 2, int(comp_ind))
                bands = tmp[1]
                spins = tmp[2]
                b1, b2 = bands[0], bands[1]
                s1, s2 = spins[0], spins[1]

                all_zeros = not np.any(ftau[:, b1, s1, b2, s2] > 1e-5)
                if not all_zeros:
                    components = np.append(components, comp_ind)

            if mpi.rank == 0:
                print('worm components to measure: ', components)

            ### divide either max_time or Nmeas among the nonzero components
            if max_time <= 0:
                cfg["QMC"]["Nmeas"] = int(cfg["QMC"]["Nmeas"] / float(len(components)))
            else:
                cfg["QMC"]["measurement_time"] = int(float(max_time) / float(len(components)))

            for comp_ind in components:
                if mpi.rank == 0:
                    print('--> comp_ind', comp_ind)

                solver.set_problem(imp_problem)
                solver.umatrix = U_ijkl
                result_aux, result = solver.solve_component(1, 2, comp_ind, mccfgcontainer)

                for i in list(result.other.keys()):
                    if "gtau-worm" in i:
                        gtau_name = i

                tmp = index2component_general(norb, 2, int(comp_ind))
                bands = tmp[1]
                spins = tmp[2]
                b1, b2 = bands[0], bands[1]
                s1, s2 = spins[0], spins[1]

                # Remove axis 0 from local samples by averaging, so no data
                # remains unused even if there is more than one local sample
                # (should not happen)
                gtau[0, b1, s1, b2, s2, :] = np.mean(result.other[gtau_name].local, axis=0)

            gtau = stat.DistributedSample(gtau, mpi_comm, ntotal=mpi.size)

    if cfg["QMC"]["offdiag"] == 0 and worm == 0:

        def bs_diagflat(bs_array):
            """Return an array with a shape extended compared to that of the
            argument by doubling the first two axes and fill it such that the
            returned array is diagonal with respect to both pairs (axes 1 and
            3 and axes 2 and 4).
            """
            shape_bsonly = bs_array.shape[0:2]
            bsbs_shape = shape_bsonly + bs_array.shape
            bsbs_array = np.zeros(bsbs_shape, dtype=bs_array.dtype)
            for b in range(bs_array.shape[0]):
                for s in range(bs_array.shape[1]):
                    bsbs_array[b, s, b, s, ...] = bs_array[b, s, ...]
            return bsbs_array

        gtau = result.other["gtau"].apply(bs_diagflat)

    ### here comes the function for conversion w2dyn --> triqs
    self.G_tau, self.G_tau_error = w2dyn_ndarray_to_triqs_BlockGF_tau_beta_ntau(
        gtau, self.beta, self.gf_struct)

    self.G_iw = BlockGf(mesh=self.iw_mesh, gf_struct=self.gf_struct)

    ### I will use the FFT from triqs here...
    for name, g in self.G_tau:
        bl_size = g.target_shape[0]
        known_moments = np.zeros((4, bl_size, bl_size), dtype=complex)
        for i in range(bl_size):
            known_moments[1, i, i] = 1
        self.G_iw[name].set_from_fourier(g, known_moments)

    ### add perturbation order as observable
    if measure_pert_order:
        hist = result.other["hist"]

    ### GF in Legendre expansion
    if measure_G_l:
        Gl = result.other["gleg-full"]
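# Small standalone sketch (hypothetical 2-block gf_struct) of the hopping
# assembly in solve() above: per-block t_ij matrices are stitched into one
# flavour matrix with block_diag, then reshaped so spin is the fastest index.
import numpy as np
from scipy.linalg import block_diag

t_up = np.array([[0.0, 0.1], [0.1, 0.0]])  # assumed spin-up 2-orbital block
t_dn = np.array([[0.0, 0.2], [0.2, 0.0]])  # assumed spin-down block
t_ij = block_diag(t_up, t_dn)              # (2*norb, 2*norb) flavour matrix
norb = t_ij.shape[0] // 2
t_osos = t_ij.reshape(2, norb, 2, norb).transpose(1, 0, 3, 2)
assert t_osos.shape == (norb, 2, norb, 2)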
def _get_ucg_matrix(squs): return block_diag(*squs)
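# Context sketch: a uniformly controlled gate is block diagonal in the
# computational basis of its controls, one single-qubit unitary per control
# pattern, which is exactly what block_diag assembles.
import numpy as np

identity = np.eye(2)
x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])
ucg = _get_ucg_matrix([identity, x_gate])  # acts as a CNOT up to qubit ordering
assert ucg.shape == (4, 4)
assert np.allclose(ucg[2:, 2:], x_gate)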
def test_no_args(self): a = block_diag() assert_equal(a.ndim, 2) assert_equal(a.nbytes, 0)
def COV_C(A, B, VM): block1 = np.array([1, A, -B]) AJLA = linalg.block_diag(*[block1 for i in range(N)]) return np.dot(AJLA, np.dot(COVd, AJLA.transpose())) + np.eye(N) * VM
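# Usage sketch for COV_C above; N, COVd, and the imports are globals in the
# original snippet, so hypothetical values are supplied here.
import numpy as np
from scipy import linalg

N = 4
COVd = np.eye(3 * N)  # assumed data covariance
cov = COV_C(A=0.5, B=0.2, VM=0.01)
assert cov.shape == (N, N)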
def predict(_x, u, _cov):
    """
    process model:
    x_k = f(x_{k-1}, u_k) = x_{k-1} + u = x_{k-1} + v*dt

    u in the robot frame:
    u = | v_x*dt  |
        | v_y*dt  |
        | v_th*dt |

    u in the global frame:
    u' = Ru, where R is the rotation matrix
    R = | cos(th) -sin(th) 0 |
        | sin(th)  cos(th) 0 |
        | 0        0       1 |

    u' = | cos(th) -sin(th) 0 | | v_x*dt  |
         | sin(th)  cos(th) 0 | | v_y*dt  |
         | 0        0       1 | | v_th*dt |

       = | v_x*cos(th) - v_y*sin(th) |
         | v_x*sin(th) + v_y*cos(th) | * dt
         | v_th                      |

    x_k = |x_{k-1}|   | v_x*cos(t) - v_y*sin(t) |
          |y_{k-1}| + | v_x*sin(t) + v_y*cos(t) | * dt
          |t_{k-1}|   | v_th                    |

    Linearization:
    x_k = |x_{k-1} |   | 1 0 -v_x*dt*sin(theta) - v_y*dt*cos(theta) | | dx|
          |y_{k-1} | + | 0 1  v_x*dt*cos(theta) - v_y*dt*sin(theta) | | dy|
          |th_{k-1}|   | 0 0  1                                     | |dth|
    """
    n = int((_cov.shape[0] - 3) / 2)  # num of landmarks
    dt = 1
    [vx, vy, vth] = u
    th = _x[2]

    _Jf = np.array([[1, 0, -vx * dt * math.sin(th) - vy * dt * math.cos(th)],
                    [0, 1, vx * dt * math.cos(th) - vy * dt * math.sin(th)],
                    [0, 0, 1]])
    _Jf = np.column_stack([_Jf, np.zeros((3, n * 2))])
    Jf_last_rows = np.column_stack([np.zeros((n * 2, 3)), np.eye(n * 2)])
    Jf = np.vstack([_Jf, Jf_last_rows])

    dx = np.array([vx * dt, vy * dt, vth * dt])
    dx = np.concatenate((dx, np.zeros(n * 2)))
    noise = np.random.multivariate_normal(np.zeros(3), Q)
    noise = np.concatenate((noise, np.zeros(n * 2)))

    # landmark entries are carried over unchanged: Jf is the identity on
    # them and both dx and noise are zero-padded there
    x = _x + np.dot(Jf, dx) + noise
    cov = np.dot(Jf, np.dot(_cov, Jf.T)) + block_diag(Q, np.zeros((n * 2, n * 2)))
    return [x, cov]
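# Usage sketch for predict() above with no landmarks (n = 0); Q and the
# imports (numpy, math, block_diag) are module-level globals in the original
# snippet, so assumed values are supplied here.
import numpy as np

Q = np.diag([0.01, 0.01, 0.005])  # assumed process-noise covariance
x0 = np.zeros(3)                  # [x, y, theta]
cov0 = np.eye(3) * 0.1
u = [1.0, 0.0, 0.1]               # [v_x*dt, v_y*dt, v_th*dt] with dt = 1
x1, cov1 = predict(x0, u, cov0)
assert x1.shape == (3,)
assert cov1.shape == (3, 3)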