def GaussSeidel(A, b, guess, MAXITER, TOLL):
    n = len(b)
    xk = guess
    D = sparse.diags(A.diagonal(), 0, format='csr')
    # Strictly lower/upper triangular parts, so that A = D + L + U; with the
    # scipy defaults (k=0) the diagonal would be counted in both D and L.
    L = sparse.tril(A, k=-1, format='csr')
    U = sparse.triu(A, k=1, format='csr')
    T = -(linalg.inv(D + L)) * U
    c = (linalg.inv(D + L)) * b
    i = 0
    err = TOLL + 1
    while i < MAXITER:
        x = T * xk + c
        err = np.linalg.norm(x - xk, 1) / np.linalg.norm(x, 1)
        xk = x
        i += 1
        if i % 10 == 0:
            print(i, err)
        if err < TOLL:
            print("Converged at iteration:", i)
            break
    return xk
def __init__(self, topo, quad, topo_q, lx, g, H):
    self.topo = topo
    self.quad = quad
    self.topo_q = topo_q  # topology for the quadrature points as 0 forms
    self.g = g
    self.H = H
    det = 0.5 * lx / topo.nx
    self.detInv = 1.0 / det
    # 1 form matrix inverse
    self.M1 = Pmat(topo, quad).M
    self.M1inv = la.inv(self.M1)
    # 0 form matrix inverse
    self.M0 = Umat(topo, quad).M
    self.M0inv = la.inv(self.M0)
    # 1 form gradient matrix
    self.D10 = BoundaryMat(topo).M
    self.D01 = self.D10.transpose()
    # 0 form to 1 form gradient operator
    self.A = self.detInv * self.D10
    # 1 form to 0 form gradient operator
    self.B = -1.0 * self.detInv * self.M0inv * self.D01 * self.M1
def solveAdjointNonlinearHeat1D(model, theta=0.5, initialAdjointSolution=None):
    '''Integrate the adjoint of the nonlinear 1D heat problem backwards in
    time with a theta time stepping scheme.'''
    # Allocate
    timeSeries = np.zeros((len(model.time), model.size))

    # Define helper functions
    def updateTime(timeIndex):
        model.updateTime(timeIndex)

    # Initialize
    if initialAdjointSolution is not None:
        timeSeries[-1] = initialAdjointSolution.copy()
    updateTime(len(model.time) - 1)
    massInverse = linalg.inv(model.mass)
    previousState = massInverse.dot(
        (model.stiffness + model.nonsymmetricStiffness).dot(timeSeries[-1])
        + model.load)

    # Loop
    for i in range(len(model.time) - 1, 0, -1):
        updateTime(i - 1)
        dt = model.time[i] - model.time[i - 1]
        massInverse = linalg.inv(model.mass)
        timeSeries[i - 1] = solveLinearSystem(
            1.0/dt * np.identity(model.size)
            + theta * massInverse.dot(model.stiffness + model.nonsymmetricStiffness),
            1.0/dt * timeSeries[i]
            - theta * massInverse.dot(model.load)
            - (1.0 - theta) * previousState,
            sparse=False)

        # Update for next iteration
        previousState = massInverse.dot(
            (model.stiffness + model.nonsymmetricStiffness).dot(timeSeries[i - 1])
            + model.load)

    return timeSeries
def __init__(self, topo, quad, X, dX, dt, grav):
    self.topo = topo
    self.quad = quad
    self.X = X
    self.dX = dX
    self.dt = dt
    self.grav = grav
    self.linear = False
    self.topo_q = Topo(topo.nx, quad.n)
    # 1 form matrix inverse
    self.M1 = Pmat(topo, quad, dX).M
    self.M1inv = la.inv(self.M1)
    # 0 form matrix inverse
    self.M0 = Umat(topo, quad, dX).M
    self.M0inv = la.inv(self.M0)
    # 1 form gradient matrix
    self.E10 = BoundaryMat(topo).M
    self.E01 = -1.0 * self.E10.transpose()
    self.PtQ = PtQmat(topo, quad, dX).M
    self.PtQT = PtQmat(topo, quad, dX).M.transpose()
    self.UtQ = UtQmat(topo, quad, dX).M
    self.UtQT = UtQmat(topo, quad, dX).M.transpose()
    # helmholtz operator
    GRAD = 0.5 * self.grav * self.dt * self.E01 * self.M1
    DIV = 0.5 * self.dt * self.M1 * self.E10
    HELM = self.M1 - DIV * self.M0inv * GRAD
    self.HELMinv = la.inv(HELM)
    self.DM0inv = DIV
    self.M0invG = self.M0inv * GRAD
def solve_lu(pr, l, u, pc, b): """ Solves the linear system Ax = b via forward and backward substitution given the decomposition pr * A * pc = l * u. Parameters ---------- pr : scipy.sparse.csr_matrix row permutation matrix of LU-decomposition l : scipy.sparse.csr_matrix lower triangular unit diagonal matrix of LU-decomposition u : scipy.sparse.csr_matrix upper triangular matrix of LU-decomposition pc : scipy.sparse.csr_matrix column permutation matrix of LU-decomposition b : numpy.ndarray vector of the right-hand-side of the linear system Returns ------- x : numpy.ndarray solution of the linear system """ pr = scipy.sparse.csc_matrix(pr) pc = scipy.sparse.csc_matrix(pc) inverse_pr = sla.inv(pr) inverse_pc = sla.inv(pc) y_1 = sla.spsolve(inverse_pr, b) y_2 = sla.spsolve_triangular(l, y_1) y_3 = sla.spsolve_triangular(u, y_2, lower=False) solution = sla.spsolve(inverse_pc, y_3) return solution
def __motion_update_sparse(self, command, U):
    r = self.robotFeaturesDim
    previousMeanState = self.estimate()
    meanStateChange = self.motionModel.exact_move(previousMeanState, command)
    newMeanState = clipState(previousMeanState + meanStateChange)

    # TO IMPROVE
    angle = previousMeanState[2, 0]  # TO IMPROVE
    gradMeanMotion = np.zeros_like(self.H)  # TO IMPROVE
    gradMeanMotion[2, 0:2] = command[0] * np.array(
        [-math.sin(angle), math.cos(angle)])  # TO IMPROVE

    Sx = sparse.bsr_matrix(self.Sx)
    sH = sparse.bsr_matrix(self.H)
    invU = sparse.coo_matrix(inv(U))
    sparseGradMeanMotion = sparse.bsr_matrix(gradMeanMotion)

    delta = Sx.T.dot(sparseGradMeanMotion).dot(Sx)
    G = Sx.dot(linalg.inv(sparse.eye(r) + delta) - sparse.eye(r)).dot(Sx.T)
    phi = sparse.eye(self.dimension) + G
    Hp = phi.T.dot(sH).dot(phi)
    deltaH = Hp.dot(Sx).dot(linalg.inv(invU + Sx.T.dot(Hp).dot(Sx))).dot(
        Sx.T).dot(Hp)

    # H = inv(Hp + dots(self.Sx, U, self.Sx.T))
    H = Hp - deltaH
    # self.b = self.b - dot(previousMeanState.T, self.H - H) + dot(meanStateChange.T, H)
    self.H = H.todense()
    self.b = H.dot(newMeanState).T
    self.mu = newMeanState
def _eval_error(self, ls, noise_type):
    """
    Evaluates the error (AtN^-1A)^-1

    Parameters
    ----------
    ls: instance of linsolve
        instance of linsolve solver containing the linear system of equations.
    noise_type: string
        Type of noise or error, can be 'uncorr', 'partial' or 'corr'.
        'uncorr' assumes that the errors associated with the measurements are
        uncorrelated with each other. 'partial' assumes that the errors
        associated with the measurements are correlated with adjacent
        measurements. 'corr' considers correlation between all the flux
        measurements. Default is 'uncorr'.
    """
    A = self.get_A(ls)
    An = (A - np.min(A)) / (np.max(A) - np.min(A))
    An_sparse = csc_matrix(A[:, :, 0])
    N = self.get_noise_matrix(noise_type=noise_type)
    N_sparse = csc_matrix(N)
    Ninv = inv(N_sparse)
    At = An_sparse.T
    AtNi = At.dot(Ninv)
    AtNiA = AtNi.dot(An_sparse)
    AtNiAi = inv(AtNiA).todense()
    return AtNiAi
def solve_lu(pr, l, u, pc, b): """ Solves the linear system Ax = b via forward and backward substitution given the decomposition pr * A * pc = l * u. => A = pr^-1 * l * u * pc^-1 Parameters ---------- pr : scipy.sparse.csr_matrix row permutation matrix of LU-decomposition l : scipy.sparse.csr_matrix lower triangular unit diagonal matrix of LU-decomposition u : scipy.sparse.csr_matrix upper triangular matrix of LU-decomposition pc : scipy.sparse.csr_matrix column permutation matrix of LU-decomposition b : numpy.ndarray vector of the right-hand-side of the linear system Returns ------- x : numpy.ndarray solution of the linear system """ _ = slina.spsolve(sm.csc_matrix(slina.inv(sm.csc_matrix(pr))), b) _ = slina.spsolve_triangular(l, _, lower=True) _ = slina.spsolve_triangular(u, _, lower=False) _ = slina.spsolve(sm.csc_matrix(slina.inv(sm.csc_matrix(pc))), _) return _
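# Usage sketch for the solve_lu routines above (an assumption, not part of the
# original module): scipy.sparse.linalg.splu exposes its row/column
# permutations as index vectors, and the scipy docs show how to expand them
# into one-hot matrices pr and pc satisfying pr * A * pc = l * u.
def _solve_lu_demo():
    import numpy as np
    import scipy.sparse as sm
    import scipy.sparse.linalg as slina

    n = 5
    # random sparse matrix, made strictly diagonally dominant (hence nonsingular)
    A = sm.random(n, n, density=0.5, format='csc') + n * sm.eye(n, format='csc')
    b = np.arange(n, dtype=float)

    lu = slina.splu(A)
    pr = sm.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n))))
    pc = sm.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c)))

    # pass L and U as CSR, the format spsolve_triangular prefers
    x = solve_lu(pr, lu.L.tocsr(), lu.U.tocsr(), pc, b)
    print(np.allclose(A.dot(x), b))  # expected: True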
def solve_ln(spss, A, b, x, dtype):
    spss.set_verbose(1)
    A = A.astype(dtype)
    b = b.astype(dtype)
    x = x.astype(dtype)
    if np.iscomplexobj(A):
        A = A + 0.1 * 1j * A
        b = b - 0.1 * 1j * b
    spss.set_csr_matrix(A)
    spss.solve(b, x, 0)
    #print(list(x))
    #print(np.array(np.dot(inv(A.tocsc()).todense(), b)).flatten())
    err = (np.array(np.dot(inv(A.tocsc()).todense(), b)).flatten() - x)
    th = 1e-7 if dtype == np.float32 or dtype == np.complex64 else 1e-16
    print("x: ", list(x))
    print("x (ref):", list(np.array(np.dot(inv(A.tocsc()).todense(), b)).flatten()))
    print("err:", np.abs(err))
    if np.max(np.abs(err)) > th:
        assert False, "error is too large"
    else:
        print("max err:", np.max(np.abs(err)))
def MCS_scheme(m, N, U_0, delta_t, theta, A, A_0, A_1, A_2, b, b_0, b_1, b_2, r_f):
    start = datetime.now()
    U = U_0
    I = np.identity(m)
    lhs_1 = csc_matrix(I - theta * delta_t * A_1)
    inv_lhs_1 = inv(lhs_1)
    lhs_2 = csc_matrix(I - theta * delta_t * A_2)
    inv_lhs_2 = inv(lhs_2)
    for n in range(0, N):
        Y_0 = U + delta_t * F(n - 1, U, A, b, r_f, delta_t)
        rhs_1 = Y_0 + theta * delta_t * (b_1 * exp(r_f * delta_t * n)
                                         - F_1(n - 1, U, A_1, b_1, r_f, delta_t))  # we update b_1
        Y_1 = inv_lhs_1 * rhs_1
        rhs_2 = Y_1 + theta * delta_t * (b_2 * exp(r_f * delta_t * n)
                                         - F_2(n - 1, U, A_2, b_2, r_f, delta_t))  # we update b_2
        Y_2 = inv_lhs_2 * rhs_2
        Y_0_hat = Y_0 + theta * delta_t * (F_0(n, Y_2, A_0, b_0, r_f, delta_t)
                                           - F_0(n - 1, U, A_0, b_0, r_f, delta_t))
        Y_0_tilde = Y_0_hat + (0.5 - theta) * delta_t * (F(n, Y_2, A, b, r_f, delta_t)
                                                         - F(n - 1, U, A, b, r_f, delta_t))
        rhs_1 = Y_0_tilde + theta * delta_t * (b_1 * exp(r_f * delta_t * n)
                                               - F_1(n - 1, U, A_1, b_1, r_f, delta_t))  # we update b_1
        Y_1_tilde = inv_lhs_1 * rhs_1
        rhs_2 = Y_1_tilde + theta * delta_t * (b_2 * exp(r_f * delta_t * n)
                                               - F_2(n - 1, U, A_2, b_2, r_f, delta_t))  # we update b_2
        U = inv_lhs_2 * rhs_2
    end = datetime.now()
    time = (end - start).total_seconds()
    return U, time
def __sparsification(self):
    x = np.arange(self.robotFeaturesDim)
    y0, yp = self.__partition_links()
    Sx = sparse.coo_matrix(self.__build_projection_matrix(x))
    Sy0 = sparse.coo_matrix(self.__build_projection_matrix(y0))
    Sxy0 = sparse.coo_matrix(
        self.__build_projection_matrix(np.concatenate((x, y0))))
    Sxyp = sparse.coo_matrix(
        self.__build_projection_matrix(np.concatenate((x, yp))))
    Sxy0yp = sparse.coo_matrix(
        self.__build_projection_matrix(np.concatenate((x, y0, yp))))

    H = sparse.bsr_matrix(self.H)
    Hp = Sxy0yp.dot(Sxy0yp.T).dot(H).dot(Sxy0yp).dot(Sxy0yp.T)

    Ht = H - (0 if not y0.size else
              Hp.dot(Sy0).dot(linalg.inv(Sy0.T.dot(Hp).dot(Sy0))).dot(Sy0.T).dot(Hp)) \
           + Hp.dot(Sxy0).dot(linalg.inv(Sxy0.T.dot(Hp).dot(Sxy0))).dot(Sxy0.T).dot(Hp) \
           - H.dot(Sx).dot(linalg.inv(Sx.T.dot(H).dot(Sx))).dot(Sx.T).dot(H)

    eps = 1e-5
    Htt = Ht.todense()
    Htt[np.abs(Htt) < eps] = 0
    bt = self.b + (Ht - H).dot(self.mu)

    self.H = Htt
    self.b = bt
def test_SBPProperty():
    # This routine will check that the finite difference
    # operators and the inner product matrices satisfy the
    # SBP property, at least when N = 4
    N = 4
    dx = 2 / N
    Dp = ConstructDp(dx, N)
    Dm = ConstructDm(dx, N)
    Ppinv, Pminv = ConstructPpminv(dx, N)
    Pp = inv(Ppinv)
    Pm = inv(Pminv)
##    Pm = csr_matrix([[0,0,0,0,0,0],\
##                     [0,1,0,0,0,0],\
##                     [0,0,1,0,0,0],\
##                     [0,0,0,1,0,0],\
##                     [0,0,0,0,1,0],\
##                     [0,0,0,0,0,0]])*dx
##
    Qp = Pp.dot(Dp)
    Qm = Pm.dot(Dm)
    Q = Qp + Qm.transpose()

    FAlist = [-3, -2, -1, 0, 1, 2, 3]
    LAlist = [-2, -1, 0, 1, 2, 3, -3]
    FBlist = [-6, -4, -2, 0, 2, 4, 6]
    LBlist = [6, -6, -4, -2, 0, 2, 4]

    for i in range(6):
        FA = FAlist[i]
        FB = FBlist[i]
        LA = LAlist[i]
        LB = LBlist[i]
        A = np.array([FA, 0, 0, 0, LA])
        B = np.array([FB, 0, 0, 0, 0, LB])
        #A = np.array([FA,0,0,0,0,0,0,0,LA])
        #B = np.array([FB,0,0,0,0,0,0,0,0,LB])
        err = abs(LB * LA - FB * FA - A.dot(Q.dot(B)))
        assert err < 0.0001

    for i in range(6):
        FA = FAlist[i]
        FB = FBlist[i]
        LA = LAlist[i]
        LB = LBlist[i]
        A = np.array([FA, 6, 8, 2, LA])
        B = np.array([FB, -3, -9, 2, 3, LB])
        #A = np.array([FA,6,8,2,2,2,2,2,LA])
        #B = np.array([FB,-3,-9,2,3,3,5,7,9,LB])
        err = abs(LB * LA - FB * FA - A.dot(Q.dot(B)))
        assert err < 0.0001
def init_point(self, A_w, A_z, Q_w, Q_z, c_w, c_z, b, u):
    y_ = inv(A_z.dot(A_z.T)).dot(2 * b - A_z.dot(u))
    z = (A_z.T.dot(y_) + u) / 2
    y = inv(A_z.dot(A_z.T)).dot(A_z).dot(c_z)
    s = 1 / 2 * (A_z.T.dot(y) - c_z)
    w = np.zeros(A_w.shape[1])
    v = -s
    v = np.clip(v, 1e-5, None)
    s = np.clip(s, 1e-5, None)
    z = np.clip(z, 1e-5, u - 1e-5)
    return w, z, y, s, v
def __init__(self, sess, n, filename, jump_prob=0.05, drop_tol=1e-8,
             verbose=False):
    """
    Computes PPR using LU decomposition.

    Args:
        sess (Session): tensorflow session.
        n (int): Number of nodes.
        filename (str): A csv file denoting the graph.
        jump_prob (float): Jumping probability of PPR.
        drop_tol (float): Drops entries with absolute value lower than this
            value when computing inverse of LU.
        verbose (bool): Prints step messages if True.
    """
    self.alias = 'ludc'
    self.verbose = verbose
    self.pp("initializing")
    self.sess = sess
    self.n = n
    self.c = jump_prob
    d = 1 - self.c
    t = drop_tol
    exact = False
    if t is None:
        t = np.power(n, -0.5)
    elif t == 0:
        exact = True
    self.pp("reading")
    self.node2index, H = read_matrix(filename, d=-d, add_identity=True)
    self.pp("sorting H")
    self.perm = degree_reverse_rank_perm(H)
    H = reorder_matrix(H, self.perm).tocsc()
    self.pp("computing LU decomposition")
    if exact:
        self.LU = splu(H)
    else:
        self.LU = spilu(H, drop_tol=t)
    Linv = inv(self.LU.L).tocoo()
    Uinv = inv(self.LU.U).tocoo()
    self.pp("tf init")
    with tf.variable_scope('ppr_lu_decomposition_tf'):
        t_Linv = tf.SparseTensorValue(list(zip(Linv.row, Linv.col)), Linv.data,
                                      dense_shape=self.LU.L.shape)
        t_Uinv = tf.SparseTensorValue(list(zip(Uinv.row, Uinv.col)), Uinv.data,
                                      dense_shape=self.LU.U.shape)
        self.t_q = tf.placeholder(tf.float64, shape=[self.n, 1])
        self.t_r = _sdmm(t_Uinv, _sdmm(t_Linv, self.c * self.t_q))
def fit(self, R, hidden_size=100, iteration=5):
    from scipy.sparse.linalg import inv

    # data(R) must be csr_matrix format n by m
    self.R = R
    left_size, right_size = R.shape

    # init
    self.X = csr_matrix(np.random.normal(size=(left_size, hidden_size)))
    self.Y = csr_matrix(np.random.normal(size=(right_size, hidden_size)))

    # confidence level
    self.C = csr_matrix(R.copy() * self.alpha
                        + np.array([1] * (left_size * right_size)).reshape(
                            (left_size, right_size)))

    # cost
    cost = self.cost_function()
    print('cost: ' + str(cost))

    # iterate
    for iterate in range(iteration):
        print('iter: ' + str(iterate))
        print('User Matrix')
        for u, Cu in enumerate(self.C):
            C_diag = diags(Cu.toarray()[0], shape=[right_size, right_size])
            self.X[u] = inv(self.Y.T.dot(C_diag).dot(self.Y)
                            + eye(hidden_size) * self.reg).dot(
                                self.Y.T).dot(C_diag).dot(R[u].T).T
            self.progress(u, left_size)
        print('\nMovie Matrix')
        for i, Ci in enumerate(self.C.T):
            C_diag = diags(Ci.toarray()[0], shape=[left_size, left_size])
            self.Y[i] = inv(self.X.T.dot(C_diag).dot(self.X)
                            + eye(hidden_size) * self.reg).dot(
                                self.X.T).dot(C_diag).dot(R.T[i].T).T
            self.progress(i, right_size)
        self.cost = self.cost_function()
        print('\ncost: ' + str(self.cost))

    # save prediction matrix
    self.prediction = self.X.dot(self.Y.T)
    return self.cost
def __init__(self, topo, quad):
    self.shift = topo.nx * topo.ny * topo.n * topo.n

    Uxx = UxxMat(topo, quad).M
    Uxy = UxyMat(topo, quad).M
    Uyx = UyxMat(topo, quad).M
    Uyy = UyyMat(topo, quad).M

    UxxInv = la.inv(Uxx)
    UyyInv = la.inv(Uyy)

    # U tangent to normal transformation matrices
    self.Uxtn = UxxInv * Uxy
    self.Uytn = UyyInv * Uyx
def multistepFDM_cg(vertices, edges, fixed, q, fcs=[], lcs=[], l0cs=[],
                    steps=250, lin_solver=cg, i_tol=1e-5,
                    callbacks=no_callbacks, cond_num=False):
    v = len(vertices)
    q0 = copy(q)
    ndof, tdof = table_of_nodal_DsOF(v, fixed)
    mD, mDf = mDmDf(ndof, tdof, vertices, edges, q0)
    if cond_num == True:
        print('01', ' max =', mD.max(), ' nrm1 =', sm_norm(inv(mD), 1),
              ' nrmF =', sm_norm(inv(mD)), ' cn =', cond(mD.A, 1))
    x0 = zero_m(mDf.shape)
    cc = __solve_lin_syst_it(mD, -mDf, x0, lin_solver, i_tol, None, callbacks)
    xyz = table_of_nodal_coordinates(cc, vertices, tdof)
    l = list_of_element_lengths(edges, xyz)
    f = list_of_element_forces(l, q0)
    for i in xrange(2, steps + 1):
        for fj in fcs:
            q0[fj[0]] = fj[1] / l[fj[0]]
        for lj in lcs:
            q0[lj[0]] = f[lj[0]] / lj[1]
        for l0j in l0cs:
            i0 = l0j[0]
            l0 = l0j[1][0]
            ae0 = l0j[1][1]
            ffj = f[i0]
            ll = (ae0 + ffj) * l0 / ae0
            q0[i0] = ffj / ll
        mD, mDf = mDmDf(ndof, tdof, vertices, edges, q0)
        if i % 10 == 1:
            print(i, ' max =', mD.max(), ' nrm1 =', sm_norm(inv(mD), 1),
                  ' nrmF =', sm_norm(inv(mD)), ' cn =', cond(mD.A, 1))
        cc = __solve_lin_syst_it(mD, -mDf, x0, lin_solver, i_tol, None, callbacks)
        xyz = table_of_nodal_coordinates(cc, vertices, tdof)
        l = list_of_element_lengths(edges, xyz)
        f = list_of_element_forces(l, q0)
    return (xyz, f, q0)
def findPilot(Ybus, V, gIdx, lIdx, cll, nPilot=1):
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V)
    S = dS_dVm.imag
    Sgg = S[np.ix_(gIdx, gIdx)]
    Sgl = S[np.ix_(gIdx, lIdx)]
    Slg = S[np.ix_(lIdx, gIdx)]
    Sll = S[np.ix_(lIdx, lIdx)]

    # Compute elements quantity
    nG, nL = Sgl.shape

    # Pilot node selection
    M = inv(Sll)  # (3)
    B = -1 * inv(Sll) * Slg  # (4)
    Qx = eye(nL)
    pIdx = []
    pOption = list(range(nL))
    CLL = diags(cll)
    # Load disturbances are modelled by a vector of Gaussian random variables
    # with means equal to 0 and a covariance matrix denominated CLL.
    PL = M * CLL * M.transpose()

    # Initialize
    C = csc_matrix((0, nL))  # create a matrix of dimension 0 x nL
    for i in range(nPilot):
        fitBest = 1e6
        bestIdx = 0
        for j in pOption:
            Crow = csc_matrix(([1], ([0], [j])), shape=(1, nL))
            Ctmp = vstack([C, Crow], format='csc')
            CB = Ctmp * B
            F = (CB).transpose() * inv(CB * CB.transpose())
            F = csc_matrix(F).reshape(nG, i + 1)
            X = (eye(nL) - B * F * Ctmp)
            I = np.trace((PL * X.transpose() * Qx * X).toarray())
            if I < fitBest:
                Cbest = 1 * Ctmp
                fitBest = 1 * I
                bestIdx = 1 * j
        C = 1 * Cbest
        pOption.remove(bestIdx)
        pIdx.append(bestIdx)

    pilotBus = [lIdx[i] for i in pIdx]
    return pilotBus
def __init__(self, topo, quad, topo_q, lx, ly, f, g, apvm, hb):
    self.topo = topo
    self.quad = quad
    self.topo_q = topo_q  # topology for the quadrature points as 0 forms
    self.g = g
    self.apvm = apvm
    det = 0.5 * lx / topo.nx
    self.detInv = 1.0 / det
    self.hb = hb

    # 1 form matrix inverse
    M1 = Umat(topo, quad).M
    self.M1inv = la.inv(M1)

    # 2 form matrix inverse
    M2 = Wmat(topo, quad).M
    self.M2inv = la.inv(M2)

    # 0 form matrix inverse
    self.M0 = Pmat(topo, quad).M
    M0inv = la.inv(self.M0)

    D10 = BoundaryMat10(topo).M
    D01 = D10.transpose()
    self.D01M1 = self.detInv * D01 * M1
    self.D10 = self.detInv * D10
    self.M0inv = M0inv

    # 2 form gradient matrix
    self.D21 = BoundaryMat(topo).M
    self.D12 = -1.0 * self.D21.transpose()
    self.D12M2 = self.detInv * self.D12 * M2

    # 0 form coriolis vector
    Mxto0 = Xto0(topo, quad).M
    fx = f * np.ones((topo_q.n * topo_q.n * topo.nx * topo.ny), dtype=np.float64)
    self.f = Mxto0 * fx
    self.M0f = self.M0 * self.f

    self.Uh = Uhmat(topo, quad)
    self.WU = WtQUmat(topo, quad)
    self.PU = PtQUmat(topo, quad)
    self.Rq = RotationalMat(topo, quad)
    self.q = np.zeros((self.M0f.shape[0]), dtype=np.float64)
    self.hvec = Phvec(topo, quad)
def get_iteration_matrix_v_cycle(problem_list, pre_smoother_list,
                                 post_smoother_list, transfer_list,
                                 nu1=1, nu2=1):
    """ Uses the attached smoothers and transfer classes to compute the
        iteration matrix recursively

    :return: an iteration matrix and the preconditioner
    """
    if len(problem_list) == 1:
        return problem_list[0].A - problem_list[0].A, spla.inv(problem_list[0].A)
    else:
        N = problem_list[0].ndofs
        I = sp.eye(N, format='csc')
        Pinv_c = get_iteration_matrix_v_cycle(problem_list[1:],
                                              pre_smoother_list[1:],
                                              post_smoother_list[1:],
                                              transfer_list[1:])[1]
        CG_P_inv = transfer_list[0].I_2htoh.dot(Pinv_c.dot(transfer_list[0].I_hto2h))
        CG_correction = I - CG_P_inv.dot(problem_list[0].A)
        if nu1 == 0:
            pre_smooth = I
        else:
            pre_smooth = combine_N_P_inv(problem_list[0].A,
                                         [pre_smoother_list[0].Pinv] * nu1)
        if nu2 == 0:
            post_smooth = I
        else:
            post_smooth = combine_N_P_inv(problem_list[0].A,
                                          [post_smoother_list[0].Pinv] * nu2)
        pre = I - pre_smooth.dot(problem_list[0].A)
        post = I - post_smooth.dot(problem_list[0].A)
        it_matrix = pre.dot(CG_correction.dot(post))
        precond_inv = combine_N_P_inv(problem_list[0].A,
                                      [pre_smooth, CG_P_inv, post_smooth])
        return it_matrix, precond_inv
def kernelized_optimize(self):
    # fact: H^T(HH^T + lambda*I_N) == (lambda*I_d + H^TH)H^T
    # instead of inverting a dxd matrix, we invert an nxn matrix.
    # So our ridge formula becomes:
    # (lambda*I_d + H^TH)^(-1)H^T = H^T(HH^T + lambda*I_N)^(-1)
    if self.sparse:
        piece_to_invert = self.X.dot(self.X.T) + sp.identity(self.N) * self.lam
    else:
        piece_to_invert = self.X.dot(self.X.T) + np.identity(self.N) * self.lam
    assert piece_to_invert.shape == (self.N, self.N)  # yay!

    # invert this NxN matrix.
    if self.verbose:
        print("invert matrix:")
        print("time: {}".format(time.asctime(time.localtime(time.time()))))
    if self.sparse:
        inverted_piece = splin.inv(piece_to_invert)
    else:
        inverted_piece = np.linalg.inv(piece_to_invert)
    if self.verbose:
        print("done inverting via kernel trick at time: {}".format(
            time.asctime(time.localtime(time.time()))))

    # dot with H^T.dot(y)
    if self.verbose:
        print("dot with H^T at time: {}".format(
            time.asctime(time.localtime(time.time()))))
    self.W = self.X.T.dot(inverted_piece).dot(self.Y)
    if self.verbose:
        print("done dotting with H^T at time: {}".format(
            time.asctime(time.localtime(time.time()))))
    assert self.W.shape == (self.d, self.C)
def run_solver(faces_rel_matrix, incidence_matrix, faces_fmm_matrix,
               faces_ID_deleted_list, faces_list, faces_deleted_list):
    from main.RNM_Solver import Solver

    rows, cols = faces_rel_matrix.shape
    if isinstance(faces_rel_matrix, sparse.csr_matrix):
        admitance_matrix = splinalg.inv(faces_rel_matrix)
    else:
        admitance_matrix = np.linalg.inv(faces_rel_matrix)

    flux = Solver.SolveMagneticCircuit(incidence_matrix, admitance_matrix,
                                       faces_fmm_matrix)
    # flux = flux.tocsr()

    # Organize the flux
    number_complete_flux = len(faces_ID_deleted_list) + flux.shape[0]
    complete_flux = np.zeros((number_complete_flux, 1))
    counter = 0
    counter_face = 0
    for each in xrange(number_complete_flux):
        if each not in faces_ID_deleted_list:
            complete_flux[each, 0] = flux[counter, 0]
            counter += 1
        else:
            faces_list.insert(each, faces_deleted_list[counter_face])
            counter_face += 1
    return complete_flux
def global_katz(node1, node2, adj_matrix, beta=0.5, depth=2):
    """
    This index is based on the ensemble of all paths, which directly sums over
    the collection of paths and is exponentially damped by length to give the
    shorter paths more weight.

    :param node1:
    :param node2:
    :param adj_matrix:
    :param beta: beta coefficient in Katz similarity
    :param depth: how many times to iterate
    :return:
    """
    # 1st way
    # coef = beta
    # matrix = adj_matrix
    # result = coef * matrix[node1, node2]
    # np.dot(adj_matrix, adj_matrix)
    # for i in range(1, depth):
    #     coef *= beta
    #     matrix = np.dot(matrix, adj_matrix)
    #     result += coef * matrix[node1, node2]
    # return result

    # 2nd way
    I = csr_matrix(np.eye(adj_matrix.shape[0]))
    try:
        katz = inv(I - adj_matrix * beta) - I
        print("Katz is successfully calculated with matrix.shape =",
              adj_matrix.shape)
        return katz[node1, node2]
    except:
        print(adj_matrix)
        print("Quitting from Katz when matrix shape is", adj_matrix.shape)
        exit()
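# A hypothetical smoke test for global_katz (not from the original source).
# The Katz series sum_k (beta * A)^k converges, and I - beta*A is safely
# invertible, when beta < 1/lambda_max(A); beta=0.1 is safe for a 3-node path
# graph whose largest eigenvalue is sqrt(2).
def _global_katz_demo():
    import numpy as np
    from scipy.sparse import csr_matrix

    adj = csr_matrix(np.array([[0., 1., 0.],
                               [1., 0., 1.],
                               [0., 1., 0.]]))
    # similarity between the two endpoints of the path
    print(global_katz(0, 2, adj, beta=0.1))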
def filter(self, m, M):
    """
    Predicts the estimates and updates them with the observation.

    Parameters
    ----------
    m : array
        The measured packet counts
    M : matrix
        The measurement matrix
    """
    x, Q, P, F, e = (self.x, self.Q, self.P, self.F, self.e)

    # Predict
    x = F.dot(x)
    P = ((F.dot(P)).dot(F.T)) + Q

    # Update
    y = m - (M.dot(x))
    S = e**2 * Is(M.shape[0]) + ((M.dot(P)).dot(M.T))
    K = (P.dot(M.T)).dot(inv(S))  # Kalman gain
    x = x + (K.dot(y))
    P = (Is(P.shape[0]) - (K.dot(M))).dot(P)
    P[P < 0] = 0  # TODO: Must be tested!

    self.P, self.x = (P, x)
def learn_embedding(self):
    """
    Apply HOPE embedding
    """
    A = nx.to_scipy_sparse_matrix(self._graph, format='csc')
    I = identity(self._graph.number_of_nodes(), format='csc')
    M_g = I - self._beta * A
    M_l = self._beta * A
    # A = nx.to_numpy_matrix(self._graph)
    # M_g = np.eye(len(self._graph.nodes())) - self._beta * A
    # M_l = self._beta * A
    # S = inv(M_g).dot(M_l)
    S = np.dot(inv(M_g), M_l)

    u, s, vt = svds(S, k=self._d // 2)
    X1 = np.dot(u, np.diag(np.sqrt(s)))
    X2 = np.dot(vt.T, np.diag(np.sqrt(s)))
    self._X = np.concatenate((X1, X2), axis=1)

    p_d_p_t = np.dot(u, np.dot(np.diag(s), vt))
    eig_err = np.linalg.norm(p_d_p_t - S)
    print('SVD error (low rank): %f' % eig_err)

    # create dictionary of nodes
    nodes = list(self._graph.nodes())
    projections = {}
    for i in range(len(nodes)):
        y = self._X[i]
        y = np.reshape(y, newshape=(1, self._d))
        projections.update({nodes[i]: y[0]})

    # X is the embedding matrix, S is the similarity,
    # projections is the embedding dictionary
    return projections, S, self._X, X1, X2
def neumann_kernel(A, lambda_max, v_max, alpha=0.5):
    n = A.get_shape()[0]
    D_v = sp.diags([v_max], [0], shape=(n, n), format='csc')
    return csr_matrix(
        inv(sp.eye(n, format='csc')
            - ((alpha * D_v * csc_matrix(A) * D_v) / lambda_max)))
def solvePoissonPeriodic(dx, Ng, rho, kBT, tol, maxiter, phi0):
    phi = phi0
    D = np.zeros((Ng + 1, Ng + 1))
    A = laplacian1DPeriodic(Ng)
    resid = 1.
    k = 0
    dx2 = dx * dx
    c0 = rho[(Ng + 1) // 2] / epsilon0
    c1 = e / kBT
    c2 = rho / epsilon0
    while (resid > tol) & (k <= maxiter):
        F = A.dot(phi) - dx2 * c0 * np.exp(c1 * (phi - phi[(Ng + 1) // 2])) + dx2 * c2
        np.fill_diagonal(
            D, -dx2 * c0 * c1 * np.exp(c1 * (phi - phi[(Ng + 1) // 2])))
        J = spp.csc_matrix(A + D)
        dphi = sppla.inv(J).dot(F)
        phi = phi - dphi
        resid = la.norm(dphi)
        k += 1
    # end while
    return phi
def absorbTime(self):
    P = self.getTransitionMatrix(probabilities=True)
    components, labels = csgraph.connected_components(P, directed=True,
                                                      connection='strong',
                                                      return_labels=True)
    if components == 1:
        print("no absorbing states")
        return
    transientStates = np.ones(P.shape[0], dtype=bool)

    for component in range(components):
        indices = np.where(labels == component)[0]
        n = len(indices)
        if n == 1:
            probSum = P[indices, indices].sum()
        else:
            probSum = P[np.ix_(indices, indices)].sum()
        if np.isclose(probSum, n):
            transientStates[indices] = False

    indices = np.where(transientStates)[0]
    n = len(indices)
    if n == 1:
        Q = P[indices, indices]
    else:
        Q = P[np.ix_(indices, indices)]

    # N will be dense
    N = inv(eye(n) - Q).A
    N2 = N * (2 * N[np.arange(n), np.arange(n)] - np.eye(n)) - np.power(N, 2)

    t = np.zeros(P.shape[0])
    t[indices] = np.sum(N, axis=1)
    for index in indices:
        print(self.mapping[index], t[index])
def linrel(self, X_t, y_t, X, mu, c):
    """Linrel algorithm"""
    temp = X_t.T * X_t + mu * identity(X_t.shape[1])
    temp = inv(temp)
    temp = X * temp * X_t.T
    score = (temp * y_t).toarray() \
        + c/2 * numpy.linalg.norm(temp.toarray(), axis=1).reshape((temp.shape[0], 1))
    return score
def solvePoisson(dx, Ng, rho, kBT, tol, maxiter, phi0):
    phi = phi0
    D = np.zeros((Ng, Ng))
    A = laplacian1D(Ng)
    resid = 1.
    k = 0
    dx2 = dx * dx
    c0 = rho[Ng // 2] / epsilon0
    c1 = e / kBT
    c2 = rho / epsilon0
    while (resid > tol) & (k <= maxiter):
        F = A.dot(phi) - dx2 * c0 * np.exp(c1 * (phi - phi[Ng // 2])) + dx2 * c2
        F[0] = phi[0]
        F[-1] = phi[-1]
        np.fill_diagonal(D, -dx2 * c0 * c1 * np.exp(c1 * (phi - phi[Ng // 2])))
        D[0, 0] = -dx2 * c0 * c1
        D[-1, -1] = -dx2 * c0 * c1
        J = spp.csc_matrix(A + D)
        dphi = sppla.inv(J).dot(F)
        phi = phi - dphi
        resid = la.norm(dphi)
        k += 1
    # end while
    return phi
def jacobian_cond(m=None, scaled=True, ord=None, pinv=False, jac=None):
    """
    Get the condition number of the scaled or unscaled Jacobian matrix of a
    model.

    Args:
        m: calculate the condition number of the Jacobian from this model.
        scaled: if True use scaled Jacobian, else use unscaled
        ord: norm order, None = Frobenius, see scipy.sparse.linalg.norm for more
        pinv: use pseudoinverse, works for non-square matrices
        jac: (optional) previously calculated Jacobian

    Returns:
        (float) Condition number
    """
    if jac is None:
        jac, nlp = get_jacobian(m, scaled)
    jac = jac.tocsc()
    if jac.shape[0] != jac.shape[1] and not pinv:
        _log.warning("Nonsquare Jacobian using pseudo inverse")
        pinv = True
    if not pinv:
        jac_inv = spla.inv(jac)
        return spla.norm(jac, ord) * spla.norm(jac_inv, ord)
    else:
        jac_inv = la.pinv(jac.toarray())
        return spla.norm(jac, ord) * la.norm(jac_inv, ord)
def test_mass_matrix_inverse(self):
    # get mesh
    mesh = get_2p1d_mesh_for_testing(ypts=5, zpts=5)
    spatial_methods = {
        "macroscale": pybamm.FiniteVolume(),
        "current collector": pybamm.ScikitFiniteElement(),
    }
    # create model
    a = pybamm.Variable("a", domain="negative electrode")
    b = pybamm.Variable("b", domain="current collector")
    model = pybamm.BaseModel()
    model.rhs = {a: pybamm.Laplacian(a), b: 4 * pybamm.Laplacian(b)}
    model.initial_conditions = {a: pybamm.Scalar(3), b: pybamm.Scalar(10)}
    model.boundary_conditions = {
        a: {"left": (0, "Neumann"), "right": (0, "Neumann")},
        b: {"negative tab": (0, "Neumann"), "positive tab": (0, "Neumann")},
    }
    model.variables = {"a": a, "b": b}
    # create discretisation
    disc = pybamm.Discretisation(mesh, spatial_methods)
    disc.process_model(model)
    # test that computing mass matrix block-by-block (as is done during
    # discretisation) gives the correct result
    # Note: inverse is more efficient in csc format
    mass_inv = inv(csc_matrix(model.mass_matrix.entries))
    np.testing.assert_equal(
        model.mass_matrix_inv.entries.toarray(), mass_inv.toarray()
    )
def Matrices_Psuedo(Tau, Pr, Ra, sigma, D, R, N_modes, Y):  # Correct
    # Extract X & Ra
    mu = Y[-1, 0]
    Ra = mu
    X = Y[0:-1]

    nr = len(R) - 2
    D3 = np.zeros((nr, nr, N_modes))
    D2 = np.zeros((nr, nr, N_modes))
    for ii in xrange(N_modes):
        D3[:, :, ii] = D3_SP(D, R, ii)
        D2[:, :, ii] = Stokes_D2_SP(D, R, ii)

    # Part a) Linear Operator
    L = L_0_SPAR(D, R, sigma, Tau, Pr, Ra, N_modes)
    M = M_0_SPAR(D, R, N_modes)
    M_inv = inv(M.tocsc())

    # Part b) Non-Linear Terms
    N1 = N_Full(X, L.shape, N_modes, nr, D3, D2, D, R)
    N2 = N2_Full(X, L.shape, N_modes, nr, D3, D2, D, R)

    # Aim to make L sparse
    # Return I, N1, N2, F all multiplied by M_inv and L_inv
    return M_inv, L.todense(), N1, N2
def _linear_naive_solver(A, B):
    """
    Solve Ax = B.

    Parameters:
        A (scipy.sparse.csr_matrix): sparse 2d matrix
        B (scipy.sparse.csr_matrix): sparse 2d matrix

    Returns:
        x (scipy.sparse.csr_matrix): answer for linear equation Ax = B
    """
    print("solving linear equation...")
    print(datetime.datetime.now())

    # inverse(A.T * A)
    print("inverting matrix")
    AT = A.transpose()
    ATA = AT.dot(A)
    if not Solver._is_invertible(ATA):
        print("matrix is not invertible, using pseudo-inverse instead")
        ATAI = sparse.csr_matrix(np.linalg.pinv(ATA.todense()))
    else:
        ATAI = inv(ATA)
    x = ATAI.dot(AT).dot(B)
    print(datetime.datetime.now())
    return x
def spinv(M):
    """
    Compute an inverse of a matrix using the appropriate sparse or dense
    function
    """
    if spar.issparse(M):
        return spla.inv(M)
    else:
        return nla.inv(M)
def GaussSeidel(A, b, MAXITER, TOLL):
    n = len(b)
    xk = np.ones(shape=n, dtype=float)
    D = sparse.diags(A.diagonal(), 0, format='csc')
    # Strictly lower/upper triangular parts, so that A = D + L + U; the scipy
    # defaults (k=0) would count the diagonal in both D and L.
    L = sparse.tril(A, k=-1, format='csc')
    U = sparse.triu(A, k=1, format='csc')
    T = -(linalg.inv(D + L)) * U
    c = (linalg.inv(D + L)) * b
    i = 0
    err = TOLL + 1
    while i < MAXITER and err > TOLL:
        x = T * xk + c
        err = np.linalg.norm(x - xk, 1) / np.linalg.norm(x, 1)
        xk = x
        i += 1
    return xk, i
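# A quick convergence check for the GaussSeidel variants above, using made-up
# data; strict diagonal dominance of A guarantees that the iteration converges.
def _gauss_seidel_demo():
    import numpy as np
    from scipy import sparse

    A = sparse.csc_matrix(np.array([[4., 1., 0.],
                                    [1., 4., 1.],
                                    [0., 1., 4.]]))
    b = np.array([1., 2., 3.])
    x, its = GaussSeidel(A, b, MAXITER=1000, TOLL=1e-10)
    print(its, np.allclose(A.dot(x), b))  # expected: small its, True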
def detrend(values):
    """
    From "An advanced detrending method with application to HRV analysis",
    Tarvainen MP, Ranta-Aho PO, Karjalainen PA, 2002.
    Get a 1D numpy array, return detrended array
    """
    T = len(values)
    lamb = 10
    I = eye(T)
    # instead of using Matlab speye/ones/spdiags, use scipy sparse.diags on
    # the right list
    L = [1, -2, 1] + [0] * (T - 2 - 3)
    D2 = diags(L, range(len(L)), shape=(len(L), len(L) + 2))
    return (I - inv(I + (lamb**2) * D2.H * D2)) * values
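# A hedged smoke test for detrend (hypothetical data, assuming the module-level
# eye/diags/inv imports from scipy.sparse and scipy.sparse.linalg that the
# function relies on): a pure linear ramp lies in the null space of the
# second-difference operator D2, so the smoothness prior reproduces it exactly
# and the detrended output should carry essentially no residual slope.
def _detrend_demo():
    import numpy as np

    t = np.linspace(0., 10., 200)
    signal = np.sin(2. * np.pi * t) + 0.5 * t  # oscillation on a linear trend
    residual = np.asarray(detrend(signal)).ravel()
    print(np.abs(np.polyfit(t, residual, 1)[0]))  # fitted slope close to 0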
def kwok_lau(graph, v, k, epsilon, path_length_scaler, volbd_scaler):
    num_vertices = graph.shape[0]
    volbd = k * volbd_scaler
    vertices = list(range(num_vertices))
    print("N: {0}\nVolume bound: {1}\n".format(num_vertices, volbd))

    p = [np.zeros(num_vertices, int)]
    p[0][v] = 1
    last = p[-1]

    # length of walk to compute
    # need to get W!
    I = mlib.identity(num_vertices, int)
    # assuming symmetric here
    L, D_vector = csglib.laplacian(graph, return_diag=True)
    D = mlib.diags(D_vector, (0), format='csc')
    lazy_walk = 0.5 * (I + lalib.inv(D) * graph)

    num_iterations = math.ceil(num_vertices ** 2 * math.log(num_vertices, 2))
    for t in range(1, num_iterations):
        p.append(last * lazy_walk)
        last = p[-1]

    # value function for sorting:
    sortkey = lambda t: (lambda vertex: p[t][vertex] / D_vector[vertex])

    # initialize set now
    S = dict()
    S[0, 1] = p[0][v]
    outset = S[0, 1]
    # when S has one element, the conductance is 1
    # conductance is <= 1
    outcond = 2

    for t in range(1, num_iterations):
        # sort all at once here?
        p[t] = p[t-1] * lazy_walk
        for j in range(1, num_vertices):
            # compute new S[t,j]
            # don't want to include the entire graph, that's dumb...
            # should also put another bound in here for later
            S[t, j] = computeS(sortkey(t), vertices, j, num_vertices)
            # find smallest S_{t,j}
            currcond = conductance(S[t, j], L, D)
            if (currcond < outcond and volume(S[t, j], D) <= volbd):
                outset = S[t, j]
                outcond = currcond
    return outset
def calculate_posterior(i):
    # This function calculates the posterior for u[i,:] and
    # sigma[i,:]. Note: this function makes use of variables which are
    # outside of its scope.
    logger.debug('evaluating the filtered solution for data set %s ...' % i)
    # identify observation points where we do not want to estimate the
    # filtered solution
    mask = _get_mask(x, sigma[i], fill)
    # number of unmasked entries
    K = np.sum(~mask)
    # build differentiation matrices
    L, D = build_L_and_D(tuple(mask))
    # form weight matrix
    W = _diag(1.0/sigma[i, ~mask])
    # compute penalty parameter
    lamb = _penalty(cutoff, p, sigma[i, ~mask])
    # form left and right hand side of the system to solve
    lhs = W.T.dot(W) + L.T.dot(L)/lamb**2
    rhs = W.T.dot(W).dot(u[i, ~mask])
    # generate LU decomposition of left-hand side
    lu = spla.splu(lhs)
    # compute the smoothed derivative of the posterior mean
    post_mean = np.empty((N,))
    post_mean[~mask] = D.dot(lu.solve(rhs))
    post_mean[mask] = np.nan
    # compute the posterior standard deviation.
    if exact:
        cov = D.dot(spla.inv(lhs)).dot(D.T)
        var = np.diag(cov.toarray())
    else:
        # compute uncertainty through repeated random perturbations of
        # the data and prior vector.
        ivar = _IterativeVariance(post_mean[~mask])
        for j in range(samples):
            w1 = np.random.normal(0.0, 1.0, K)
            w2 = np.random.normal(0.0, 1.0, K)
            # generate sample of the posterior
            post_sample = lu.solve(rhs + W.T.dot(w1) + L.T.dot(w2)/lamb)
            # differentiate the sample
            post_sample = D.dot(post_sample)
            ivar.add_sample(post_sample)
        var = ivar.get_variance()

    post_sigma = np.empty((N,))
    post_sigma[~mask] = np.sqrt(var)
    post_sigma[mask] = np.inf
    logger.debug('done')
    return post_mean, post_sigma
def se_covariance(param, W, sparse=False):
    """
    This computes a covariance matrix for a SAR-type error specification:

    ( (I - param * W)^T(I - param * W) )^{-1}

    It first calls se_precision and then inverts the result of that call,
    returning a sparse matrix when sparse=True and a dense matrix otherwise.
    """
    prec = se_precision(param, W, sparse=sparse)
    if sparse:
        return spla.inv(prec)
    return np.linalg.inv(prec)
def __init__(self, A, omega, *args, **kwargs):
    """Initialization routine for the smoother

    Args:
        A (scipy.sparse.csc_matrix): sparse matrix A of the system to solve
        omega (float): a weighting factor
        *args: Variable length argument list
        **kwargs: Arbitrary keyword arguments
    """
    super(WeightedJacobi, self).__init__(A, *args, **kwargs)

    self.P = sp.spdiags(self.A.diagonal(), 0, self.A.shape[0],
                        self.A.shape[1], format='csc')
    # precompute inverse of the preconditioner for later usage
    self.Pinv = omega * spLA.inv(self.P)
def construct_efficient_steady_state_solver_matrices(self, epsilon):
    num_dms = self.number_density_matrices()
    n_vectors, higher_coupling_elements, higher_coupling_row_indices, \
        higher_coupling_column_indices, lower_coupling_elements, \
        lower_coupling_row_indices, lower_coupling_column_indices = \
        generate_hierarchy_and_tier_couplings(num_dms, self.num_aux_dm_indices,
                                              self.truncation_level,
                                              self.dm_per_tier())

    dtype = 'complex128'

    # first construct inverse operator
    op_to_invert = sp.kron(sp.eye(num_dms, dtype=dtype),
                           -self.liouvillian()
                           + epsilon * np.eye(self.system_dimension**2))
    diag_vectors = np.copy(n_vectors)
    aux_dm_idx = 0
    for site in self.environment:
        if site:
            for osc in site:
                if isinstance(osc, OBOscillator):
                    aux_dm_idx += 1
                elif isinstance(osc, UBOscillator):
                    diag_vectors[:, aux_dm_idx] = n_vectors[:, aux_dm_idx] \
                        + n_vectors[:, aux_dm_idx+1]
                    diag_vectors[:, aux_dm_idx+1] = n_vectors[:, aux_dm_idx+1] \
                        - n_vectors[:, aux_dm_idx]
                    aux_dm_idx += 2
        aux_dm_idx += self.num_matsubara_freqs
    op_to_invert += sp.kron(sp.diags(np.dot(diag_vectors, self.diag_coeffs), dtype=dtype),
                            sp.eye(self.system_dimension**2, dtype=dtype))
    inverse_op = spla.inv(op_to_invert)

    # now construct hierarchy matrix starting with relaxation parameter
    hm = sp.eye(self.M_dimension(), dtype=dtype).multiply(epsilon)
    #sp.kron(epsilon*sp.eye(num_dms, dtype=dtype), sp.eye(self.system_dimension**2, dtype=dtype))

    # include temperature correction / Markovian truncation term for
    # Matsubara frequencies
    if self.temperature_correction:
        hm -= sp.kron(sp.eye(self.number_density_matrices(), dtype=dtype),
                      np.sum(self.tc_terms, axis=0)).astype(dtype)

    # off diag bits
    for n in range(self.num_aux_dm_indices):
        higher_coupling_matrix = sp.coo_matrix(
            (higher_coupling_elements[n],
             (higher_coupling_row_indices[n], higher_coupling_column_indices[n])),
            shape=(num_dms, num_dms), dtype=dtype).tocsr()
        lower_coupling_matrix = sp.coo_matrix(
            (lower_coupling_elements[n],
             (lower_coupling_row_indices[n], lower_coupling_column_indices[n])),
            shape=(num_dms, num_dms), dtype=dtype).tocsr()
        hm -= sp.kron(higher_coupling_matrix.multiply(self.phix_coeffs[n])
                      + lower_coupling_matrix.multiply(self.thetax_coeffs[n]),
                      self.Vx_operators[n]) \
            + sp.kron(lower_coupling_matrix.multiply(self.thetao_coeffs[n]),
                      self.Vo_operators[n])

    return hm.tocsc().astype(dtype), inverse_op.tocsr()
def spinv(a):
    """
    Compute the inverse of a sparse or dense matrix

    Parameters
    ----------
    a : array or sparse matrix
        Object with one or more columns

    Returns
    -------
    ai, the inverse of a
    """
    if SP.issparse(a):
        ai = SPla.inv(a)
    else:
        ai = la.inv(a)
    return ai
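# A tiny demonstration of spinv covering both branches (made-up matrix; the
# SP/SPla/la aliases are assumed to be the module-level imports the function
# relies on, presumably scipy.sparse, scipy.sparse.linalg and a dense linalg
# module).
def _spinv_demo():
    import numpy as np
    import scipy.sparse as SP

    dense = np.array([[2., 0.], [1., 3.]])
    print(spinv(dense))                   # dense branch
    print(spinv(SP.csc_matrix(dense)).A)  # sparse branch, densified to compare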
def svd_eye_minus_hat_matrix(self):
    """
    Following Craven & Wahba find the singular value decomposition of
        F = D Q R^{-1/2}
    This function returns the non-zero singular values S and the corresponding
    left singular vectors in U, satisfying
        I - Hp = D U [ si**2 / ( 6(1-p) si**2 + p ) ] U.T D**(-1)
    where si is the ith singular value.
    """
    # TODO: is it indeed faster to take the non-sparse inverse?!
    method = 4
    if method == 0:
        sqrt_invR = sqrtm(spla.inv(self.R).A)
    elif method == 1:
        sqrt_invR = sqrtm(la.inv(self.R.todense()))
    elif method == 2:
        invR = la.inv(self.R.todense())
        eR, oR = la.eigh(invR)
        sqrt_invR = oR.dot(np.diag(np.sqrt(eR))).dot(oR.T)
    elif method == 3:
        eR, oR = la.eigh(self.R.todense())
        sqrt_invR = oR.dot(np.diag(1./np.sqrt(eR))).dot(oR.T)
    elif method == 4:
        # TODO:
        # deal with the error
        #   File "splines.py", line 378, in svd_eye_minus_hat_matrix
        #     eR, oR = la.eig_banded(self.R.data[self.R.offsets>=0][::-1])
        #   File "/ph2users/eldada/lib/anaconda/lib/python2.7/site-packages/scipy/linalg/decomp.py",
        #   line 563, in eig_banded
        #     raise LinAlgError("eig algorithm did not converge")
        try:
            eR, oR = la.eig_banded(self.R.data[self.R.offsets >= 0][::-1])
            sqrt_invR = oR.dot(np.diag(1./np.sqrt(eR))).dot(oR.T)
        except LinAlgError:
            # if eig_banded fails try the eigh
            eR, oR = la.eigh(self.R.todense())
            sqrt_invR = oR.dot(np.diag(1./np.sqrt(eR))).dot(oR.T)

    U, S, VT = la.svd(self.D * self.Q * sqrt_invR, full_matrices=False)
    return U, S
def get_funcMap(coefS, coefT, eivalS, eivalT):
    k = coefS.shape[0]
    t = coefS.shape[1]
    w_func = 1
    w_com = 1
    w_reg = 1

    data_1 = (coefS.T.repeat(k, axis=0).flatten()) * w_func
    row_ind_1 = np.arange(k*t).repeat(k)
    col_ind_1 = np.tile(np.arange(k*k), t)

    lS = np.tile(eivalS, (eivalS.shape[0], 1))
    lT = np.tile(eivalT, (eivalS.shape[0], 1)).T
    matEival = (lT - lS) * (lT - lS)
    data_2 = (matEival.T.repeat(k, axis=0).flatten()) * w_com
    row_ind_2 = (np.arange(k*k) + (k*t)).repeat(k)
    col_ind_2 = np.tile(np.arange(k*k), k)

    data_3 = (np.ones(k*k)) * w_reg
    row_ind_3 = np.arange(k*k) + ((k*t) + (k*k))
    col_ind_3 = np.arange(k*k)

    M = (k*t) + (k*k) + (k*k)
    N = k*k
    data = np.concatenate((data_1, data_2, data_3), axis=0)
    row_ind = np.concatenate((row_ind_1, row_ind_2, row_ind_3), axis=0)
    col_ind = np.concatenate((col_ind_1, col_ind_2, col_ind_3), axis=0)
    a = csr_matrix((data, (row_ind, col_ind)), shape=(M, N))

    b1 = (coefT.T.flatten()) * w_func
    b2 = (np.zeros(k*k)) * w_com
    b3 = (np.zeros(k*k)) * w_reg
    b = (np.concatenate((b1, b2, b3), axis=0))[:, None]

    aTa = a.T.dot(a)
    aTb = a.T.dot(b)
    funcMap = (inv(aTa).dot(aTb)).reshape(k, k)
    return funcMap
def calculaW(X, Y, Xteste, Yteste, Xdev, Ydev):
    # this function does not normalize Y
    errorg = []
    errortg = []
    # compute W using the train and dev values of X and Y
    final = [-1, 0, 0]
    print "Doing the computations to obtain W"
    #print "Computing the transpose of X"
    #print "Transpose computed, moving on to other computations"
    Xt = X.transpose()
    print "Testing lambdas:"
    lambs = [10**x for x in xrange(-3, 8)]
    xg = map(lambda x: np.log10(x), lambs)
    #lambs = [100]
    #xg = [2]
    for lamb in lambs:
        #for lamb in [0.0000000000000000000000000001]:
        # compute w for each lambda
        temp = Xt*X
        shape = temp.shape
        lambd = sparse.identity(shape[0])*lamb
        temp = temp + lambd
        temp = linalg.inv(temp)
        temp = temp*Xt
        temp = temp*Y
        # end of the computation
        error = erro(Xdev, Ydev, temp)
        errortg.append(erro(Xteste, Yteste, temp))
        print lamb, error
        errorg.append(error)
        if final[0] > error or final[0] == -1:
            final[0] = error
            final[1] = lamb
            final[2] = temp
    # returned values: temp -> final W, final[0] -> mean error associated with
    # that value, final[1] -> lambda
    try:
        import os
        os.environ['DISPLAY']
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.subplot(211)
        plt.title("Dev error evolution")
        plt.plot(xg, errorg, "b", xg, errorg, "ro")
        plt.subplot(212)
        plt.title("Test error evolution")
        plt.plot(xg, errortg, "b", xg, errortg, "ro")
        #plt.savefig("formula.png")
        plt.show()
    except:
        import os
        try:
            del os.environ['DISPLAY']
        except:
            pass
        print "No display was found\nThe plot will be written to a file"
        print "The data used are:" + f
        import matplotlib as mpl
        mpl.use('Agg')
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.subplot(211)
        plt.title("Dev error evolution")
        plt.plot(xg, errorg, "b", xg, errorg, "ro")
        plt.subplot(212)
        plt.title("Test error evolution")
        plt.plot(xg, errortg, "b", xg, errortg, "ro")
        plt.savefig("formula.png")
    return final[2], final[0], final[1]
def kernel_AS(X, labels, num_initial=1, num_eval=1, pi=0.05, eta=0.5, w0=None,
              init_pt=None, verbose=True, all_fs=False, sparse=False, tinv=False):
    """
    X --> r x n matrix of feature values for each point.
    labels --> true labels for each point.
    pi --> prior target probability
    eta --> jump probability
    num_initial --> number of initial points to start from.
    num_eval --> number of points to be investigated
    """
    #X = np.array(X)
    r, n = X.shape

    labels = np.array(labels)
    if init_pt is not None:
        if not isinstance(init_pt, list):
            init_pt = [init_pt]
        num_initial = len(init_pt)
        idxs = init_pt
        if not (labels[idxs]).all():
            if verbose:
                print "Warning: start points provided not targets. Converting to targets."
            labels[idxs] = 1
        true_targets = (np.array(labels) == 1).nonzero()[0]
        unlabeled_idxs = [i for i in range(n) if i not in idxs]
    else:
        # Random start node
        true_targets = (np.array(labels) == 1).nonzero()[0]
        # %%% Randomly pick 1 target point as the first point
        idxs = [true_targets[i]
                for i in nr.permutation(range(len(true_targets)))[:num_initial]]
        unlabeled_idxs = [i for i in range(n) if i not in idxs]
        # %%% Randomly pick 1 target point as the first point

    num_initial = min(n - 1, num_initial)
    num_eval = min(num_eval, n - num_initial)

    if sparse:
        Ir = ss.eye(r)
    else:
        Ir = np.eye(r)

    # Lambda from TK's notes can be written using eta as follows
    l = (1 - eta) / eta
    # omega0 as in TK's code
    if w0 is None:
        w0 = 1 / n

    # Set up initial BD and C
    B = 1 / (1 + w0) * np.ones(n)  # Need to update B every iteration
    B[idxs] = l / (1 + l)
    D = np.squeeze(X.T.dot(X.dot(np.ones((n, 1)))))
    # TODO: if we don't need to keep this, we can remove it.
    Dinv = 1. / D
    BDinv = np.squeeze(B * Dinv)
    if sparse:
        BDinv_ss = ss.diags([BDinv], [0]).tocsr()

    if verbose:
        print 'Start point: \n', idxs

    y = pi * np.ones(n)
    y[idxs] = 1
    I_B = 1 - B
    q = I_B * y  # Need to update q every iteration
    # the atleast 2d transpose is there to make it be the same as diag(__)
    # import IPython
    # IPython.embed()

    if verbose:
        print "Constructing C"
    t1 = time.time()
    if sparse:
        C = (Ir - X.dot(BDinv_ss.dot(X.T)))
    else:
        C = (Ir - X.dot(BDinv[:, None] * X.T))
    if verbose:
        print time.time() - t1

    # import IPython
    # IPython.embed()
    if verbose:
        print "Inverting"
    t1 = time.time()
    if sparse:
        Cinv = ssl.inv(C.tocsc())  # Need to update Cinv every iteration
    else:
        Cinv = nlg.inv(C)
    dtinv = time.time() - t1
    if verbose:
        print "Time for inverse:", dtinv

    hits = np.zeros((num_eval + num_initial, 1))
    hits[0] = num_initial
    selected = [ix for ix in idxs]

    if sparse:
        f = q + BDinv_ss.dot(X.T.dot(Cinv.dot(X.dot(q))))
    else:
        f = q + BDinv * (X.T.dot(Cinv.dot(X.dot(q))))

    # Number of true targets
    true_n = sum(labels == 1)
    found_n = num_initial

    # temp
    # dinvA = (np.diag(Dinv)).dot(X.T.dot(X))
    # B2 = np.ones(n)*1/(1+w0)
    # B2[idxs] = l/(1+l)
    # yp = np.ones(n)*pi
    # yp[idxs] = labels[idxs]

    if all_fs:
        fs = [f]

    # Modifying the element
    for i in range(num_eval):
        # import IPython
        # IPython.embed()
        t1 = time.time()
        # assert len(unlabeled_idxs) == n - num_initial - i
        # if len(unlabeled_idxs) != len(np.unique(unlabeled_idxs)):
        #     print "ERROR: NOT ALL UNLABELED IDXS ARE UNIQUE"

        # Find next index to investigate
        uidx = np.argmax(f[unlabeled_idxs])
        idx = unlabeled_idxs[uidx]
        # if idx == n:
        #     print "ERROR: SELECTING SELECTED PT", idx
        #     import IPython
        #     IPython.embed()
        del unlabeled_idxs[uidx]
        # assert idx not in unlabeled_idxs

        found_n += labels[idx]
        if found_n == true_n:
            if verbose:
                print "Found all", found_n, "targets. Breaking out."
            break

        # Update relevant matrices
        if sparse:
            BDinv_ss[idx, idx] *= (1 + w0) * l / (1 + l)
        else:
            BDinv[idx] *= (1 + w0) * l / (1 + l)
        q[idx] = labels[idx] * 1 / (1 + l)
        gamma = -(l / (1 + l) - 1 / (1 + w0)) * Dinv[idx]
        Xi = X[:, [idx]]  # ith feature vector
        # t7 = time.time()
        Cif = Cinv.dot(Xi)
        # t8 = time.time()
        # d7 = t8 - t7
        # import IPython
        # IPython.embed()
        if sparse:
            Cinv = Cinv - gamma * (Cif.dot(Cif.T)) / (1 + (gamma * Xi.T.dot(Cif))[0, 0])
        else:
            Cinv = Cinv - gamma * (Cif.dot(Cif.T)) / (1 + gamma * Xi.T.dot(Cif))
        # t9 = time.time()
        # d8 = t9 - t8

        if sparse:
            f = q + BDinv_ss.dot(X.T.dot(Cinv.dot(X.dot(q))))
        else:
            f = q + BDinv * (X.T.dot(Cinv.dot(X.dot(q))))
        # t0 = time.time()
        # d9 = t0 - t9

        if all_fs:
            fs.append(f)

        # import IPython
        # IPython.embed()
        elapsed = time.time() - t1
        selected.append(idx)
        hits[i + 1] = found_n

        ## temp ##
        # B2[idx] = l/(l+1)
        # yp[idx] = float(labels[idx])
        # Ap = np.diag(B2).dot(dinvA)
        # q2 = (np.eye(n) - np.diag(B2)).dot(yp)
        # f2 = nlg.inv(np.eye(n) - Ap).dot(q2)
        # print nlg.norm(f-f2)
        ## temp ##

        if verbose:
            if (i % 1) == 0 or i == 1:
                print 'Iter: %i, Selected: %i, Best f: %f, Hits: %i/%i, Time: %f' % (
                    i, selected[i + num_initial], f[idx], hits[i + 1],
                    (i + num_initial + 1), elapsed)
                print '%d %d %f %d\n' % (i, hits[i + 1] / true_n, elapsed,
                                         selected[i + num_initial])

    # Ap = np.diag(B2).dot(dinvA)
    # q2 = (np.eye(n) - np.diag(B2)).dot(yp)
    # f2 = nlg.inv(np.eye(n) - Ap).dot(q2)

    if all_fs:
        if tinv:
            return f, hits, selected, fs, dtinv
        return f, hits, selected, fs
    if tinv:
        return f, hits, selected, dtinv
    return f, hits, selected
def _instability_analysis_from_N2_profile_raw(zc, N2, f0, beta, k, l, zf,
                                              ubar, vbar, etax, etay, Ah,
                                              sort='LI', num=4, depth=None,
                                              **kwargs):
    nz = len(zc)

    ### vertical discretization ###
    # ~~~~~ zf[0]==0, phi[0] ~~~~ #
    # ----- zc[0], N2[0] -------- #
    # ----- zf[1], phi[1] ------- #
    # ...
    # ---- zc[nz-1], N2[nz-1] --- #
    # ~~~~ zf[nz], phi[nz] ~~~~~~ #

    # just for notation's sake
    # (user shouldn't worry about discretization)
    dzc = np.hstack(np.diff(zc))
    dzf = np.diff(zf)

    ##################
    # We want a matrix representation of the operator such that
    #
    #     np.dot(L, psi) = omega * np.dot(G, psi)
    ##################
    omega = np.zeros((num, len(l), len(k)), dtype=np.complex128)
    psi = np.zeros((nz+1, num, len(l), len(k)), dtype=np.complex128)

    for j in range(len(l)):
        for i in range(len(k)):
            if Ah == 0.:
                L = lil_matrix((nz+1, nz+1), dtype=np.float64)
            else:
                L = lil_matrix((nz+1, nz+1), dtype=np.complex128)
            G = L.copy()

            ################
            # n = 0 (surface)
            #
            # From the discretized equations above, at the surface we get
            #   L[0, 0] = (( k (U[0] + U[1])/2 + l (V[0] + V[1])/2 ) / dzf[0]
            #       - .5*( k ( (U[0] - U[1])/dzf[0] - N2[0]**2/f0 * etay[0] )
            #            + l ( (V[0] - V[1])/dzf[0] + N2[0]**2/f0 * etax[0] )))
            #   L[0, 1] = (( k (U[0] + U[1])/2 + l (V[0] + V[1])/2 ) / (-dzf[0])
            #       - .5*( k ( (U[0] - U[1])/dzf[0] - N2[0]/f0 * etay[0] )
            #            + l ( (V[0] - V[1])/dzf[0] + N2[0]/f0 * etax[0] )))
            # and
            #   G[0, 0] = dzf[0]**-1
            #   G[0, 1] = -dzf[0]**-1
            ################
            R = k[i] * .5*(ubar[0]+ubar[1]) + l[j] * .5*(vbar[0]+vbar[1])
            D = dzf[0]**-1
            S = .5 * (k[i] * ((ubar[0]-ubar[1])*D - N2[0]/f0 * etay[0])
                      + l[j] * ((vbar[0]-vbar[1])*D + N2[0]/f0 * etax[0]))
            L[0, 0] = R * D - S
            L[0, 1] = R * (-D) - S
            G[0, 0] = D
            G[0, 1] = - D
            # L[0, 0] = R - S * D**-1
            # L[0, 1] = R * (-1.) - S * D**-1
            # G[0, 0] = 1.
            # G[0, 1] = - 1.

            ################
            # n = nz (bottom)
            #
            # From symmetry, at the bottom
            #   L[nz, nz-1] = (( k (U[nz-1] + U[nz])/2 + l (V[nz-1] + V[nz])/2 ) / dzf[nz-1]
            #       - .5*( k ( (U[nz-1] - U[nz])/dzf[nz-1] - N2[nz-1]**2/f0 * etay[1] )
            #            + l ( (V[nz-1] - V[nz])/dzf[nz-1] + N2[nz-1]**2/f0 * etax[1] )))
            #   L[nz, nz] = (( k (U[nz-1] + U[nz])/2 + l (V[nz-1] + V[nz])/2 ) / (-dzf[nz-1])
            #       - .5*( k ( (U[nz-1] - U[nz])/dzf[nz-1] - N2[nz-1]/f0 * etay[1] )
            #            + l ( (V[nz-1] - V[nz])/dzf[nz-1] + N2[nz-1]/f0 * etax[1] )))
            # and
            #   G[nz, nz-1] = dzf[nz-1]**-1
            #   G[nz, nz] = -dzf[nz-1]**-1
            ################
            R = k[i] * .5*(ubar[nz-1]+ubar[nz]) + l[j] * .5*(vbar[nz-1]+vbar[nz])
            D = dzf[nz-1]**-1
            S = .5 * (k[i] * ((ubar[nz-1]-ubar[nz])*D - N2[nz-1]/f0 * etay[1])
                      + l[j] * ((vbar[nz-1]-vbar[nz])*D + N2[nz-1]/f0 * etax[1]))
            L[nz, nz-1] = R * D - S
            L[nz, nz] = R * (-D) - S
            G[nz, nz-1] = D
            G[nz, nz] = - D
            # L[nz, nz-1] = R - S * D**-1
            # L[nz, nz] = R * (-1.) - S * D**-1
            # G[nz, nz-1] = 1.
            # G[nz, nz] = - 1.

            ################
            # 0 < n < nz (interior)
            #
            # In the interior, we have
            #   L[n, n-1] = (k U[n] + l V[n] - i Ah K**2) * f**2/dzc[n] / (N2[n-1] * dzf[n-1])
            #   L[n, n] = (k U[n] + l V[n] - i Ah K**2)
            #       * ( - f**2/dzc[n] * ( 1/(N2[n] * dzf[n]) + 1/(N2[n-1]*dzf[n-1]) ) - K**2 ) + k*Qy[n] - l*Qx[n] )
            #   L[n, n+1] = (k U[n] + l V[n] - i Ah K**2) * f**2/dzc[n] / (N2[n] * dzf[n])
            # and
            #   G[n, n-1] = f**2/dzc[n] / N2[n-1] / dzf[n-1]
            #   G[n, n] = ( - f**2/dzc[n] * ( 1/(N2[n]*dzf[n]) + 1/(N2[n-1]*dzf[n-1]) ) - K**2 )
            #   G[n, n+1] = f**2/dzc[n] / N2[n] / dzf[n]
            ################
            for n in range(1, nz):
                K2 = k[i]**2 + l[j]**2
                if Ah == 0.:
                    R = k[i] * ubar[n] + l[j] * vbar[n]
                else:
                    R = k[i] * ubar[n] + l[j] * vbar[n] - 1j * Ah * K2
                bf = f0**2 * dzc[n-1]**-1
                b_1 = N2[n-1] * dzf[n-1]
                b = N2[n] * dzf[n]
                B_1 = bf * b_1**-1
                B = - (bf * (b**-1 + b_1**-1) + K2)
                Bt1 = bf * b**-1
                N2Z = (N2[n]*dzf[n])**-1
                N2Z_1 = (N2[n-1]*dzf[n-1])**-1
                P = (k[i] * (beta - bf * (ubar[n+1] * N2Z
                                          - (N2Z + N2Z_1) * ubar[n]
                                          + N2Z_1 * ubar[n-1]))
                     - l[j] * bf * (vbar[n+1] * N2Z
                                    - (N2Z + N2Z_1) * vbar[n]
                                    + N2Z_1 * vbar[n-1]))
                L[n, n-1] = R * B_1
                L[n, n] = R * B + P
                L[n, n+1] = R * Bt1
                G[n, n-1] = B_1
                G[n, n] = B
                G[n, n+1] = Bt1
                # L[n, n-1] = R
                # L[n, n] = R + P * B**-1
                # L[n, n+1] = R
                # G[n, n-1] = 1.
                # G[n, n] = 1.
                # G[n, n+1] = 1.

            # Return NaNs if matrix G is singular
            try:
                # read in kwargs if any
                if len(kwargs) > 0:
                    val, func = eigs(csc_matrix(inv(csc_matrix(G)).dot(csc_matrix(L))),
                                     k=num, which='LI',
                                     v0=kwargs['init_vector'],
                                     ncv=kwargs['num_Lanczos'],
                                     maxiter=kwargs['iteration'],
                                     tol=kwargs['tolerance'])  # default returns 6 eigenvectors
                else:
                    num_Lanczos = nz
                    iteration = 10*nz
                    val, func = eigs(csc_matrix(inv(csc_matrix(G)).dot(csc_matrix(L))),
                                     k=num, which='LI', ncv=num_Lanczos,
                                     maxiter=iteration)  # default returns 6 eigenvectors
                # val, func = eigs( csc_matrix(L), M=csc_matrix(G), Minv=csc_matrix(inv(csc_matrix(G))),
                #                   k=num, which='LI', ncv=num_Lanczos, maxiter=iteration, **kwargs )

                ###########
                # eigs returns complex values. For a linear-stability
                # analysis, we don't want the eigenvalues to be real
                ###########
                omega[:, j, i] = val
                psi[:, :, j, i] = func  # Each column is the eigenfunction
            except RuntimeError:
                warnings.warn('The matrix is ill-conditioned or singular',
                              RuntimeWarning)
                omega[:, j, i] = np.nan
                psi[:, :, j, i] = np.nan

    ###########
    # they are often sorted and normalized but not always,
    # so we have to do that here.
    # sort them by the imaginary part of the eigenvalues
    # (growth rate)
    ###########
    omega1d = omega.reshape((num, len(k)*len(l))).imag.max(axis=1)
    p = np.argsort(omega1d)[::-1]
    omega = omega[p]
    psi = psi[:, p]

    # return the first 'num' leading modes
    return zf, omega[:num], psi[:, :num]
def walk_generator(A):
    """
    Return the walk-generating function of A.
    """
    I = sparse.identity(A.shape[1], dtype=float)
    inv_mat = linalg.inv((I - A).tocsc()).tocsr()
    return inv_mat
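# An illustrative call to walk_generator (hypothetical graph, assuming the
# module-level sparse/linalg imports the function relies on): when the
# spectral radius of A is below 1, (I - A)^{-1} equals the convergent series
# I + A + A^2 + ..., a generating function of walk counts weighted by length.
def _walk_generator_demo():
    import numpy as np
    from scipy import sparse

    adj = 0.25 * sparse.csr_matrix(np.array([[0., 1.],
                                             [1., 0.]]))
    print(walk_generator(adj).toarray())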
def predict(model, Xi, X, Y, eps0=1e-1, maxdist=1e3, compute_uncertainty=False,
            use_kd_trees=True, demean=True):
    """ Return the Gauss-Markov minimum variance estimate for points *Xi*
    given data *Y* observed at *X*. (DISEP Eqn 2.397)

    Arguments:
    ----------
    model: function, returns the isotropic structure function given a distance
    Xi: np.ndarray, (n x 2)
    X: np.ndarray, (n x 2)
    Y: np.ndarray, (n)
    eps0: zero lag variance, or measurement error
    compute_uncertainty: boolean, optional

    Returns:
    --------
    (np.ndarray, np.ndarray)
    Predictions and prediction uncertainty (variance)

    Notes:
    ------
    Oceanographers call this optimal interpolation, or objective analysis.
    Geologists call this simple kriging.
    """
    # Matrix inversion and multiplication are somewhat slow
    # Error variance is extremely slow with the current algorithm
    if demean:
        Ym = Y.mean()
    else:
        Ym = 0.0
    Yd = Y - Ym

    if use_kd_trees:
        kdx = scipy.spatial.cKDTree(X)
        kdxi = scipy.spatial.cKDTree(Xi)
        Rxx = _model_covariance_matrix_kd(model, kdx, kdx, maxdist=maxdist) \
            + sparse.diags(eps0 * np.ones_like(Y), 0)
        Rxy = _model_covariance_matrix_kd(model, kdx, kdxi, maxdist=maxdist)
        Rxx_inv = splinalg.inv(Rxx)
        alpha = Rxx_inv * Rxy  # all matrices are CSC
        Yi = alpha.T * Yd + Ym
        if compute_uncertainty:
            Ryy = _model_covariance_matrix_kd(model, kdxi, kdxi, maxdist=maxdist)
            epsi = _uncertainty(Ryy, Rxy, Rxx_inv)
        else:
            epsi = np.nan * np.empty_like(Yi)
    else:
        if hasattr(eps0, "__iter__"):
            Rxx = _model_covariance_matrix(model, X, X) + np.diag(eps0)
        else:
            Rxx = _model_covariance_matrix(model, X, X) \
                + np.diag(eps0 * np.ones_like(Y))
        Rxy = _model_covariance_matrix(model, X, Xi)
        Rxx_inv = np.linalg.inv(Rxx)
        alpha = np.dot(Rxx_inv, Rxy)
        Yi = np.dot(alpha.T, Yd) + Ym
        if compute_uncertainty:
            Ryy = _model_covariance_matrix(model, Xi, Xi)
            epsi = _uncertainty(Ryy, Rxy, Rxx_inv)
        else:
            epsi = np.nan * np.empty_like(Yi)
    return Yi, epsi
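# A hedged end-to-end sketch for predict with synthetic data; the Gaussian
# model function and every number below are assumptions, not part of the
# original module, and the internal _model_covariance_matrix helpers are
# assumed to be available at module level.
def _predict_demo():
    import numpy as np

    np.random.seed(0)
    X = np.random.uniform(0., 100., (50, 2))   # observation locations
    Y = np.sin(X[:, 0] / 20.0)                 # observed field
    Xi = np.random.uniform(0., 100., (10, 2))  # prediction locations

    model = lambda d: np.exp(-(d / 30.0) ** 2)  # isotropic covariance vs. distance
    Yi, epsi = predict(model, Xi, X, Y, eps0=1e-2, use_kd_trees=False)
    print(Yi.shape)  # (10,)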
def compute_fundamental(P):
    """Return the fundamental matrix F = (I - P)^{-1} as a dense matrix."""
    n = P.shape[0]
    F = inv(eye(n, format='csc') - P.tocsc()).todense()
    return F
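# An illustrative absorbing chain for compute_fundamental (hypothetical data,
# assuming the module-level eye/inv imports the function relies on): P is the
# transient-to-transient block Q of a 3-state chain with one absorbing state,
# and F[i, j] is the expected number of visits to transient state j starting
# from transient state i.
def _fundamental_demo():
    import numpy as np
    from scipy.sparse import csc_matrix

    Q = csc_matrix(np.array([[0.50, 0.25],
                             [0.25, 0.50]]))
    print(compute_fundamental(Q))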
def SpectralLearning(M2, K, initSingle, Single, Pair21, Pair31, Triple,
                     transObs, marks, emission_file):
    """ The main spectral learning algorithm """
    transObs2 = {}
    for i in transObs.keys():
        transObs2[transObs[i]] = i

    # 'Computes SVD of observation pairs matrix'
    (U2, S2, V_T2) = ssl.svds(Pair31, K)
    U_T2 = np.matrix(U2.transpose())
    V_T2 = np.matrix(V_T2)
    U_T = np.matrix(np.zeros((K, M2)))
    S = np.zeros((K))
    V = np.matrix(np.zeros((M2, K)))
    for i in range(0, K):
        U_T[i, :] = U_T2[K-i-1, :] * np.sign(U_T2[K-i-1, 0])
        S[i] = S2[K-i-1]
        V[:, i] = (V_T2[K-i-1, :] * np.sign(V_T2[K-i-1, 0])).transpose()

    # Pair_inv3 = (U^T * Pair31)^+
    Pair_inv3 = ss.dok_matrix((M2, K))
    for j in range(0, K):
        for i in range(0, M2):
            Pair_inv3[i, j] = V[i, j] / S[j]

    # 'Finds the major observation by using U'
    mIdx = {}
    exist = {}
    for i in range(0, K):
        mV = 0
        mI = 0
        for j in range(0, M2):
            if (j in exist):
                continue
            v = U_T[i, j]**2
            if (v > mV):
                mV = v
                mI = j
        mIdx[i] = mI
        exist[mI] = True
        # print i,transObs2[mIdx[i]]

    # 'Computes eigenvectors'
    eigVec = np.matrix(np.zeros((K, K)))
    for i in range(0, K):
        Cx = U_T * Triple[mIdx[i]] * Pair_inv3
        # [w,v] = eigs(Cx,1,maxiter=miter)
        [Ws, Vs] = nl.eig(Cx)
        v = Vs[:, 0]
        eigVec[:, i] = np.matrix(np.real(v) * np.sign(np.real(v[0, 0])))
    eigVec_inv = nl.inv(eigVec)

    # 'Computes emission matrix'
    O = ss.dok_matrix((M2, K))
    O1 = eigVec_inv * U_T
    O2 = Pair_inv3 * eigVec
    for i in range(0, M2):
        if (i not in Triple):
            continue
        Ox = O1 * Triple[i] * O2
        for j in range(0, K):
            O[i, j] = abs(Ox[j, j])
    for j in range(0, K):
        O[:, j] /= O[:, j].sum()

    # 'Write emission matrix to the output file'
    f = open(emission_file, 'w+')
    f.write(str(K) + '\t' + str(transObs.__len__()) + '\t(Emission order)')
    for mark in marks:
        f.write('\t' + mark)
    f.write('\n')
    for j in range(0, K):
        for i in sorted(transObs.keys()):
            f.write(str(j+1) + '\t' + str(i) + '\t' + str(O[transObs[i], j]) + '\n')
    f.close()

    # U_T_O = U_T * O
    # 'Computes Inverse of Emission Matrix'
    # O_inv = matrix(pinv(O.todense()))
    # Pi = O_inv * initSingle
    A = ssl.inv((O.transpose() * O).tocsc())

    # 'Computes initial state distribution vector'
    Pi = A * (O.transpose() * initSingle)
    s = abs(Pi).sum()
    for i in range(0, K):
        Pi[i, 0] = abs(Pi[i, 0]) / s

    # 'Computes transition matrix'
    # T = abs(O_inv*Pair21*O_inv.transpose()*inv(diagMatrix(Pi).todense()))
    # T1 = O_inv*Pair31
    # T2 = pinv(O_inv*Pair21)
    # T = abs(T1 * T2)
    T3 = O.transpose() * Pair31
    T2 = O.transpose() * Pair21
    T2_inv = ssl.inv((T2 * T2.transpose()).tocsc())
    A_inv = ssl.inv(A)
    T = abs(A * (T3 * T2.transpose()) * T2_inv * A_inv)
    for j in range(0, K):
        T[:, j] /= T[:, j].sum()

    return (T, O, Pi)
def get_random_walk_distance(self, source=None, target=None, parameter=1,
                             saveto=""):
    """
    Compute the random walk effective distance:
    F. Iannelli, A. Koher, P. Hoevel, I.M. Sokolov (in preparation)

    Parameters
    ----------
    source : int or None
        If source is None, the distances from all nodes to the target are
        calculated. Otherwise the integer has to correspond to a node index.

    target : int or None
        If target is None, the distances from the source to all other nodes
        are calculated. Otherwise the integer has to correspond to a node
        index.

    parameter : float
        compound parameter which includes the infection and recovery rate
        alpha and beta, respectively, the mobility rate kappa and the
        Euler-Mascheroni constant lambda: log[ (alpha-beta)/kappa - lambda ]

    saveto : string
        If empty, the result is saved internally in
        self.dominant_path_distance

    Returns:
    --------
    random_walk_distance : ndarray or float
        If source and target are specified, a float value is returned that
        specifies the distance.

        If either source or target is None a numpy array is returned.
        The position corresponds to the node ID. shape = (Nnodes,)

        If both are None a numpy array is returned. Each row corresponds to
        the node ID. shape = (Nnodes,Nnodes)
    """
    assert (isinstance(parameter, float) or isinstance(parameter, int)) and parameter > 0
    assert isinstance(saveto, str)
    assert self.graph is not None, "Load graph first."

    P = adjacency_matrix(self.graph, weight="transition_rate").tocsc()
    assert np.all(np.isclose(P.sum(axis=1), 1, rtol=1e-15)), \
        "The transition matrix has to be row normalized"

    one = eye(self.nodes, format="csc")
    Z = inv(one - P * np.exp(-parameter))
    D = diags(1. / Z.diagonal(), format="csc")
    RWED = -np.log(Z.dot(D).toarray())

    if source is not None:
        if target is not None:
            RWED = RWED[source, target]
        else:
            RWED = RWED[source, :]
    elif target is not None:
        RWED = RWED[:, target]

    if saveto != "":
        save(saveto, RWED)

    return RWED
def _roi_extract(inputs):
    """ROI extract code, intended to be used by extract_rois.
    Needs to be a top-level function to allow it to be used with Pools.

    Parameters - a single two-element tuple, 'inputs'
    ----------
    frame : array
        An individual aligned frame from the imaging session.
    constants : dict
        Variables that do not change each loop and are pre-calculated
        to speed up extraction. Includes demixer, mask_stack, A,
        masked_pixels, and is_overlap.

    Returns - a single three-element tuple
    -------
    values : array
        n_rois length array of average pixel intensity for all pixels
        in each ROI
    demixed_values : array
        If demixer is None, the second returned value will also be
        None. Same format as values, but calculated from the demixed
        raw signal.
    frame : array
        Return back the frame as it was passed in, used for calculating
        a mean image.
    """
    frame, frame_idx, constants = inputs

    def put_back_nans(values, imaged_rois, n_rois):
        """Puts NaNs back in output arrays for ROIs that were not
        imaged this frame.
        """
        value_idx = 0
        roi_idx = 0
        final_values = np.empty(n_rois)
        while roi_idx < n_rois:
            if roi_idx in imaged_rois:
                final_values[roi_idx] = values[value_idx]
                value_idx += 1
            else:
                final_values[roi_idx] = np.nan
            roi_idx += 1
        return final_values

    n_rois = constants['A'].shape[1]
    masked_frame = frame[constants['masked_pixels']]

    # Determine which pixels and ROIs were imaged this frame.
    # If none were, just return with all NaNs.
    imaged_pixels = np.isfinite(masked_frame)
    if not np.any(imaged_pixels):
        nan_result = np.empty((n_rois, 1))
        nan_result.fill(np.nan)
        return (frame_idx, nan_result, nan_result)

    # If there are overlapping pixels between the ROIs, calculate the
    # full pseudoinverse of A; if not, use a shortcut.
    if constants['is_overlap']:
        A = constants['A'][imaged_pixels, :]
        # Identify all of the ROIs that were imaged this frame
        imaged_rois = np.unique(
            constants['mask_stack'][:, imaged_pixels].nonzero()[0])
        if len(imaged_rois) < n_rois:
            A = A.tocsc()[:, imaged_rois].tocsr()
        # First assume A.T * A is invertible, else fall back to the
        # full pseudo-inverse
        try:
            weights = inv(A.T * A) * A.T
        except RuntimeError:
            weights = csc_matrix(np.linalg.pinv(A.todense()))
    else:
        orig_masks = constants['mask_stack'].copy()
        imaged_masks = orig_masks[:, imaged_pixels]
        imaged_rois = np.unique(imaged_masks.nonzero()[0])
        if len(imaged_rois) < n_rois:
            orig_masks = orig_masks.tocsr()[imaged_rois, :].tocsc()
            imaged_masks = imaged_masks.tocsr()[imaged_rois, :].tocsc()
        orig_masks.data **= 2
        imaged_masks.data **= 2
        scale_factor = old_div(
            orig_masks.sum(axis=1), imaged_masks.sum(axis=1))
        scale_factor = np.array(scale_factor).flatten()
        weights = diags(scale_factor, 0) \
            * constants['mask_stack'][imaged_rois][:, imaged_pixels]

    # Extract signals
    values = weights * masked_frame[imaged_pixels, np.newaxis]
    weights_sums = weights.sum(axis=1)
    result = values + weights_sums
    if len(imaged_rois) < n_rois:
        result = put_back_nans(result, imaged_rois, n_rois)

    if constants['demixer'] is None:
        return (frame_idx, result, None)

    # Same as 'values' but with the demixed frame data
    demixed_frame = masked_frame + constants['demixer']
    demixed_values = weights * demixed_frame[imaged_pixels, np.newaxis]
    demixed_result = demixed_values + weights_sums
    if len(imaged_rois) < n_rois:
        demixed_result = put_back_nans(demixed_result, imaged_rois, n_rois)

    return (frame_idx, result, demixed_result)
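# The inv(A.T * A) * A.T branch above is the standard left
# pseudo-inverse, which coincides with the Moore-Penrose pseudoinverse
# whenever A has full column rank. A minimal numpy check of that
# identity; A_demo is a hypothetical tall matrix, not the mask
# pseudoinverse used in the extraction code.
import numpy as np

rng = np.random.default_rng(0)
A_demo = rng.random((20, 4))  # full column rank with probability 1

left_pinv = np.linalg.inv(A_demo.T @ A_demo) @ A_demo.T
assert np.allclose(left_pinv, np.linalg.pinv(A_demo))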
def extract_rois(dataset, rois, signal_channel=0, remove_overlap=True,
                 n_processes=1, demix_channel=None):
    """Extracts imaging data from the current dataset using the
    supplied ROIs file.

    Parameters
    ----------
    dataset : ImagingDataset
        The dataset from which signals are to be extracted.
    rois : ROIList
        ROIList of rois to extract
    signal_channel : string or int, optional
        Channel containing the signal to be extracted, either an
        integer index or a name in self.channel_names.
    remove_overlap : bool, optional
        If True, remove any pixels that overlap between masks.
    n_processes : int, optional
        Number of processes to farm out the extraction across. Should
        be at least 1 and at most one less than the number of CPUs in
        the computer. Defaults to 1.
    demix_channel : int, optional
        Index of channel to demix from the signal channel. If None, do
        not demix signals.

    Returns
    -------
    signals : dict
        The extracted signals along with parameters and values
        calculated during extraction. See sima.ImagingDataset.extract
        for details of the signals format.

    See also
    --------
    sima.ImagingDataset.extract

    """
    signal_channel = dataset._resolve_channel(signal_channel)

    if n_processes > 1:
        pool = Pool(processes=n_processes)

    num_sequences = dataset.num_sequences
    num_planes, num_rows, num_columns, num_channels = dataset.frame_shape

    for roi in rois:
        roi.im_shape = (num_planes, num_rows, num_columns)
    masks = [hstack([mask.reshape((1, num_rows * num_columns))
                     for mask in roi.mask]) for roi in rois]

    # Find overlapping pixels
    overlap = _identify_overlapping_pixels(masks)

    # Remove pixels that overlap between ROIs
    if remove_overlap:
        masks = _remove_pixels(masks, overlap)

    # If the mask is boolean, convert to float and normalize values
    # such that the sum of the weights in each ROI is 1
    for mask_idx, mask in zip(it.count(), masks):
        if mask.dtype == bool and mask.nnz:
            masks[mask_idx] = old_div(mask.astype('float'), mask.nnz)

    # Identify non-empty ROIs
    original_n_rois = len(masks)
    rois_to_include = np.array(
        [idx for idx, mask in enumerate(masks) if mask.nnz > 0])
    n_rois = len(rois_to_include)
    if n_rois != original_n_rois:
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            warnings.warn("Empty ROIs will return all NaN values: " +
                          "{} empty ROIs found".format(
                              original_n_rois - n_rois))
    if not n_rois:
        raise ValueError('No valid ROIs found.')

    # Stack masks to a 2-d array
    mask_stack = vstack([masks[idx] for idx in rois_to_include]).tocsc()

    # Only include pixels that are included in a ROI
    masked_pixels = np.unique(mask_stack.nonzero()[1])
    mask_stack = mask_stack[:, masked_pixels]

    # A is defined as the pseudoinverse of the mask weights
    if n_rois != 1:
        try:
            A = mask_stack.T * inv(mask_stack * mask_stack.T).tocsc()
        except RuntimeError:
            A = csc_matrix(np.linalg.pinv(mask_stack.todense()))
    else:
        mask_mask_t = mask_stack * mask_stack.T
        mask_mask_t.data = old_div(1, mask_mask_t.data)
        A = mask_stack.T * mask_mask_t.tocsc()

    demixer = None
    if demix_channel is not None:
        demixed_signal = [None] * num_sequences
        demix_matrix = _demixing_matrix(dataset)
        demixer = demix_matrix[signal_channel, demix_channel] * \
            dataset.time_averages[demix_channel]
        demixer = demixer.flatten().astype('float32')[masked_pixels]

    raw_signal = [None] * num_sequences

    def _data_chunker(cycle, time_averages, channel=0):
        """Takes an aligned_data generator for a single cycle and
        returns df/f of each pixel formatted correctly for extraction"""
        while True:
            df_frame = (
                next(cycle)[..., channel] - time_averages[..., channel]
            ) / time_averages[..., channel]
            yield df_frame.flatten()

    for cycle_idx, sequence in zip(it.count(), dataset):
        signal = np.empty((n_rois, len(sequence)), dtype='float32')
        if demixer is not None:
            demix = np.empty((n_rois, len(sequence)), dtype='float32')

        constants = {}
        constants['demixer'] = demixer
        constants['mask_stack'] = mask_stack
        constants['A'] = A
        constants['masked_pixels'] = masked_pixels
        constants['is_overlap'] = len(overlap[0]) > 0 and not remove_overlap

        # Determine chunksize and limit to prevent pools from hanging
        chunksize = min(1 + old_div(len(sequence), n_processes), 200)

        # This will farm out signal extraction across 'n_processes'
        # CPUs. The actual extraction is in _roi_extract; it's a
        # separate top-level function due to Pool constraints.
        if n_processes > 1:
            map_generator = pool.imap_unordered(_roi_extract, zip(
                _data_chunker(
                    iter(sequence), dataset.time_averages, signal_channel),
                it.count(), it.repeat(constants)),
                chunksize=chunksize)
        else:
            map_generator = map(_roi_extract, zip(
                _data_chunker(
                    iter(sequence), dataset.time_averages, signal_channel),
                it.count(), it.repeat(constants)))

        # Loop over generator and extract signals
        while True:
            try:
                frame_idx, raw_result, demix_result = next(map_generator)
            except StopIteration:
                break
            signal[:, frame_idx] = np.array(raw_result).flatten()
            if demixer is not None:
                demix[:, frame_idx] = np.array(demix_result).flatten()

        raw_signal[cycle_idx] = signal
        if demixer is not None:
            demix[np.isinf(demix)] = np.nan
            demixed_signal[cycle_idx] = demix

    if n_processes > 1:
        pool.close()
        pool.join()

    def put_back_nan_rois(signals, included_rois, n_rois):
        """Put NaN rows back in the signals file for ROIs that were
        never imaged or entirely overlapped with other ROIs and were
        removed.
        """
        final_signals = []
        for cycle_signals in signals:
            signals_idx = 0
            roi_idx = 0
            final_cycle_signals = np.empty((n_rois,
                                            cycle_signals.shape[1]))
            nan_row = np.empty((1, cycle_signals.shape[1]))
            nan_row.fill(np.nan)
            while roi_idx < n_rois:
                if roi_idx in included_rois:
                    final_cycle_signals[roi_idx] = \
                        cycle_signals[signals_idx]
                    signals_idx += 1
                else:
                    final_cycle_signals[roi_idx] = nan_row
                roi_idx += 1
            final_signals.append(final_cycle_signals)
        return final_signals

    if original_n_rois > n_rois:
        raw_signal = put_back_nan_rois(
            raw_signal, rois_to_include, original_n_rois)
        if demixer is not None:
            demixed_signal = put_back_nan_rois(
                demixed_signal, rois_to_include, original_n_rois)

    signals = {'raw': raw_signal}
    if demixer is not None:
        signals['demixed_raw'] = demixed_signal
    signals['_masks'] = [masks[idx].tolil() for idx in rois_to_include]
    signals['mean_frame'] = dataset.time_averages[..., signal_channel]
    if remove_overlap:
        signals['overlap'] = overlap
    signals['signal_channel'] = signal_channel
    if demix_channel is not None:
        signals['demix_channel'] = demix_channel
    signals['rois'] = [roi.todict() for roi in rois]
    timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d-%Hh%Mm%Ss')
    signals['timestamp'] = timestamp

    return signals
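# put_back_nan_rois above simply re-expands the per-cycle signal matrix
# so that skipped ROIs come back as all-NaN rows. A toy demonstration
# of the same reinsertion on hypothetical data (names `signals` and
# `included` are illustrative only):
import numpy as np

# Hypothetical: 2 of 4 ROIs produced signals over 3 frames
signals = np.array([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])
included = [0, 2]  # indices of the ROIs that were imaged
n_rois = 4

full = np.full((n_rois, signals.shape[1]), np.nan)
full[included] = signals
print(full)  # rows 1 and 3 are all-NaN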
m = inner(u, v)*dx
L = Constant(0.)*v*dx
bc = DirichletBC(V, Constant(0), DomainBoundary())

A, _ = assemble_system(a, L, bc)
M, _ = assemble_system(m, L, bc)

# Prepare A and M for scipy: .data() returns the CSR triplet
# (row pointers, column indices, values)
rows, cols, values = A.data()
A = csr_matrix((values, cols, rows))

rows, cols, values = M.data()
M = csr_matrix((values, cols, rows))

print('Inverting %d by %d matrix' % (V.dim(), V.dim()))
timer = Timer('inverse')
timer.start()
T = inv(M).dot(A)
timer.stop()
print('\t\t done in %g' % timing('inverse'))

# Plot the sparsity patterns
plt.figure()
plt.subplot(131)
plt.spy(A)
plt.subplot(132)
plt.spy(M)
plt.subplot(133)
plt.spy(T)
plt.show()
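# inv(M).dot(A) above materializes the (typically dense) inverse of the
# mass matrix. scipy's spsolve accepts a sparse right-hand side and
# solves for all columns of A at once, which is usually cheaper. A
# minimal sketch on hypothetical small matrices M and A, not the
# assembled FEM operators above:
import numpy as np
from scipy.sparse import csr_matrix, identity
from scipy.sparse.linalg import inv, spsolve

M = csr_matrix(np.diag([2.0, 3.0, 4.0]) + 0.1 * np.ones((3, 3)))
A = identity(3, format='csr')

T_inv = inv(M.tocsc()).dot(A)    # explicit inverse (what the script does)
T_solve = spsolve(M.tocsc(), A)  # column-wise solve, no dense inverse
assert np.allclose(T_inv.toarray(), T_solve.toarray())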
def reduce_arrays(n_bus, Ymat, slack_indices, Vset, S, types):
    """
    Reduction of the circuit magnitudes.
    Args:
        n_bus: Number of buses of the circuit
        Ymat: Circuit admittance matrix
        slack_indices: Array of indices of the slack nodes
        Vset: Vector of voltages of those nodes where the voltage is
              controlled (AKA slack and PV buses)
        S: Vector of power injections at all the nodes
        types: Vector of node types
    Output:
        Yred: Reduced admittance matrix (without the rows and columns
              belonging to slack buses)
        I: Matrix of currents injected by the slack buses (in practice
           only one slack bus is selected, hence it is a vector)
        Sred: Array of power injections of the buses that are not of
              type slack
        types_red: Array of types of the buses that are not of type
                   slack
        non_slack_indices: Array of indices of the buses that are not
                           of type slack
    """
    # Compose the list of bus indices excluding the indices of the
    # slack buses
    pv = np.where(types == 2)[0]
    pq = np.where(types == 1)[0]
    non_slack_indices = r_[pq, pv]

    # Types of the non-slack buses
    types_red = types[non_slack_indices]

    # Build index maps so the PQ and PV coefficients can be stored in
    # compact per-type arrays
    map_idx = zeros(len(types_red), dtype=int)
    npq = 0
    npv = 0
    for i in range(len(types_red)):
        if types_red[i] == 1:  # PQ
            map_idx[i] = npq
            npq += 1
        elif types_red[i] == 2:  # PV
            map_idx[i] = npv
            npv += 1

    # Compose a reduced admittance matrix without the rows and columns
    # that correspond to the slack buses
    Yred = Ymat[non_slack_indices, :][:, non_slack_indices]

    # Matrix of the columns of the admittance matrix that correspond to
    # the slack buses
    Yslack = Ymat[non_slack_indices, :][:, slack_indices]

    # Vector of slack voltages (complex)
    Vslack = Vset[slack_indices]

    # Vector of currents being injected by the slack nodes
    # (matrix-vector product)
    Iind = -1 * np.ndarray.flatten(array(Yslack.dot(Vslack)))

    # Invert Yred
    Zred = inv(Yred)

    # Vector of reduced power values (non-slack power injections)
    Sred = S[non_slack_indices]

    return Yred, Zred, Iind, Sred, Vslack, types_red, non_slack_indices, \
        map_idx, npq, npv
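# A tiny self-contained demo of the slicing pattern above,
# Ymat[non_slack_indices, :][:, non_slack_indices], followed by the
# sparse inverse. The 4-bus admittance matrix is hypothetical, with
# bus 0 playing the slack role:
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import inv

Ymat = csc_matrix(np.array([[ 4., -1., -1., -2.],
                            [-1.,  3., -1., -1.],
                            [-1., -1.,  3., -1.],
                            [-2., -1., -1.,  4.]]))
non_slack = np.array([1, 2, 3])

Yred = Ymat[non_slack, :][:, non_slack]
Zred = inv(Yred.tocsc())
assert np.allclose((Zred @ Yred).toarray(), np.eye(3))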
m = 3  # Block size

#
# Large scale
#
n = 2500
A, B, w_ex = sakurai(n)  # Sakurai test pair

import numpy
X, _ = numpy.linalg.qr(rand(n, 3 * m))

data = []
tt = time.perf_counter()
eigs, vecs, resnh = lobpcg(A, X, B, tol=1e-6, maxiter=500,
                           retResidualNormsHistory=1, largest=True)

from scipy.sparse.linalg import eigs as eeigs
from scipy.sparse.linalg import inv, svds
# eigs2, vecs2 = eeigs(A, m, B)
ZZ = inv(B).dot(A)
ZZZ = ZZ.T.dot(ZZ)
Zs = svds(ZZZ, 2499, return_singular_vectors=False)
print(Zs)
# numpy.sqrt(Zs[-m:])
data.append(time.perf_counter() - tt)

print('Results by LOBPCG for n=' + str(n))
print()
print(eigs)
print()
print('Exact eigenvalues')
print()
print(w_ex[:m])
print()
print('Eig eigenvalues')
print()
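# For a generalized pencil (A, B) there is a cheaper cross-check than
# forming inv(B).dot(A): shift-invert eigsh. A sketch on a hypothetical
# small SPD diagonal pair, not the sakurai matrices above; the
# tolerance and iteration counts are assumptions.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh, lobpcg

n_demo, m_demo = 200, 3
A = diags([np.arange(1, n_demo + 1, dtype=float)], [0], format='csr')
B = diags([np.full(n_demo, 2.0)], [0], format='csr')

rng = np.random.default_rng(0)
X = rng.random((n_demo, m_demo))

# Smallest generalized eigenvalues of (A, B) are 0.5, 1.0, 1.5
w_lobpcg, _ = lobpcg(A, X, B, tol=1e-8, maxiter=200, largest=False)
w_eigsh = eigsh(A, k=m_demo, M=B, sigma=0, return_eigenvectors=False)

assert np.allclose(np.sort(w_lobpcg), np.sort(w_eigsh), atol=1e-5)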
Cinv = sparse.diags(1 / temp['conductivityfull'], 0)
q = np.asmatrix(c1.InFlow).transpose()
mpiBz = scipy.linalg.pinv(Bz.todense())

q_newmethod = Bz.transpose() * C * Bz * (mpiBz * (Cinv * (mpiBz.transpose() * q)))
q_originalmethod = Bz.transpose() * C * Bz * P

# Compare both methods against q with a tolerance scaled to the data
tol = np.mean(abs(q)) * 10**-4
print('original method works?',
      np.allclose(q, q_originalmethod, rtol=tol, atol=tol))
print('new method works?',
      np.allclose(q, q_newmethod, rtol=tol, atol=tol))
print('Is it a right inverse?',
      np.allclose(q, Bz.transpose() * mpiBz.transpose() * q,
                  rtol=tol, atol=tol))
print('Is it a right inverse?',
      np.allclose(Bz.transpose() * C * Bz * P,
                  Bz.transpose() * mpiBz.transpose() * q,
                  rtol=tol, atol=tol))
print('Is it a right inverse?',
      np.allclose(C * Bz * P, mpiBz.transpose() * q, rtol=tol, atol=tol))
print('Is it a right inverse?',
      np.allclose(Bz * P, ssl.inv(C) * mpiBz.transpose() * q,
                  rtol=tol, atol=tol))
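# Background for the "right inverse" checks above: pinv(B) @ B = I only
# when B has full column rank, while B @ pinv(B) is merely an
# orthogonal projector onto range(B), so B.T @ pinv(B).T @ q recovers q
# only for q in the row space of B. A quick check on a hypothetical
# full-column-rank matrix:
import numpy as np

rng = np.random.default_rng(0)
B = rng.random((5, 3))  # full column rank with probability 1
Bp = np.linalg.pinv(B)

# pinv(B) @ B = I when B has full column rank ...
assert np.allclose(Bp @ B, np.eye(3))
# ... but B @ pinv(B) is only a projector, not the identity
P = B @ Bp
assert np.allclose(P @ P, P) and not np.allclose(P, np.eye(5))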
def reduce_arrays(n_bus, Ymat, slack_indices, Vset, S, types):
    """
    Reduction of the circuit magnitudes.
    Args:
        n_bus: Number of buses of the circuit
        Ymat: Circuit admittance matrix
        slack_indices: Array of indices of the slack nodes
        Vset: Vector of voltages of those nodes where the voltage is
              controlled (AKA slack and PV buses)
        S: Vector of power injections at all the nodes
        types: Vector of node types
    Output:
        Zred: Reduced impedance matrix
        C: Reduced voltage constant
        Sred: Array of power injections of the buses that are not of
              type slack
        Vset_red: Reduced set-voltage module array
        pv_idx_red: Indices of the PV nodes in the reduced scheme
        npv: Number of PV nodes
        Vslack: Slack voltages array
        non_slack_indices: Indices of the non-slack nodes in the
                           complete scheme
        nbus: Number of nodes in the reduced scheme
    """
    # Compose the list of bus indices excluding the indices of the
    # slack buses
    non_slack_indices = list(range(n_bus))
    for i in slack_indices[::-1]:
        non_slack_indices.pop(i)
    non_slack_indices = array(non_slack_indices)
    nbus = len(non_slack_indices)

    # Types of the non-slack buses
    types_red = types[non_slack_indices]

    # Compose a reduced admittance matrix without the rows and columns
    # that correspond to the slack buses
    Yred = Ymat[non_slack_indices, :][:, non_slack_indices]

    # Matrix of the columns of the admittance matrix that correspond to
    # the slack buses
    Yslack = Ymat[non_slack_indices, :][:, slack_indices]

    # Vector of slack voltages (complex)
    Vslack = Vset[slack_indices]

    # Vector of currents being injected by the slack nodes
    # (matrix-vector product)
    Islack = -1 * Yslack.dot(Vslack)

    # Vector of reduced power values (non-slack power injections)
    Sred = S[non_slack_indices]

    # Reduced impedance matrix
    Zred = inv(Yred)

    # Reduced voltage constant
    C = Zred.dot(Islack)

    # List of PV indices in the reduced scheme
    pv_idx_red = where(types_red == 2)[0]
    npv = len(pv_idx_red)

    # Set-voltage modules in the reduced scheme
    Vset_red = abs(Vset)[non_slack_indices]

    return Zred, Yred, C, Sred, Vset_red, pv_idx_red, npv, Vslack, \
        non_slack_indices, nbus
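# This variant builds non_slack_indices by popping the slack entries
# out of a full index list; the result matches a boolean-mask
# construction and keeps the original bus order. A one-off check on a
# hypothetical types vector (3 = slack, 2 = PV, 1 = PQ):
import numpy as np

types = np.array([3, 1, 2, 1, 1])
slack_indices = np.where(types == 3)[0]

# pop-based construction (this version)
idx = list(range(len(types)))
for i in slack_indices[::-1]:
    idx.pop(i)

# mask-based construction (equivalent)
mask = np.ones(len(types), dtype=bool)
mask[slack_indices] = False
assert np.array_equal(np.array(idx), np.flatnonzero(mask))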