def _laplacian_blocks(self, Adjacency):
    """Creates block diagonal matrices from adjacency matrices

    Parameters
    ----------
    Adjacency - a list of k (n x n) sparse adjacency matrices

    Returns
    -------
    L - a sparse (n*k x n*k) Laplacian matrix
    L_datasets - a list of k (n x n) sparse Laplacian matrices
    D - a sparse (n*k x n*k) Diagonal matrix
    D_datasets - a list of k (n x n) sparse Diagonal matrices

    References
    ----------
    1) Clever trick with the dual-output list comprehension
       stackoverflow: http://goo.gl/ojpRQg
       I don't think it's faster to compute the lists separately,
       because the heavy work is in building each Laplacian, not in
       unpacking the pairs.
    """
    L_datasets, D_datasets = zip(*[create_laplacian(dataset,
                                                    norm_lap=self.norm_lap,
                                                    method=self.lap_method,
                                                    sparse=self.sparse_mat)
                                   for dataset in Adjacency])

    return block_diag(L_datasets), L_datasets, \
        block_diag(D_datasets), D_datasets
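# A minimal, self-contained sketch of the dual-output list comprehension
# trick referenced in the docstring above (illustration only, not from the
# original code): one pass builds pairs, and zip(*...) unpacks them into
# two tuples.
pairs = [(x ** 2, x ** 3) for x in range(5)]
squares, cubes = zip(*pairs)
assert squares == (0, 1, 4, 9, 16)
assert cubes == (0, 1, 8, 27, 64)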
def generate_suffix_counting_cstr(cycle_set, X, R):
    """Generate inequalities (47b) for suffix counting"""
    # Variables a[0] ... a[C-1] + slack vars b[c][l]

    J = len(cycle_set)
    N_cycle_tot = sum(len(C) for C in cycle_set)

    # First set: A_iq1_a a + A_iq1_b b \leq b_iq1
    # guarantees that the count in each cycle is less than
    # its slack var
    A_iq1_a = sp.block_diag(tuple([_cycle_matrix(C, X) for C in cycle_set]))
    A_iq1_b = sp.block_diag(tuple([-np.ones([len(C), 1]) for C in cycle_set]))
    b_iq1 = np.zeros(N_cycle_tot)

    # Second set: A_iq2_b b \leq b_iq2
    # guarantees that the sum of slack vars is
    # less than R
    A_iq2_a = sp.coo_matrix((1, N_cycle_tot))
    A_iq2_b = sp.coo_matrix((np.ones(J), (np.zeros(J), range(J))),
                            shape=(1, J))
    b_iq2 = np.array([R])

    return A_iq1_a, A_iq1_b, b_iq1, A_iq2_a, A_iq2_b, b_iq2
def blockLiftingAnalysis(self):
    """ Build, lift and analyze block matrix form of the collected LPs. """

    # Extract lists of matrices for each LP element from the collected LPs.
    # The coo_matrix calls are needed because vectors are delivered in dense
    # format by the (block) grounder and the stacking below needs a sparse
    # representation.
    am = [x["a"] for x in self.ground]
    bm = [sp.coo_matrix(x["b"]) for x in self.ground]
    cm = [sp.coo_matrix(x["c"]) for x in self.ground]
    gm = [x["g"] for x in self.ground]
    hm = [sp.coo_matrix(x["h"]) for x in self.ground]

    # stack it
    block_a = sp.block_diag(am)
    block_b = sp.vstack(bm)
    block_c = sp.vstack(cm)
    block_g = sp.block_diag(gm)
    block_h = sp.vstack(hm)

    # lift it
    ground = mdict(block_a, block_b, block_c, block_g, block_h)
    lifted = lift(ground, self.sparse, self.orbits)

    # say it
    print >> self.report, "BLOCK LP LIFTING"
    reportToFile(self.report, ground, lifted, self.dumpBlockMatrix)
def sparse_WFE(self, SVD_threshold, gs_wfs_mag=None, spotFWHM_arcsec=None,
               pixelScale_arcsec=None, ron=0.0, nPhBackground=0.0,
               controller=None, G_ncpa=None):
    self.C.threshold = SVD_threshold
    M_wfs = sprs.block_diag(self.C.M)
    D = list(self.C.D)
    D[-1] = np.insert(D[-1], [2, 7], 0, axis=1)
    D_wfs = sprs.block_diag(D)
    Q = sprs.eye(M_wfs.shape[0]) - M_wfs.dot(D_wfs)
    L = sprs.block_diag(self.L)
    self.Qb2 = Q.dot(L.dot(Q.T))
    if G_ncpa is not None:
        Delta_ncpa = M_wfs.dot(G_ncpa.dot(M_wfs.T))
        self.Qb2 += Delta_ncpa
    n = self.Os[0].shape[0]
    wfe_rms = lambda x: np.sqrt(x.diagonal().sum() / n) * 1e9
    Osp = sprs.block_diag(self.Osp)
    self.Cov_wo_noise = Osp.dot(self.Qb2.dot(Osp.T))
    self.noise_free_wfe = wfe_rms(self.Cov_wo_noise)
def Jfull(self, m=None, f=None):
    if f is None:
        f = self.fields(m)

    nn = len(f) - 1
    Asubs, Adiags, Bs = list(range(nn)), list(range(nn)), list(range(nn))
    for ii in range(nn):
        dt = self.timeSteps[ii]
        bc = self.getBoundaryConditions(ii, f[ii])
        Asubs[ii], Adiags[ii], Bs[ii] = self.diagsJacobian(
            m, f[ii], f[ii+1], dt, bc
        )
    Ad = sp.block_diag(Adiags)
    zRight = Utils.spzeros(
        (len(Asubs)-1)*Asubs[0].shape[0], Adiags[0].shape[1]
    )
    zTop = Utils.spzeros(
        Adiags[0].shape[0], len(Adiags)*Adiags[0].shape[1]
    )
    As = sp.vstack((zTop, sp.hstack((sp.block_diag(Asubs[1:]), zRight))))
    A = As + Ad
    B = np.array(sp.vstack(Bs).todense())

    Ainv = self.Solver(A, **self.solverOpts)
    AinvB = Ainv * B
    z = np.zeros((self.mesh.nC, B.shape[1]))
    du_dm = np.vstack((z, AinvB))
    J = self.survey.deriv(f, du_dm_v=du_dm)  # not multiplied by v
    return J
def aveE2CCV(self):
    "Construct the averaging operator on cell edges to cell centers."
    if(self.dim == 1):
        return self.aveEx2CC
    elif(self.dim == 2):
        return sp.block_diag((self.aveEx2CC, self.aveEy2CC), format="csr")
    elif(self.dim == 3):
        return sp.block_diag((self.aveEx2CC, self.aveEy2CC, self.aveEz2CC),
                             format="csr")
def generate_prefix_dyn_cstr(G, T, init):
    """Generate equalities (47c), (47e) for prefix dynamics"""
    K = G.K()
    M = G.M()

    # variables: u[0], ..., u[T-1], x[1], ..., x[T]

    # Obtain system matrix
    B = G.system_matrix()

    # (47c)
    # T*K equalities
    A_eq1_u = sp.block_diag((B,) * T)
    A_eq1_x = sp.block_diag((sp.identity(K),) * T)
    b_eq1 = np.zeros(T * K)

    # (47e)
    # T*K equalities
    A_eq2_u = sp.block_diag((_id_stacked(K, M),) * T)
    A_eq2_x = sp.bmat([[sp.coo_matrix((K, K * (T - 1))),
                        sp.coo_matrix((K, K))],
                       [sp.block_diag((sp.identity(K),) * (T - 1)),
                        sp.coo_matrix((K * (T - 1), K))]
                       ])
    b_eq2 = np.hstack([init, np.zeros((T - 1) * K)])

    # Forbid non-existent modes
    # T * len(ban_idx) equalities
    ban_idx = [G.order_fcn(v) + m * K
               for v in G.nodes_iter()
               for m in range(M)
               if G.mode(m) not in G.node_modes(v)]

    A_eq3_u_part = sp.coo_matrix(
        (np.ones(len(ban_idx)), (range(len(ban_idx)), ban_idx)),
        shape=(len(ban_idx), K * M)
    )
    A_eq3_u = sp.block_diag((A_eq3_u_part,) * T)
    A_eq3_x = sp.coo_matrix((T * len(ban_idx), T * K))
    b_eq3 = np.zeros(T * len(ban_idx))

    # Stack everything
    A_eq_u = sp.bmat([[A_eq1_u], [A_eq2_u], [A_eq3_u]])
    A_eq_x = sp.bmat([[-A_eq1_x], [-A_eq2_x], [A_eq3_x]])
    b_eq = np.hstack([b_eq1, b_eq2, b_eq3])

    return A_eq_u, A_eq_x, b_eq
def _noise_eval(self, fx, theta):
    """ Evaluate the noise. """
    fx_part = self._extract_parts(fx, self._idx_dim, self.model.num_input)
    theta_part = self._extract_parts(theta, self._idx_like_params,
                                     self.num_like_params)
    states = [c._noise_eval(f, t)
              for f, t in izip(fx_part, theta_part)]
    state = {}
    state['L'] = np.sum([s['L'] for s in states])
    state['L_grad_f'] = np.hstack([s['L_grad_f'] for s in states])
    state['L_grad_theta'] = np.hstack([s['L_grad_theta'] for s in states])
    state['L_grad_2_f'] = block_diag([s['L_grad_2_f'] for s in states])
    state['L_grad_2_theta'] = block_diag([s['L_grad_2_theta'] for s in states])
    state['L_grad_2_theta_f'] = block_diag([s['L_grad_2_theta_f'] for s in states])
    return state
def eigbh(cm, bm, return_vecs=True):
    '''
    Get the eigenvalues and eigenvectors for a matrix with block structure
    specified by a block marker.

    Parameters:
        :cm: csr_matrix/bsr_matrix, the input matrix.
        :bm: <BlockMarkerBase>, the block marker.
        :return_vecs: bool, return the eigenvectors or not.

    Return:
        (eigenvalues, eigenvectors) if return_vecs == True.
        (eigenvalues,) if return_vecs == False.
    '''
    EL, UL = [], []
    is_sparse = sps.issparse(cm)
    for i in xrange(bm.nblock):
        mi = bm.extract_block(cm, (i, i))
        if return_vecs:
            if is_sparse:
                mi = mi.toarray()
            ei, ui = eigh(mi)
            EL.append(ei)
            UL.append(ui)
        else:
            ei = eigvalsh(mi)
            EL.append(ei)
    if return_vecs:
        return concatenate(EL), sps.block_diag(UL)
    else:
        return concatenate(EL)
def build_projective_lcp(self):
    """ Build the projective lcp """
    Q = self.Q
    N = self.num_nodes
    K = self.num_bases
    A = self.num_actions

    # Block diagonal basis
    RepQ = [Q] * (A + 1)
    if isinstance(Q, np.ndarray):
        BigQ = sp.linalg.block_diag(*RepQ)
    else:
        BigQ = sps.block_diag(RepQ)
    assert((N, K) == BigQ.shape)
    self.BigQ = BigQ

    # Build the U part of the matrix
    U = mdp.mdp_skew_assembler([Q.T] * A)
    assert((N, N) == U.shape)
    self.U = U

    q = self.build_q()
    assert((N,) == q.shape)

    self.proj_lcp_obj = lcp.ProjectiveLCPObj(BigQ, U, q)
    return self.proj_lcp_obj
def generate_regularization_matrix(self):
    regmat = self.reg()
    shape = regmat.shape
    shape = (shape[0] * (self.num_epochs - 1),
             shape[1] * (self.num_epochs - 1))
    zero = sparse.csc_matrix(shape)
    L = sparse.block_diag([regmat, zero])
    return L
def kron_mat(lin_op):
    """Returns the coefficient matrix for KRON linear op.

    Parameters
    ----------
    lin_op : LinOp
        The kron linear op.

    Returns
    -------
    list of SciPy CSC matrix
        The matrix representing the Kronecker product.
    """
    constant = const_mat(lin_op.data)
    lh_rows, lh_cols = constant.shape
    rh_rows, rh_cols = lin_op.args[0].size

    # Stack sections for each column of the output.
    col_blocks = []
    for j in range(lh_cols):
        # Vertically stack A_{ij}*Identity.
        blocks = []
        for i in range(lh_rows):
            blocks.append(constant[i, j] * sp.eye(rh_rows))
        column = sp.vstack(blocks)
        # Make block diagonal matrix by repeating column.
        col_blocks.append(sp.block_diag(rh_cols * [column]))
    coeff = sp.vstack(col_blocks).tocsc()
    return [coeff]
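# A small self-contained check (illustration only, not from the original
# source) of the identity kron_mat relies on, assuming column-major
# vectorization as in cvxpy: coeff @ vec(X) == vec(kron(C, X)).
import numpy as np
import scipy.sparse as sp

C = np.arange(6, dtype=float).reshape(2, 3)   # left-hand constant
X = np.arange(12, dtype=float).reshape(3, 4)  # right-hand argument

col_blocks = []
for j in range(C.shape[1]):
    # Vertically stack C[i, j] * Identity, as kron_mat does.
    column = sp.vstack([C[i, j] * sp.eye(X.shape[0])
                        for i in range(C.shape[0])])
    col_blocks.append(sp.block_diag(X.shape[1] * [column]))
coeff = sp.vstack(col_blocks).tocsc()

assert np.allclose(coeff @ X.flatten(order='F'),
                   np.kron(C, X).flatten(order='F'))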
def fit(self, X, y, sample_weight):
    self.queries = X[self.request_column]
    self.y = y
    self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True)
    self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True)

    self.lookups = [normed_ranks,
                    normed_queries * len(self.possible_ranks) + normed_ranks]
    self.minlengths = [len(self.possible_ranks),
                       len(self.possible_ranks) * len(self.possible_queries)]
    self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)],
                                      dtype=float)
    for r1 in self.possible_ranks:
        for r2 in self.possible_ranks:
            if r1 < r2:
                if self.messup_penalty == 'square':
                    self.rank_penalties[r1, r2] = (r2 - r1) ** 2
                elif self.messup_penalty == 'linear':
                    self.rank_penalties[r1, r2] = r2 - r1
                else:
                    raise NotImplementedError()

    self.penalty_matrices = []
    self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y)))
    n_queries = numpy.bincount(normed_queries)
    assert len(n_queries) == len(self.possible_queries)
    self.penalty_matrices.append(
        sparse.block_diag([self.rank_penalties * 1. / numpy.sqrt(1 + nq) for nq in n_queries]))
    HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
def _adjacency_blocks(self, X):
    """Create adjacency block matrices from adjacency matrices

    Parameters
    ----------
    X - a list of k (n x m) dense arrays

    Returns
    -------
    W - a sparse block diagonal (n*k x n*k) matrix
    W_datasets - a list of k sparse (n x n) adjacency matrices
    """
    # create adjacency matrices
    W_datasets = [compute_adjacency(dataset,
                                    n_neighbors=self.n_neighbors,
                                    affinity=self.affinity,
                                    weight=self.weight,
                                    sparse=self.sparse_mat,
                                    neighbors_algorithm=self.nn_algo,
                                    metric=self.metric,
                                    trees=self.trees,
                                    gamma=self.gamma)
                  for dataset in X]

    '''
    TODO: list comprehensions of list comprehensions for different
    values in the adjacency matrix, e.g. different weight, affinity,
    n_neighbors, metric, trees, gamma, algorithm for each matrix.
    '''

    # return
    return block_diag(W_datasets), W_datasets
def make_precon(self, atoms):
    if self.H0 is not None:
        # only build H0 on first call
        return NotImplemented

    variable_cell = False
    if isinstance(atoms, Filter):
        variable_cell = True
        atoms = atoms.atoms

    # position DoF
    omega = self.phonon_frequency
    mass = atoms.get_masses().mean()
    block = np.eye(3) / (mass * omega**2)
    blocks = [block] * len(atoms)

    # cell DoF
    if variable_cell:
        coeff = 1.0
        if self.apply_cell:
            coeff = 1.0 / (3 * self.bulk_modulus)
        blocks.append(np.diag([coeff] * 9))

    self.H0 = sparse.block_diag(blocks, format='csr')
    return NotImplemented
def block_matrix(N):
    """Construct the block-diagonal matrix for the Dicke basis.

    Parameters
    ----------
    N: int
        Number of two-level systems.

    Returns
    -------
    block_matr: sparse matrix
        A 2D block-diagonal matrix of ones with dimension (nds, nds),
        where nds is the number of Dicke states for N two-level systems.
    """
    nds = _num_dicke_states(N)

    # create a list with the sizes of the blocks, in order
    blocks_dimensions = int(N/2 + 1 - 0.5*(N % 2))
    blocks_list = [(2 * (i+1 * (N % 2)) + 1*((N+1) % 2))
                   for i in range(blocks_dimensions)]
    blocks_list = np.flip(blocks_list, 0)

    # create a list with each block matrix as element
    square_blocks = []
    for i in blocks_list:
        square_blocks.append(np.ones((i, i)))

    return block_diag(square_blocks)
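# A quick sketch (illustration only, not from the original code) of what
# block_matrix builds for N = 2, assuming scipy.sparse.block_diag: the Dicke
# blocks have sizes 3 and 1, so the mask is a 4 x 4 block-diagonal matrix
# of ones.
import numpy as np
from scipy.sparse import block_diag

mask = block_diag([np.ones((3, 3)), np.ones((1, 1))]).toarray()
assert mask.shape == (4, 4)
assert mask[0, 0] == 1 and mask[0, 3] == 0 and mask[3, 3] == 1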
def run_figure_3_8():
    # perturbation size
    epsilon = 1e-8

    # define base matrix
    B = numpy.array([[0, 0, 1],
                     [1, 0, 0],
                     [0, 1, 0]])
    n = 20
    C = numpy.diag(numpy.ones(n-1), -1)
    C[0, -1] = 1
    A = block_diag((B, 1e2*C)).todense()

    # get pseudospectrum (normal matrix!)
    from pseudopy import Normal
    pseudo = Normal(A)

    # get polynomial
    p = utils.NormalizedRootsPolynomial(numpy.linalg.eigvals(B))

    # define deltas for evaluation
    deltas = numpy.logspace(numpy.log10(epsilon*1.01), 8, 400)

    # compute bound
    bound = utils.bound_perturbed_gmres(pseudo, p, epsilon, deltas)
    pyplot.loglog(deltas, bound)

    # add labels and legend
    pyplot.xlabel(r'$\delta$')
    pyplot.ylabel(r'bound')
    pyplot.show()
def _grad(self, values):
    """Gives the (sub/super)gradient of the atom w.r.t. each argument.

    Matrix expressions are vectorized, so the gradient is a matrix.

    Args:
        values: A list of numeric values for the arguments.

    Returns:
        A list of SciPy CSC sparse matrices or None.
    """
    X = values[0]
    Y = values[1]
    DX_rows = self.args[0].size[0]*self.args[0].size[1]
    cols = self.args[0].size[0]*self.args[1].size[1]
    # DX = [diag(Y11), diag(Y12), ...]
    #      [diag(Y21), diag(Y22), ...]
    #      [   ...        ...     ...]
    DX = sp.dok_matrix((DX_rows, cols))
    for k in range(self.args[0].size[0]):
        DX[k::self.args[0].size[0], k::self.args[0].size[0]] = Y
    DX = sp.csc_matrix(DX)
    DY = sp.block_diag([X.T for k in range(self.args[1].size[1])], 'csc')
    return [DX, DY]
def get_sample_data(n_sess, full_brain=False, subj=1):
    """ Download the data for the current session and subject

    Parameters
    ----------
    n_sess: int
        number of session, one of {0, 1, 2, 3, 4}
    full_brain: bool
        whether to fetch the full-brain dataset instead of the default one
    subj: int
        number of subject, one of {1, 2}
    """
    DIR = tempfile.mkdtemp()
    ds = np.DataSource(DIR)
    BASEDIR = 'http://fa.bianp.net/projects/hrf_estimation/data'
    BASEDIR_COMMON = BASEDIR + '/data_common/'
    if full_brain:
        BASEDIR += '/full_brain'
    BASEDIR_SUBJ = BASEDIR + '/data_subj%s/' % subj

    event_matrix = io.mmread(ds.open(
        BASEDIR_COMMON + 'event_matrix.mtx')).toarray()
    print('Downloading BOLD signal')
    voxels = np.load(ds.open(
        BASEDIR_SUBJ + 'voxels_%s.npy' % n_sess))
    # print('Downloading Scatting Stim')
    # scatt_stim = np.load(ds.open(
    #     BASEDIR_SUBJ + 'scatt_stim_%s.npy' % n_sess))

    em = sparse.coo_matrix(event_matrix)
    fir_matrix = utils.convolve_events(event_matrix, np.eye(HRF_LENGTH))
    events_train = sparse.block_diag([event_matrix] * 5).toarray()
    conditions_train = sparse.coo_matrix(events_train).col
    onsets_train = sparse.coo_matrix(events_train).row

    return voxels, conditions_train, onsets_train
def setMs(self, nSensors=10):
    '''Creates an n-grid mesh across the surface for the 3D case '''
    self.nSen = nSensors*nSensors

    '''First find the appropriate 10 indexes within the PML & illumination region '''
    indx = np.round(np.linspace(self.npml+5, self.nx-self.npml-5, nSensors)).astype(int) - 1
    indx = np.unique(indx)
    # print (indx + 1)

    ''' make the exact X operator using strides '''
    xl, zl = np.meshgrid(indx+1, indx)
    Mx = sparse.dok_matrix((self.nSen, (self.nx+1)*self.ny*self.nz))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*self.ny*self.nz + self.div*self.nz + loc[1]
        Mx[ix, pts] = 1.0

    xl, zl = np.meshgrid(indx, indx)
    My = sparse.dok_matrix((self.nSen, self.nx*(self.ny+1)*self.nz))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*(self.ny+1)*self.nz + (self.div+1)*self.nz + loc[1]
        My[ix, pts] = 1.0

    '''make the exact Z operator using strides '''
    xl, zl = np.meshgrid(indx, indx+1)
    Mz = sparse.dok_matrix((self.nSen, self.nx*self.ny*(self.nz+1)))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*self.ny*(self.nz+1) + self.div*(self.nz+1) + loc[1]
        Mz[ix, pts] = 1.0

    ''' smush together in block diagonal format '''
    self.Ms = sparse.block_diag((Mx, My, Mz), 'csr')
    self.nSen = 3*self.nSen
def make_weak_corr_mat(corr_size):
    raw_corr_mats = [np.ones((corr_size, corr_size)) * float(i)/10
                     for i in range(10)]
    corr_mats = [np.maximum(_, np.eye(corr_size)) for _ in raw_corr_mats]
    corr_mat_sqrts = [np.linalg.cholesky(_) for _ in corr_mats]
    output_corr_mats = np.random.choice(len(corr_mat_sqrts),
                                        N_SNPS // corr_size, replace=True)
    output_corr_mat_sqrts = [corr_mat_sqrts[_] for _ in output_corr_mats]
    output_corr_mat_sqrt = block_diag(output_corr_mat_sqrts)
    return output_corr_mat_sqrt
def sparse_prior(sigma, omega, trial_lengths, rank):
    # [diagonal(G1, G2, ..., Gq)]
    from scipy import sparse

    return [
        sparse.block_diag([s * ichol_gauss(l, w, rank)
                           for s, w in zip(sigma, omega)])
        for l in trial_lengths
    ]
def _build_sparse_mtx():
    """ Create 3 topics, each with 3 words """
    n_topics = 3
    alpha0 = eta0 = 1.0 / n_topics
    block = n_topics * np.ones((3, 3))
    X = sp.block_diag([block] * n_topics).tocsr()
    return n_topics, alpha0, eta0, X
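# A minimal illustration (not from the original test) of the fixture above:
# with n_topics = 3 the document-term matrix X is 9 x 9 and block diagonal,
# so each group of three "documents" only uses its own three "words".
import numpy as np
import scipy.sparse as sp

block = 3 * np.ones((3, 3))
X = sp.block_diag([block] * 3).tocsr()
assert X.shape == (9, 9)
assert X[0, 0] == 3 and X[0, 3] == 0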
def gen_basis_matrix(self):
    res = []
    for nth, _ in enumerate(self.yf[0:-1]):
        for mth, _ in enumerate(self.xf[0:-1]):
            slip = self.gen_slip_mesh(mth, nth)
            res.append(slip.reshape([-1, 1]))
    res1 = sparse.csr_matrix(hstack(res))
    res2 = sparse.block_diag([res1]*self.num_epochs)
    return res2
def mitochondrial_relationship_matrix(self, ids=None):
    """
    Returns a block diagonal matrix of mitochondrial relationships for
    each pedigree.

    See notes on Pedigree.mitochondrial_relationship_matrix
    """
    mats = [x.mitochondrial_relationship_matrix(ids)
            for x in sorted(self.pedigrees, key=lambda x: x.label)]
    return block_diag(mats, format='bsr')
def build_chebyshev_basis(a, b, N, k):
    x = np.linspace(-1, 1, N)
    B = np.polynomial.chebyshev.chebvander(x, k-3)
    B = np.hstack([a[:, np.newaxis], b[:, np.newaxis], B])
    B = orthonorm(B)
    P = sps.block_diag([B]*3)
    assert (3*N, 3*k) == P.shape
    return P
def additive_relationship_matrix(self, ids=None):
    """
    Returns a block diagonal matrix of additive relationships for
    each pedigree.

    See notes on Pedigree.additive_relationship_matrix
    """
    mats = [x.additive_relationship_matrix(ids)
            for x in sorted(self.pedigrees, key=lambda x: x.label)]
    mats = [x for x in mats if x.size > 0]
    return block_diag(mats, format='bsr')
def invrecovar(reData, sampleCov):
    numre = len(reData)
    temp = []
    for i in range(numre):
        nop = len(np.unique(reData[i][1]))
        temp.append(sp.kron(sp.identity(nop), np.linalg.inv(sampleCov[i])))
    result = temp[0]
    if numre > 1:
        for i in np.arange(1, numre):
            result = block_diag((result, temp[i]))
    return result
def fget(self):
    if(self._cellGradBC is None):
        BC = self.setCellGradBC(self._cellGradBC_list)
        n = self.vnC
        if(self.dim == 1):
            G = ddxCellGradBC(n[0], BC[0])
        elif(self.dim == 2):
            G1 = sp.kron(speye(n[1]), ddxCellGradBC(n[0], BC[0]))
            G2 = sp.kron(ddxCellGradBC(n[1], BC[1]), speye(n[0]))
            G = sp.block_diag((G1, G2), format="csr")
        elif(self.dim == 3):
            G1 = kron3(speye(n[2]), speye(n[1]), ddxCellGradBC(n[0], BC[0]))
            G2 = kron3(speye(n[2]), ddxCellGradBC(n[1], BC[1]), speye(n[0]))
            G3 = kron3(ddxCellGradBC(n[2], BC[2]), speye(n[1]), speye(n[0]))
            G = sp.block_diag((G1, G2, G3), format="csr")
        # Compute areas of cell faces & volumes
        S = self.area
        V = self.aveCC2F*self.vol  # Average volume between adjacent cells
        self._cellGradBC = sdiag(S/V)*G
    return self._cellGradBC
def setMd(self, xrng, yrng, zrng):
    '''Tell me the xrange, yrange, and zrange and I'll
    1) specify nRx, nRy, and nRz
    2) produce a matrix that achieves a 1:1 sampling, self.Md '''

    '''set the right dimensions'''
    self.nRx = xrng[1]-xrng[0]
    self.nRy = yrng[1]-yrng[0]
    self.nRz = zrng[1]-zrng[0]
    nR = self.nRx*self.nRy*self.nRz

    ''' ok have to use spans: loc = i*J*K + j*K + k for row-major ordering '''
    ''' populate the locations in the X grid'''
    # sX = sparse.dok_matrix((self.nx+1,self.ny,self.nz),dtype='bool')
    # sX[xrng[0]+1:xrng[1]+1,yrng[0]:yrng[1],zrng[0]:zrng[1]] = True

    ''' make it an operator '''
    ''' nested for should give reshape-able vectors '''
    cnt = 0
    Mx = sparse.dok_matrix((nR, (self.nx+1)*self.ny*self.nz))
    for x in xrange(xrng[0]+1, xrng[1]+1):
        for y in xrange(yrng[0], yrng[1]):
            for z in xrange(zrng[0], zrng[1]):
                pts = x*self.ny*self.nz + y*self.nz + z
                Mx[cnt, pts] = 1.0
                cnt += 1

    '''populate the locations in the Y grid'''
    My = sparse.dok_matrix((nR, self.nx*(self.ny+1)*self.nz))
    cnt = 0
    for x in xrange(xrng[0], xrng[1]):
        for y in xrange(yrng[0]+1, yrng[1]+1):
            for z in xrange(zrng[0], zrng[1]):
                pts = x*(self.ny+1)*self.nz + y*self.nz + z
                My[cnt, pts] = 1.0
                cnt += 1

    '''populate the locations in the Z grid'''
    Mz = sparse.dok_matrix((nR, self.nx*self.ny*(self.nz+1)))
    cnt = 0
    for x in xrange(xrng[0], xrng[1]):
        for y in xrange(yrng[0], yrng[1]):
            for z in xrange(zrng[0]+1, zrng[1]+1):
                pts = x*(self.ny)*(self.nz+1) + y*(self.nz+1) + z
                Mz[cnt, pts] = 1.0
                cnt += 1

    ''' put them all together in a block matrix '''
    self.Md = spt.vCat([Mx.T, My.T, Mz.T]).T
    print 'Md shape ' + repr(self.Md.shape)

    self.x2u = sparse.block_diag((Mx, My, Mz), 'csc').T
    print 'x2u shape ' + repr(self.x2u.shape)
def test_multithread(self):
    data = []
    n_rep = 20
    for i in range(n_rep):
        m = 1000
        n = 500
        Ad = sparse.random(m, n, density=0.3, format='csc')
        b = np.random.randn(m)

        # OSQP data
        P = sparse.block_diag(
            [sparse.csc_matrix((n, n)), sparse.eye(m)], format='csc')
        q = np.zeros(n+m)
        A = sparse.vstack([
            sparse.hstack([Ad, -sparse.eye(m)]),
            sparse.hstack([sparse.eye(n), sparse.csc_matrix((n, m))])],
            format='csc')
        l = np.hstack([b, np.zeros(n)])
        u = np.hstack([b, np.ones(n)])

        data.append((P, q, A, l, u))

    def f(i):
        P, q, A, l, u = data[i]
        m = osqp.OSQP()
        m.setup(P, q, A, l, u, verbose=False)
        m.solve()

    pool = ThreadPool(2)

    tic = time.time()
    for i in range(n_rep):
        f(i)
    t_serial = time.time() - tic

    tic = time.time()
    pool.map(f, range(n_rep))
    t_parallel = time.time() - tic

    self.assertLess(t_parallel, t_serial)
def cvx_solve_u(A, B, x_cell):
    T = len(x_cell)
    ni, nj = x_cell[0].shape
    n = ni * nj
    Xvec = np.empty((0, n))
    for id in range(T):
        xt = np.transpose(x_cell[id])
        Xvec = np.append(Xvec, [xt.flatten()], axis=0)
    BigA = copy.deepcopy(A)
    BigB = copy.deepcopy(B)
    for id in range(T - 2):
        BigA = block_diag((BigA, A))
        BigB = vstack((BigB, B))

    # u = cvxpy.Variable(n)
    # uu = u
    # for i in range(T-2):
    #     uu = cvxpy.vstack(uu, u)

    Xvec = Xvec.flatten()
    coef = Xvec[n:n * T] - BigA * Xvec[0:n * (T - 1)]

    def func(params, xdata, ydata):
        return (ydata - np.dot(xdata, params))

    u = np.zeros(n)
    u = optimization.leastsq(func, u, args=(BigB.toarray(), coef))

    # fn = cvxpy.norm(coef - BigB.toarray() * uu)
    # eta = 2.22*1e-16
    # soltol = eta**(3/8)
    # stdtol = eta**(1/4)
    # redtol = eta**(1/4)
    # objective = cvxpy.Minimize(fn)
    # prob = cvxpy.Problem(objective)
    # # TODO: Change solver and tolerance levels
    # # prob.solve(solver=cvxpy.ECOS, verbose=False, abstol=1e-3, reltol=1e-3, feastol=1e-3)
    # prob.solve(solver=cvxpy.ECOS_BB, verbose=True, abstol=soltol, reltol=stdtol, feastol=soltol)
    # print prob.value
    return u[0]
def gen_qp_matrices(k, n, gammas, dens_lvl=0.5):
    """
    Generate QP matrices for portfolio optimization problem
    """
    # Generate data
    F = spa.random(n, k, density=dens_lvl, format='csc')
    D = spa.diags(np.random.rand(n) * np.sqrt(k), format='csc')
    mu = np.random.randn(n)

    # Construct the problem
    #       minimize    x' D x + y' I y - (1/gamma) * mu' x
    #       subject to  1' x = 1
    #                   F' x = y
    #                   0 <= x <= 1
    P = spa.block_diag((2 * D, 2 * spa.eye(k)), format='csc')
    A = spa.vstack([
        spa.hstack([spa.csc_matrix(np.ones((1, n))), spa.csc_matrix((1, k))]),
        spa.hstack([F.T, -spa.eye(k)])
    ]).tocsc()
    l = np.hstack([1., np.zeros(k)])  # Linear constraints
    u = np.hstack([1., np.zeros(k)])
    lx = np.zeros(n)  # Bounds
    ux = np.ones(n)

    # Create linear cost vectors
    q = np.empty((k + n, 0))
    for gamma in gammas:
        q = np.column_stack((q, np.append(-mu / gamma, np.zeros(k))))

    qp_matrices = utils.QPmatrices(P, q, A, l, u, lx, ux)

    # Add further matrices for CVXPY modeling
    qp_matrices.F = F
    qp_matrices.D = D
    qp_matrices.mu = mu
    qp_matrices.gammas = gammas

    # Return QP matrices
    return qp_matrices
def spline_dIm_dphi(img, phi):
    rows, columns = img.shape
    lx = np.linspace(0, columns - 1, columns)
    ly = np.linspace(0, rows - 1, rows)
    phi_x = phi[:, 0]
    phi_y = phi[:, 1]
    spline = interpolate.RectBivariateSpline(
        ly, lx,  # coords once only
        img.astype(np.float))
    x_grad = spline.ev(phi_y, phi_x, dx=1, dy=0)
    y_grad = spline.ev(phi_y, phi_x, dx=0, dy=1)
    all_grads = [x_grad, y_grad]
    gradient_array = np.dstack(
        [dim_arr.flatten(order='F') for dim_arr in all_grads[::-1]])[0]
    grad_block_diag = sparse.block_diag(gradient_array)
    return grad_block_diag
def solveLaplacianMesh(self, anchors, anchorsIdx, cotangent=True):
    n = self.mesh.VPos.shape[0]  # N x 3
    k = anchorsIdx.shape[0]

    if cotangent:
        self.getLaplacianMatrixCotangent(anchorsIdx)  # get LaplacianMatrix, cotangent=True
    else:
        self.getLaplacianMatrixUmbrella(anchorsIdx)
    delta = np.array(self.L.dot(self.mesh.VPos))  # numpy data (5853, 3)
    print(type(delta))
    print("delta shape is {}".format(delta.shape))
    print("n is {}".format(n))

    # augment delta solution matrix with weighted anchors
    for i in range(k):
        delta[n + i, :] = self.WEIGHT[i] * anchors[i, :]

    # update mesh vertices with least-squares solution
    # save_pickle_file("L.pkl", self.L)
    # save_pickle_file("delta.pkl", delta)
    l = block_diag((self.L, self.L, self.L))
    d = np.hstack((delta[:, 0], delta[:, 1], delta[:, 2]))
    ans = lsqr(l, d)
    self.mesh.VPos = ans[0].reshape(3, -1).T
def fit(self, X, y, sample_weight):
    self.queries = X[self.request_column]
    self.y = y
    self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True)
    self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True)

    self.lookups = [normed_ranks,
                    normed_queries * len(self.possible_ranks) + normed_ranks]
    self.minlengths = [len(self.possible_ranks),
                       len(self.possible_ranks) * len(self.possible_queries)]
    self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)],
                                      dtype=float)
    for r1 in self.possible_ranks:
        for r2 in self.possible_ranks:
            if r1 < r2:
                self.rank_penalties[r1, r2] = (r2 - r1) ** self.penalty_power

    self.penalty_matrices = []
    self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y)))
    n_queries = numpy.bincount(normed_queries)
    assert len(n_queries) == len(self.possible_queries)
    self.penalty_matrices.append(
        sparse.block_diag([self.rank_penalties * 1. / numpy.sqrt(1 + nq) for nq in n_queries]))
    HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
def __init__(self, item_model, rating_matrix, alpha=0.005):
    inter_item = item_model / norm(item_model, np.inf)

    # stochasticity adjustment
    adjustment = 1 - inter_item.sum(axis=1).A.squeeze()
    inter_item += diags(adjustment, shape=item_model.shape, format='csr')

    # M matrix
    transition = block_diag(
        (eye(rating_matrix.shape[0], format='csr'), inter_item),
        format='csr', dtype='float64',
    )

    # H matrix
    walk_model = bmat(
        [[None, rating_matrix], [rating_matrix.T, None]],
        format='csr', dtype='float64',
    )
    k = np.reciprocal(walk_model.sum(axis=1).A.squeeze())
    walk_model = diags(k, format='csr').dot(walk_model)

    self._p = alpha * walk_model + (1 - alpha) * transition
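# A small sketch (illustration only, not from the original class) of the
# row-normalization step used for the H matrix above: scaling each row of
# the walk matrix by the reciprocal of its row sum makes it row-stochastic.
import numpy as np
from scipy.sparse import csr_matrix, diags

W = csr_matrix(np.array([[0., 2., 2.],
                         [1., 0., 3.],
                         [4., 0., 0.]]))
k = np.reciprocal(W.sum(axis=1).A.squeeze())
P = diags(k, format='csr').dot(W)
assert np.allclose(P.sum(axis=1), 1.0)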
def change_of_basis(self) -> sparse.coo_matrix:
    r"""
    The change of basis matrix which decomposes the field types
    representation into irreps, given as a sparse (block diagonal) matrix
    (:class:`scipy.sparse.coo_matrix`).

    It is the direct sum of the change of basis matrices of each
    representation in :attr:`e2cnn.nn.FieldType.representations`.

    .. seealso ::
        :attr:`e2cnn.group.Representation.change_of_basis`

    Returns:
        the change of basis
    """
    change_of_basis = []
    for rep in self.representations:
        change_of_basis.append(rep.change_of_basis)
    return sparse.block_diag(change_of_basis)
def matrices_A_B_sparse(C, theta, x, f):
    """
    C : set of coordinates of the K cameras
    theta : set of rotation angles associated with the cameras
    x : set of image coordinates on the K cameras for the N points
        (array of shape (N, K, 2))

    Computes the matrices A and B.
    """
    K, N = C.shape[0], x.shape[0]
    A = []
    diag_B = []
    liste_couples = genere_liste_couples(K)
    for j in range(N):
        Aj, Bj = matrices_Aj_Bj(C, theta, x, j, f, liste_couples)
        if j == 0:
            A = Aj
        else:
            A = np.concatenate((A, Aj), axis=0)
        diag_B.append(Bj)
    B = ss.block_diag(diag_B)
    return coo_matrix(A), B
def build_diag_matrix(fields, func, func_args, restriction=None):
    if restriction is None:
        restrict = [None] * len(fields)
    else:
        try:
            n = len(restriction[0])
            if len(restriction) == len(fields):
                restrict = restriction
            else:
                raise RuntimeError(
                    'Restriction size does not match number of fields')
        except TypeError:
            restrict = [restriction] * len(fields)

    tmp = []
    for j, field_row in enumerate(fields):
        args = func_args + (field_row, )
        tmp.append(func(*args, restriction=restrict[j]))

    return spsp.block_diag(tmp, format='coo')
def _test_disjoint_mode(layer, sparse=False, **kwargs):
    A = sp.block_diag(
        [np.ones((N1, N1)), np.ones((N2, N2)), np.ones((N3, N3))]
    ).todense()
    X = np.random.normal(size=(N, F))
    I = np.array([0] * N1 + [1] * N2 + [2] * N3).astype(int)

    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=tf.int32)
    inputs = [X_in, A_in, I_in]

    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A), I]
    else:
        input_data = [X, A, I]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)
    output = model(input_data)

    X_pool, A_pool, I_pool, mask = output

    N_pool_expected = (
        np.ceil(kwargs["ratio"] * N1)
        + np.ceil(kwargs["ratio"] * N2)
        + np.ceil(kwargs["ratio"] * N3)
    )
    N_pool_expected = int(N_pool_expected)
    N_pool_true = A_pool.shape[0]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)
    assert I_pool.shape == (N_pool_expected,)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
def mul_by_const(constant, rh_coeffs, size):
    """Multiplies a constant by a list of coefficients.

    Parameters
    ----------
    constant : numeric type
        The constant to multiply by.
    rh_coeffs : list
        The coefficients of the right hand side.
    size : tuple
        (product rows, product columns)

    Returns
    -------
    list
        A list of (id, size, coefficient) tuples.
    """
    new_coeffs = []
    rep_mat = sp.block_diag(size[1] * [constant]).tocsc()
    # Multiply all left-hand constants by right-hand terms.
    for (id_, rh_size, coeff) in rh_coeffs:
        # For scalar left hand constants,
        # if right hand term is constant,
        # or single column, just multiply.
        # Keeps scalars and dense constants as original type.
        if intf.is_scalar(constant) or \
           id_ is lo.CONSTANT_ID or size[1] == 1:
            product = constant * coeff
        # For promoted variables with matrix coefficients,
        # flatten the matrix.
        elif size != (1, 1) and intf.is_scalar(coeff):
            flattened_const = flatten(constant)
            product = flattened_const * coeff
        # Otherwise replicate the matrix.
        else:
            product = rep_mat * coeff
        new_coeffs.append((id_, rh_size, product))

    return new_coeffs
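# A self-contained check (illustration only, not from the original source)
# of the replication identity behind rep_mat above, assuming column-major
# vectorization: block_diag([C] * ncols) @ vec(X) == vec(C @ X).
import numpy as np
import scipy.sparse as sp

C = np.arange(6, dtype=float).reshape(2, 3)
X = np.arange(12, dtype=float).reshape(3, 4)
rep_mat = sp.block_diag(X.shape[1] * [C]).tocsc()
assert np.allclose(rep_mat @ X.flatten(order='F'),
                   (C @ X).flatten(order='F'))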
def _build_uncoupled_matrices(self, problem, names, cacheid=None):
    matrices = {name: [] for name in (names + ['select'])}
    zbasis = self.domain.bases[-1]
    dtype = zbasis.coeff_dtype
    for last_index in range(zbasis.coeff_size):
        submatrices = self._build_uncoupled_submatrices(
            problem, names, last_index, cacheid=cacheid)
        for name in matrices:
            matrices[name].append(submatrices[name])
    for name in matrices:
        blocks = matrices[name]
        matrix = sparse.block_diag(blocks, format='csr', dtype=dtype)
        matrix.eliminate_zeros()
        matrices[name] = matrix

    # Store minimal CSR matrices for fast dot products
    for name in names:
        matrix = matrices[name]
        matrix.eliminate_zeros()
        setattr(self, name, matrix.tocsr())

    # Store expanded CSR matrices for fast combination
    self.LHS = zeros_with_pattern(*[matrices[name] for name in names]).tocsr()
    for name in names:
        matrix = matrices[name]
        matrix = expand_pattern(matrix, self.LHS)
        setattr(self, name + '_exp', matrix.tocsr())

    # Store operators for RHS
    self.G_eq = matrices['select']
    self.G_bc = None  # no Dirichlet
    self.dirichlet = None
def _build_appearance_model_block_diagonal(all_patches_array, n_points,
                                           patch_shape, n_channels,
                                           n_appearance_parameters,
                                           level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution per '
                      'patch'.format(level_str))
    # compute mean appearance vector
    app_mean = np.mean(all_patches_array, axis=-1)
    # number of images
    n_images = all_patches_array.shape[-1]
    # compute covariance matrix for each patch
    all_cov = []
    for e in range(n_points):
        # print progress
        if verbose:
            print_dynamic('{}Training appearance distribution '
                          'per patch - {}'.format(
                              level_str,
                              progress_bar_str(float(e + 1) / n_points,
                                               show_bar=False)))
        # select patches and vectorize
        patches_vector = all_patches_array[e, ...].reshape(-1, n_images)
        # compute covariance
        cov_mat = np.cov(patches_vector)
        # compute covariance inverse
        inv_cov_mat = _covariance_matrix_inverse(cov_mat,
                                                 n_appearance_parameters)
        # store covariance
        all_cov.append(inv_cov_mat)
    # create final sparse covariance matrix
    return app_mean, block_diag(all_cov).tocsr()
def compute(self, X1, X2):
    if np.array_equal(X1, X2):
        print('Running efficient version.')
        # We can do something cleverer.
        input_dim = self.input_dims[0]
        ind_sorted = np.argsort(X1[:, input_dim])
        # FIXME: Quietly assuming that the input dim is all zeros, then all
        # 1s, and so on. This needs to be asserted somehow!
        sub_kernels = list()
        # Now, we can just compute the kernels individually and then put
        # them together to be block diagonal.
        for i, cur_kernel in enumerate(self.kernels):
            relevant = X1[:, input_dim] == i
            if np.sum(relevant) == 0:
                # Nothing to do
                continue
            relevant_rows = X1[relevant, :]
            # Otherwise, compute the kernel for these rows
            computed_kernel = cur_kernel.compute(
                relevant_rows, relevant_rows)
            sub_kernels.append(computed_kernel)
        return sps.block_diag(sub_kernels, format='csc')
    else:
        print('Running agnostic version.')
        return self.compute_agnostic(X1, X2)
def parse(self, gb: pp.GridBucket) -> sps.spmatrix:
    """Convert the Ad expression into divergence operators on all relevant
    grids, represented as a sparse block matrix.

    Parameters:
        gb (pp.GridBucket): Not used, but needed for compatibility with the
            general parsing method for Operators.

    Returns:
        sps.spmatrix: Block matrix representation of a divergence operator
            on multiple grids.
    """
    if self.dim == 1:
        mat = [pp.fvutils.scalar_divergence(g) for g in self._g]
    else:
        mat = [
            sps.kron(pp.fvutils.scalar_divergence(g), sps.eye(self.dim))
            for g in self._g
        ]
    matrix = sps.block_diag(mat)
    return matrix
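# A minimal sketch (not from the original module) of the vector extension
# above, using a hypothetical 1 x 2 scalar divergence: kron with an identity
# repeats each scalar entry as a dim x dim block, so the operator acts on
# each vector component independently.
import numpy as np
import scipy.sparse as sps

div = sps.csr_matrix(np.array([[1., -1.]]))  # hypothetical scalar divergence
vec_div = sps.kron(div, sps.eye(2)).toarray()
assert np.allclose(vec_div, np.array([[1., 0., -1., 0.],
                                      [0., 1., 0., -1.]]))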
def test_serial_solver(self, precond_type=None):
    print('Testing Serial FETI solver ..........\n\n')
    solver_obj = SerialFETIsolver(self.K_dict, self.B_dict, self.f_dict,
                                  precond_type=precond_type, tolerance=1E-11)
    sol_obj = solver_obj.solve()
    self.postproc(sol_obj)
    K_dual = sparse.block_diag((self.K_dict[1], self.K_dict[2]))
    f_dual = np.concatenate((self.f_dict[1].data, self.f_dict[2].data))
    u_dual = sol_obj.displacement
    np.testing.assert_almost_equal(self.f_global.data, self.L @ f_dual,
                                   decimal=10)
    u_primal = self.dual2primal(K_dual, u_dual, f_dual, self.L, self.Lexp)
    np.testing.assert_almost_equal(self.u_global, u_primal, decimal=10)
    print('end Serial FETI solver ..........\n\n')
    return u_dual, sol_obj
def __call__(self, A: sparse.spmatrix, b: Union[sparse.spmatrix, np.ndarray],
             x0: Optional[np.ndarray] = None) -> np.ndarray:
    assert A.shape[0] == b.shape[0]
    x0T = None
    if x0 is not None:
        x0T = x0.T.flatten()
        assert A.shape[1] * b.shape[1] == len(x0T)

    # Build diagonal block
    A = A.tocsc()
    BlockA = sparse.block_diag([A for d in range(b.shape[1])], format="csr")
    BlockB = b.T.flatten()
    assert BlockA.shape[0] == BlockB.shape[0]

    # lsqr_result = sparse.linalg.lsqr(BlockA, BlockB, x0=x0T, **self.kwargs)
    # return lsqr_result[0].reshape((3, -1)).T
    return call_solver(self.solver, BlockA, BlockB, x0T,
                       self.kwargs).reshape((3, -1)).T
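# A small sketch (not from the original solver) of the block trick above,
# assuming random dense data: solving A @ X = B column-by-column is
# equivalent to one least-squares solve with block_diag([A] * ncols) against
# the column-stacked right-hand side.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lsqr

rng = np.random.default_rng(0)
A = sparse.csr_matrix(rng.normal(size=(8, 5)))
B = rng.normal(size=(8, 3))
BlockA = sparse.block_diag([A] * B.shape[1], format='csr')
x_block = lsqr(BlockA, B.T.flatten())[0].reshape((B.shape[1], -1)).T
x_cols = np.column_stack([lsqr(A, B[:, d])[0] for d in range(B.shape[1])])
assert np.allclose(x_block, x_cols, atol=1e-6)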
def getB(A, params):
    """
    Returns the matrix used for computing the value of the RHS of the ODE.
    Input:
        A is the adjacency matrix
        params is a tuple containing the parameters for the ODE.
        Same order as in modelN
    """
    a, theta0, theta1, g, k, tau = params
    n = A.shape[0]
    doublen = 2 * n
    doublepi = 2 * np.pi
    q = A.sum(axis=0).A[0]
    F = sp.lil_matrix((n, n))
    # generate F
    # only called once and not too resource intensive,
    # so little gains from optimising this
    for j in range(n):
        sumqA = 0.0
        for l in range(n):
            sumqA += q[l] * A[l, j]
        scaling = 0 if sumqA == 0 else tau / sumqA
        for i in range(n):
            F[i, j] = scaling * q[i] * A[i, j]
    del A  # no longer needed
    B = sp.block_diag((F, F, F), format="lil")
    del F
    del q
    # The tau term is given by sum F_ji = tau
    B.setdiag(
        np.concatenate((np.zeros(n) - tau - g - k,
                        np.zeros(n) - tau - k - a,
                        np.zeros(n) - tau - k), axis=None))
    B.setdiag([a] * n, n)
    return B.tocsr()  # csr is optimised for matrix-vector multiplication
def load_test_data(data_path, output_path=None, loadflag=0):
    """Loads molecular data required to test a neural network."""
    features = []  # features of each node
    A = []  # list of graph adjacency matrices; each entry is the adjacency matrix for one molecule
    sizes = []  # list of sizes of molecules; each entry is the size of a molecule
    num_molecules = 0
    target = []  # list of "y's" - each entry is an "answer" for a molecule

    if loadflag:
        return load_pickled(data_path, 'adj', 'features', 'y',
                            'molecule_partitions', 'num_molecules')

    # Info for standardizing data
    elements = np.array([1, 6, 7, 8, 9])
    elements_mean = np.mean(elements)
    elements_stdev = np.std(elements)
    elements_all = [elements, elements_mean, elements_stdev]

    for file in os.listdir(data_path):
        features, target, A, sizes, num_molecules = add_sample(
            data_path+file, features, target, A, sizes, num_molecules,
            elements_all)
    print("Total molecules", num_molecules)

    molecule_partitions = np.cumsum(sizes)  # to get partition positions
    adj = sp.csr_matrix(sp.block_diag(A))
    labels = np.array(target)
    features = sp.coo_matrix(np.array(features)).tolil()

    if output_path is not None:
        pickle_file(output_path, ('adj', adj), ('features', features),
                    ('y', labels),
                    ('molecule_partitions', molecule_partitions),
                    ('num_molecules', num_molecules))
        print("finished writing to file")

    data = {'adj': adj, 'features': features, 'y': labels,
            'molecule_partitions': molecule_partitions,
            'num_molecules': num_molecules}
    return data
def deriv(self, U, simulation, du_dm_v=None, v=None, adjoint=False):
    # The water retention curve model should have been updated in the prob
    P = self.getP(simulation.mesh, simulation.time_mesh)
    dT_du = sp.block_diag([
        simulation.water_retention.derivU(ui) for ui in U
    ])
    if simulation.water_retention.needs_model:
        dT_dm = sp.vstack([
            simulation.water_retention.derivM(ui) for ui in U
        ])
    else:
        dT_dm = Zero()

    if v is None and not adjoint:
        # this is called by the fullJ in the problem
        return P * (dT_du * du_dm_v) + P * dT_dm
    if not adjoint:
        return P * (dT_du * du_dm_v) + P * (dT_dm * v)
    # for the adjoint return both parts of the sum separately
    if v is None:
        raise Exception("v must be provided if computing adjoint")
    PTv = P.T * v
    return dT_du.T * PTv, dT_dm.T * PTv
def __init__(self, x0, v0, theta0, thetadot0):
    self.N = 50
    self.NVars = 5
    T = 2.0
    dt = T / self.N
    self.dtinv = 1. / dt
    # Px = sparse.eye(N)
    # sparse.csc_matrix((N, N))
    # The three different weighting matrices for x, v, and external force
    reg = sparse.eye(self.N) * 0.05
    z = sparse.bsr_matrix((self.N, self.N))
    # sparse.diags(np.arange(N)/N)
    P = sparse.block_diag([reg, reg, sparse.eye(self.N), reg, reg])  # 1*reg, 1*reg])
    # P[N,N] = 10
    THETA = 2
    q = np.zeros((self.NVars, self.N))
    q[THETA, :] = np.pi
    q[0, :] = 0.5
    # q[N,0] = -2 * 0.5 * 10
    q = q.flatten()
    q = -P @ q
    # u = np.arr
    self.x = np.random.randn(self.N, self.NVars).flatten()
    # x = np.zeros((N,NVars)).flatten()
    # v = np.zeros(N)
    # f = np.zeros(N)
    # print(f(ad.seed(x)).dvalue)

    A, l, u = self.getAlu(self.x, x0, v0, theta0, thetadot0)
    self.m = osqp.OSQP()
    self.m.setup(P=P, q=q, A=A, l=l, u=u, time_limit=0.1)
    # **settings  # warm_start=False, eps_prim_inf=1e-1
    self.results = self.m.solve()
    print(self.results.x)
    for i in range(100):
        self.update(x0, v0, theta0, thetadot0)
def build_phase_1(self):
    """
    Build the linear equation that needs to be optimized:
        left_mat @ src_vts_with_nm => right_mat
    "@" means the dot product (inner product) of two sparse matrices.
    By using LU factorization to solve the linear equation, we can get
    the optimal deformed "src_vts_with_nm".
    If you can't understand why the linear equation is constructed this
    way, try to learn something about the Moore-Penrose pseudo-inverse
    and Lagrange multipliers.
    """
    s_left, s_right, s_weight = self.build_smooth_optimization_term()
    i_left, i_right, i_weight = self.build_identity_optimization_term()
    cons_left, cons_right, cons_weight = self.build_constraint_optimization_term()

    tmp_left_mat = sps.vstack([s_left, i_left, cons_left], format='csc')
    tmp_right_mat = sps.vstack([s_right, i_right, cons_right], format='csc')
    tmp_weight_mat = sps.block_diag([s_weight, i_weight, cons_weight],
                                    format='csc')

    self.solver_left_mat = ((tmp_left_mat.transpose()).dot(tmp_weight_mat)
                            ).dot(tmp_left_mat).tocsc()
    self.solver_right_mat = ((tmp_left_mat.transpose()).dot(tmp_weight_mat)
                             ).dot(tmp_right_mat).tocsc()
    return
def sparse_factor_solve_kkt(Q_tilde, D_tilde, A_, C_tilde, rx, rs, rz, ry, ns):
    nineq, nz, neq, nBatch = ns

    # H_ = csc_matrix((nz + nineq, nz + nineq))
    # H_[:nz, :nz] = Q_tilde
    # H_[-nineq:, -nineq:] = D_tilde
    H_ = block_diag([Q_tilde, D_tilde], format='csc')

    if neq > 0:
        g_ = torch.cat([rx, rs], 1).squeeze(0).numpy()
        h_ = torch.cat([rz, ry], 1).squeeze(0).numpy()
    else:
        g_ = torch.cat([rx, rs], 1).squeeze(0).numpy()
        h_ = rz.squeeze(0).numpy()

    H_LU = splu(H_)
    invH_A_ = csc_matrix(H_LU.solve(A_.todense().transpose()))
    invH_g_ = H_LU.solve(g_)

    S_ = A_.dot(invH_A_) + C_tilde
    S_LU = splu(S_)
    # t_ = invH_g_[np.newaxis].dot(A_.transpose()).squeeze(0) - h_
    t_ = A_.dot(invH_g_) - h_
    w_ = -S_LU.solve(t_)
    # t_ = -g_ - w_[np.newaxis].dot(A_).squeeze(0)
    t_ = -g_ - A_.transpose().dot(w_)
    v_ = H_LU.solve(t_)

    dx = v_[:nz]
    ds = v_[nz:]
    dz = w_[:nineq]
    dy = w_[nineq:] if neq > 0 else None

    dx = torch.DoubleTensor(dx).unsqueeze(0)
    ds = torch.DoubleTensor(ds).unsqueeze(0)
    dz = torch.DoubleTensor(dz).unsqueeze(0)
    dy = torch.DoubleTensor(dy).unsqueeze(0) if neq > 0 else None

    return dx, ds, dz, dy
def assemble_dkg_du_SIMP(dkg_du, dkg_rows, dkg_cols, element_map, densities):
    """
    Build global matrix dKg/du for each dof, applying the SIMP densities:
        Kg(x, u) = x * Kg0
        dK(x, u)/du = x * dKg/du
    """
    ndof = len(dkg_du)
    rows = dkg_rows
    cols = dkg_cols

    dkg_list = []
    for ui in np.arange(ndof):
        # Apply densities for SIMP
        vals = apply_densities(dkg_du[ui], len(dkg_du[ui]),
                               element_map[ui], densities)
        kg = coo_matrix((vals, (rows[ui], cols[ui])), shape=(ndof, ndof))
        dkg_list.append(kg)

    # t0 = time.process_time()
    dkg_du_block_d = block_diag(dkg_list, format='csc')
    # print('Build block(%f s)' % (time.process_time()-t0))
    return dkg_du_block_d
def flux_upwind(q, grid: StaggeredGrid):
    """
    This function computes the upwind flux matrix from the flux vector.

    @param q: Nf by 1 flux vector from the flow problem
    @param grid: structure containing all pertinent information about the grid
    @return: Nf by N matrix containing the upwinded fluxes
    """
    if grid.is_problem_1d():
        idx = 0 if grid.n_cell_dofs[0] == grid.n_cell_dofs_total else 1
        qn = np.minimum(q[:grid.n_cell_dofs[idx]], 0)
        qp = np.maximum(q[1:], 0)
        return diags(diagonals=[qp, qn],
                     offsets=[-1, 0],
                     shape=(grid.n_cell_dofs[idx] + 1, grid.n_cell_dofs[idx]))
    elif grid.is_problem_2d():
        qn_x = np.minimum(q[:grid.n_flux_dofs[0] - grid.n_cell_dofs[1]], 0)
        qp_x = np.maximum(q[grid.n_cell_dofs[1]:grid.n_flux_dofs[0]], 0)
        offsets = [-grid.n_cell_dofs[1], 0]
        A_x = diags(diagonals=[qp_x, qn_x],
                    offsets=offsets,
                    shape=(grid.n_flux_dofs[0],
                           grid.n_flux_dofs[0] - grid.n_cell_dofs[1]))
        A_y = []
        for i in range(grid.n_cell_dofs[0]):
            step = grid.n_flux_dofs[0] + i * (grid.n_cell_dofs[1] + 1)
            qn_y = np.minimum(q[step:step + grid.n_cell_dofs[1]], 0)
            qp_y = np.maximum(q[step + 1:step + grid.n_cell_dofs[1] + 1], 0)
            A_y.append(
                diags(diagonals=[qp_y, qn_y],
                      offsets=[-1, 0],
                      shape=(grid.n_cell_dofs[1] + 1, grid.n_cell_dofs[1])))
        A_y = block_diag(mats=tuple(A_y), format='csr')
        return vstack([A_x, A_y], format='csr')
    else:
        raise ValueError("3d flux is not implemented.")
def load_spectra(self):
    self.phase, self.X = self.load_phase_wavelength(self.sn_name[0])
    self.Y_cardelli_corrected_cosmo_corrected = np.zeros(
        (len(self.sn_name), len(self.X)))
    self.Y_cosmo_corrected = np.zeros((len(self.sn_name), len(self.X)))
    self.CovY = []

    for i, sn in enumerate(self.sn_name):
        print sn
        self.Y_cosmo_corrected[i], self.Y_cardelli_corrected_cosmo_corrected[i] = (
            self.load_spectra_GP(sn))
        Cov = self.load_cov_matrix(sn)
        COV = []
        # extract the per-wavelength-bin diagonal blocks of the covariance
        for j in range(self.number_bin_wavelength):
            COV.append(
                coo_matrix(Cov[j * self.number_bin_phase:(j + 1) * self.number_bin_phase,
                               j * self.number_bin_phase:(j + 1) * self.number_bin_phase]))
        self.CovY.append(block_diag(COV))
def is_max(self, image, alpha=10):
    """
    This is a soft version of the 'is_max' rule. The strong version returns
    1 if it is the max among neighbors, 0 otherwise. The greater alpha, the
    closer we are to the 'strong' version.

    :param image: Hough image(s) from transformation(s)
    :param alpha: Weight of exponential reweighting
    """
    # Exponentially reweight the data
    exponents = np.exp(alpha * image)

    # Check the number of maxima we expect to get back
    # Note, we expect two maxima for each tested hough transform
    # One for even, one for odd.
    # Testing multiple transforms scales as 2*n_transforms
    n_parts = image.shape[1] // self.track_neighs.shape[1]
    assert n_parts * self.track_neighs.shape[1] == image.shape[1]

    # Block diagonal matrix, with each block being one copy of the
    # neighs_matrix
    full_neigh = block_diag([self.track_neighs]*n_parts, format='csr')

    # Return the value at the point
    # normalized by the sum of its values and its neighbouring values
    return exponents / full_neigh.dot(exponents.T).T
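# A tiny illustration (not from the original class) of the batched neighbour
# sum above: block_diag applies the same neighbourhood matrix independently
# to each stacked part of the image.
import numpy as np
from scipy.sparse import block_diag, csr_matrix

neighs = csr_matrix(np.ones((2, 2)))
full_neigh = block_diag([neighs] * 2, format='csr')
exponents = np.array([[1., 2., 3., 4.]])
sums = full_neigh.dot(exponents.T).T
assert np.allclose(sums, [[3., 3., 7., 7.]])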
def __init__(self, limit=20, L=256, number=10,
             filename='schrodinger_data.h5', potential_generator=V_SHO):
    self.filename = filename
    self.limit = limit
    self.L = L
    self.number = number
    self.potential_generator = potential_generator

    x = np.linspace(-self.limit, self.limit, self.L)
    y = np.linspace(-self.limit, self.limit, self.L)

    # grid spacing
    self.dx = x[1]-x[0]
    self.dy = y[1]-y[0]

    self.mesh = np.meshgrid(x, y)

    block = sp.diags([-1, 4, -1], [-1, 0, 1], (L, L))  # main tri-diagonal
    dia = sp.block_diag((block,)*L)  # repeat it L times to create the main block-diagonal
    sup = sp.diags([-1], [L], (L**2, L**2))  # super-diagonal fringe
    sub = sp.diags([-1], [-L], (L**2, L**2))  # sub-diagonal fringe
    self.T = (dia + sup + sub) / (2*self.dx*self.dy)
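# A short check (illustration only, not from the original class) that the
# block construction above matches the standard Kronecker form of the 2D
# five-point Laplacian: block_diag + fringes == kron(I, A) + kron(A, I)
# with A = tridiag(-1, 2, -1).
import numpy as np
import scipy.sparse as sp

L = 4
A = sp.diags([-1, 2, -1], [-1, 0, 1], (L, L))
block = sp.diags([-1, 4, -1], [-1, 0, 1], (L, L))
T_blocks = (sp.block_diag((block,) * L)
            + sp.diags([-1], [L], (L**2, L**2))
            + sp.diags([-1], [-L], (L**2, L**2)))
T_kron = sp.kron(sp.eye(L), A) + sp.kron(A, sp.eye(L))
assert np.allclose(T_blocks.toarray(), T_kron.toarray())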
def H_SOTI(size, p, q, t=-1, M=2.3, D1=0.8, D2=0.5):
    """ SOTI Hamiltonian, Fourier transformed in z and real in x, y """
    # put blocks on the diagonal - 1 for every zu
    blocks = np.zeros((size, 4 * size**2, 4 * size**2), dtype=complex)
    zus = np.linspace(-np.pi, np.pi, size, endpoint=True)
    for i in range(size):
        zu = zus[i]
        blocks[i, :, :] = soti_block(size=size, p=p, q=q, zu=zu, t=t,
                                     M=M, D1=D1, D2=D2)
    # use scipy.sparse.block_diag instead of scipy.linalg.block_diag
    # because it takes in an array of blocks
    H = ss.block_diag(blocks).toarray()  # <- still needs testing
    return H