def LSS_KKT(R, D):
    R, D = array(R), array(D)
    assert R.ndim == 3
    assert R.shape[1] == R.shape[2]
    N, m = R.shape[:2]
    bigR = sparse.bsr_matrix((R, r_[:N], r_[:N+1]),
                             shape=(N*m, (N+1)*m))
    I = array([eye(m)] * N)
    bigI = sparse.bsr_matrix((I, r_[1:N+1], r_[:N+1]),
                             shape=(N*m, (N+1)*m))
    bigL = bigI - bigR
    assert D.shape == (N+1, m, m)
    bigD = sparse.bsr_matrix((D, r_[:N+1], r_[:N+2]),
                             shape=((N+1)*m, (N+1)*m))
    O = zeros([N, m, m])
    bigO = sparse.bsr_matrix((O, r_[:N], r_[:N+1]),
                             shape=(N*m, N*m))
    return sparse.vstack([sparse.hstack([bigD, bigL.T]),
                          sparse.hstack([bigL, bigO])])
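# LSS_KKT leans on scipy's (data, indices, indptr) BSR constructor. A minimal
# self-contained sketch of that layout (sizes and names here are illustrative,
# not from the source): N blocks of size m x m, block i placed at block row i,
# block column i of an N x (N+1) block grid -- the same placement as bigR above.
import numpy as np
from scipy import sparse

N_demo, m_demo = 3, 2
R_demo = np.random.rand(N_demo, m_demo, m_demo)
bigR_demo = sparse.bsr_matrix((R_demo, np.r_[:N_demo], np.r_[:N_demo + 1]),
                              shape=(N_demo * m_demo, (N_demo + 1) * m_demo))
assert bigR_demo.blocksize == (m_demo, m_demo)
assert bigR_demo.shape == (6, 8)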
def collectB(smallB, dim, N, COMM):
    B = sparse.bsr_matrix(smallB, blocksize=(dim, dim))
    if head(COMM):
        send_data = B.data.ravel()
        recv_data = np.empty(dim * dim * 2 * N)
    elif tail(COMM):
        # integer block count per rank
        send_data = np.zeros(dim * dim * 2 * N // COMM.Get_size())
        send_data[:-2*dim*dim] = B.data.ravel()[2*dim*dim:]
        recv_data = None
    else:
        send_data = B.data.ravel()[2*dim*dim:]
        recv_data = None
    COMM.Barrier()
    COMM.Gather(send_data, recv_data, root=0)
    if head(COMM):
        data = recv_data.ravel()[:-dim*dim*2]
        data = data.reshape((2*N - 2, dim, dim))
        temp = np.arange(N).repeat(2).ravel()
        indices = temp[1:-1]
        indptr = 2 * np.arange(N)
        bigB = sparse.bsr_matrix((data, indices, indptr), blocksize=(dim, dim))
        return bigB
    else:
        return None
def _get_PECM(self):
    PECM = bsr_matrix(((self.nelx+1)*(self.nely+1),
                       (self.nelx+1)*(self.nely+1)))
    for pipe in self.pipes:
        C = self.shepard(pipe.xi, pipe.yi, pipe.xj, pipe.yj)
        pecm = C.T * bsr_matrix(pipe.c_m) * C
        PECM += pecm
    # return the accumulated matrix, not the last pipe's contribution
    return PECM
def _check_bsr_matmat(self, m):
    n = self.n

    # _bsr_matmat
    m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8),
                    blocksize=(m.blocksize[1], 2))
    m.dot(m2)  # shouldn't SIGSEGV

    # _bsr_matmat
    m2 = bsr_matrix(np.ones((2, n), dtype=np.int8),
                    blocksize=(2, m.blocksize[0]))
    m2.dot(m)  # shouldn't SIGSEGV
def test_shape_compatibility(self):
    use_solver(useUmfpack=True)
    A = csc_matrix([[1., 0], [0, 2]])
    bs = [
        [1, 6],
        array([1, 6]),
        [[1], [6]],
        array([[1], [6]]),
        csc_matrix([[1], [6]]),
        csr_matrix([[1], [6]]),
        dok_matrix([[1], [6]]),
        bsr_matrix([[1], [6]]),
        array([[1., 2., 3.], [6., 8., 10.]]),
        csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
        csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
        dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
        bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
    ]

    for b in bs:
        x = np.linalg.solve(A.toarray(), toarray(b))
        for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
            x1 = spsolve(spmattype(A), b, use_umfpack=True)
            x2 = spsolve(spmattype(A), b, use_umfpack=False)

            # check solution
            if x.ndim == 2 and x.shape[1] == 1:
                # interprets also these as "vectors"
                x = x.ravel()
            assert_array_almost_equal(toarray(x1), x,
                                      err_msg=repr((b, spmattype, 1)))
            assert_array_almost_equal(toarray(x2), x,
                                      err_msg=repr((b, spmattype, 2)))

            # dense vs. sparse output ("vectors" are always dense)
            if isspmatrix(b) and x.ndim > 1:
                assert_(isspmatrix(x1), repr((b, spmattype, 1)))
                assert_(isspmatrix(x2), repr((b, spmattype, 2)))
            else:
                assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
                assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))

            # check output shape
            if x.ndim == 1:
                # "vector"
                assert_equal(x1.shape, (A.shape[1],))
                assert_equal(x2.shape, (A.shape[1],))
            else:
                # "matrix"
                assert_equal(x1.shape, x.shape)
                assert_equal(x2.shape, x.shape)

    A = csc_matrix((3, 3))
    b = csc_matrix((1, 3))
    assert_raises(ValueError, spsolve, A, b)
def dot_scipy_bsr_with_conversion(matrix_1: np.ndarray, matrix_2: np.ndarray):
    """
    Calculates the dot product by converting the parameters to
    Block Sparse Row matrices.

    Parameters
    ----------
    matrix_1 : numpy array
    matrix_2 : numpy array

    Returns
    -------
    A numpy array which results from the dot product.
    """
    sparse_result = sparse.bsr_matrix(matrix_1).dot(sparse.bsr_matrix(matrix_2))
    return np.array(sparse_result.todense())
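# A hedged usage sketch for dot_scipy_bsr_with_conversion (assumes the function
# above is in scope; `a` and `b` are illustrative inputs). For a single product
# on small dense inputs the two conversions typically cost more than they save,
# so this only demonstrates the round trip:
import numpy as np

a = np.arange(6, dtype=float).reshape(2, 3)
b = np.ones((3, 2))
assert np.allclose(dot_scipy_bsr_with_conversion(a, b), a.dot(b))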
def solve(self):
    R, b = np.array(self.Rs), np.array(self.bs)
    assert R.ndim == 3 and b.ndim == 2
    assert R.shape[0] == b.shape[0]
    assert R.shape[1] == R.shape[2] == b.shape[1]
    nseg, subdim = b.shape
    eyes = np.eye(subdim, subdim) * np.ones([nseg, 1, 1])
    matrix_shape = (subdim * nseg, subdim * (nseg + 1))
    I = sparse.bsr_matrix((eyes, np.r_[1:nseg+1], np.r_[:nseg+1]))
    D = sparse.bsr_matrix((R, np.r_[:nseg], np.r_[:nseg+1]), shape=matrix_shape)
    B = (D - I).tocsr()
    Schur = B * B.T  # + 1E-5 * sparse.eye(B.shape[0])
    alpha = -(B.T * splinalg.spsolve(Schur, np.ravel(b)))
    # alpha1 = splinalg.lsqr(B, ravel(bs), iter_lim=10000)
    return alpha.reshape([nseg+1, -1])[:-1]
def readdata(self,filename): print "Read Data..." data = np.genfromtxt(filename,delimiter=self.sep,dtype=self.format['type']) self.record_no = len(data) print "Record No.:%d"%self.record_no for k in xrange(data.shape[1]): if k == self.format['row']: self.row = data[:,k] if k == self.format['col']: self.col = data[:,k] if k == self.format['rate']: self.rate = np.int32(data[:,k]) p = 0 q = 0 for k in xrange(self.record_no): if self.row[k] not in self.uid_dict: self.uid_dict[self.row[k]] = p p += 1 if self.col[k] not in self.iid_dict: self.iid_dict[self.col[k]] = q q += 1 self.num_u = p self.num_i = q row = np.array(map(lambda x:self.uid_dict[x],self.row)) col = np.array(map(lambda x:self.iid_dict[x],self.col)) self.sparsity = self.record_no*100.0/(self.num_u*self.num_i) print "Mat Sparsity:%f %%"%self.sparsity self.bsrmat = bsr_matrix((self.rate,(self.row,self.col)),shape=(self.num_u,self.num_i),dtype=float)
def readdata(self,filename): print "Read data..." f=open(filename) for l in f.readlines(): l=l.strip().split(self.sep) self.uid.append(l[0]) self.iid.append(l[1]) self.rate.append(float(l[2])) # self.tiem.append(l[3]) f.close() k=0 for ui in self.uid: if ui not in self.uid_dict: self.uid_dict[ui] = k k+=1 k=0 for iti in self.iid: if iti not in self.iid_dict: self.iid_dict[iti] = k k+=1 self.num_u = len(self.uid_dict) self.num_i = len(self.iid_dict) print "%d users,%d items" %(self.num_u,self.num_i) self.row = np.array(map(lambda x:self.uid_dict[x],self.uid)) self.col = np.array(map(lambda x:self.iid_dict[x],self.iid)) self.rate = np.array(self.rate) self.M = len(self.rate) self.bsrmat = bsr_matrix((self.rate,(self.row,self.col)),shape=(self.num_u,self.num_i),dtype=float) self.sparsity = len(self.rate)*1.0/(self.num_u*self.num_i) print "Mat Sparsity:%f"%self.sparsity
def odd_even_bsr(N, dim, order):
    # Generates a block-row permutation matrix in BSR form.
    # Odd-even or even-odd permutation only.
    indptr = np.arange(N + 1)

    # build data matrix: N identity blocks of size dim x dim
    data = np.zeros((N, dim, dim))
    for i in np.arange(dim):
        data[:, i, i] = 1.0

    # build index array (integer division so this works on Python 3 as well)
    indices = np.zeros(N, dtype='int')
    if order == 'odd-even':
        if N % 2 == 0:
            indices[np.arange(N // 2)] = 2 * np.arange(N // 2) + 1
            indices[np.arange(N // 2) + N // 2] = 2 * np.arange(N // 2)
        else:
            indices[np.arange((N - 1) // 2)] = 2 * np.arange((N - 1) // 2) + 1
            indices[np.arange((N + 1) // 2) + (N - 1) // 2] = 2 * np.arange((N + 1) // 2)
    elif order == 'even-odd':
        if N % 2 == 0:
            indices[np.arange(N // 2)] = 2 * np.arange(N // 2)
            indices[np.arange(N // 2) + N // 2] = 2 * np.arange(N // 2) + 1
        else:
            indices[np.arange((N + 1) // 2)] = 2 * np.arange((N + 1) // 2)
            indices[np.arange((N - 1) // 2) + (N + 1) // 2] = 2 * np.arange((N - 1) // 2) + 1
    else:
        print('Permutation order must be odd-even or even-odd')

    spP = sparse.bsr_matrix((data, indices, indptr), shape=(N * dim, N * dim))
    return spP
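# A hedged usage sketch for odd_even_bsr (assumes the function above and its
# scipy.sparse import are in scope). With N = 4 blocks of size 2, the
# 'odd-even' permutation moves blocks 1 and 3 ahead of blocks 0 and 2:
import numpy as np

x = np.arange(8.0)
P = odd_even_bsr(4, 2, 'odd-even')
assert np.array_equal(P.dot(x), np.array([2., 3., 6., 7., 0., 1., 4., 5.]))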
def test_check_symmetric():
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])

    test_arrays = {'dense': arr_asym,
                   'dok': sp.dok_matrix(arr_asym),
                   'csr': sp.csr_matrix(arr_asym),
                   'csc': sp.csc_matrix(arr_asym),
                   'coo': sp.coo_matrix(arr_asym),
                   'lil': sp.lil_matrix(arr_asym),
                   'bsr': sp.bsr_matrix(arr_asym)}

    # check error for bad inputs
    assert_raises(ValueError, check_symmetric, arr_bad)

    # check that asymmetric arrays are properly symmetrized
    for arr_format, arr in test_arrays.items():
        # Check for warnings and errors
        assert_warns(UserWarning, check_symmetric, arr)
        assert_raises(ValueError, check_symmetric, arr, raise_exception=True)

        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            assert_equal(output.format, arr_format)
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
def create_sparse_dataset(py_obj, h_group, call_id=0, **kwargs):
    """ Dumps a scipy sparse matrix to an h5py file.

    Args:
        py_obj: python object to dump; should be a scipy sparse matrix
            (csr, csc or bsr).
        h_group (h5.File.group): group to dump data into.
        call_id (int): index to identify object's relative location in the
            iterable.
    """
    h_sparsegroup = h_group.create_group('data_%i' % call_id)
    data = h_sparsegroup.create_dataset('data', data=py_obj.data, **kwargs)
    indices = h_sparsegroup.create_dataset('indices', data=py_obj.indices, **kwargs)
    indptr = h_sparsegroup.create_dataset('indptr', data=py_obj.indptr, **kwargs)
    shape = h_sparsegroup.create_dataset('shape', data=py_obj.shape, **kwargs)

    if isinstance(py_obj, type(sparse.csr_matrix([0]))):
        type_str = 'csr'
    elif isinstance(py_obj, type(sparse.csc_matrix([0]))):
        type_str = 'csc'
    elif isinstance(py_obj, type(sparse.bsr_matrix([0]))):
        type_str = 'bsr'

    if six.PY2:
        h_sparsegroup.attrs["type"] = [b'%s_matrix' % type_str]
        data.attrs["type"] = [b"%s_matrix_data" % type_str]
        indices.attrs["type"] = [b"%s_matrix_indices" % type_str]
        indptr.attrs["type"] = [b"%s_matrix_indptr" % type_str]
        shape.attrs["type"] = [b"%s_matrix_shape" % type_str]
    else:
        h_sparsegroup.attrs["type"] = [bytes('%s_matrix' % type_str, 'ascii')]
        data.attrs["type"] = [bytes("%s_matrix_data" % type_str, 'ascii')]
        indices.attrs["type"] = [bytes("%s_matrix_indices" % type_str, 'ascii')]
        indptr.attrs["type"] = [bytes("%s_matrix_indptr" % type_str, 'ascii')]
        shape.attrs["type"] = [bytes("%s_matrix_shape" % type_str, 'ascii')]
def to_sparse(D, format="csc"): """ Transform dense matrix to sparse matrix of return_type bsr_matrix(arg1[, shape, dtype, copy, blocksize]) Block Sparse Row matrix coo_matrix(arg1[, shape, dtype, copy]) A sparse matrix in COOrdinate format. csc_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Column matrix csr_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Row matrix dia_matrix(arg1[, shape, dtype, copy]) Sparse matrix with DIAgonal storage dok_matrix(arg1[, shape, dtype, copy]) Dictionary Of Keys based sparse matrix. lil_matrix(arg1[, shape, dtype, copy]) Row-based linked list sparse matrix :param D: Dense matrix :param format: how to save the sparse matrix :return: sparse version """ if format == "bsr": return sprs.bsr_matrix(D) elif format == "coo": return sprs.coo_matrix(D) elif format == "csc": return sprs.csc_matrix(D) elif format == "csr": return sprs.csr_matrix(D) elif format == "dia": return sprs.dia_matrix(D) elif format == "dok": return sprs.dok_matrix(D) elif format == "lil": return sprs.lil_matrix(D) else: return to_dense(D)
def blockInnerProducts(quadweights, leftvalsiter, rightvalsiter, leftI, rightI):
    """ Evaluate the inner product matrix.

    Returns a sparse matrix equal to
        leftI.transpose * L.transpose * quadweights * R * rightI
    where L and R are block diagonal matrices whose blocks are given by the
    iterables leftvalsiter and rightvalsiter.

    If the left or right vals have more than 2 dimensions, the extra
    dimensions are multiplied and summed (tensor-contracted), with
    broadcasting as necessary, i.e. this is an inner product - it can't be
    used for a more general multiplication.
    """
    import scipy.sparse as ss
    data = []
    idx = []
    ip = [0]
    for e, (leftvals, rightvals, weights) in enumerate(
            it.izip(leftvalsiter, rightvalsiter, quadweights)):
        if len(weights):
            lvs = len(leftvals.shape)
            rvs = len(rightvals.shape)
            vs = max(lvs, rvs)
            leftvals = leftvals.reshape(leftvals.shape + (1,) * (vs - lvs))
            rightvals = rightvals.reshape(rightvals.shape + (1,) * (vs - rvs))
            lvw = leftvals * weights.reshape((-1,) + (1,) * (vs - 1))
            # print lvw.shape, rightvals.shape
            data.append(numpy.tensordot(lvw, rightvals,
                                        ([0] + range(2, vs), [0] + range(2, vs))))
            idx.append(e)
        ip.append(len(idx))
    # print e, idx, ip
    V = ss.bsr_matrix((data, idx, ip), dtype=float,
                      shape=(leftI.shape[0], rightI.shape[0]))
    return leftI.transpose() * V * rightI
def main():
    with open("aas/corpus.json") as f:
        corpus = json.loads(f.read())
    corpus = [(k, v) for k, v in corpus.items() if v > 5]
    corpus = sorted(corpus, key=lambda x: x[1])
    corpus = corpus[:-6]
    Ncorpus = len(corpus)

    with open("aas/abstracts.json") as f:
        abstracts = json.loads(f.read())

    X = np.zeros((len(abstracts), Ncorpus))
    for jj, abstract in enumerate(abstracts):
        for ii in range(Ncorpus):
            try:
                X[jj, ii] = abstract['counts'][corpus[ii][0]]
            except KeyError:
                continue
    X = bsr_matrix(X)

    print("Initializing k-means")
    km = MiniBatchKMeans(n_clusters=50, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=True)

    print("fitting")
    t0 = time.time()
    km.fit(X)  # X is nsamples, nfeatures
    print("Took {} seconds".format(time.time() - t0))

    return km
def solve_init(self):
    nb = self.nspec + 1  # block size
    nn = self.nz * nb    # system size
    YH = np.hstack((self.Y, self.H.reshape((self.nz, 1)))).flatten()

    # initialize sparse jacobian, source term, and identity matrices
    # sparse block matrix J:
    #   len(i) = num of active blocks = nz-2
    #   len(j) = num of block rows + 1 = nz+1
    # initial J with dummy data (ones) to set structure
    i = np.arange(1, self.nz - 1)
    j = np.hstack((0, range(self.nz - 1), self.nz - 2))
    d = np.ones(self.nz - 2).repeat(nb * nb).reshape(self.nz - 2, nb, nb)
    J = bsr_matrix((d, i, j), shape=(self.nz * nb, self.nz * nb)).tocsr()
    S = np.zeros(YH.size)
    I = eye(nn, nn)

    # indexing to get Y and H from YH
    iy = np.zeros((self.nz, self.nspec), dtype="int")
    ih = np.zeros(self.nz, dtype="int")
    for iz in range(self.nz):
        i = iz * (self.nspec + 1)
        ih[iz] = i + self.nspec
        iy[iz, :] = np.arange(i, i + self.nspec)

    # reference values to scale residuals
    YHref = np.ones(self.nspec + 1)
    YHref[-1] = YH[ih].max(0) - YH[ih].min(0)
    YHRef = np.tile(YHref, self.nz)

    return J, S, I, YH, YHref, YHRef, iy, ih
def _constructMatrices(obj):
    u_mid = 0.5 * (obj.traj[1:] + obj.traj[:-1])
    obj.Jac = obj.ns.dfdu(u_mid, obj.t)
    A = obj.Jac
    I = eye(obj.m)[newaxis, :, :]
    obj.F = -I / obj.dt - A / 2.
    obj.G = I / obj.dt - A / 2.
    N = obj.n
    m = obj.m
    obj._B = bsr_matrix((obj.F, r_[:N], r_[:N+1]),
                        blocksize=(m, m), shape=(N*m, (N+1)*m)) \
           + bsr_matrix((obj.G, r_[1:N+1], r_[:N+1]),
                        blocksize=(m, m), shape=(N*m, (N+1)*m))
    obj._BT = obj._B.T.tobsr()
    obj._S = obj._B * obj._B.T
def sp_create_data(data, rows, cols, dim1, dim2, format):
    """
    Account for slightly different sparse matrix constructors.
    """
    if format == "dok":
        result = sp.dok_matrix((dim1, dim2))
        for (d, i, j) in zip(data, rows, cols):
            result[i, j] = d
    elif format == "csr":
        result = sp.csr_matrix((data, (rows, cols)), shape=(dim1, dim2))
    elif format == "csc":
        result = sp.csc_matrix((data, (rows, cols)), shape=(dim1, dim2))
    elif format == "coo":
        result = sp.coo_matrix((data, (rows, cols)), shape=(dim1, dim2))
    elif format == "lil":
        result = sp.lil_matrix((dim1, dim2))
        for (d, i, j) in zip(data, rows, cols):
            result[i, j] = d
    elif format == "bsr":
        result = sp.bsr_matrix((data, (rows, cols)), shape=(dim1, dim2))
    elif format == "dia":
        raise NotImplementedError
    elif format == "raw":
        return (data, rows, cols, dim1, dim2)  # just return raw data
    elif format == "rawdict":
        return dict(zip(rows, data))
    else:
        raise ValueError("Unknown sparse format!")
    return result
def train_lr(self, train_data, lab_data, C=1.0):
    train_data_features = self.vectorizer.fit_transform(train_data)
    train_data_features = bsr_matrix(train_data_features)
    print train_data_features.shape

    print "Training the logistic regression..."
    self.lr = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=C,
                                 fit_intercept=True, intercept_scaling=1.0,
                                 class_weight=None, random_state=None)
    self.lr = self.lr.fit(train_data_features, lab_data)
def test_is_scipy_sparse():
    tm._skip_if_no_scipy()

    from scipy.sparse import bsr_matrix
    assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))

    assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3]))
    assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
def get_matrix():
    n = self.n
    data = np.ones((n, n, 1), dtype=np.int8)
    indptr = np.array([0, n], dtype=np.int32)
    indices = np.arange(n, dtype=np.int32)
    m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
    del data, indptr, indices
    return m
def test_BSR_Get_Row(self):
    indptr = array([0, 2, 3, 6])
    indices = array([0, 2, 2, 0, 1, 2])
    data = array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
    B = bsr_matrix((data, indices, indptr), shape=(6, 6))
    r, i = BSR_Get_Row(B, 2)
    assert_equal(r, mat(array([[3], [3]])))
    assert_equal(i, array([4, 5]))
def psi(n, i):
    ret = bsr_matrix(np.identity(1))
    for k in range(i - 1):
        ret = sp.kron(ret, sx)
    ret = sp.kron(ret, sz)
    for k in range(n - i):
        if ret.shape[0] < math.pow(2, n):
            ret = sp.kron(ret, np.identity(2))
    return ret
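# psi assumes module-level Pauli matrices sx and sz that are not shown in this
# snippet; a hedged sketch of what they would look like, plus a shape check
# (psi(n, i) should act on the full 2**n-dimensional register):
import math
import numpy as np
import scipy.sparse as sp

sx = sp.bsr_matrix(np.array([[0., 1.], [1., 0.]]))   # Pauli X (assumed)
sz = sp.bsr_matrix(np.array([[1., 0.], [0., -1.]]))  # Pauli Z (assumed)
assert psi(3, 2).shape == (8, 8)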
def validate_lr(self, train_data, lab_data, C=1.0):
    train_data_features = self.vectorizer.fit_transform(train_data)
    train_data_features = bsr_matrix(train_data_features)
    lab_data = np.array(lab_data)
    print "start k-fold validate..."
    lr = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=C,
                            fit_intercept=True, intercept_scaling=1.0,
                            class_weight=None, random_state=None)
    cv = np.mean(cross_validation.cross_val_score(lr, train_data_features,
                                                  lab_data, cv=10,
                                                  scoring='roc_auc'))
    return cv
def _get_BGCM(self):
    BGCM = np.zeros(((self.nelx+1)*(self.nely+1),
                     (self.nelx+1)*(self.nely+1)))
    for x in range(self.nelx):
        for y in range(self.nely):
            nodes = np.array([x*(self.nely+1)+y, (x+1)*(self.nely+1)+y,
                              (x+1)*(self.nely+1)+y+1, x*(self.nely+1)+y+1])
            BGCM[nodes[:, np.newaxis], nodes[np.newaxis, :]] += \
                self.BG_density[x, y]**self.penalty * self.ECM
    return bsr_matrix(BGCM)
def test_zero_variance():
    """Test VarianceThreshold with default setting, zero variance."""
    for X in [data, csr_matrix(data), csc_matrix(data), bsr_matrix(data)]:
        sel = VarianceThreshold().fit(X)
        assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))

    assert_raises(ValueError, VarianceThreshold().fit, [0, 1, 2, 3])
    assert_raises(ValueError, VarianceThreshold().fit, [[0, 1], [0, 1]])
def Schur(self, COMM):
    N, m = self.u.shape[0] - 1, self.u.shape[1]
    J = self.dfdu(self.u[:-1], self.s1)
    eye = np.eye(m, m) + np.zeros([N, m, m])
    L = sparse.bsr_matrix((J, np.r_[:N], np.r_[:N+1]),
                          shape=(N*m, (N+1)*m))
    I = sparse.bsr_matrix((eye, np.r_[1:N+1], np.r_[:N+1]))
    self.B = I.tocsr() - L.tocsr()
    S = (self.B * self.B.T).tobsr(blocksize=(m, m))
    S.sort_indices()
    if not head(COMM):
        S.data = S.data[1:, :, :]
        S.indptr[1:] -= 1
        S.indices = S.indices[1:]
    return S
def loadSparseData(fname, nr):
    data = read_csv('./COML_data/' + fname + '_data.csv', header=None, dtype=float)
    label = read_csv('./COML_data/' + fname + '_label.csv', header=None, dtype=float)
    X = bsr_matrix((data[2], (data[0], data[1]))).toarray()
    X = array(X)[:nr, :]
    Y = array(label)[:nr, :]
    print 'load ', fname, ': ', X.shape
    return X, Y
def scipy_bsr_dot_numpy_with_top_n(dense_matrix: np.ndarray, sparse_matrix: np.ndarray, n=20):
    """
    Calculates the dot product of two matrices of type numpy array. The first
    array is converted to a sparse matrix keeping only the top n items in
    every row. Afterwards both matrices are converted to sparse matrices of
    type BSR for fast multiplication.

    Parameters
    ----------
    dense_matrix : the first matrix, which will be converted to a top-n matrix
    sparse_matrix : the second matrix
    n : the n value for the top-n matrix

    Returns
    -------
    A numpy array, which is the result of the matrix multiplication.
    """
    # presumably zeroes out all but the top-n entries of each row in place
    convert_matrix_to_sparse_with_top_n(dense_matrix, n)
    result = sparse.bsr_matrix(dense_matrix).dot(sparse.bsr_matrix(sparse_matrix))
    return np.array(result.todense())
def Schur(self):
    """
    Builds the Schur complement of the KKT system.
    Also builds B, the block-bidiagonal matrix.
    """
    N, m = self.u.shape[0] - 1, self.u.shape[1]
    J = self.dfdu(self.u[:-1], self.s)
    eye = np.eye(m, m) + np.zeros([N, m, m])
    L = sparse.bsr_matrix((J, np.r_[:N], np.r_[:N+1]),
                          shape=(N*m, (N+1)*m))
    I = sparse.bsr_matrix((eye, np.r_[1:N+1], np.r_[:N+1]))
    self.B = I.tocsr() - L.tocsr()
    return (self.B * self.B.T)
def prediction_to_sparse(prediction, flip=FLIP):
    prediction_sparse = dict()
    prediction_sparse['rois'] = prediction['rois']
    prediction_sparse['class_ids'] = prediction['class_ids']
    prediction_sparse['scores'] = prediction['scores']
    prediction_sparse['masks'] = []
    for i in range(len(prediction['scores'])):
        if flip:
            mask = np.fliplr(prediction['masks'][:, :, i])
        else:
            mask = prediction['masks'][:, :, i]
        prediction_sparse['masks'].append(sparse.bsr_matrix(mask))
    return prediction_sparse
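# A hedged sketch of the reverse direction (hypothetical helper, not from the
# source): densify the per-instance BSR masks back into an
# H x W x num_instances stack, ignoring the flip option.
import numpy as np

def sparse_to_masks(prediction_sparse):
    masks = [m.toarray() for m in prediction_sparse['masks']]
    return np.stack(masks, axis=-1) if masks else np.empty((0, 0, 0))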
def estimate_unwanted_factors(self, control_voxels: Array) -> Array:
    logger.debug("Estimating unwanted factors")
    _, _, all_unwanted_factors = (
        np.linalg.svd(control_voxels, full_matrices=False)
        if not self.sparse_svd
        else svds(
            bsr_matrix(control_voxels),
            k=self.num_unwanted_factors,
            return_singular_vectors="vh",
        )
    )
    unwanted_factors: Array = all_unwanted_factors.T[:, 0:self.num_unwanted_factors]
    return unwanted_factors
def get_rotation_matrix(self, flag_active_joint_displacements):
    """
    Get rotation matrix.

    Parameters
    ----------
    flag_active_joint_displacements : array
        Flags indicating which joint displacements are active; nonzero
        entries select the active rows and columns.
    """
    # rotation as direction cosine matrix
    indptr = np.array([0, 1, 2])
    indices = np.array([0, 1])
    data = np.tile(self.get_rotation().as_dcm(), (2, 1, 1))

    # matrix rotation for a joint
    t1 = bsr_matrix((data, indices, indptr), shape=(6, 6)).tolil()

    flag_active_joint_displacements = np.nonzero(flag_active_joint_displacements)[0]
    n = 2 * np.size(flag_active_joint_displacements)
    t1 = t1[flag_active_joint_displacements[:, None],
            flag_active_joint_displacements].toarray()
    data = np.tile(t1, (2, 1, 1))

    return bsr_matrix((data, indices, indptr), shape=(n, n)).toarray()
def test_scale_rows_and_cols(self):
    D = matrix([[1, 0, 0, 2, 3],
                [0, 4, 0, 5, 0],
                [0, 0, 6, 7, 0]])

    # TODO expose through function
    S = csr_matrix(D)
    v = array([1, 2, 3])
    csr_scale_rows(3, 5, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), diag(v) @ D)

    S = csr_matrix(D)
    v = array([1, 2, 3, 4, 5])
    csr_scale_columns(3, 5, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), D @ diag(v))

    # blocks
    E = kron(D, [[1, 2], [3, 4]])
    S = bsr_matrix(E, blocksize=(2, 2))
    v = array([1, 2, 3, 4, 5, 6])
    bsr_scale_rows(3, 5, 2, 2, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), diag(v) @ E)

    S = bsr_matrix(E, blocksize=(2, 2))
    v = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    bsr_scale_columns(3, 5, 2, 2, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), E @ diag(v))

    E = kron(D, [[1, 2, 3], [4, 5, 6]])
    S = bsr_matrix(E, blocksize=(2, 3))
    v = array([1, 2, 3, 4, 5, 6])
    bsr_scale_rows(3, 5, 2, 3, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), diag(v) @ E)

    S = bsr_matrix(E, blocksize=(2, 3))
    v = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    bsr_scale_columns(3, 5, 2, 3, S.indptr, S.indices, S.data, v)
    assert_equal(S.todense(), E @ diag(v))
def test_construction(self):
    nblocks = 10
    np.random.seed(nblocks)
    data = np.random.random((nblocks, 2, 3))
    indices = np.random.randint(0, 7, size=nblocks)
    indptr = np.arange(nblocks + 1)

    sbrm = DBSRMatrix()
    for i in range(nblocks):
        sbrm.append_row(indices[i], data[i])

    bsr = bsr_matrix((data, indices, indptr))
    self.assertTrue(np.allclose(bsr.todense(), sbrm.to_bsr().todense()))
def scipy_bsr_dot_numpy_with_swap(dense_matrix: np.ndarray, sparse_matrix: np.ndarray):
    """
    Calculates the dot product of two numpy arrays. The second array is
    converted to BSR format and the product is computed in transposed
    (swapped) order, so the sparse matrix sits on the left for fast
    multiplication.

    Parameters
    ----------
    dense_matrix : the first array
    sparse_matrix : the second array

    Returns
    -------
    A numpy array, which is the result of the matrix multiplication.
    """
    result = sparse.bsr_matrix(sparse_matrix.T).dot(dense_matrix.T)
    return result.T
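# A hedged usage sketch for scipy_bsr_dot_numpy_with_swap (assumes the
# function above is in scope). Since (D @ S)^T = S^T @ D^T, the swapped
# product should match a plain dense dot:
import numpy as np

d = np.random.rand(3, 4)
s = np.zeros((4, 2))
s[0, 0] = 1.0
assert np.allclose(np.asarray(scipy_bsr_dot_numpy_with_swap(d, s)), d.dot(s))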
def measure(self):
    """Measure qubit register.

    Collapses to 1 definite state. Simulates real measurement, as
    intermediate values of qubit registers during computation remain
    unknown.
    """
    self.normalise()
    data = self.array.toarray()[0]
    pos = np.arange(len(data))
    probs = data * np.conjugate(data)
    # If probs is not normalised (usually due to rounding errors), re-normalise
    # probs = probs/np.sum(probs)
    dist = stats.rv_discrete(values=(pos, probs))
    self.array = np.zeros(data.shape)
    self.array[dist.rvs()] = 1
    r = self.array
    self.array = sp.bsr_matrix(self.array)
    return r
def rho_block_D_inv_A(A, Dinv):
    """Return the (approx.) spectral radius of block D^-1 * A.

    Parameters
    ----------
    A : sparse-matrix
        size NxN
    Dinv : array
        Inverse of diagonal blocks of A
        size (N/blocksize, blocksize, blocksize)

    Returns
    -------
    approximate spectral radius of (Dinv A)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_block_D_inv_A
    >>> from pyamg.util.utils import get_block_diag
    >>> A = poisson((10,10), format='csr')
    >>> Dinv = get_block_diag(A, blocksize=4, inv_flag=True)

    """
    if not hasattr(A, 'rho_block_D_inv'):
        blocksize = Dinv.shape[1]
        if Dinv.shape[1] != Dinv.shape[2]:
            raise ValueError('Dinv has incorrect dimensions')
        if Dinv.shape[0] != int(A.shape[0] / blocksize):
            raise ValueError('Dinv and A have incompatible dimensions')

        Dinv = sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)

        # Don't explicitly form Dinv*A
        def matvec(x):
            return Dinv * (A * x)

        D_inv_A = LinearOperator(A.shape, matvec, dtype=A.dtype)
        A.rho_block_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_block_D_inv
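# The Examples section above stops before the call itself; a hedged completion
# (assumes pyamg is installed, mirroring the docstring's own setup):
from pyamg.gallery import poisson
from pyamg.util.utils import get_block_diag

A = poisson((10, 10), format='csr')
Dinv = get_block_diag(A, blocksize=4, inv_flag=True)
rho = rho_block_D_inv_A(A, Dinv)  # approximate spectral radius of Dinv*A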
def getAlu(self, x, x0, v0, th0, thd0):
    N = self.N
    gt = np.zeros((2, N))
    gt[0, :] = -0.1  # 0.15 # x is greater than 0.15
    gt[1, :] = -3    # -1   # velocity is greater than -1 m/s
    # gt[4,:] = -10
    control_n = max(3, int(0.1 / self.dt))  # I dunno. 4 seems to help
    # print(control_n)
    gt[:, :control_n] = -100
    # gt[1,:2] = -100
    # gt[1,:2] = -15
    # gt[0,:3] = -10
    gt = gt.flatten()
    lt = np.zeros((2, N))
    lt[0, :] = 1  # 0.75 # x less than 0.75
    lt[1, :] = 3  # 1    # velocity less than 1 m/s
    # lt[4,:] = 10
    lt[:, :control_n] = 100
    # lt[1,:2] = 100
    # lt[0,:3] = 10
    # lt[1,:2] = 15
    lt = lt.flatten()

    z = sparse.bsr_matrix((N, N))
    ineqA = sparse.bmat([[sparse.eye(N), z, z, z, z],
                         [z, sparse.eye(N), z, z, z]])  # .tocsc()
    # print(ineqA.shape)
    # print(ineqA)
    cons = self.constraint(forward.seed_sparse_gradient(x), x0, v0, th0, thd0)
    A = sparse.vstack(map(lambda z: z.dvalue, cons))  # y.dvalue.tocsc()
    # print(A.shape)
    totval = np.concatenate(tuple(map(lambda z: z.value, cons)))
    temp = A @ x - totval

    A = sparse.vstack((A, ineqA)).tocsc()
    # print(tuple(map(lambda z: z.value, cons)))
    # print(temp.shape)
    # print(lt.shape)
    # print(gt.shape)

    u = np.concatenate((temp, lt))
    l = np.concatenate((temp, gt))

    return A, l, u
def make_prob_dict(graph, n_samps, n_labels):
    # array of probabilities indexed by (time, label)
    y_t_s = np.zeros((n_samps + 1, n_labels))
    F = [0]
    for t in range(n_samps + 1):
        # y_t_s[t] = {}
        for s in F:
            arcs = graph[s].arcs
            for a in arcs:
                osym = graph.osyms.find(a.olabel)
                osym = osym[osym.find("(") + 1:osym.find(")")]
                y_t_s[t][int(osym)] = np.exp(-1 * float(a.weight))
        F = map(lambda x: map(lambda y: y.nextstate, graph[x].arcs), F)
        F = set([s for ss in F for s in ss])
    y_t_s = bsr_matrix(y_t_s, dtype='float32')
    return y_t_s
def hebbian_tensor(delta__ksi_i_mu__k, random_seed):
    """
    Computes the hebbian tensor J_i_j_k_l, i.e. interactions between units.

    Parameters
    ----------
    delta__ksi_i_mu__k -- 2D array
        Set of patterns stored in the network

    Returns
    -------
    J_i_j_k_l -- 2D array
        Interaction tensor, given by the hebbian learning rule

    Notes
    -----
    Intuitively, J_i_j_k_l should be of dimension 4. However, in order to
    use the sparse module from scipy, one has to have at most 2 dimensions.
    We use the convention that unit i in state k is indexed by ii = i*S + k.
    """
    rd.seed(random_seed + 2)

    # Building the connectivity matrix
    mask = spsp.lil_matrix((N, N))  # connectivity matrix
    deck = np.linspace(0, N - 1, N, dtype=int)
    for i in range(N):
        rd.shuffle(deck)
        mask[i, deck[:int(cm)]] = True
        cpt = 0
        # Put diagonal coefficient to 0 keeping cm connections
        while mask[i, i]:
            mask[i, i] = False
            if int(cm) + cpt < N:
                mask[i, deck[int(cm) + cpt]] = True
            cpt += 1

    # Has to be expanded to fit the convention used in the notes
    kronMask = spsp.kron(mask, np.ones((S, S)))
    kronMask = kronMask.tobsr(blocksize=(S, S))

    J_i_j_k_l = np.dot((delta__ksi_i_mu__k - a / S),
                       np.transpose(delta__ksi_i_mu__k - a / S))
    J_i_j_k_l = kronMask.multiply(J_i_j_k_l) / (cm * a * (1 - a / S))

    return spsp.bsr_matrix(J_i_j_k_l, blocksize=(S, S)), mask
def injection_interpolation(A, splitting, cost=[0]):
    """ Create interpolation operator by injection, that is, C-points are
    interpolated by value and F-points are not interpolated.

    Parameters
    ----------
    A : {csr_matrix}
        NxN matrix in CSR format or BSR format
    splitting : array
        C/F splitting stored in an array of length N

    Returns
    -------
    NxNc interpolation operator, P
    """
    if isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
        n = A.shape[0] // blocksize
    elif isspmatrix_csr(A):
        n = A.shape[0]
        blocksize = 1
    else:
        try:
            A = A.tocsr()
            warn("Implicit conversion of A to csr", SparseEfficiencyWarning)
            n = A.shape[0]
            blocksize = 1
        except:
            raise TypeError("Invalid matrix type, must be CSR or BSR.")

    P_rowptr = np.append(np.array([0], dtype='int32'),
                         np.cumsum(splitting, dtype='int32'))
    nc = P_rowptr[-1]
    P_colinds = np.arange(start=0, stop=nc, step=1, dtype='int32')

    if blocksize == 1:
        return csr_matrix((np.ones((nc,), dtype=A.dtype), P_colinds, P_rowptr),
                          shape=[n, nc])
    else:
        P_data = np.array(nc * [np.identity(blocksize, dtype=A.dtype)],
                          dtype=A.dtype)
        return bsr_matrix((P_data, P_colinds, P_rowptr),
                          blocksize=[blocksize, blocksize],
                          shape=[n * blocksize, nc * blocksize])
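# A hedged usage sketch for injection_interpolation (assumes the function and
# its scipy imports are in scope). With every other point a C-point, P simply
# injects the two C-point values:
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.eye(4))
splitting = np.array([1, 0, 1, 0], dtype='int32')
P = injection_interpolation(A, splitting)
assert P.shape == (4, 2)
assert np.array_equal(P.toarray(), np.array([[1., 0.],
                                             [0., 0.],
                                             [0., 1.],
                                             [0., 0.]]))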
def get_hessian(packing, stable=False):
    hes = packing.get_real_hessian()
    hes = hes.reshape(hes.size // packing.num_dim**2,
                      packing.num_dim, packing.num_dim)
    hes = sparse.bsr_matrix((hes, packing.adj_indices, packing.adj_indptr),
                            shape=(packing.num_particles * packing.num_dim,
                                   packing.num_particles * packing.num_dim))
    hes += hes.T
    if stable:
        stable_mask = np.repeat(packing.get_stable(), packing.num_dim)
        hes = ((hes.tocsr()[stable_mask, :][:, stable_mask])
               .tobsr(blocksize=(packing.num_dim, packing.num_dim)))
    hes -= sparse.block_diag([
        hes.data[hes.indptr[i]:hes.indptr[i + 1]].sum(axis=0)
        for i in range(hes.indptr.size - 1)
    ], format="bsr")
    return hes
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"): Y = np.zeros((M, N), dtype=dtype) assert M % BS_R == 0 assert N % BS_C == 0 nnz = int(density * M * N) num_blocks = int(nnz / (BS_R * BS_C)) + 1 candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C)))) assert candidate_blocks.shape[0] == M // BS_R * N // BS_C chosen_blocks = candidate_blocks[np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)] for i in range(len(chosen_blocks)): r, c = chosen_blocks[i] Y[r:r+BS_R,c:c+BS_C] = np.random.randn(BS_R, BS_C) s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C)) assert s.data.shape == (num_blocks, BS_R, BS_C) assert s.data.size >= nnz assert s.indices.shape == (num_blocks, ) assert s.indptr.shape == (M // BS_R + 1, ) return s
def sp_create(dim1, dim2, format):
    if format == "dok":
        result = sp.dok_matrix((dim1, dim2))
    elif format == "csr":
        result = sp.csr_matrix((dim1, dim2))
    elif format == "csc":
        result = sp.csc_matrix((dim1, dim2))
    elif format == "coo":
        result = sp.coo_matrix((dim1, dim2))
    elif format == "lil":
        result = sp.lil_matrix((dim1, dim2))
    elif format == "bsr":
        result = sp.bsr_matrix((dim1, dim2))
    elif format == "dia":
        result = sp.dia_matrix((dim1, dim2))
    else:
        raise ValueError("Unknown sparse format!")
    return result
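# A hedged usage sketch for sp_create (assumes the function above and its
# scipy.sparse alias `sp` are in scope): every branch returns an empty matrix
# of the requested format.
m = sp_create(4, 4, "bsr")
assert m.format == "bsr" and m.shape == (4, 4) and m.nnz == 0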
def _StressStiffness(fem, matVals, U):
    """
    Assembles the stress stiffness matrix and its design sensitivity

    Parameters
    ----------
    fem : FEM object
        An object describing the underlying finite element analysis
    matVals : dict
        Interpolated material values and their sensitivities
    U : array_like
        Displacement vector

    Returns
    -------
    Ks : sparse matrix
        Stress stiffness matrix
    dKs : array_like
        Stress stiffness matrix sensitivity to design values
    """
    if not hasattr(fem, "dof"):
        offset = np.arange(fem.nDof).reshape(1, -1)
        fem.dof = [(fem.nDof * el.reshape(-1, 1) + offset).ravel()
                   for el in fem.elements]

    Ks = np.zeros_like(fem.i, dtype=float)
    dKs = np.zeros_like(fem.i, dtype=float)
    ind = 0
    for el in range(fem.nElem):
        if fem.uniform:
            stress = np.dot(fem.DB[0], U[fem.dof[el]])
            G = fem.G[0]
        else:
            stress = np.dot(fem.DB[el], U[fem.dof[el]])
            G = fem.G[el]
        ks = np.dot(G.T, np.dot(_sigtos(stress), G))
        for i in range(1, fem.nDof):
            ks[i::fem.nDof, i::fem.nDof] = ks[::fem.nDof, ::fem.nDof]
        Ks[ind:ind + ks.size] = -matVals['Es'][el] * ks.ravel()
        dKs[ind:ind + ks.size] = -matVals['dEsdy'][el] * ks.ravel()
        ind += ks.size

    return sparse.bsr_matrix((Ks, (fem.i, fem.j)),
                             blocksize=(fem.nDof, fem.nDof)), dKs
def __init__(self, x0, v0, theta0, thetadot0):
    self.N = 50
    self.NVars = 5
    T = 8.0
    self.dt = T / self.N
    dt = self.dt
    self.dtinv = 1. / dt
    # Px = sparse.eye(N)
    # sparse.csc_matrix((N, N))
    # The three different weighting matrices for x, v, and external force
    reg = sparse.eye(self.N) * 0.05
    z = sparse.bsr_matrix((self.N, self.N))
    # sparse.diags(np.arange(N)/N)
    pp = sparse.diags(np.linspace(1, 7, self.N))  # sparse.eye(self.N)
    P = sparse.block_diag([reg, 10 * reg, pp, reg, 10 * reg])  # 1*reg,1*reg])
    # P[N,N]=10
    self.P = P
    THETA = 2
    q = np.zeros((self.NVars, self.N))
    q[THETA, :] = np.pi
    q[0, :] = 0.5
    # q[N,0] = -2 * 0.5 * 10
    q = q.flatten()
    q = -P @ q
    # u = np.arr
    self.x = np.random.randn(self.N, self.NVars).flatten()
    # x = np.zeros((N,NVars)).flatten()
    # v = np.zeros(N)
    # f = np.zeros(N)
    # print(f(ad.seed(x)).dvalue)
    A, l, u = self.getAlu(self.x, x0, v0, theta0, thetadot0)
    self.m = osqp.OSQP()
    self.m.setup(P=P, q=q, A=A, l=l, u=u, time_limit=0.1,
                 verbose=False)  # , eps_rel=1e-2  # **settings
    # warm_start=False, eps_prim_inf=1e-1
    self.results = self.m.solve()
    # print(self.results.x)
    for i in range(100):
        self.update(x0, v0, theta0, thetadot0)
def init_map(self):
    xs = [27, 11, 31, 22, 21, 11, 25, 11, 26, 25, 17, 4,
          31, 17, 19, 35, 6, 10, 38, 14, 23, 24, 25, 15]
    ys = [7, 4, 30, 21, 18, 18, 30, 9, 24, 18, 14, 12,
          13, 11, 3, 10, 26, 13, 11, 1, 16, 13, 11, 4]
    self.connections = [[14, 15, 22], [7, 11, 19, 23], [8], [4, 9, 16, 20],
                        [3, 5, 10], [4, 10, 16, 17], [8], [1, 13, 17],
                        [2, 6, 9], [3, 8, 20], [4, 5, 13, 17, 21], [1, 16, 17],
                        [15, 20, 22], [7, 10, 22, 23], [0, 23], [0, 12, 18],
                        [3, 5, 11], [5, 7, 10, 11], [15], [1, 23],
                        [3, 9, 12, 21], [10, 20, 22], [0, 12, 13, 21],
                        [1, 13, 14, 19]]
    self.adj_matrix = np.ones((len(xs), len(xs))) * -np.inf
    for stop_name, i in Controller.bus_stop_names.items():
        # Creates bus stop and appends it to the list
        self.bus_stops[i] = BusStop(i, stop_name, xs[i], ys[i])
        orig = i
        # Creates the bidirectional connection between stop orig and stop dest
        for dest in self.connections[i]:
            self.adj_matrix[orig, dest] = np.sqrt((xs[orig] - xs[dest])**2 +
                                                  (ys[orig] - ys[dest])**2)
    not_connected = self.adj_matrix == -np.inf
    adj_copy = self.adj_matrix.copy()
    adj_copy[not_connected] = 0
    self.average_travel_time = adj_copy[adj_copy > 0].mean()
    adj_copy = sparse.bsr_matrix(adj_copy)
    self.min_dist = sparse.csgraph.dijkstra(adj_copy)
    self.average_minumum_delivery_time = self.min_dist[self.min_dist > 0].mean()
    self.init_probability_distribution()
    self.init_attractivity_tensor()
    self.init_similarity_matrix()
def regular_svd(A, k=0):
    if k <= 0 or k >= min(A.shape):
        U, s, VT = la.svd(A, full_matrices=False)
    else:
        U, s, VT = sparsela.svds(sparse.bsr_matrix(A), k=k)
        # svds returns singular values in ascending order; reverse to match la.svd
        U = U[:, ::-1]
        s = s[::-1]
        VT = VT[::-1, :]
    V = VT.T
    # If the actual rank is smaller than k, part of the matrices will be nan.
    if not np.all(np.isfinite(s)):
        print >> sys.stderr, "[WARN] regular_svd: s has non-finite numbers."
        finite_indices = np.isfinite(s)
        U = U[:, finite_indices]
        s = s[finite_indices]
        V = V[:, finite_indices]
    return U, s, V
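# A hedged usage sketch for regular_svd (assumes the function above is in
# scope with its module imports: numpy as np, a dense-SVD alias `la`, a
# sparse-SVD alias `sparsela`, scipy.sparse as `sparse`, and sys):
import numpy as np

A = np.random.rand(6, 4)
U, s, V = regular_svd(A, k=2)  # truncated path: svds on a BSR copy of A
assert U.shape == (6, 2) and s.shape == (2,) and V.shape == (4, 2)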
def on_off_representation(streams, phraseStarts):
    with open('indexes.csv', 'r', encoding='utf-8') as csv_file:
        note_dict = dict(csv.reader(csv_file))
    phrases = []
    x = 0
    for stream in streams:
        for note in stream.notesAndRests:
            if x in phraseStarts:
                if x != 0:
                    bsr = sparse.bsr_matrix(
                        (np.array(data), (np.array(rows), np.array(cols)))).toarray()
                    shape = (len(note_dict), step)
                    bsr.resize(shape)
                    phrases.append(bsr)
                step = 0
                current_notes = []
                rows = []
                cols = []
                data = []
            thirty_two_length = int(note.quarterLength * 8)
            if thirty_two_length != 0:
                current_notes.append(note)
                for n in current_notes:
                    # check the type of each accumulated note, not the current one
                    if str(type(n)) == str("<class 'music21.note.Rest'>"):
                        string_rep = "R0" + str(n.quarterLength)
                    if str(type(n)) == str("<class 'music21.note.Note'>"):
                        string_rep = str(n.pitch) + str(n.quarterLength)
                    rows.append(int(note_dict[string_rep]))
                    cols.append(step)
                    # rows.append(int(note_dict[string_rep]))
                    # cols.append(step + thirty_two_length - 1)
                    data += [1]
                    # data += [1,1]
                step += thirty_two_length
                current_notes = []
            else:
                current_notes.append(note)
            x += 1
    return phrases
def process_params(expr, params, block_size, sparsity_threshold):
    """Find qualified dense weights and convert them to BSR sparse weights.

    Parameters
    ----------
    expr : Relay.Expr
        Expr of the network
    params : Dict[String, tvm.nd.array]
        parameters of the network
    block_size : Tuple(int, int)
        Blocksize in BSR matrix
    sparsity_threshold : float
        Minimal sparsity requirement for converting to sparse operation

    Returns
    -------
    ret : Namedtuple[weight_name: Array[String], weight_shape: Array[Array[IntImm]]]
        return names of qualified dense weight and the shape in BSR format
    """
    memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
    weight_names = _search_dense_op_weight(expr)
    for name in weight_names:
        name = str(name)
        w_np = params[name].asnumpy()
        sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size)
        if sparsity >= sparsity_threshold:
            sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)

            # remove dense weight
            del params[name]
            memo.weight_name.append(name)
            memo.weight_shape.append(
                list(sparse_weight.data.shape)
                + list(sparse_weight.indices.shape)
                + list(sparse_weight.indptr.shape)
            )
            params[name + ".data"] = tvm.nd.array(sparse_weight.data)
            params[name + ".indices"] = tvm.nd.array(sparse_weight.indices)
            params[name + ".indptr"] = tvm.nd.array(sparse_weight.indptr)
    ret = SparseAnalysisResult(
        weight_name=tvm.runtime.convert(memo.weight_name),
        weight_shape=tvm.runtime.convert(memo.weight_shape),
    )
    return ret
def load_sparse_matrix_data(h_node):
    _, base_type, data = get_type_and_data(h_node)
    h_root = h_node.parent
    indices = h_root.get('indices')[:]
    indptr = h_root.get('indptr')[:]
    shape = h_root.get('shape')[:]

    if base_type == b'csc_matrix':
        smat = sparse.csc_matrix((data, indices, indptr), dtype=data.dtype,
                                 shape=shape)
    elif base_type == b'csr_matrix':
        smat = sparse.csr_matrix((data, indices, indptr), dtype=data.dtype,
                                 shape=shape)
    elif base_type == b'bsr_matrix':
        smat = sparse.bsr_matrix((data, indices, indptr), dtype=data.dtype,
                                 shape=shape)
    return smat
def _relabel_nearest(self, newlabel, nearest_t, varargin={}):
    arg = dict()
    arg['maxcelldistance'] = 25

    nearest_label = self.Labels[nearest_t]
    if isinstance(nearest_label, bsr_matrix):
        nearest_label = nearest_label.toarray()

    nearest = [(p.label, p.centroid, p.coords)
               for p in regionprops(nearest_label)]
    nearest_label, nearest_xy, coords = zip(*nearest)

    new = [(p.label, p.centroid, p.coords) for p in regionprops(newlabel)]
    new_label, new_xy, _ = zip(*new)
    labels_coords = {k: v for k, _, v in new}

    knntree = KDTree(nearest_xy)
    dists, idx = knntree.query(new_xy, k=2, eps=arg['maxcelldistance'])
    qdata = zip(new_label, dists, idx)

    label_map = {}
    for newlbl_idx, d, j in qdata:
        if d[0] > arg['maxcelldistance']:
            label_map[newlbl_idx] = 0
        else:
            label_map[newlbl_idx] = nearest_label[j[0]]

    counts = Counter(label_map.values())
    for l, c in counts.items():
        if c > 1:
            label_map[l] = 0
    if 0 in counts:
        del counts[0]
    if 0 in label_map:
        del label_map[0]
    # compare the top multiplicity, not the list returned by most_common
    if counts and counts.most_common(1)[0][1] > 1:
        print('Warning two cells were assigned the same label.',
              counts.most_common(4))

    for k, v in label_map.items():
        try:
            coords = labels_coords[k]
        except KeyError:
            print(k)
            continue
        for x, y in coords:
            newlabel[x, y] = v

    return bsr_matrix(newlabel)
def test_merge_cols(self):
    nblocks = 13
    np.random.seed(nblocks)
    data = np.random.random((nblocks, 2, 3))
    indices = np.random.randint(0, 6, size=nblocks)
    indptr = np.arange(len(indices) + 1)

    sbrm = DBSRMatrix()
    for i in range(nblocks):
        sbrm.append_row(indices[i], data[i])

    indices[indices == 2] = 1
    indices[indices > 2] += -1
    sbrm.merge_cols((1, 2))

    bsr = bsr_matrix((data, indices, indptr))
    self.assertTrue(np.allclose(bsr.todense(), sbrm.to_bsr().todense()))
def test_from_numpy_sparse(self):
    domain = Domain([ContinuousVariable(c) for c in "abc"])
    x = np.arange(12).reshape(4, 3)

    t = Table.from_numpy(domain, x, None, None)
    self.assertFalse(sp.issparse(t.X))

    t = Table.from_numpy(domain, sp.csr_matrix(x))
    self.assertTrue(sp.isspmatrix_csr(t.X))

    t = Table.from_numpy(domain, sp.csc_matrix(x))
    self.assertTrue(sp.isspmatrix_csc(t.X))

    t = Table.from_numpy(domain, sp.coo_matrix(x))
    self.assertTrue(sp.isspmatrix_csr(t.X))

    t = Table.from_numpy(domain, sp.lil_matrix(x))
    self.assertTrue(sp.isspmatrix_csr(t.X))

    t = Table.from_numpy(domain, sp.bsr_matrix(x))
    self.assertTrue(sp.isspmatrix_csr(t.X))
def test_remove_row(self):
    nblocks = 12
    np.random.seed(nblocks)
    data = np.random.random((nblocks, 2, 2))
    indices = np.random.randint(0, 5, size=nblocks)

    sbrm = DBSRMatrix()
    for i in range(nblocks):
        sbrm.append_row(indices[i], data[i])

    data = np.delete(data, 3, 0)
    indices = np.delete(indices, 3)
    indptr = np.arange(nblocks)
    sbrm.remove_row(3)

    bsr = bsr_matrix((data, indices, indptr))
    self.assertTrue(np.allclose(bsr.todense(), sbrm.to_bsr().todense()))
def validate_lr(self, train_data, lab_data, C=1.0):
    train_data_features = self.vectorizer.fit_transform(train_data)
    train_data_features = bsr_matrix(train_data_features)
    lab_data = np.array(lab_data)
    print("start k-fold validate...")
    lr = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=C,
                            fit_intercept=True, intercept_scaling=1.0,
                            class_weight=None, random_state=None)
    cv = np.mean(cross_val_score(lr, train_data_features, lab_data,
                                 cv=10, scoring='roc_auc'))
    return cv
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
    """With cuda, we use alter_op_layout to swap the default sparse_dense
    implementation for one that operates on a padded matrix. We also pad
    the matrix.
    """
    if (isinstance(inputs[1], relay.Constant)
            and isinstance(inputs[2], relay.Constant)
            and isinstance(inputs[3], relay.Constant)):
        sparse_matrix = sp.bsr_matrix((inputs[1].data.asnumpy(),
                                       inputs[2].data.asnumpy(),
                                       inputs[3].data.asnumpy()))
        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
        return relay.nn._make.sparse_dense_padded(
            inputs[0],
            relay.Constant(tvm.nd.array(sparse_matrix.data)),
            relay.Constant(tvm.nd.array(sparse_matrix.indices)),
            relay.Constant(tvm.nd.array(sparse_matrix.indptr)),
        )
    return None
def ConstructSystem(self, E):
    """
    Constructs the linear system by defining K and F. Does not solve the
    system (must call SolveSystem()).

    Parameters
    ----------
    E : array_like
        The densities of each element

    Returns
    -------
    None
    """
    # Initial construction
    self.K = sparse.bsr_matrix((self.k * E[self.e], (self.i, self.j)),
                               blocksize=(self.nDof, self.nDof))

    # Add any springs
    springK = np.zeros(self.U.size)
    springK[self.springDof] = self.stiff
    self.K += sparse.spdiags(springK, [0], springK.size, springK.size)

    # Adjust right-hand-side
    self.b = self.F - self.K.tocsr()[:, self.fixDof] * self.U[self.fixDof]
    self.b[self.fixDof] = self.U[self.fixDof]

    # Apply Dirichlet BC
    interiorDiag = np.zeros(self.K.shape[0])
    interiorDiag[self.freeDof] = 1.
    interiorDiag = sparse.spdiags(
        interiorDiag, 0, self.K.shape[0],
        self.K.shape[1]).tobsr(blocksize=(self.nDof, self.nDof))
    exteriorDiag = np.zeros(self.K.shape[0])
    exteriorDiag[self.fixDof] = 1.
    exteriorDiag = sparse.spdiags(
        exteriorDiag, 0, self.K.shape[0],
        self.K.shape[1]).tobsr(blocksize=(self.nDof, self.nDof))
    self.K = interiorDiag * self.K * interiorDiag + exteriorDiag
    self.K = self.K.tobsr(blocksize=(self.nDof, self.nDof))