def test_small_values(format1, format2):
    s1 = format1(sparse.COO(coords=[[0, 10]], data=[3.6e-100, 7.2e-9], shape=(20,)))
    s2 = format2(
        sparse.COO(coords=[[0, 0], [4, 28]], data=[3.8e-25, 4.5e-225], shape=(20, 50))
    )

    dense_convertor = lambda x: x.todense() if isinstance(x, sparse.SparseArray) else x
    x1, x2 = dense_convertor(s1), dense_convertor(s2)

    assert_eq(x1 @ x2, s1 @ s2)
def node_attr_matrix(G, attrs=None, exclude_attrs=None, nodes=None):
    """Return node attributes as a sparse.COO matrix (rows: nodes, columns: attributes)."""
    attrs, nodes = _validate_args_node_attr_func(G, attrs, exclude_attrs, nodes)
    M, N = len(nodes), len(attrs)

    # Collect attribute shapes and check that each attribute has a consistent
    # shape across all nodes that define it.
    attr_shapes = dict(zip(attrs, [None] * N))
    for node in G.nodes:
        for attr in G.nodes[node]:
            s = np.shape(G.nodes[node][attr])
            if attr in attr_shapes:
                if attr_shapes[attr] is None:
                    attr_shapes[attr] = s
                elif attr_shapes[attr] != s:
                    raise ValueError('Shape of attribute %r is not consistent.' % (attr,))
    attr_shapes = [attr_shapes[x] for x in attrs]

    # Collect data in COO form; nodes missing an attribute are skipped,
    # leaving a structural zero at that position.
    data, I, J = [], [], []
    for node_idx, node in enumerate(nodes):
        node_data = G.nodes[node]
        for attr_idx, attr in enumerate(attrs):
            try:
                val = node_data[attr]
            except KeyError:
                continue
            data.append(val)
            I.append(node_idx)
            J.append(attr_idx)

    # Construct the sparse array.
    return sparse.COO((I, J), data, shape=(M, N))
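# A minimal usage sketch for node_attr_matrix, assuming networkx is available
# and that the helper _validate_args_node_attr_func (defined elsewhere in this
# module) passes explicit `attrs` and `nodes` lists through unchanged. The
# graph and attribute values below are invented for illustration.
import networkx as nx

G_demo = nx.Graph()
G_demo.add_node("a", weight=1.0, rank=2)
G_demo.add_node("b", weight=3.0)  # "rank" missing -> structural zero
M_demo = node_attr_matrix(G_demo, attrs=["weight", "rank"], nodes=["a", "b"])
print(M_demo.todense())  # [[1. 2.] [3. 0.]] -- rows are nodes, columns are attributes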
def get_boundary_matrices(self, global_index):
    if not os.path.exists(self.cache + '/Am_' + str(global_index) + '.p'):
        kdir = self.kappa_directional[global_index]
        k_angle = sparse.COO([0, 1, 2], kdir, shape=(3,))
        tmp = sparse.tensordot(self.mesh.CP, k_angle, axes=1)
        AP = tmp.clip(min=0)
        AM = tmp - AP  # preserve the sign of AM
        #if self.argv.setdefault('antialiasing', False):
        #    self.get_antialiasing(self.mesh.CP, self.kappa_directional_not_int[global_index], AM, AP)
        AP = AP.todense()
        AM = -AM.todense()
        if self.save_data:
            AM.dump(open(self.cache + '/Am_' + str(global_index) + '.p', 'wb'))
            AP.dump(open(self.cache + '/Ap_' + str(global_index) + '.p', 'wb'))
    else:
        AM = np.load(open(self.cache + '/Am_' + str(global_index) + '.p', 'rb'),
                     allow_pickle=True)
        AP = np.load(open(self.cache + '/Ap_' + str(global_index) + '.p', 'rb'),
                     allow_pickle=True)
    return AM, AP
def test_asnumpy():
    s = sparse.COO(data=[1], coords=[2], shape=(5,))
    assert_eq(sparse.asnumpy(s), s.todense())
    assert_eq(
        sparse.asnumpy(s, dtype=np.float64),
        np.asarray(s.todense(), dtype=np.float64),
    )
    a = np.array([1, 2, 3])
    # Array passes through with no copying.
    assert sparse.asnumpy(a) is a
def flows_to_tensor(flows):
    """Transform a flows DataFrame into a sparse.COO tensor with axes
    (sim_id, x, y, time), suitable as network input."""
    coords_columns = ["sim_id", "x", "y", "time"]
    return sparse.COO(
        np.transpose(flows[coords_columns].values),
        flows['vehicles_number'],
        shape=(flows['sim_id'].max() + 1, 32, 32, flows['time'].max() + 1),
    )
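# A minimal sketch feeding flows_to_tensor, assuming pandas is available and
# the 32x32 spatial grid hard-coded above; the flow records are invented.
import pandas as pd

flows_demo = pd.DataFrame({
    "sim_id": [0, 0, 1],
    "x": [3, 5, 3],
    "y": [7, 7, 7],
    "time": [0, 1, 0],
    "vehicles_number": [4, 2, 6],
})
print(flows_to_tensor(flows_demo).shape)  # (2, 32, 32, 2): (sim, x, y, time)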
def safeCastPydataTensorToInts(tensor):
    data = numpy.zeros(len(tensor.data), dtype='int64')
    for i in range(len(data)):
        # If the cast would turn a value into 0, instead write a 1. This preserves
        # the sparsity pattern of the data.
        if int(tensor.data[i]) == 0:
            data[i] = 1
        else:
            data[i] = int(tensor.data[i])
    return sparse.COO(tensor.coords, data, tensor.shape)
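# A small check of the zero-preserving cast above, assuming pydata/sparse is
# importable as `sparse`; the values are invented. 0.4 would truncate to 0,
# so it is written as 1 to keep the sparsity pattern intact.
import sparse

t_demo = sparse.COO([[0, 1, 2]], [0.4, 2.7, -1.2], shape=(5,))
print(safeCastPydataTensorToInts(t_demo).todense())  # [ 1  2 -1  0  0]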
def get_multiscale_diffusive(self, index, n, SDIFF, TDIFF, TDIFFGrad):
    angle = self.control_angle[index]
    aa = sparse.COO([0, 1, 2], angle, shape=(3,))
    HW_PLUS = sparse.tensordot(self.mesh.CP, aa, axes=1).clip(min=0)

    s = SDIFF[n] * self.mat['mfp'][n]
    temp = TDIFF[n] - self.mat['mfp'][n] * np.dot(self.control_angle[index],
                                                  TDIFFGrad[n].T)
    t = temp * self.mat['domega'][index]
    j = np.multiply(temp, HW_PLUS) * self.mat['domega'][index]

    return t, s, j
def test_sparse_coo_left(benchmark):
    data = np.zeros((16384, 2 * 16384), dtype=np.float32)
    masks = sparse.COO(
        scipy.sparse.csr_matrix(
            ([1.] * 1000, ([0, 1, 2, 3, 4, 5, 6, 7] * 125, range(0, 8000, 8))),
            shape=(8, 16384),
            dtype=np.float32))

    def doit(data, masks):
        return masks @ data

    benchmark(doit, data, masks)
def test_sparse_coo_right(benchmark):
    data = np.zeros((2 * 16384, 16384), dtype=np.float32)
    masks = sparse.COO(
        scipy.sparse.csr_matrix(
            ([1.] * 1000, (range(0, 8000, 8), [0, 1, 2, 3, 4, 5, 6, 7] * 125)),
            shape=(16384, 8),
            dtype=np.float32))

    def doit(data, masks):
        return data @ masks

    benchmark(doit, data, masks)
def diag(a):
    """
    Perform equivalent of :obj:`numpy.diag`.
    """
    if len(a.shape) == 2:
        # Extract the diagonal of a 2-d array.
        return a.to_scipy_sparse().diagonal()
    elif len(a.shape) == 1:
        # Build a 2-d array with `a` on the diagonal.
        shape = (a.shape[0], a.shape[0])
        return sparse.COO(coords=np.vstack([a.coords, a.coords]),
                          data=a.data, shape=shape)
    else:
        raise RuntimeError("diag requires a 1- or 2-dimensional array")
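# A quick exercise of both branches of diag, assuming pydata/sparse; the
# inputs are invented for illustration.
import numpy as np
import sparse

v = sparse.COO([[0, 2]], [5.0, 7.0], shape=(3,))
print(diag(v).todense())  # 3x3 matrix with [5, 0, 7] on the diagonal
m = sparse.random((4, 4), density=0.5)
print(diag(m))            # 1-d numpy array holding the diagonal entries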
def test_result_type(t1, t2, func, data):
    a = np.array(data, dtype=t1)
    b = np.array(data, dtype=t2)
    expect = np.result_type(a, b)
    assert func(a, sparse.COO(b)) == expect
    assert func(sparse.COO(a), b) == expect
    assert func(sparse.COO(a), sparse.COO(b)) == expect
    assert func(a.dtype, sparse.COO(b)) == np.result_type(a.dtype, b)
    assert func(sparse.COO(a), b.dtype) == np.result_type(a, b.dtype)
def get_bulk_data(self, global_index, TB, TL):
    index_irr = self.mat['temp_vec'][global_index]
    aa = sparse.COO([0, 1, 2], self.control_angle[global_index], shape=(3,))
    mfp = self.mat['mfp'][global_index]
    irr_angle = self.mat['angle_map'][global_index]

    if not irr_angle == self.old_index:
        self.old_index = irr_angle
        if not os.path.exists(self.cache + '/A_' + str(irr_angle) + '.npz'):
            test2 = sparse.tensordot(self.mesh.N, aa, axes=1)
            AP = test2.clip(min=0)
            AM = test2 - AP
            AP = spdiags(np.sum(AP, axis=1).todense(), 0,
                         self.n_elems, self.n_elems, format='csc')
            self.mesh.B = self.mesh.B.tocsc()
            self.P = np.sum(np.multiply(AM, self.mesh.B), axis=1).todense()
            tmp = sparse.tensordot(self.mesh.CM, aa, axes=1)
            HW_PLUS = tmp.clip(min=0)
            self.HW_MINUS = HW_PLUS - tmp
            BP = spdiags(np.sum(HW_PLUS, axis=1).todense(), 0,
                         self.n_elems, self.n_elems, format='csc')
            self.A = AP + AM + BP
            self.P.dump(self.cache + '/P_' + str(irr_angle) + '.p')
            # save_npz/load_npz live at the top level of pydata/sparse.
            sparse.save_npz(self.cache + '/A_' + str(irr_angle) + '.npz', self.A)
            sparse.save_npz(self.cache + '/HW_MINUS_' + str(irr_angle) + '.npz',
                            self.HW_MINUS)
        else:
            self.P = np.load(open(self.cache + '/P_' + str(irr_angle) + '.p', 'rb'),
                             allow_pickle=True)
            self.A = sparse.load_npz(self.cache + '/A_' + str(irr_angle) + '.npz')
            self.HW_MINUS = sparse.load_npz(self.cache + '/HW_MINUS_' + str(irr_angle) + '.npz')

    boundary = np.sum(np.multiply(TB, self.HW_MINUS), axis=1).todense()
    RHS = mfp * (self.P + boundary) + TL[index_irr]
    #Add connection-----
    #RHS += self.TL_old[global_index] - self.temp_old[global_index] - self.delta_old[global_index]
    #-------------------
    #else:
    #    RHS = mfp * boundary + TL[index_irr]

    F = scipy.sparse.eye(self.n_elems, format='csc') + self.A * mfp
    lu = splu(F.tocsc())
    return lu.solve(RHS)
def test_setting_into_numpy_slice():
    actual = np.zeros((5, 5))
    s = sparse.COO(data=[1, 1], coords=(2, 4), shape=(5,))
    # This calls s.__array__(dtype('float64')) which means that __array__
    # must accept a positional argument. If not this will raise, of course,
    # TypeError: __array__() takes 1 positional argument but 2 were given
    with auto_densify():
        actual[:, 0] = s

    # Might as well check the content of the result as well.
    expected = np.zeros((5, 5))
    expected[:, 0] = s.todense()
    assert_eq(actual, expected)

    # Without densification, setting is unsupported.
    with pytest.raises(RuntimeError):
        actual[:, 0] = s
def test_scipy_sparse_interface():
    n = 100
    m = 10
    row = np.random.randint(0, n, size=n, dtype=np.uint16)
    col = np.random.randint(0, m, size=n, dtype=np.uint16)
    data = np.ones(n, dtype=np.uint8)

    inp = (data, (row, col))

    x = scipy.sparse.coo_matrix(inp)
    xx = sparse.COO(inp)

    assert_eq(x, xx)
    assert_eq(x.T, xx.T)
    assert_eq(xx.to_scipy_sparse(), x)
    assert_eq(COO.from_scipy_sparse(xx.to_scipy_sparse()), xx)

    assert_eq(x, xx)
    assert_eq(x.T.dot(x), xx.T.dot(xx))
def shiftLastMode(self, tensor):
    coords = tensor.coords
    data = tensor.data
    resultCoords = []
    for j in range(len(tensor.shape)):
        resultCoords.append([0] * len(data))
    resultValues = [0] * len(data)
    for i in range(len(data)):
        for j in range(len(tensor.shape)):
            resultCoords[j][i] = coords[j][i]
        # resultValues[i] = data[i]
        # TODO (rohany): Temporarily use a constant as the value.
        resultValues[i] = 2
        # For order 2 tensors, always shift the last coordinate. Otherwise, shift only
        # coordinates that have even last coordinates. This ensures that there is at
        # least some overlap between the original tensor and its shifted counterpart.
        if len(tensor.shape) <= 2 or resultCoords[-1][i] % 2 == 0:
            resultCoords[-1][i] = (resultCoords[-1][i] + 1) % tensor.shape[-1]
    return sparse.COO(resultCoords, resultValues, tensor.shape)
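# A small demonstration of shiftLastMode, assuming pydata/sparse. It is
# written as a method but never touches `self`, so passing None works for a
# standalone check; the 2-d input is invented, and for order-2 tensors every
# last coordinate is shifted by one (mod the dimension).
import sparse

shift_demo = sparse.COO([[0, 1], [2, 3]], [9, 9], shape=(4, 4))
print(shiftLastMode(None, shift_demo).todense())
# entries move (0, 2) -> (0, 3) and (1, 3) -> (1, 0), with constant value 2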
def trips_to_tensor(data):
    data['cell_from'] = data['x_from'].astype(str).str.cat(
        data['y_from'].astype(str), sep="_")
    data['cell_to'] = data['x_to'].astype(str).str.cat(
        data['y_to'].astype(str), sep="_")
    data['code'] = data['cell_from'].astype(str).str.cat(
        data['cell_to'].astype(str), sep="_")

    # Keep only the most frequent trips (above the 95th percentile of counts).
    trips_hist = data.groupby('code')['sim_id'].count()
    threshold = trips_hist.quantile(0.95)
    trips_interested = np.array(trips_hist[trips_hist > threshold].index)
    data = data[data['code'].isin(trips_interested)]

    # Encode trip codes as integer column indices (le_trips is a module-level
    # LabelEncoder).
    data['code_encoded'] = le_trips.fit_transform(data['code'])
    trips_per_sim = data.groupby(['sim_id', 'code_encoded']).size().reset_index()

    return sparse.COO(
        np.transpose(trips_per_sim[['sim_id', 'code_encoded']].values),
        trips_per_sim[0].values,
        shape=(data['sim_id'].max() + 1, data['code_encoded'].max() + 1),
    )
def test_scalar_list_init():
    a = sparse.COO([], [], ())
    b = sparse.COO([], [1], ())

    assert a.todense() == 0
    assert b.todense() == 1
def load(self, path):
    dims, coords, values = self.loader.load(path)
    return sparse.COO(coords, values, tuple(dims))
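# A sketch of the load path with a stub loader, since the real loader class is
# defined elsewhere; the (dims, coords, values) contract is inferred from the
# call above, and the stub data is invented.
class _StubLoader:
    def load(self, path):
        return [2, 2], [[0, 1], [0, 1]], [1.0, 2.0]

class _Demo:
    def __init__(self):
        self.loader = _StubLoader()

_Demo.load = load  # attach the function above as a method
print(_Demo().load("ignored.path").todense())  # [[1. 0.] [0. 2.]]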
def test_tensordot_valueerror():
    x1 = sparse.COO(np.array(1))
    x2 = sparse.COO(np.array(1))

    with pytest.raises(ValueError):
        x1 @ x2
def solve_bte(self, **argv):
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0 and self.verbose:
        print('   Iter    Thermal Conductivity [W/m/K]       Error       Diffusive - BTE - Ballistic')
        print('   ---------------------------------------------------------------------------------------')

    n_mfp = self.mat['n_serial']
    temp_vec = self.mat['temp_vec']
    nT = np.shape(self.mat['B'])[0]
    kdir = self.kappa_directional

    if self.argv.setdefault('load_state', False):
        #data = dd.io.load(self.argv.setdefault('filename', 'solver.hdf5'))
        data = pickle.load(open(argv.setdefault('filename', 'solver.p'), 'rb'))
        error_vec = data['error_vec']
        kappa_vec = data['kappa_vec']
        ms_vec = data['ms_vec']
        TL = data['TL']
        TB = data['TB']
        temp_fourier = data['temp_fourier']
        temp_fourier_grad = data['temp_fourier_grad']
        if rank == 0:
            for n in range(len(ms_vec)):
                print('   {0:7d} {1:20.4E} {2:25.4E} {3:10.2F} {4:10.2F} {5:10.2F}'
                      .format(n + 1, kappa_vec[n], error_vec[n],
                              ms_vec[n][0], ms_vec[n][1], ms_vec[n][2]))
    else:
        #Fourier first guess----
        if rank == 0:
            (kappa_fourier, temp_fourier, temp_fourier_grad,
             temp_fourier_int, flux_fourier_int) = \
                self.get_diffusive_suppression_function(self.mat['kappa_bulk_tot'])
            kappa_fourier = np.array([kappa_fourier])
            data = {'kappa_fourier': kappa_fourier,
                    'temp_fourier': temp_fourier,
                    'temp_fourier_grad': temp_fourier_grad,
                    'temp_fourier_int': temp_fourier_int,
                    'flux_fourier_int': flux_fourier_int}
        else:
            data = None
        data = comm.bcast(data, root=0)
        comm.Barrier()
        temp_fourier = data['temp_fourier']
        temp_fourier_int = data['temp_fourier_int']
        flux_fourier_int = data['flux_fourier_int']
        temp_fourier_grad = data['temp_fourier_grad']
        kappa_fourier = data['kappa_fourier']

        #TL = np.tile(temp_fourier, (nT, 1))
        TL = np.zeros((nT, self.mesh.nle))
        Tnew = temp_fourier.copy()
        #TB = np.tile(temp_fourier, (self.n_side_per_elem, 1)).T
        TB = np.tile(np.ones(self.mesh.nle), (self.n_side_per_elem, 1)).T
        #----------------------------------------------------------------
        kappa_vec = [float(kappa_fourier)]
        #kappa_vec = [0]
        error_vec = [1.0]
        ms_vec = [[1, 0, 0]]
        if rank == 0 and self.verbose:
            print('   {0:7d} {1:20.4E} {2:25.4E} {3:10.2F} {4:10.2F} {5:10.2F}'
                  .format(1, float(kappa_fourier), 1, 1, 0, 0))

        #Save if only Fourier---
        flux_fourier = [-temp_fourier_grad[i] * self.elem_kappa_map[self.mesh.l2g[i]]
                        for i in range(self.mesh.nle)]
        data_save = {'flux_fourier': flux_fourier,
                     'temperature_fourier': temp_fourier,
                     'kappa_fourier': kappa_fourier,
                     'kappa_map': self.elem_kappa_map,
                     'temperature_fourier_int': temp_fourier_int,
                     'flux_fourier_int': flux_fourier_int}
        self.state = data_save
        if self.argv.setdefault('only_fourier', False) or \
           self.argv.setdefault('bte_max_iter', True) == 1:
            if self.save_state:
                pickle.dump(self.state,
                            open(argv.setdefault('filename', 'solver.p'), 'wb'),
                            protocol=pickle.HIGHEST_PROTOCOL)

    #Initialize data-----
    n_iter = len(kappa_vec)
    self.TL_old = TL.copy()
    self.delta_old = np.zeros_like(TL)
    TB_old = TB.copy()
    self.temp_old = self.TL_old.copy()
    #-------------------------------

    while n_iter < argv.setdefault('max_bte_iter', 10) and \
            error_vec[-1] > argv.setdefault('max_bte_error', 1e-2):
        a = time.time()
        self.n_iter = n_iter

        #Compute diffusive approximation---------------------------
        if self.multiscale:
            TDIFF, TDIFFp = np.zeros((2, n_mfp, self.mesh.nle))
            TDIFFGrad = np.zeros((n_mfp, self.mesh.n_le, 3))
            TDIFFGradp = np.zeros((n_mfp, self.mesh.n_le, 3))
            Jx, Jxp = np.zeros((2, n_mfp, self.mesh.n_le))
            Jy, Jyp = np.zeros((2, n_mfp, self.mesh.n_le))
            Jz, Jzp = np.zeros((2, n_mfp, self.mesh.n_le))
            block = self.mat['n_serial'] // comm.size + 1
            for kk in range(block):
                n = rank * block + kk
                if n < n_mfp:
                    #The first index is MFP anyway
                    kappa_value = self.mat['mfp'][n] * self.mat['mfp'][n] / 3.0
                    #We have to define this for each side
                    G = self.mat['mfp'][n] / 2.0 * np.ones(self.n_side_per_elem)
                    #dummy, TDIFFp[n], TDIFFGradp[n] = self.get_diffusive_suppression_function(kappa_value, TL=TL[0], TB=TB, G=G)
                    dummy, TDIFFp[n], TDIFFGradp[n] = \
                        self.solve_modified_fourier_law(kappa_value, TL=TL[0], TB=TB, G=G)
            comm.Allreduce([TDIFFp, MPI.DOUBLE], [TDIFF, MPI.DOUBLE], op=MPI.SUM)
            comm.Allreduce([TDIFFGradp, MPI.DOUBLE], [TDIFFGrad, MPI.DOUBLE], op=MPI.SUM)
            #Print diffusive kappa---------

        KBp, KB = np.zeros((2, self.mesh.nle, 3, 3))
        TL2p, TL2 = np.zeros((2, nT, self.mesh.nle))
        Tp, T = np.zeros((2, self.mesh.nle))
        Vp, V = np.zeros((2, self.mesh.nle, 3))
        Jp, J = np.zeros((2, self.mesh.nle, 3))
        TBp_minus, TB_minus = np.zeros((2, self.mesh.nle, self.n_side_per_elem))
        TBp_plus, TB_plus = np.zeros((2, self.mesh.nle, self.n_side_per_elem))
        ndifp = np.zeros(1)
        ndif = np.zeros(1)
        nbal = np.zeros(1)
        nbalp = np.zeros(1)
        KAPPA, KAPPAp = np.zeros((2, self.n_parallel, self.n_serial))
        eta_vec = np.zeros(self.n_parallel * self.n_serial)
        eta_vecp = np.zeros(self.n_parallel * self.n_serial)
        K2p = np.zeros(1)
        K2 = np.zeros(1)

        block = self.n_parallel // comm.size + 1
        for kk in range(block):
            index = rank * block + kk
            if index < self.n_parallel:
                #-------------------------------------------------------
                idx = [self.n_serial]
                if self.multiscale:
                    #Compute ballistic regime---
                    a = time.time()
                    temp_bal = self.get_bulk_data(
                        index * self.n_serial + self.n_serial - 1, TB, TL, Tnew)
                    eta_bal = np.ones(self.n_serial) * \
                        self.mesh.B_with_area_old.dot(temp_bal).sum()
                    #-------------------------------------
                    #Compute diffusive regime----------------------------------
                    eta_diff = []
                    for n in range(n_mfp):
                        vv = TDIFF[n] - self.mat['mfp'][n] * np.einsum(
                            'ci,i->c', TDIFFGrad[n],
                            self.control_angle[index * self.n_serial + 1])
                        eta_diff.append(self.mesh.B_with_area_old.dot(vv).sum())
                    #----------------------------------------------------------
                    #Compute the intersection index-----
                    idx = np.argwhere(np.diff(np.sign(eta_diff - eta_bal))).flatten()
                    if len(idx) == 0:
                        idx = [self.n_serial - 1]
                #------------------------------------------------------
                fourier = False
                eta_plot = []
                for n in range(self.n_serial)[idx[0]::-1]:
                    global_index = index * self.n_serial + n
                    if fourier:
                        ndifp[0] += 1
                        temp = TDIFF[n]
                        eta = eta_diff[n]
                    else:
                        temp = self.get_bulk_data(global_index, TB, TL, Tnew)
                        eta = self.mesh.B_with_area_old.dot(temp).sum()
                        if index == 11:
                            eta_plot.append(eta)
                        #if self.multiscale:
                        #    if abs(eta_diff[n] - eta) / abs(eta) < 1e-2:
                        #        fourier = True
                    #test---
                    eta_vecp[global_index] = eta
                    Am, Ap = self.get_boundary_matrices(global_index)
                    TBp_minus += Am
                    TBp_plus += np.einsum('es,e->es', Ap, temp)
                    TL2p += np.outer(self.mat['B'][:, global_index], temp)
                    Tp += temp * self.mat['TCOEFF'][global_index]
                    kdir = self.mat['kappa_directional'][global_index]
                    K2p += np.array([eta * np.dot(kdir, self.mesh.applied_grad)])
                    KAPPAp[index, n] = np.array([eta * np.dot(kdir, self.mesh.applied_grad)])
                    Jp += np.outer(temp, kdir) * 1e9
                    #Experimental-----------------------------------------
                    aa = sparse.COO([0, 1, 2], self.control_angle[global_index],
                                    shape=(3,)).todense()
                    mfp = self.mat['mfp'][global_index]
                    KBp += mfp * np.einsum('i,j,c->cij', aa, kdir, temp)
                #-------------------------------------------------------
                #Ballistic component
                ballistic = False
                for n in range(self.n_serial)[idx[0] + 1:]:
                    global_index = index * self.n_serial + n
                    if ballistic:
                        nbalp[0] += 1
                        temp = temp_bal
                        eta = eta_bal[n]
                    else:
                        temp = self.get_bulk_data(global_index, TB, TL, Tnew)
                        #self.temp[global_index] = temp
                        eta = self.mesh.B_with_area_old.dot(temp).sum()
                        if self.multiscale:
                            if abs(eta_bal[n] - eta) / abs(eta) < 1e-2:
                                ballistic = True
                    Am, Ap = self.get_boundary_matrices(global_index)
                    TBp_minus += Am
                    TBp_plus += np.einsum('es,e->es', Ap, temp)
                    TL2p += np.outer(self.mat['B'][:, global_index], temp)
                    Tp += temp * self.mat['TCOEFF'][global_index]
                    kdir = self.mat['kappa_directional'][global_index]
                    K2p += np.array([eta * np.dot(kdir, self.mesh.applied_grad)])
                    KAPPAp[index, n] = np.array([eta * np.dot(kdir, self.mesh.applied_grad)])
                    Jp += np.outer(temp, kdir) * 1e9
                    eta_vecp[global_index] = eta

        comm.Allreduce([K2p, MPI.DOUBLE], [K2, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([TL2p, MPI.DOUBLE], [TL2, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([TBp_minus, MPI.DOUBLE], [TB_minus, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([TBp_plus, MPI.DOUBLE], [TB_plus, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([Tp, MPI.DOUBLE], [T, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([Jp, MPI.DOUBLE], [J, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([ndifp, MPI.DOUBLE], [ndif, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([nbalp, MPI.DOUBLE], [nbal, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([KAPPAp, MPI.DOUBLE], [KAPPA, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([eta_vecp, MPI.DOUBLE], [eta_vec, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([KBp, MPI.DOUBLE], [KB, MPI.DOUBLE], op=MPI.SUM)

        kappa_vec.append(abs(K2[0] * self.kappa_factor))
        error_vec.append(abs(kappa_vec[-1] - kappa_vec[-2]) /
                         abs(max([kappa_vec[-1], kappa_vec[-2]])))
        #----------------
        TB_new = TB_plus.copy()
        for i in range(np.shape(TB_new)[1]):
            for n in range(len(TB_plus.T[i])):
                if TB_plus[n, i] != 0:
                    TB_new[n, i] /= TB_minus[n, i]
        TB = self.alpha * TB_new + (1 - self.alpha) * TB_old
        TB_old = TB.copy()
        Tnew = T.copy()
        TL = self.alpha * TL2.copy() + (1 - self.alpha) * self.TL_old
        self.TL_old = TL.copy()
        n_iter += 1

        #Thermal conductivity
        if rank == 0:
            ndif = ndif[0] / (self.n_serial * self.n_parallel)
            nbal = nbal[0] / (self.n_serial * self.n_parallel)
            nbte = 1 - ndif - nbal
            ms_vec.append([ndif, nbte, nbal])
            kappa_current = kappa_vec[-1]
            if self.verbose:
                print('   {0:7d} {1:20.4E} {2:25.4E} {3:10.2F} {4:10.2F} {5:10.2F}'
                      .format(n_iter, kappa_current, error_vec[-1], ndif, nbte, nbal))
            data = {'kappa_vec': kappa_vec,
                    'temperature': T,
                    'pseudogradient': self.mesh.compute_grad(T),
                    'flux': J,
                    'temperature_fourier': temp_fourier,
                    'flux_fourier': -self.mat['kappa_bulk_tot'] * temp_fourier_grad,
                    'kappa': kappa_vec[-1],
                    'kappa_space': KB}
            data.update({'TB': TB,
                         'TL': TL,
                         'error_vec': error_vec,
                         'ms_vec': ms_vec,
                         'temp_fourier_grad': temp_fourier_grad,
                         'eta': eta_vec})
            self.state = data
            if self.save_state:
                pickle.dump(self.state,
                            open(argv.setdefault('filename', 'solver.p'), 'wb'),
                            protocol=pickle.HIGHEST_PROTOCOL)
        else:
            data = None
        self.state = MPI.COMM_WORLD.bcast(data, root=0)
        #print(time.time() - a)

    if rank == 0 and self.verbose:
        print('   ---------------------------------------------------------------------------------------')
        print(' ')
def clustering(interactive: Interactive, api: API):
    window = api.application.document_windows[0]
    target_data_item = window.target_data_item
    ctx = iface.get_context()
    ds = iface.dataset_from_data_item(ctx, target_data_item)
    fy, fx = tuple(ds.shape.sig)
    y, x = tuple(ds.shape.nav)

    # roi = np.random.choice([True, False], tuple(ds.shape.nav), p=[0.01, 0.99])
    # We only sample 5 % of the frames for the std deviation map
    # since the UDF still needs optimization.
    std_roi = np.random.choice([True, False], tuple(ds.shape.nav), p=[0.05, 0.95])
    roi = np.ones((y, x), dtype=bool)
    # roi = np.zeros((y, x), dtype=bool)
    # roi[:, :50] = True

    stddev_res = run_stddev(ctx=ctx, dataset=ds, roi=std_roi * roi)
    ref_frame = stddev_res['std']
    # sum_res = ctx.run_udf(udf=SumUDF(), dataset=ds)
    # ref_frame = sum_res['intensity'].data

    update_data(target_data_item, ref_frame)

    peaks = peak_local_max(ref_frame, min_distance=3, num_peaks=500)
    masks = sparse.COO(
        shape=(len(peaks), fy, fx),
        coords=(range(len(peaks)), peaks[..., 0], peaks[..., 1]),
        data=1,
    )
    feature_udf = ApplyMasksUDF(
        mask_factories=lambda: masks,
        mask_dtype=np.uint8,
        mask_count=len(peaks),
        use_sparse=True,
    )

    feature_res = ctx.run_udf(udf=feature_udf, dataset=ds, roi=roi)
    f = feature_res['intensity'].raw_data.astype(np.float32)
    f = np.log(f - np.min(f) + 1)
    feature_vector = f / np.abs(f).mean(axis=0)

    # too slow
    # nion_peaks = peaks / tuple(ds.shape.sig)
    # with api.library.data_ref_for_data_item(target_data_item):
    #     for p in nion_peaks:
    #         target_data_item.add_ellipse_region(*p, 0.01, 0.01)

    connectivity = scipy.sparse.csc_matrix(
        grid_to_graph(
            # Transposed!
            n_x=y,
            n_y=x,
        )
    )
    roi_connectivity = connectivity[roi.flatten()][:, roi.flatten()]
    threshold = interactive.get_float("Cluster distance threshold: ", 10)
    clusterer = AgglomerativeClustering(
        affinity='euclidean',
        distance_threshold=threshold,
        n_clusters=None,
        linkage='ward',
        connectivity=roi_connectivity,
    )
    clusterer.fit(feature_vector)
    labels = np.zeros((y, x), dtype=np.int32)
    labels[roi] = clusterer.labels_ + 1
    new_data = api.library.create_data_item_from_data(labels)
    window.display_data_item(new_data)
def test_raise_on_nd_data(s1):
    with pytest.raises(ValueError):
        sparse.COO(s1.coords, s1.data[:, None], shape=(2, 3, 4))
def test_raise_on_nd_data():
    s1 = sparse.random((2, 3, 4), density=0.5)
    with pytest.raises(ValueError):
        sparse.COO(s1.coords, s1.data[:, None], shape=(2, 3, 4))
def test_array_as_shape():
    coords = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]
    data = [10, 20, 30, 40, 50]

    s = sparse.COO(coords, data, shape=np.array((5, 5)))
def get_bulk_data(self, global_index, TB, TL, Tnew):
    index_irr = self.mat['temp_vec'][global_index]
    aa = sparse.COO([0, 1, 2], self.control_angle[global_index], shape=(3,))
    mfp = self.mat['mfp'][global_index]
    irr_angle = self.mat['angle_map'][global_index]

    # LU factors are cached per irreducible angle, so look them up by
    # irr_angle (the same key they are stored under below).
    if self.keep_lu and irr_angle in self.lu:
        lu = self.lu[irr_angle]
    else:
        if not irr_angle == self.old_index:
            self.old_index = irr_angle
            if not os.path.exists(self.cache + '/A_' + str(irr_angle) + '.npz'):
                test2 = sparse.tensordot(self.mesh.N, aa, axes=1)
                AP = test2.clip(min=0)
                AM = test2 - AP
                AP = spdiags(np.sum(AP, axis=1).todense(), 0,
                             self.mesh.nle, self.mesh.nle, format='csc')
                self.mesh.B = self.mesh.B.tocsc()
                self.P = np.sum(np.multiply(AM, self.mesh.B), axis=1).todense()
                tmp = sparse.tensordot(self.mesh.CM, aa, axes=1).todense()
                HW_PLUS = tmp.clip(min=0)
                self.HW_MINUS = HW_PLUS - tmp
                BP = spdiags(np.sum(HW_PLUS, axis=1), 0,
                             self.mesh.nle, self.mesh.nle, format='csc')
                self.A = AP + AM + BP
                if self.save_data:
                    self.P.dump(self.cache + '/P_' + str(irr_angle) + '.p')
                    sio.save_npz(self.cache + '/A_' + str(irr_angle) + '.npz', self.A)
                    self.HW_MINUS.dump(
                        open(self.cache + '/HW_MINUS_' + str(irr_angle) + '.npz', 'wb'))
            else:
                self.P = np.load(open(self.cache + '/P_' + str(irr_angle) + '.p', 'rb'),
                                 allow_pickle=True)
                self.A = sparse.load_npz(self.cache + '/A_' + str(irr_angle) + '.npz')
                self.HW_MINUS = np.load(
                    open(self.cache + '/HW_MINUS_' + str(irr_angle) + '.npz', 'rb'),
                    allow_pickle=True)
        F = scipy.sparse.eye(self.mesh.nle, format='csc') + self.A * mfp
        lu = splu(F.tocsc())
        if self.keep_lu:
            self.lu[irr_angle] = lu

    boundary = np.sum(np.multiply(TB, self.HW_MINUS), axis=1)
    #RHS = mfp * (self.P + boundary) + Tnew + TL[index_irr]
    #RHS = mfp * boundary + np.ones(self.mesh.nle)
    RHS = mfp * np.ones(self.mesh.nle) + np.ones(self.mesh.nle)
    temp = lu.solve(RHS)
    return temp