def line_segment(X0, X1):
    r"""
    Calculate the voxel coordinates of a straight line between the two given
    end points

    Parameters
    ----------
    X0 and X1 : array_like
        The [x, y] or [x, y, z] coordinates of the start and end points of
        the line.

    Returns
    -------
    coords : list of lists
        A list of lists containing the X, Y, and Z coordinates of all voxels
        that should be drawn between the start and end points to create a
        solid line.
    """
    X0 = sp.around(X0).astype(int)
    X1 = sp.around(X1).astype(int)
    if len(X0) == 3:
        L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]],
                                 [X1[2]-X0[2]]])) + 1
        x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
        y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
        z = sp.rint(sp.linspace(X0[2], X1[2], L)).astype(int)
        return [x, y, z]
    else:
        L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]]])) + 1
        x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
        y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
        return [x, y]
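# Usage sketch (not from the original source): trace a voxel line through a
# small 3-D array. Assumes `sp` is bound to the legacy `import scipy as sp`
# namespace these snippets rely on (modern code would use `import numpy as sp`,
# which provides the same around/amax/rint/linspace functions).
x, y, z = line_segment([0, 0, 0], [5, 3, 1])
im = sp.zeros((6, 4, 2), dtype=bool)
im[x, y, z] = True  # voxels along the straight line are now marked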
def test_distance_center():
    shape = sp.array([7, 5, 9])
    spacing = sp.array([2, 1, 0.5])
    pn = OpenPNM.Network.Cubic(shape=shape, spacing=spacing)
    sx, sy, sz = spacing
    center_coord = sp.around(topology.find_centroid(pn['pore.coords']), 7)
    cx, cy, cz = center_coord
    coords = pn['pore.coords']
    x, y, z = coords.T
    coords = sp.concatenate((coords, center_coord.reshape((1, 3))))
    pn['pore.center'] = False
    mask1 = (x <= (cx + sx/2)) * (y <= (cy + sy/2)) * (z <= (cz + sz/2))
    mask2 = (x >= (cx - sx/2)) * (y >= (cy - sy/2)) * (z >= (cz - sz/2))
    center_pores_mask = pn.Ps[mask1 * mask2]
    pn['pore.center'][center_pores_mask] = True
    center = pn.Ps[pn['pore.center']]
    L1 = sp.amax(topology.find_pores_distance(network=pn, pores1=center,
                                              pores2=pn.Ps))
    L2 = sp.amax(topology.find_pores_distance(network=pn, pores1=pn.Ps,
                                              pores2=pn.Ps))
    l1 = ((shape[0] - 1) * sx) ** 2
    l2 = ((shape[1] - 1) * sy) ** 2
    l3 = ((shape[2] - 1) * sz) ** 2
    L3 = sp.sqrt(l1 + l2 + l3)
    assert sp.around(L1 * 2, 7) == sp.around(L2, 7)
    assert sp.around(L2, 7) == sp.around(L3, 7)
def getDirectLight(latitude, longitude, jourJul, startH, stopH,
                   step=30, decalSun=1, decalGMT=0):
    # startH and stopH are the starting and stopping hours for the light,
    # given in hours
    # step is the time step, given in minutes
    # decalGMT gives the time-zone offset, expressed in hours
    seq = sp.Sequence()
    hdeb = seq.heureTSV(jourJul, startH, decalSun, decalGMT, longitude)
    hfin = seq.heureTSV(jourJul, stopH, decalSun, decalGMT, longitude)
    az, el, time = seq.positionSoleil(step, radians(latitude), jourJul,
                                      hdeb, hfin)
    sw = 0
    for h in el:
        sw += 0.7**(1. / sin(h)) * sin(h)
    w = [0.7**(1. / sin(h)) * sin(h) / sw for h in el]
    tot = 0
    for s in w:
        tot += s
    if round(tot, 1) != 1.0:
        print("sum weight:", tot)
    return [(around(degrees(az[i]), 2), around(degrees(el[i]), 2), w[i])
            for i in range(len(az))]
def test_domain_dimensions(self):
    face1 = self.net.pores('bottom_boundary')
    face2 = self.net.pores('top_boundary')
    assert sp.around(self.net.domain_length(face1, face2) / self.scale,
                     3) == 1.0
    assert sp.around(self.net.domain_area(face1) / self.scale**2, 3) == 1.0
def binary_errors(CS_object, nonzero_bounds=[0.7, 1.3], zero_bound=1./25):
    Nn = CS_object.Nn
    mu_dSs = CS_object.mu_dSs
    sparse_idxs = CS_object.idxs[0]
    errors_nonzero = 0
    errors_zero = 0
    for iN in range(Nn):
        if iN in sparse_idxs:
            scaled_estimate = 1.*CS_object.dSs_est[iN]/CS_object.dSs[iN]
            if nonzero_bounds[0] < scaled_estimate < nonzero_bounds[1]:
                errors_nonzero += 1
        else:
            if abs(CS_object.dSs_est[iN]) < abs(mu_dSs*zero_bound):
                errors_zero += 1
    errors = dict()
    # Despite the names, these are the percentages of components recovered
    # *within* the accepted bounds.
    errors['errors_nonzero'] = sp.around(1.*errors_nonzero /
                                         len(sparse_idxs)*100., 2)
    errors['errors_zero'] = sp.around(1.*errors_zero /
                                      (Nn - len(sparse_idxs))*100., 2)
    return errors
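# Self-contained sketch of calling binary_errors with a mocked CS_object
# (hypothetical values; the real object comes from the compressed-sensing
# code these snippets belong to). `import numpy as sp` stands in for the
# legacy scipy namespace the function uses internally.
import numpy as sp
from types import SimpleNamespace

mock = SimpleNamespace(Nn=5, mu_dSs=1.0,
                       idxs=[sp.array([0, 2])],
                       dSs=sp.array([1.0, 0.0, 2.0, 0.0, 0.0]),
                       dSs_est=sp.array([0.9, 0.01, 2.5, 0.0, 0.03]))
print(binary_errors(mock))
# -> {'errors_nonzero': 100.0, 'errors_zero': 100.0}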
def test_mesh_surface_area(self):
    region = self.regions == self.regions.max()
    mesh = ps.tools.mesh_region(region)
    a = ps.metrics.mesh_surface_area(mesh)
    assert sp.around(a, decimals=2) == 258.3
    b = ps.metrics.mesh_surface_area(verts=mesh.verts, faces=mesh.faces)
    assert sp.around(b, decimals=2) == sp.around(a, decimals=2)
def generate(self, **params):
    r'''
    radius : int
        The physical radius of the network. The number of pores is
        determined from this and the lattice_spacing parameter.
    lattice_spacing : float
        The lattice constant for the network, used to scale distance
        between pores.
    '''
    self._logger.info(sys._getframe().f_code.co_name +
                      ": Start of network topology generation")
    # Create cylindrical template, then call normal template procedure
    self._radius = params['radius']
    self._length = params['length']
    Nr = int(sp.around(self._radius/params['lattice_spacing']))
    Lz = int(sp.around(self._length/params['lattice_spacing']))
    temp = sp.ones((2*Nr, 2*Nr, Lz))
    temp[Nr, Nr, :] = 0
    temp = spim.distance_transform_edt(temp)
    temp = temp < Nr
    params['template'] = temp
    # Call standard generation protocol
    self._generate_setup(**params)
    self._generate_pores()
    self._generate_throats()
    self._add_boundaries()
    self._add_labels()
    self._logger.debug(sys._getframe().f_code.co_name +
                       ": Network generation complete")
    return self
def xover(chrom, N, p):
    """Single point crossover with probability N, precision p
    """
    N = round(chrom.shape[0]*N)
    index1 = scipy.arange(chrom.shape[0])
    index2 = scipy.unique(scipy.around(scipy.rand(chrom.shape[0],) *
                                       chrom.shape[0]))[0:chrom.shape[0]//2]
    sel1, sel2 = [], []
    for i in range(len(index1)):
        if index1[i] not in index2:
            sel1.append(index1[i])
        else:
            sel2.append(index1[i])
    select1 = sel1[0:min([int(round(len(sel1)*N)), int(round(len(sel2)*N))])]
    select2 = sel2[0:min([int(round(len(sel1)*N)), int(round(len(sel2)*N))])]
    # set xover points
    xoverpnt = scipy.around(scipy.rand(len(select1),)*(chrom.shape[1]-1))
    # perform xover
    nchrom = copy.deepcopy(chrom)
    for i in range(len(select1)):
        try:
            slice1 = chrom[select1[i], 0:int(xoverpnt[i])]
            slice2 = chrom[select2[i], 0:int(xoverpnt[i])]
            nchrom[select2[i], 0:int(xoverpnt[i])] = slice1
            nchrom[select1[i], 0:int(xoverpnt[i])] = slice2
        except IndexError:
            pass  # leave nchrom unchanged for this pair
    return nchrom
def write_extraction_mask(self):
    """ write both the .roi file and the tif pages

    ROI file format specification: each row a ROI, first col: kw for roi type

    if circle: label, layer, pos x, pos y, diameter
    if poly: label, layer, pos x_1, pos y_1, ... pos x_n, pos y_n
    """
    outpath = self.SaveFileDialog(title='saving ROIs',
                                  default_dir=self.Main.Options.general['cwd'],
                                  extension='*.roi')
    outpath = self.append_extension(outpath, '.roi')

    fh = open(outpath, 'w')

    # iterate over ROIs
    for i, ROI in enumerate(self.Main.ROIs.ROI_list):
        label = str(ROI.label)

        if type(ROI) == myCircleROI:
            x = str(sp.around(ROI.center[0], decimals=2))
            y = str(sp.around(ROI.center[1], decimals=2))
            d = str(sp.around(ROI.diameter, decimals=2))
            fh.write('\t'.join(['circle', label, x, y, d, '\n']))

        if type(ROI) == myPolyLineROI:
            fh.write('\t'.join(['polygon', label]))
            fh.write('\t')
            handle_pos = [tup[1] for tup in ROI.getSceneHandlePositions()]
            pos_mapped = [ROI.ViewBox.mapToView(pos) for pos in handle_pos]
            for pos in pos_mapped:
                x = pos.x()
                y = pos.y()
                fh.write('\t'.join([str(sp.around(x, decimals=2)),
                                    str(sp.around(y, decimals=2))]))
                fh.write('\t')
            fh.write('\n')

    fh.close()
    print("saved ROIs in .roi format to", outpath)

    # extraction mask
    self.Main.Processing.calc_extraction_mask()
    io.save_tstack(self.Main.Data.extraction_mask.astype('uint16'),
                   os.path.splitext(outpath)[0] + '_mask.tif')
    print("saved ROIs in .tif format to", outpath)
def print_msg(msg, log=True):
    """prints the msg string with elapsed time and current memory usage.

    Args:
        msg (str): the string to print
        log (bool): write the msg to the log as well
    """
    if os.name == 'posix':
        mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
        mem_used = sp.around(mem_used, 2)
        memstr = '(' + str(mem_used) + ' GB): '
        timestr = tp.humantime(sp.around(time.time() - t0, 2))
        print(colorama.Fore.CYAN + timestr + '\t' + memstr + '\t' +
              colorama.Fore.GREEN + msg)
        if log:
            with open('log.log', 'a+') as fH:
                fH.write(timestr + '\t' + memstr + '\t' + msg + '\n')
    else:
        timestr = tp.humantime(sp.around(time.time() - t0, 2))
        print(colorama.Fore.CYAN + timestr + '\t' +
              colorama.Fore.GREEN + msg)
        if log:
            with open('log.log', 'a+') as fH:
                fH.write(timestr + '\t\t' + msg + '\n')
def do_local(local_graph):
    local_n = local_graph.number_of_nodes()
    local_P = generate_transition_matrix(local_graph, local_n)
    print('Local probability transition matrix:')
    print(sp.around(local_P, 3))
    print('Done getting local P')
    w, vr = equilibrium_distribution(local_P)
    # Find the index of the eigenvalue 1
    index = sp.where((w > 0.999999999999999) * (w < 1.000000000000001))[0]
    print(index)
    # Step through the candidate eigenvalues of 1 until one yields a
    # non-singular fundamental matrix
    eigen_index = 0
    while True:
        try:
            chosen_index = index[eigen_index]
        except IndexError:
            print('No eigenvalue of 1 exists, or no eigenvector of 1 exists')
            print('which allows the fundamental matrix to be non-singular')
            raise
        local_eq_pi = vr[:, chosen_index]
        print('Local eigenvector:')
        print(local_eq_pi)
        print('Done getting local pi')
        print('Finiteness check:', end=' ')
        try:
            local_eq_pi = sp.asarray_chkfinite(local_eq_pi)
            print('OK')
        except ValueError:
            eigen_index += 1
            continue
        local_W = equilibrium_transition_matrix(local_eq_pi, local_n)
        print('Done getting local W')
        try:
            local_Z = fundamental_matrix(local_P, local_W, local_n)
            print('Done getting local Z')
            break
        except linalg.LinAlgError:
            eigen_index += 1
    local_Ei_Ti, local_Ei_Tj, local_Epi_Ti = hitting_times(local_eq_pi,
                                                           local_Z, local_n)
    print('Done getting local hitting times')
    return local_Ei_Ti, local_Ei_Tj, local_Epi_Ti
def _get_fibre_slice(self, plane=None, index=None):
    r"""
    Plot an image of a slice through the fibre image.
    plane contains percentage values of the length of the image in each axis

    Parameters
    ----------
    plane : array_like
        List of 3 values, [x, y, z], 2 must be zero and the other must be
        between zero and one representing the fraction of the domain to
        slice along the non-zero axis
    index : array_like
        similar to plane but instead of the fraction an index of the image
        is used
    """
    if hasattr(self, '_fibre_image') is False:
        logger.warning('This method only works when a fibre image exists, '
                       'please run make_fibre_image')
        return None
    if plane is None and index is None:
        logger.warning('Please provide either a plane array or index array')
        return None
    if self._fibre_image is None:
        self.make_fibre_image()

    if plane is not None:
        if 'array' not in plane.__class__.__name__:
            plane = sp.asarray(plane)
        if sp.sum(plane == 0) != 2:
            logger.warning('Plane argument must have two zero valued '
                           'elements to produce a planar slice')
            return None
        l = sp.asarray(sp.shape(self._fibre_image))
        s = sp.around(plane*l).astype(int)
    elif index is not None:
        if 'array' not in index.__class__.__name__:
            index = sp.asarray(index)
        if sp.sum(index == 0) != 2:
            logger.warning('Index argument must have two zero valued '
                           'elements to produce a planar slice')
            return None
        if 'int' not in str(index.dtype):
            index = sp.around(index).astype(int)
        s = index

    if s[0] != 0:
        slice_image = self._fibre_image[s[0], :, :]
    elif s[1] != 0:
        slice_image = self._fibre_image[:, s[1], :]
    else:
        slice_image = self._fibre_image[:, :, s[2]]

    return slice_image
def generate_transition_matrix(graph, n=None):
    """
    Generates the probability transition matrix for a given graph.

    It is assumed that all edges have weights and that the probability of
    transition from one node to a neighbouring node is proportional to the
    weight of the edge connecting the two nodes, normalised over all current
    outgoing edges.

    Parameters
    ----------
    graph : networkx.Graph
        Graph whose transition matrix is to be found. It is assumed that
        the weight of each edge can be found under the attribute name
        ``weight``.
    n : int (optional)
        Number of nodes in the graph.

    Returns
    -------
    P : numpy.ndarray
        Markov probability transition matrix for the given graph

    Notes
    -----
    This method works only for an undirected graph. If the adjacency matrix
    is represented as A[i, j], then it is assumed that an edge "goes" from
    node j to node i. The transition matrix is appropriately defined. The
    equilibrium distribution of states would therefore be the right
    eigenvectors of the resulting matrix.
    """
    # First, get the adjacency matrix of the graph
    P = sp.array(nx.adjacency_matrix(graph))
    print('Adjacency matrix:')
    print(sp.around(P))
    # We'll also need the number of nodes
    if n is None:
        n = graph.number_of_nodes()
    # The probability transition matrix is simply the adjacency matrix
    # itself, but with values along each column scaled so that every column
    # sums up to unity.
    k = sp.dot(sp.ones((1, n)), P)  # Vector of degrees
    print('Vector of degrees:', k)
    # Stack degrees vertically:
    K = sp.multiply(sp.ones((n, 1)), k)
    P = P / K  # plain division; in-place /= would fail on an integer array
    print('Left stochastic check:', sp.dot(sp.ones((1, n)), P))
    # If any elements of K were zero, the corresponding elements of P would
    # go infinite. But logically, if the degree of a vertex is zero, then
    # there is no way to get to it, or out of it. Therefore, it should just
    # appear as a zero in the probability transition matrix. We explicitly
    # set this.
    z = sp.where(sp.isnan(P))
    P[z] = 0
    return sp.asarray_chkfinite(P)
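# Hedged usage sketch on a 3-node weighted path graph. Assumes
# `import networkx as nx`, and that `nx.adjacency_matrix` yields something
# `sp.array` can densify (older networkx returned a dense matrix; newer
# versions return a sparse matrix that needs .todense() first).
import networkx as nx

g = nx.Graph()
g.add_edge(0, 1, weight=1.0)
g.add_edge(1, 2, weight=2.0)
P = generate_transition_matrix(g)
# P is column-stochastic: P[i, j] is the probability of stepping j -> i.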
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
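# Quick check of get_cb_ticks (assumes the legacy `scipy as sp` namespace):
print(get_cb_ticks(sp.array([0.013, 0.42, 0.871])))
# -> [0.02, 0.44, 0.87]   (values <= 1.0 are snapped to 1/100 steps)
print(get_cb_ticks(sp.array([1.7, 4.2, 9.3])))
# -> [2.0, 6.0, 9.0]      (values > 1.0 are snapped to whole numbers)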
def binary_errors_dual_odor(CS_object, nonzero_bounds=[0.7, 1.3],
                            zero_bound=1./25):
    Nn = CS_object.Nn
    mu_dSs = CS_object.mu_dSs
    mu_dSs_2 = CS_object.mu_dSs_2
    sparse_idxs = CS_object.idxs[0]
    idxs_2 = CS_object.idxs_2
    errors_nonzero = 0
    errors_nonzero_2 = 0
    errors_zero = 0
    errors_zero_2 = 0

    for iN in range(Nn):
        if iN in sparse_idxs:
            if iN in idxs_2:
                scaled_estimate = 1.*CS_object.dSs_est[iN]/CS_object.dSs[iN]
                if nonzero_bounds[0] < scaled_estimate < nonzero_bounds[1]:
                    errors_nonzero_2 += 1
            else:
                scaled_estimate = 1.*CS_object.dSs_est[iN]/CS_object.dSs[iN]
                if nonzero_bounds[0] < scaled_estimate < nonzero_bounds[1]:
                    errors_nonzero += 1
        else:
            if abs(CS_object.dSs_est[iN]) < abs(mu_dSs*zero_bound):
                errors_zero += 1
            if CS_object.Kk_split != 0:
                if abs(CS_object.dSs_est[iN]) < abs(mu_dSs_2*zero_bound):
                    errors_zero_2 += 1

    errors = dict()
    # Save errors; special cases if split is 0 or full
    if set(idxs_2) == set(sparse_idxs):
        errors['errors_nonzero'] = 0
    else:
        errors['errors_nonzero'] = \
            sp.around(1.*errors_nonzero/(len(sparse_idxs) -
                                         len(idxs_2))*100, 2)
    if len(idxs_2) == 0:
        errors['errors_nonzero_2'] = 0
    else:
        errors['errors_nonzero_2'] = \
            sp.around(1.*errors_nonzero_2/len(idxs_2)*100., 2)
    errors['errors_zero'] = \
        sp.around(1.*errors_zero/(Nn - len(sparse_idxs))*100., 2)
    errors['errors_zero_2'] = \
        sp.around(1.*errors_zero_2/(Nn - len(sparse_idxs))*100., 2)
    return errors
def binary_errors_temporal_run(init_CS_object, dSs, dSs_est, mu_dSs,
                               nonzero_bounds=[0.7, 1.3], zero_bound=1./10,
                               dual=False):
    # Last index is the actual stimulus vector; first index is timepoint
    Nn = init_CS_object.Nn
    sparse_idxs = init_CS_object.idxs[0]
    if dual:
        idxs_2 = init_CS_object.idxs_2
        idxs_1 = []
        for idx in sparse_idxs:
            if idx not in idxs_2:
                idxs_1.append(idx)
    nT = dSs.shape[0]

    # Check dimension of the stimuli
    assert len(dSs.shape) == 2, "Need to pass rank-2 tensor for dSs; " \
        "first index is time, second is Nn"
    assert len(dSs_est.shape) == 2, "Need to pass rank-2 tensor for " \
        "dSs_est; first index is time, second is Nn"
    assert len(mu_dSs.shape) == 1, "Need to pass 1-rank array for mu_dSs"
    assert len(mu_dSs) == nT, "mu_dSs must be length nT=%s" % nT

    errors_nonzero = sp.zeros(nT)
    errors_zero = sp.zeros(nT)

    for iN in range(Nn):
        if iN in sparse_idxs:
            if dual and (iN in idxs_2):
                continue
            scaled_estimate = 1.*dSs_est[:, iN]/dSs[:, iN]
            errors_nonzero += (nonzero_bounds[0] < scaled_estimate) * \
                              (scaled_estimate < nonzero_bounds[1])
        else:
            zero_est = (sp.absolute(dSs_est[:, iN]) < abs(mu_dSs*zero_bound))
            errors_zero += zero_est

    errors = dict()
    if dual:
        errors['errors_nonzero'] = sp.around(1.*errors_nonzero /
                                             len(idxs_1)*100., 2)
    else:
        errors['errors_nonzero'] = sp.around(1.*errors_nonzero /
                                             len(sparse_idxs)*100., 2)
    errors['errors_zero'] = sp.around(1.*errors_zero /
                                      (Nn - len(sparse_idxs))*100., 2)
    return errors
def on_shifted_dwp_curves(self, t):
    a = P4Rm()
    if a.AllDataDict['model'] == 0:
        temp_1 = arange(2, len(a.ParamDict['dwp'])+1)
        temp_2 = temp_1 * t / (len(a.ParamDict['dwp']))
        P4Rm.ParamDict['x_dwp'] = t - temp_2
        shifted_dwp = a.ParamDict['dwp'][:-1:]
        temp_3 = in1d(around(a.ParamDict['depth'], decimals=3),
                      around(a.ParamDict['x_dwp'], decimals=3))
        temp_4 = a.ParamDict['DW_i'][temp_3]
        P4Rm.ParamDict['scale_dw'] = shifted_dwp / temp_4
        P4Rm.ParamDict['scale_dw'][a.ParamDict['scale_dw'] == 0] = 1.
        P4Rm.ParamDict['DW_shifted'] = shifted_dwp/a.ParamDict['scale_dw']
        P4Rm.ParamDict['dw_out'] = a.ParamDict['dwp'][-1]
    elif a.AllDataDict['model'] == 1:
        temp_1 = arange(0, len(a.ParamDict['dwp'])+1-3)
        temp_2 = temp_1 * t / (len(a.ParamDict['dwp'])-3)
        P4Rm.ParamDict['x_dwp'] = t - temp_2
        shifted_dwp = a.ParamDict['dwp'][1:-1:]
        temp_3 = in1d(around(a.ParamDict['depth'], decimals=3),
                      around(a.ParamDict['x_dwp'], decimals=3))
        temp_4 = a.ParamDict['DW_i'][temp_3]
        P4Rm.ParamDict['scale_dw'] = shifted_dwp / temp_4
        P4Rm.ParamDict['scale_dw'][a.ParamDict['scale_dw'] == 0] = 1.
        P4Rm.ParamDict['DW_shifted'] = shifted_dwp/a.ParamDict['scale_dw']
        temp_5 = array([a.ParamDict['dwp'][0], a.ParamDict['dwp'][-1]])
        P4Rm.ParamDict['dw_out'] = temp_5
    elif a.AllDataDict['model'] == 2:
        x_dw_temp = []
        x_dw_temp.append(t*(1-a.ParamDict['dwp'][1]))
        x_dw_temp.append(t*(1-a.ParamDict['dwp'][1] +
                            a.ParamDict['dwp'][2]/2))
        x_dw_temp.append(t*(1-a.ParamDict['dwp'][1] -
                            a.ParamDict['dwp'][3]/2))
        x_dw_temp.append(t*0.05)
        P4Rm.ParamDict['x_dwp'] = x_dw_temp
        y_dw_temp = []
        y_dw_temp.append(a.ParamDict['dwp'][0])
        y_dw_temp.append(1. - (1-a.ParamDict['dwp'][0])/2)
        y_dw_temp.append(1. - (1-a.ParamDict['dwp'][0])/2 -
                         (1-a.ParamDict['dwp'][6])/2)
        y_dw_temp.append(a.ParamDict['dwp'][6])
        P4Rm.ParamDict['DW_shifted'] = y_dw_temp
def __init__(self, shape, num_points=None, **kwargs):
    # Generate Delaunay tessellation from super class, then trim
    super().__init__(shape=shape, num_points=num_points, **kwargs)
    points = self['pore.coords']
    conns = self['throat.conns']
    # Find centroid of each pair of nodes
    c = points[conns]
    m = (c[:, 0, :] + c[:, 1, :])/2
    # Find radius of circle connecting each pair of nodes
    r = sp.sqrt(sp.sum((c[:, 0, :] - c[:, 1, :])**2, axis=1))/2
    # Use KD-Tree to find distance to nearest neighbors
    tree = sptl.cKDTree(points)
    n = tree.query(x=m, k=1)[0]
    # Identify throats whose centroid is not near an unconnected node
    g = sp.around(n, decimals=5) == sp.around(r, decimals=5)
    trim(self, throats=~g)
def test_transient_advection_diffusion(self):
    sf = op.algorithms.StokesFlow(network=self.net, phase=self.phase)
    sf.setup(quantity='pore.pressure',
             conductance='throat.hydraulic_conductance')
    sf.set_value_BC(pores=self.net.pores('back'), values=1)
    sf.set_value_BC(pores=self.net.pores('front'), values=0)
    sf.run()
    self.phase[sf.settings['quantity']] = sf[sf.settings['quantity']]

    ad = op.algorithms.TransientAdvectionDiffusion(network=self.net,
                                                   phase=self.phase)
    ad.setup(quantity='pore.concentration',
             diffusive_conductance='throat.diffusive_conductance',
             hydraulic_conductance='throat.hydraulic_conductance',
             pressure='pore.pressure',
             t_initial=0, t_final=100, t_step=1, t_output=50,
             t_tolerance=1e-20, s_scheme='powerlaw', t_scheme='implicit')
    ad.set_IC(0)
    ad.set_value_BC(pores=self.net.pores('back'), values=2)
    ad.set_value_BC(pores=self.net.pores('front'), values=0)
    ad.run()
    x = [0., 0., 0.,
         0.89653, 0.89653, 0.89653,
         1.53924, 1.53924, 1.53924,
         2., 2., 2.]
    y = sp.around(ad[ad.settings['quantity']], decimals=5)
    assert sp.all(x == y)
def voronoi_edges(shape: List[int], radius: int, ncells: int,
                  flat_faces: bool = True):
    r"""
    Create an image of the edges in a Voronoi tessellation

    Parameters
    ----------
    shape : array_like
        The size of the image to generate in [Nx, Ny, Nz] where Ni is the
        number of voxels in each direction.
    radius : scalar
        The radius to which Voronoi edges should be dilated in the final
        image.
    ncells : scalar
        The number of Voronoi cells to include in the tessellation.
    flat_faces : Boolean
        Whether the Voronoi edges should lie on the boundary of the image
        (True), or if edges outside the image should be removed (False).

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    print(78 * '―')
    print('voronoi_edges: Generating', ncells, 'cells')
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        shape = sp.full((3, ), int(shape))
    im = sp.zeros(shape, dtype=bool)
    base_pts = sp.rand(ncells, 3) * shape
    if flat_faces:
        # Reflect base points about each face so the tessellation is flat
        # at the image boundaries
        Nx, Ny, Nz = shape
        orig_pts = base_pts
        base_pts = sp.vstack((base_pts,
                              [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0]))
        base_pts = sp.vstack((base_pts,
                              [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0]))
        base_pts = sp.vstack((base_pts,
                              [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz]))
        base_pts = sp.vstack((base_pts, [-1, 1, 1] * orig_pts))
        base_pts = sp.vstack((base_pts, [1, -1, 1] * orig_pts))
        base_pts = sp.vstack((base_pts, [1, 1, -1] * orig_pts))
    vor = sptl.Voronoi(points=base_pts)
    vor.vertices = sp.around(vor.vertices)
    vor.vertices *= (sp.array(im.shape) - 1) / sp.array(im.shape)
    vor.edges = _get_Voronoi_edges(vor)
    for row in vor.edges:
        pts = vor.vertices[row].astype(int)
        if sp.all(pts >= 0) and sp.all(pts < im.shape):
            line_pts = line_segment(pts[0], pts[1])
            im[line_pts] = True
    # Dilate the edges to the requested radius
    im = spim.distance_transform_edt(~im) > radius
    return im
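# Hypothetical call (porespy-style): 50 Voronoi cells in a 100^3 voxel image,
# with edges dilated to a 5-voxel radius. Relies on line_segment (above) and
# the private _get_Voronoi_edges helper from the same module.
im = voronoi_edges(shape=[100, 100, 100], radius=5, ncells=50,
                   flat_faces=True)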
def test_compress_geom(self):
    b1 = self.net.pores('bottom_boundary')
    b2 = self.net.pores('top_boundary')
    height1 = self.net.domain_length(b1, b2)
    self.geo_vox.compress_geometry(factor=[1, 1, 0.5])
    height2 = self.net.domain_length(b1, b2)
    assert sp.around(height1/height2, 5) == 2.0
def convert2int(move):
    """ Converts continuous move to indices """
    debug_print('convert2int')
    move_index = scipy.around(move)
    return move_index.astype(scipy.integer)
def genPhenoCube(sim, Xr, vTotR=4e-3, nCausalR=10, pCommonR=0.8,
                 vTotBg=0.4, pHidd=0.6, pCommon=0.8):
    # region
    nCommonR = int(SP.around(nCausalR * pCommonR))
    # background
    vCommonBg = pCommon * vTotBg
    # noise
    vTotH = pHidd * (1 - vTotR - vTotBg)
    vTotN = (1 - pHidd) * (1 - vTotR - vTotBg)
    vCommonH = pCommon * vTotH
    all_settings = {'vTotR': vTotR, 'nCommonR': nCommonR,
                    'nCausalR': nCausalR,
                    'vTotBg': vTotBg, 'vCommonBg': vCommonBg,
                    'pCausalBg': 1., 'use_XX': True,
                    'vTotH': vTotH, 'vCommonH': vCommonH, 'nHidden': 10,
                    'vTotN': vTotN, 'vCommonN': 0.}
    Y, info = sim.genPheno(Xr, **all_settings)
    return Y, info
def display_data(X, width=None, save=False):
    from functools import reduce  # needed by the fold helpers below

    m, n = X.shape
    width = sp.int_(width or sp.around(sp.sqrt(n)))
    height = sp.int_(n / width)
    display_rows = sp.int_(sp.floor(sp.sqrt(m)))
    display_cols = sp.int_(sp.ceil(m / display_rows))

    def rightward(acc, curr):
        return sp.hstack([acc, curr])

    def downward(acc, curr):
        return sp.vstack([acc, curr])

    def merge(func, init):
        return lambda arr: reduce(func, arr, init)

    init_rightward = sp.matrix([]).reshape([height, 0])
    init_downward = sp.matrix([]).reshape([0, width * display_cols])

    img_list = [X[i].reshape([height, width]).T for i in range(0, m)]
    img_list_split = [img_list[i:i+display_cols]
                      for i in range(0, len(img_list), display_cols)]
    img = merge(downward, init_downward)(
        map(merge(rightward, init_rightward), img_list_split))

    plt.figure(1)
    plt.imshow(img, cmap='gray')
    plt.tick_params(labelbottom='off', labelleft='off')
    if save:
        plt.savefig('1.png')
    else:
        plt.show()
    return None
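# Sketch: render 16 random 8x8 tiles in a 4x4 grid (hypothetical data;
# assumes `matplotlib.pyplot as plt` and the legacy scipy-as-sp namespace
# used by the function itself).
X = sp.rand(16, 64)      # each row is one flattened 8x8 image
display_data(X, width=8)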
def plot_median_errors(RefinementLevels):
    for i in RefinementLevels[0].cases:
        x = []
        y = []
        print("Analyzing median error on:", i)
        for r in RefinementLevels:
            x.append(r.LUT.D_dim*r.LUT.P_dim)
            r.get_REL_ERR_SU2(i)
            y.append(r.SU2[i].median_ERR*100)

        x = sp.array(x)
        y = sp.array(y)
        y = y[sp.argsort(x)]
        x = x[sp.argsort(x)]

        # Fit a power law through the data to estimate the convergence order
        LHM = sp.ones((len(x), 2))
        RHS = sp.ones((len(x), 1))
        LHM[:, 1] = sp.log10(x)
        RHS[:, 0] = sp.log10(y)
        sols = sp.linalg.lstsq(LHM, RHS)
        b = -sols[0][1]

        plt.loglog(x, y,
                   label='%s, %s' % (i, r'$O(\frac{1}{N})^{%s}$'
                                     % str(sp.around(b, 2))),
                   basex=10, basey=10,
                   subsy=sp.linspace(10**(-5), 10**(-2), 20),
                   subsx=sp.linspace(10**(2), 10**(5), 50))
    plt.grid(which='both')
    plt.xlabel('Grid Nodes (N)')
    plt.ylabel('Median relative error [%]')
    return
def test_hybrid_advection_diffusion_diffusion(self):
    sf = op.algorithms.StokesFlow(network=self.net, phase=self.phase)
    sf.setup(quantity='pore.pressure',
             conductance='throat.hydraulic_conductance')
    sf.set_value_BC(pores=self.net.pores('back'), values=1)
    sf.set_value_BC(pores=self.net.pores('front'), values=0)
    sf.run()
    self.phase[sf.settings['quantity']] = sf[sf.settings['quantity']]

    ad = op.algorithms.AdvectionDiffusion(network=self.net,
                                          phase=self.phase)
    ad.setup(quantity='pore.concentration',
             diffusive_conductance='throat.diffusive_conductance',
             hydraulic_conductance='throat.hydraulic_conductance',
             pressure='pore.pressure', s_scheme='hybrid')
    ad.set_value_BC(pores=self.net.pores('back'), values=2)
    ad.set_value_BC(pores=self.net.pores('front'), values=0)
    ad.run()
    x = [0., 0., 0.,
         0.89908, 0.89908, 0.89908,
         1.54128, 1.54128, 1.54128,
         2., 2., 2.]
    y = sp.around(ad[ad.settings['quantity']], decimals=5)
    assert sp.all(x == y)
def local_thickness(im):
    r"""
    For each voxel, this function calculates the radius of the largest
    sphere that both engulfs the voxel and fits entirely within the
    foreground. This is not the same as a simple distance transform, which
    finds the largest sphere that could be *centered* on each voxel.

    Parameters
    ----------
    im : array_like
        A binary image with the phase of interest set to True

    Returns
    -------
    An image with the pore size values in each voxel

    Notes
    -----
    The term *foreground* is used since this function can be applied to
    both pore space or the solid, whichever is set to True.
    """
    from skimage.morphology import cube
    if im.ndim == 2:
        from skimage.morphology import square as cube
    dt = spim.distance_transform_edt(im)
    sizes = sp.unique(sp.around(dt, decimals=0))
    im_new = sp.zeros_like(im, dtype=float)
    for r in tqdm(sizes):
        im_temp = dt >= r
        im_temp = spim.distance_transform_edt(~im_temp) <= r
        im_new[im_temp] = r
    # Trim outer edge of features to remove noise
    im_new = spim.binary_erosion(input=im, structure=cube(1))*im_new
    return im_new
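# Hedged example on a random 2-D image (the function switches to disks via
# skimage's `square` in 2-D). Assumes `scipy.ndimage as spim`, `tqdm`, and
# the legacy scipy-as-sp namespace, as in the snippet itself.
im = sp.rand(100, 100) < 0.7   # ~70% foreground
lt = local_thickness(im)
# lt[i, j] is the radius of the largest disk that covers pixel (i, j) and
# still fits entirely inside the foreground.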
def rekernel(self):
    dcontrol = self.control[ord('d')]
    econtrol = self.control[ord('e')]
    rcontrol = self.control[ord('r')]
    radius = rcontrol.val
    dvalue = dcontrol.val
    evalue = econtrol.val
    rmax = rcontrol.limit[2]
    if self.rlast != radius:
        inner, outer = float(radius-1), float(radius)
        shape = (self.edge, self.edge)
        self.radii = list(product(arange(-rmax, rmax+1, 1.0), repeat=2))
        self.radii = array([sqrt(x*x+y*y)
                            for x, y in self.radii]).reshape(shape)
        if True:  # smooth difference-of-Gaussians ring
            self.negative = -exp(-dvalue*(self.radii-outer)**2)
            self.positive = +exp(-dvalue*(self.radii-inner)**2)
        else:     # hard ring (unused alternative)
            self.radii = around(self.radii)
            self.negative = zeros((self.edge, self.edge), dtype=float)
            self.negative[self.radii == outer] = -1.0
            self.positive = zeros(shape, dtype=float)
            self.positive[self.radii == inner] = +1.0
        self.negative /= fabs(self.negative.sum())
        self.positive /= fabs(self.positive.sum())
        self.kernel = self.negative + self.positive
        self.rlast = radius
    if self.elast != evalue:
        self.gauss = exp(-evalue * self.radii**2)
        self.gauss /= self.gauss.sum()
        self.elast = evalue
def crtpop(ni, nv, prec):
    """Create a random population array of size ni by nv in the range
    0:prec-1. Use prec = 2 to create a binary string
    """
    pop = scipy.around(scipy.rand(ni, nv)*(prec-1))
    return pop
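# Example: a 10-chromosome binary population with 8 genes each (assumes
# `import scipy` with the legacy top-level rand/around functions):
pop = crtpop(10, 8, 2)   # entries are 0.0 or 1.0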
def wheelEvent(self, evt):
    # reimplementation
    d = sp.around(evt.angleDelta().y() / 120.0)
    # the 120-unit wheel step may differ between machines/mice
    updated_frame = self.Main.Data_Display.Frame_Visualizer.frame - d
    if 0 <= updated_frame < self.Main.Data.nFrames:
        self.Main.Data_Display.Frame_Visualizer.frame -= d
        self.update_vline(self.Main.Data_Display.Frame_Visualizer.frame)
        self.Main.Data_Display.Frame_Visualizer.update_frame()
def do_global(graph):
    n = graph.number_of_nodes()
    print(n)
    P = generate_transition_matrix(graph, n)
    print('Probability transition matrix:')
    print(sp.where(P < 0.001, sp.zeros((n, n)), sp.around(P, 3)))
    print('Done getting P')
    w, vr = equilibrium_distribution(P)
    # Find the index of the eigenvalue 1
    index = sp.where((w > 0.999999999999999) * (w < 1.000000000000001))[0]
    print(index)
    # Instead of randomly choosing the first eigenvalue, choose the first
    # eigenvalue which yields a non-singular fundamental matrix
    eigen_index = 0
    while True:
        try:
            chosen_index = index[eigen_index]
            print('Trying with index', chosen_index)
        except IndexError:
            print('No eigenvalue of 1 exists, or no eigenvector of 1 exists')
            print('which allows the fundamental matrix to be non-singular')
            raise
        eq_pi = vr[:, chosen_index]
        print('Eigenvector:')
        print(eq_pi)
        print('Done getting pi')
        print('Finiteness check:', end=' ')
        try:
            eq_pi = sp.asarray_chkfinite(eq_pi)
            print('OK')
        except ValueError:
            eigen_index += 1
            continue
        W = equilibrium_transition_matrix(eq_pi, n)
        print('Done getting W')
        try:
            Z = fundamental_matrix(P, W, n)
            print('Done getting Z')
            break
        except linalg.LinAlgError:
            eigen_index += 1
    Ei_Ti, Ei_Tj, Epi_Ti = hitting_times(eq_pi, Z, n)
    print('Done getting hitting times')
    return Ei_Ti, Ei_Tj, Epi_Ti
def add_shape(self, shape, outline_color=[0, 0, 0],
              fill_color=[255, 255, 255]):
    """To add a matplotlib shape: ``line2D``, ``circle``, ``ellipse``,
    ``rectangle`` or ``polygon``

    Parameters
    ----------
    shape: matplotlib shape
        line2D, circle, ellipse, rectangle or polygon
    outline_color: list
        rgb color
    fill_color: list
        rgb color
    """
    self.__shapes.append(shape)
    self.__outline_color_shapes.append(outline_color)
    self.__fill_color_shapes.append(fill_color)
    if (isinstance(shape, Circle) or isinstance(shape, Ellipse) or
            isinstance(shape, Rectangle) or isinstance(shape, Polygon)):
        xy = shape.get_verts() / self.pixel_size
        xy[:, 1] = self.height - xy[:, 1]
        self.draw.polygon(sp.around(xy.flatten()).tolist(),
                          outline="rgb(" + str(outline_color[0]) + "," +
                                  str(outline_color[1]) + "," +
                                  str(outline_color[2]) + ")",
                          fill="rgb(" + str(fill_color[0]) + "," +
                               str(fill_color[1]) + "," +
                               str(fill_color[2]) + ")")
        linewidth = shape.get_linewidth()
        self.draw.line(sp.around(xy.flatten()).tolist(),
                       width=int(linewidth),
                       fill="rgb(" + str(outline_color[0]) + "," +
                            str(outline_color[1]) + "," +
                            str(outline_color[2]) + ")")
    elif isinstance(shape, Line2D):
        linewidth = shape.get_linewidth()
        xy = shape.get_xydata() / self.pixel_size
        xy[:, 1] = self.height - xy[:, 1]
        self.draw.line(sp.around(xy.flatten()).tolist(),
                       width=int(linewidth),
                       fill="rgb(" + str(outline_color[0]) + "," +
                            str(outline_color[1]) + "," +
                            str(outline_color[2]) + ")")
def mutate(chrom, N, p):
    """Mutation with probability N and precision p
    """
    index = []
    for x in range(int(scipy.around(chrom.shape[0]*chrom.shape[1]*N))):
        index.append((int(scipy.around(scipy.rand(1,)[0] *
                                       (chrom.shape[0]-1))),
                      int(scipy.around(scipy.rand(1,)[0] *
                                       (chrom.shape[1]-1)))))
    for x in index:
        if p == 1:
            if chrom[x] == 1:
                chrom[x] = 0
            else:
                chrom[x] = 1
        else:
            chrom[x] = int(scipy.around(scipy.rand(1,)[0]*(p-1)))
    return chrom
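# Putting the GA operators together (hypothetical parameters; crtpop and
# xover are defined above). Note that passing p == 1 to mutate selects the
# bit-flip branch, which is what a binary population from crtpop(..., 2)
# needs.
pop = crtpop(10, 8, 2)      # random binary population
pop = xover(pop, 0.8, 2)    # single-point crossover
pop = mutate(pop, 0.05, 1)  # flip roughly 5% of the bits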
def test_hybrid_advection_diffusion_diffusion(self):
    self.ad.setup(s_scheme='hybrid')
    self.ad.run()
    x = [0., 0., 0.,
         0.89908, 0.89908, 0.89908,
         1.54128, 1.54128, 1.54128,
         2., 2., 2.]
    y = sp.around(self.ad['pore.concentration'], decimals=5)
    assert_allclose(actual=y, desired=x)
def toRanks(A):
    """
    converts the columns of A to ranks
    """
    AA = sp.zeros_like(A)
    for i in range(A.shape[1]):
        AA[:, i] = st.rankdata(A[:, i])
    AA = sp.array(sp.around(AA), dtype="int") - 1
    return AA
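# Column-wise ranking sketch (assumes `scipy.stats as st` and the legacy
# scipy-as-sp namespace):
A = sp.array([[3., 1.], [1., 2.], [2., 3.]])
print(toRanks(A))
# -> [[2 0]
#     [0 1]
#     [1 2]]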
def test_powerlaw_advection_diffusion_diffusion(self):
    self.ad.setup(s_scheme='powerlaw')
    self.ad.run()
    x = [0., 0., 0.,
         0.89653, 0.89653, 0.89653,
         1.53924, 1.53924, 1.53924,
         2., 2., 2.]
    y = sp.around(self.ad['pore.concentration'], decimals=5)
    assert_allclose(actual=y, desired=x)
def test_upwind_advection_diffusion_diffusion(self):
    self.ad.setup(s_scheme='upwind')
    self.ad.run()
    x = [0., 0., 0.,
         0.86486, 0.86486, 0.86486,
         1.51351, 1.51351, 1.51351,
         2., 2., 2.]
    y = sp.around(self.ad['pore.concentration'], decimals=5)
    assert_allclose(actual=y, desired=x)
def is_near_constant(self, pid, min_num_diff=10):
    vals = sp.array(self.phen_dict[pid]["values"])
    if sp.std(vals) > 0:
        vals = 50 * (vals - sp.mean(vals)) / sp.std(vals)
        vals = vals - vals.min() + 0.1
        b_counts = sp.bincount(sp.array(sp.around(vals), dtype="int"))
        b = b_counts.max() > len(vals) - min_num_diff
        return b
    else:
        return True
def test_general_toroidal(self):
    phys = self.phys
    r_tor = 1e-6
    phys.add_model(propname='throat.purcell_pressure',
                   model=pm.capillary_pressure.purcell,
                   r_toroid=r_tor)
    phys['throat.scale_a'] = r_tor
    phys['throat.scale_b'] = r_tor
    phys.add_model(propname='throat.general_pressure',
                   model=pm.meniscus.general_toroidal,
                   mode='max',
                   num_points=1000)
    a = sp.around(phys['throat.purcell_pressure'], 10)
    b = sp.around(phys['throat.general_pressure'], 10)
    assert sp.allclose(a, b)
    h = phys.check_data_health()
    for check in h.values():
        assert len(check) == 0
def is_near_constant(self, min_num_diff=10):
    vals = sp.array(self.values)
    if sp.std(vals) > 0:
        vals = 50 * (vals - sp.mean(vals)) / sp.std(vals)
        vals = vals - vals.min() + 0.1
        b_counts = sp.bincount(sp.array(sp.around(vals), dtype='int'))
        b = b_counts.max() > len(vals) - min_num_diff
        return b
    else:
        return True
def NormLog(self, X, Y, parameterValues, independentValues):
    """
    This kind of normalization is correct if the data are uniform in log
    scale, as prepared by our code toBinDistributions.py
    """
    lgX = scipy.log10(X)
    D = scipy.around(lgX[1] - lgX[0], 2)
    bins = 10**(lgX + D/2.) - 10**(lgX - D/2.)
    return Y/sum(Y*bins)
def _assert_files_equal_testing(e, a):
    da = sp.loadtxt(a, dtype='str', delimiter='\t')
    de = sp.loadtxt(e, dtype='str', delimiter='\t')
    ### check header
    assert sp.all(da[0, :] == de[0, :])
    da = da[1:, ]
    de = de[1:, ]
    ### check text cols
    assert sp.all(da[:, [0, 1]] == de[:, [0, 1]])
    da = da[:, 2:]
    de = de[:, 2:]
    ### check p-values (up to certain precision)
    da = sp.around(da.astype('float'), decimals=6)
    de = sp.around(de.astype('float'), decimals=6)
    assert sp.all(da == de)
def test_one_value_one_rate(self):
    alg = op.algorithms.GenericTransport(network=self.net,
                                         phase=self.phase)
    alg.settings['conductance'] = 'throat.diffusive_conductance'
    alg.settings['quantity'] = 'pore.mole_fraction'
    alg.set_rate_BC(pores=self.net.pores('bottom'), values=1)
    alg.set_value_BC(pores=self.net.pores('top'), values=0)
    alg.run()
    x = [0., 1., 2., 3., 4., 5., 6., 7., 8.]
    y = sp.unique(sp.around(alg['pore.mole_fraction'], decimals=3))
    assert sp.all(x == y)
def test_two_value_conditions(self):
    alg = op.algorithms.GenericTransport(network=self.net,
                                         phase=self.phase)
    alg.settings['conductance'] = 'throat.diffusive_conductance'
    alg.settings['quantity'] = 'pore.mole_fraction'
    alg.set_value_BC(pores=self.net.pores('top'), values=1)
    alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
    alg.run()
    x = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]
    y = sp.unique(sp.around(alg['pore.mole_fraction'], decimals=3))
    assert sp.all(x == y)
def readSMO(filename="sample_image.smo"):
    imageList = []
    tempList = []
    with open(filename, 'r') as inFile:  # text mode so split() yields str
        stringArray = inFile.read().split(",")
        for elem in stringArray:
            if elem[0] == '[':
                tempList = [int(sp.around(float(elem[1:])))]
            elif elem[-1] == ']':
                imageList.append(tempList)
            elif elem == '\n':
                pass
            else:
                tempList.append(int(sp.around(float(elem))))
    imageList = sp.array(imageList)
    misc.imsave("final_converted.jpg", imageList)
def centre_of_mass(geometry, vertices='throat.offset_vertices', **kwargs):
    r"""
    Calculate the centre of mass of the throat from the voronoi vertices.
    """
    Nt = geometry.num_throats()
    outer_verts = geometry['throat.vertices']
    offset_verts = geometry[vertices]
    normal = geometry['throat.normal']
    z_axis = [0, 0, 1]
    value = _sp.ndarray([Nt, 3])
    for i in range(Nt):
        if len(offset_verts[i]) > 2:
            verts = offset_verts[i]
        elif len(outer_verts[i]) > 2:
            verts = outer_verts[i]
        else:
            verts = []
        if len(verts) > 0:
            # For boundaries some facets will already be aligned with the
            # axis - if this is the case a rotation is unnecessary and
            # could also cause problems
            angle = tr.angle_between_vectors(normal[i], z_axis)
            if angle == 0.0 or angle == _sp.pi:
                # We are already aligned
                rotate_input = False
                facet = verts
            else:
                rotate_input = True
                M = tr.rotation_matrix(tr.angle_between_vectors(normal[i],
                                                                z_axis),
                                       tr.vector_product(normal[i], z_axis))
                facet = _sp.dot(verts, M[:3, :3].T)
            # Now we have a rotated facet aligned with the z axis - make 2D
            facet_2D = _sp.column_stack((facet[:, 0], facet[:, 1]))
            z = _sp.unique(_sp.around(facet[:, 2], 10))
            if len(z) == 1:
                # We need the vertices arranged in order so perform a
                # convex hull
                hull = ConvexHull(facet_2D)
                ordered_facet_2D = facet_2D[hull.vertices]
                # Call the routine to calculate an area weighted centroid
                # from the 2D polygon
                COM_2D = vo.PolyWeightedCentroid2D(ordered_facet_2D)
                COM_3D = _sp.hstack((COM_2D, z))
                # If we performed a rotation we need to rotate back
                if rotate_input:
                    MI = tr.inverse_matrix(M)
                    # Unrotate the offset coordinates using the inverse of
                    # the original rotation matrix
                    value[i] = _sp.dot(COM_3D, MI[:3, :3].T)
                else:
                    value[i] = COM_3D
            else:
                print('Rotation Failed: ' + str(_sp.unique(facet[:, 2])))
    return value
def plotBias(vals, fn_plot, myidx, logScale=False, refname='TCGA'):
    iqr = (sp.percentile(vals[~myidx], 75) -
           sp.percentile(vals[~myidx], 25)) * 1.5
    iqr2 = (sp.percentile(vals[myidx], 75) -
            sp.percentile(vals[myidx], 25)) * 1.5
    sidx = sp.argsort(vals)
    vals = vals[sidx]
    myidx = myidx[sidx]

    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)
    ax_c = ax.twinx()
    ax.vlines(sp.arange(vals.shape[0])[myidx], [0], vals[myidx],
              label='%s Reference' % refname)
    ax.vlines(sp.arange(vals.shape[0])[~myidx], [0], vals[~myidx],
              color='r', label='Your Samples')
    ax.plot([0, vals.shape[0]], [3, 3], '--', color='green')
    ax.plot([0, vals.shape[0]], [5, 5], '--', color='green')
    ax.plot([0, vals.shape[0]],
            [iqr + sp.percentile(vals[~myidx], 75)] * 2, '--', color='green')
    ax.plot([0, vals.shape[0]],
            [iqr2 + sp.percentile(vals[myidx], 75)] * 2, '--', color='green')
    ax.plot([0, vals.shape[0]], [10, 10], '--', color='green')
    ax.set_ylabel('Median 3\'/5\' Bias')
    ax.set_xlim(0, vals.shape[0])
    if logScale:
        ax.set_yscale('log')
        ax_c.set_yscale('log')
    ax_c.set_ylim(ax.get_ylim())

    ### add right side ticks (same thresholds for linear and log scale)
    tick_thresholds = sp.array([3, 5,
                                iqr + sp.percentile(vals[~myidx], 75),
                                iqr2 + sp.percentile(vals[myidx], 75), 10])
    tick_idx = sp.argsort(tick_thresholds)
    tick_thresholds = tick_thresholds[tick_idx]
    tick_thresholds = sp.around(tick_thresholds, decimals=2)
    ax_c.set_yticks(tick_thresholds)
    # use unicode string dtypes so the label concatenation works on Python 3
    tick_thresholds = tick_thresholds.astype('U4')
    tick_thresholds = tick_thresholds.astype('U50')
    tick_thresholds[tick_idx == 2] = \
        tick_thresholds[tick_idx == 2][0] + ' (Your Filter)'
    tick_thresholds[tick_idx == 3] = \
        tick_thresholds[tick_idx == 3][0] + ' (%s Filter)' % refname
    ax_c.set_yticklabels(tick_thresholds)
    ax.grid()
    ax.legend(loc=2)
    plt.tight_layout()
    plt.savefig(fn_plot, dpi=300)
    plt.clf()
def _label_faces(self):
    r'''
    Label the pores sitting on the faces of the domain in accordance with
    the conventions used for cubic etc.
    '''
    coords = sp.around(self['pore.coords'], decimals=10)
    min_labels = ['front', 'left', 'bottom']
    max_labels = ['back', 'right', 'top']
    min_coords = sp.amin(coords, axis=0)
    max_coords = sp.amax(coords, axis=0)
    for ax in range(3):
        self['pore.' + min_labels[ax]] = coords[:, ax] == min_coords[ax]
        self['pore.' + max_labels[ax]] = coords[:, ax] == max_coords[ax]
def mcUnitConvergeEst(func, dims, minPoints, maxPoints, numTestPoints,
                      testRuns):
    # Couldn't get this to work, spits out an answer near 0
    testPoints = sp.around(sp.linspace(minPoints, maxPoints, numTestPoints))
    error = sp.zeros(sp.size(testPoints))
    area = sp.zeros(testRuns)
    for i in range(0, numTestPoints):
        for k in range(0, testRuns):
            area[k] = mcUnit(func, testPoints[i], dims)
        error[i] = sp.mean(sp.absolute(area - sp.pi))
    estimate = la.lstsq(sp.vstack((sp.log(testPoints),
                                   sp.ones(sp.size(testPoints)))).T,
                        sp.log(error))
    return estimate
def test_upwind_advection_diffusion(self):
    mod = op.models.physics.ad_dif_conductance.ad_dif
    self.phys.add_model(propname='throat.ad_dif_conductance_upwind',
                        model=mod, s_scheme='upwind')
    self.phys.regenerate_models()
    self.ad.setup(conductance='throat.ad_dif_conductance_upwind')
    self.ad.run()
    x = [0., 0., 0.,
         0.86486, 0.86486, 0.86486,
         1.51351, 1.51351, 1.51351,
         2., 2., 2.]
    y = sp.around(self.ad['pore.concentration'], decimals=5)
    assert_allclose(actual=y, desired=x)
def test_upwind_NernstPlanck(self):
    mod = op.models.physics.ad_dif_mig_conductance.ad_dif_mig
    self.phys.add_model(propname='throat.ad_dif_mig_conductance_upwind',
                        model=mod, s_scheme='upwind', ion='ionX')
    self.phys.regenerate_models()
    self.adm.setup(conductance='throat.ad_dif_mig_conductance_upwind')
    self.adm.run()
    x = [0., 0., 0.,
         0.86486, 0.86486, 0.86486,
         1.51351, 1.51351, 1.51351,
         2., 2., 2.]
    y = sp.around(self.adm['pore.concentration.ionX'], decimals=5)
    assert_allclose(actual=y, desired=x)
def test_one_value_one_source(self):
    rt = op.algorithms.ReactiveTransport(network=self.net,
                                         phase=self.phase)
    rt.setup(rxn_tolerance=1e-05, max_iter=5000,
             relaxation_source=1, relaxation_quantity=1)
    rt.settings.update({'conductance': 'throat.diffusive_conductance',
                        'quantity': 'pore.concentration'})
    rt.set_source(pores=self.net.pores('bottom'), propname='pore.reaction')
    rt.set_value_BC(pores=self.net.pores('top'), values=1.0)
    rt.run()
    x = [0.0011, 0.1260, 0.2508, 0.3757, 0.5006, 0.6254, 0.7503, 0.8751,
         1.0]
    y = sp.unique(sp.around(rt['pore.concentration'], decimals=4))
    assert sp.all(x == y)