def infer_diag_post(self, X_ii, D_i):
    X_i = dc(X_ii)
    ns = len(D_i)
    X_i.resize([ns, self.D])
    [m, V] = self.infer_diag(X_i, D_i)
    if sp.amin(V) <= -0.:
        class MJMError(Exception):
            pass
        print("negative/eq variance")
        print([m, V, X_i, D_i])
        print("_______________")
        # self.printc()
        raise MJMError
    if sp.amin(sp.var(m, axis=0)) < -0.:
        class MJMError(Exception):
            pass
        print("negative var of mean")
        print(X_i.shape)
        print([m, V, sp.var(m, axis=0), X_i, D_i])
        print("_______________")
        # self.printc()
        raise MJMError
    return [sp.mean(m, axis=0).reshape([1, ns]),
            (sp.mean(V, axis=0) + sp.var(m, axis=0)).reshape([1, ns])]
def loadfile(self, skiprows=2):
    self.d.efld = np.loadtxt(self.efile, skiprows=skiprows)
    self.d.hfld = np.loadtxt(self.hfile, skiprows=skiprows)
    erows, ecols = np.shape(self.d.efld)
    hrows, hcols = np.shape(self.d.hfld)
    if (erows != hrows) or (ecols != hcols):
        raise TypeError('Input file size of E and H is inconsistent.')
    exl = np.unique(self.d.efld[:, cx])
    eyl = np.unique(self.d.efld[:, cy])
    ezl = np.unique(self.d.efld[:, cz])
    hxl = np.unique(self.d.hfld[:, cx])
    hyl = np.unique(self.d.hfld[:, cy])
    hzl = np.unique(self.d.hfld[:, cz])
    if any(exl != hxl) or any(eyl != hyl) or any(ezl != hzl):
        raise TypeError('Input data grid of E and H is inconsistent.')
    self.d.xmin = np.amin(exl)
    self.d.xmax = np.amax(exl)
    self.d.ymin = np.amin(eyl)
    self.d.ymax = np.amax(eyl)
    self.d.zmin = np.amin(ezl)
    self.d.zmax = np.amax(ezl)
    self.d.dsize = erows
    self.d.xsize = len(exl) - 1
    self.d.ysize = len(eyl) - 1
    self.d.zsize = len(ezl) - 1
    self.d.dx = (self.d.xmax - self.d.xmin) / float(self.d.xsize)
    self.d.dy = (self.d.ymax - self.d.ymin) / float(self.d.ysize)
    self.d.dz = (self.d.zmax - self.d.zmin) / float(self.d.zsize)
    self.zyxsort()
def add_boundary_pores(self, labels=['top', 'bottom', 'front', 'back',
                                     'left', 'right'], offset=None):
    r"""
    Add boundary pores to the specified faces of the network

    Pores are offset from the faces of the domain.

    Parameters
    ----------
    labels : string or list of strings
        The labels indicating the pores defining each face where boundary
        pores are to be added (e.g. 'left' or ['left', 'right'])

    offset : scalar or array_like
        The spacing of the network (e.g. [1, 1, 1]).  This must be given
        since it can be quite difficult to infer from the network, for
        instance if boundary pores have already been added to other faces.
    """
    offset = sp.array(offset)
    if offset.size == 1:
        offset = sp.ones(3)*offset
    for item in labels:
        Ps = self.pores(item)
        coords = sp.absolute(self['pore.coords'][Ps])
        axis = sp.count_nonzero(sp.diff(coords, axis=0), axis=0) == 0
        ax_off = sp.array(axis, dtype=int)*offset
        if sp.amin(coords) == sp.amin(coords[:, sp.where(axis)[0]]):
            ax_off = -1*ax_off
        topotools.add_boundary_pores(network=self, pores=Ps, offset=ax_off,
                                     apply_label=item + '_boundary')
def merged_event_breakpoint_stats(mev):
    bp1d, bp2d = [], []
    bend1 = bend2 = None
    reads = []
    quals = []
    for ev in mev.events:
        bp1d.append(ev.bp1.pos)
        bp2d.append(ev.bp2.pos)
        reads.append(ev.reads)
        quals.append(ev.qual)
        bend1 = ev.bp1.breakend
        bend2 = ev.bp2.breakend
    bp1d = np.array(bp1d)
    bp2d = np.array(bp2d)
    if bend1 == "+":
        bp1limit = scipy.amin(bp1d)
    else:
        bp1limit = scipy.amax(bp1d)
    if bend2 == "+":
        bp2limit = scipy.amin(bp2d)
    else:
        bp2limit = scipy.amax(bp2d)
    reads_median = int(scipy.median(reads))
    qual_median = int(scipy.median(quals))
    return (int(bp1limit), int(bp2limit), int(bp2limit - bp1limit),
            scipy.mean(bp1d), scipy.amax(bp1d) - scipy.amin(bp1d),
            scipy.std(bp1d), scipy.mean(bp2d),
            scipy.amax(bp2d) - scipy.amin(bp2d), scipy.std(bp2d),
            reads_median, qual_median)
def plot(self):
    ex = self.ex
    hy = self.hy
    ngridx = self.ngridx
    nSteps = self.numSteps
    x = np.linspace(0, ngridx, ngridx)
    ymin1 = S.amin(ex)
    ymin2 = S.amin(hy)
    ymax1 = S.amax(ex)
    ymax2 = S.amax(hy)
    yminimum = min(ymin1, ymin2)
    ymaximum = max(ymax1, ymax2)
    title1 = 'EX and Hy field in FDTD 1D simulation.'
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.set_xlabel('FDTD Cells', fontsize=12)
    ax1.plot(x, ex, 'tab:blue', label='Ex (Normalized)')
    ax1.set_xlim([0, ngridx])
    ax1.legend(loc='best', shadow=True, ncol=2)
    # ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), shadow=True, ncol=2)
    ax2 = fig.add_subplot(122)
    ax2.set_xlabel('FDTD Cells', fontsize=12)
    ax2.plot(x, hy, 'tab:red', label='Hy')
    ax2.set_xlim([0, ngridx])
    ax2.legend(loc='best', shadow=True, ncol=2)
    # ax2.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), shadow=True, ncol=2)
    plt.suptitle(title1, fontsize=20)
    plt.savefig('Figure.png')
    plt.show()
def plot_curr(self):
    """Plot the current based on the entered row number."""
    ######## plot a 3D Current ########
    fig = plt.figure(figsize=plt.figaspect(0.5))
    canvas = FigureCanvasTkAgg(fig, master=page3)
    canvas.get_tk_widget().pack(side='top', fill='both')
    canvas._tkcanvas.pack(side='top', fill='both', expand=15)
    toolbar = NavigationToolbar2TkAgg(canvas, page3)
    canvas.get_tk_widget().grid(row=1, column=1)
    toolbar.grid(row=50, column=1)
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    X = sp.linspace(0, 10, 10)
    Y = sp.linspace(0, 10, 10)
    X, Y = sp.meshgrid(X, Y)
    X, Y = X.ravel(), Y.ravel()
    # R = sp.sqrt(X**2 + Y**2)
    # Z = sp.sin(R)
    width = depth = 1
    bottom = X*0.
    # plt.ion()
    getrow = int(entryrowplot.get())
    current_floats = sp.float64(
        self.data_str['current'][getrow].split(";")).reshape(10, 10)
    maxcurr = np.max(current_floats)
    # print(sp.mean(current_floats))
    top = current_floats.ravel()
    top_scaled = (top - sp.amin(top)) / (sp.amax(top) - sp.amin(top))
    mycolors = cm.jet(top_scaled)
    ax.bar3d(X, Y, bottom, width, depth, top, color=mycolors,
             alpha=float(entryTrans.get()))
    ax.set_title('Partial Currents @ ' + self.data_str['sumcurr'][getrow] +
                 ' A' + '// @ ' + self.data['voltage'][getrow] + ' V')
    ax.set_zlim(0, maxcurr)
    ax.set_zlabel('Current in A', linespacing=10.4)
    ax.view_init(elev=int(entryCurrEval.get()), azim=int(entryCurrAngle.get()))
    # plt.show()
    fig = plt.figure()
    # plt.clf()
    plt.show()
    plt.gcf().canvas.draw()
    currplot = ts.strftime("%Y.%m.%d. %H:%M:%S :") + "Current plotted"
    self.displaystate.insert(tk.END, currplot + '\n')
def centroid_quadratic(curve, ratio):
    num, div = 0, 0
    top = scp.amin(curve) + (scp.amax(curve) - scp.amin(curve)) * ratio
    for i in range(0, len(curve)):
        if curve[i] <= top:
            num += (top - curve[i]) * (top - curve[i]) * (i + 1)
            div += (top - curve[i]) * (top - curve[i])
    centroid = num / div
    return centroid
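# Usage sketch (not from the original source): centroid_quadratic weights every
# sample at or below a threshold by its squared depth below that threshold and
# returns a 1-based weighted index.  The dip-shaped curve below is invented for
# illustration only; `scp` is assumed to be the scipy alias this module imports.
def _demo_centroid_quadratic():
    curve = [5.0, 4.0, 1.0, 0.5, 1.5, 4.5, 5.0]
    # With ratio=0.5 only the three central samples fall below the threshold,
    # so the centroid lands near the dip (roughly 3.9 on the 1-based index).
    return centroid_quadratic(curve, ratio=0.5)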
def __init__(self, evaluator, hof1, hof2, **args):
    if 'symmetric' in args:
        M = CiaoPlot.generateData(evaluator, hof1, hof2,
                                  symmetric=args['symmetric'])
        del args['symmetric']
    else:
        M = CiaoPlot.generateData(evaluator, hof1, hof2)
    M *= 1/(amin(M) - amax(M))
    M -= amin(M)
    self.relData = M
    ColorMap.__init__(self, M, minvalue=0, maxvalue=1, **args)
def partition(self, midpoint):
    """
    Return two lists, forward and reverse, whose lengths satisfy
    len(self) == len(forward) + len(reverse).  Each index contains the
    distance to the closest coordinate on the other strand of the
    molecule, as defined by midpoint.
    """
    forward = scipy.amin(self._d[0:midpoint + 1, midpoint + 1:len(self)],
                         axis=1)
    reverse = scipy.amin(self._d[0:midpoint + 1, midpoint + 1:len(self)],
                         axis=0)
    return forward, reverse
def compute(i, j):
    if i == j:
        return 1.0
    elif trains[i].size <= 0 or trains[j].size <= 0:
        return 0.0
    else:
        diff_matrix = sp.absolute(trains[i] - sp.atleast_2d(trains[j]).T)
        return 0.5 * (
            sp.sum(kernel(sp.amin(diff_matrix, axis=0))) / trains[i].size +
            sp.sum(kernel(sp.amin(diff_matrix, axis=1))) / trains[j].size)
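# Usage sketch (assumptions flagged): `compute` is taken to close over `trains`
# (a list of spike-time arrays) and `kernel` (a similarity kernel applied to
# nearest-spike time differences); in the original these come from the
# enclosing scope.  The Gaussian kernel and the two short spike trains below
# are invented for illustration only.
trains = [sp.array([0.10, 0.50, 0.90]), sp.array([0.12, 0.55])]

def kernel(d):
    return sp.exp(-d ** 2 / (2 * 0.05 ** 2))

# Averages, in both directions, the kernel of each spike's distance to the
# closest spike of the other train; with this kernel the result lies in [0, 1],
# equals 1.0 when i == j, and is 0.0 if either train is empty.
similarity_01 = compute(0, 1)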
def __init__(self, evaluator, hof1, hof2, **args):
    if "symmetric" in args:
        M = CiaoPlot.generateData(evaluator, hof1, hof2,
                                  symmetric=args["symmetric"])
        del args["symmetric"]
    else:
        M = CiaoPlot.generateData(evaluator, hof1, hof2)
    M *= 1 / (amin(M) - amax(M))
    M -= amin(M)
    self.relData = M
    ColorMap.__init__(self, M, minvalue=0, maxvalue=1, **args)
def interpolate(self, canvas, status=None):
    # Clear the interpolated canvas
    canvas.interpolated = sp.zeros_like(canvas.fringes_image) - 1024.0
    if status is not None:
        status.set("Performing the interpolation", 70)
    else:
        print("Performing the interpolation")
    # Iterate over all the triangles in the triangulation
    for triangle in self.triangles:
        # Create a shortcut to the triangle's vertices
        co = triangle.vert_coordinates
        # Calculate a few constants for the Barycentric Coordinates
        # More info: https://codeplea.com/triangular-interpolation
        div = (co[1, 0] - co[2, 0]) * (co[0, 1] - co[2, 1]) + (
            co[2, 1] - co[1, 1]) * (co[0, 0] - co[2, 0])
        a0 = (co[1, 0] - co[2, 0])
        a1 = (co[2, 1] - co[1, 1])
        a2 = (co[2, 0] - co[0, 0])
        a3 = (co[0, 1] - co[2, 1])
        # Calculate the bounds of a rectangle that fully encloses
        # the current triangle
        xmin = int(sp.amin(triangle.vert_coordinates[:, 1]))
        xmax = int(sp.amax(triangle.vert_coordinates[:, 1])) + 1
        ymin = int(sp.amin(triangle.vert_coordinates[:, 0]))
        ymax = int(sp.amax(triangle.vert_coordinates[:, 0])) + 1
        # Take out slices of the x and y arrays,
        # containing the points' coordinates
        x_slice = canvas.x[ymin:ymax, xmin:xmax]
        y_slice = canvas.y[ymin:ymax, xmin:xmax]
        # Use Barycentric Coordinates and the magic of numpy (scipy in this
        # case) to perform the calculations with the C backend, instead
        # of iterating on pixels with Python loops.
        # If you have not worked with numpy arrays before, dear reader,
        # the idea is that if x = [[0 1]
        #                          [2 3]],
        # then x*3+1 is a completely valid operation, returning
        # x = [[1 4]
        #      [7 10]]
        # Basically, we can do maths on arrays as if they were variables.
        # Convenient, and really fast!
        w0 = (a0 * (x_slice - co[2, 1]) + a1 * (y_slice - co[2, 0])) / div
        w1 = (a2 * (x_slice - co[2, 1]) + a3 * (y_slice - co[2, 0])) / div
        w2 = sp.round_(1 - w0 - w1, 10)
        # Calculate the values for a rectangle enclosing our triangle
        slice = (self.values[triangle.vertices[0]] * w0 +
                 self.values[triangle.vertices[1]] * w1 +
                 self.values[triangle.vertices[2]] * w2)
        # Make a mask (so that we only touch the points
        # inside of the triangle).
        # In Barycentric Coordinates the points outside of the triangle
        # have at least one of the coefficients negative, so we use that
        mask = sp.logical_and(sp.logical_and(w0 >= 0, w1 >= 0), w2 >= 0)
        # Change the points in the actual canvas
        canvas.interpolated[ymin:ymax, xmin:xmax][mask] = slice[mask]
    canvas.interpolation_done = True
def test_largest_sphere(self):
    net = OpenPNM.Network.Cubic(shape=[5, 5, 5], spacing=[0.1, 0.2, 0.3])
    geo = OpenPNM.Geometry.GenericGeometry(network=net, pores=net.Ps,
                                           throats=net.Ts)
    geo.models.add(propname='pore.diameter',
                   model=mods.largest_sphere,
                   iters=1)
    dmin = sp.amin(geo['pore.diameter'])
    assert dmin <= 0.1
    geo.models['pore.diameter']['iters'] = 5
    geo.regenerate()
    assert dmin < sp.amin(geo['pore.diameter'])
def plot_temp(self):
    ######## plot 3D Temperature ########
    fig2 = plt.figure(figsize=plt.figaspect(0.5))
    ax2 = fig2.add_subplot(1, 2, 1, projection='3d')
    X2 = sp.linspace(0, 5, 5)
    Y2 = sp.linspace(0, 5, 5)
    X2, Y2 = sp.meshgrid(X2, Y2)
    X2, Y2 = X2.ravel(), Y2.ravel()
    # R = sp.sqrt(X**2 + Y**2)
    # Z = sp.sin(R)
    width = depth = 1
    bottom = X2 * 0
    # plt.ion()
    p = int(entryrowplot.get())
    temp_floats = sp.float64(self.data_str['temp'][p].split(";")).reshape(5, 5)
    mintemp = np.min(temp_floats)
    mintempplot = mintemp
    mintemptxt = str(round(mintemp, 1))
    maxtemp = np.max(temp_floats)
    maxtempplot = maxtemp
    maxi = maxtempplot - mintempplot + 0.1
    top2 = temp_floats.ravel() - mintemp
    top_scaled2 = (top2 - sp.amin(top2)) / (sp.amax(top2) - sp.amin(top2))
    mycolors = cm.jet(top_scaled2)
    ax2.bar3d(X2, Y2, bottom, width, depth, top2, color=mycolors,
              alpha=float(entryTrans.get()))
    ax2.set_title('Temperature' + '\n' + '@ ' + self.data_str['sumcurr'][p] +
                  ' A' + ' // @ ' + self.data['voltage'][p] + ' V')
    ax2.set_zlim(0, maxi)
    ax2.set_zlabel('Temperature +' + mintemptxt + '°C')
    ax2.view_init(elev=25., azim=60)
    plt.show()
    tempplot = ts.strftime("%Y.%m.%d. %H:%M:%S :") + "Temperature plotted"
    self.displaystate.insert(tk.END, tempplot + '\n')
def test_from_neighbor_throats_max(self):
    self.geo.pop('pore.seed', None)
    self.geo.models.pop('pore.seed', None)
    self.geo.models.pop('throat.seed', None)
    self.geo['throat.seed'] = sp.rand(self.net.Nt, )
    self.geo.add_model(model=mods.from_neighbor_throats,
                       propname='pore.seed',
                       throat_prop='throat.seed',
                       mode='max')
    assert sp.all(sp.in1d(self.geo['pore.seed'], self.geo['throat.seed']))
    pmin = sp.amin(self.geo['pore.seed'])
    tmin = sp.amin(self.geo['throat.seed'])
    assert pmin >= tmin
def test_neighbor_max(self):
    catch = self.geo.pop('pore.seed', None)
    catch = self.geo.models.pop('pore.seed', None)
    catch = self.geo.models.pop('throat.seed', None)
    mod = gm.pore_misc.neighbor
    self.geo['throat.seed'] = sp.rand(self.net.Nt,)
    self.geo.models.add(model=mod,
                        propname='pore.seed',
                        throat_prop='throat.seed',
                        mode='max')
    assert sp.all(sp.in1d(self.geo['pore.seed'], self.geo['throat.seed']))
    pmin = sp.amin(self.geo['pore.seed'])
    tmin = sp.amin(self.geo['throat.seed'])
    assert pmin >= tmin
def representative_elementary_volume(im, npoints=1000):
    r"""
    Calculates the porosity of the image as a function of subdomain size.

    This function extracts a specified number of subdomains of random size,
    then finds their porosity.

    Parameters
    ----------
    im : ND-array
        The image of the porous material

    npoints : int
        The number of randomly located and sized boxes to sample.  The
        default is 1000.

    Returns
    -------
    A tuple containing the ND-arrays: The subdomain *volume* and its
    *porosity*.  Each of these arrays is ``npoints`` long.  They can be
    conveniently plotted by passing the tuple to matplotlib's ``plot``
    function using the \* notation: ``plt.plot(*the_tuple, 'b.')``.  The
    resulting plot is similar to the sketch given by Bachmat and Bear [1]

    Notes
    -----
    This function is frustratingly slow.  Profiling indicates that all the
    time is spent on scipy's ``sum`` function which is needed to sum the
    number of void voxels (1's) in each subdomain.

    Also, this function is primed for parallelization since the ``npoints``
    are calculated independently.

    References
    ----------
    [1] Bachmat and Bear. On the Concept and Size of a Representative
    Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
    (1987)
    """
    im_temp = sp.zeros_like(im)
    crds = sp.array(sp.rand(npoints, im.ndim) * im.shape, dtype=int)
    pads = sp.array(sp.rand(npoints) * sp.amin(im.shape) / 2 + 10, dtype=int)
    im_temp[tuple(crds.T)] = True
    labels, N = spim.label(input=im_temp)
    slices = spim.find_objects(input=labels)
    porosity = sp.zeros(shape=(N, ), dtype=float)
    volume = sp.zeros(shape=(N, ), dtype=int)
    for i in tqdm(sp.arange(0, N)):
        s = slices[i]
        p = pads[i]
        new_s = extend_slice(s, shape=im.shape, pad=p)
        temp = im[new_s]
        Vp = sp.sum(temp)
        Vt = sp.size(temp)
        porosity[i] = Vp / Vt
        volume[i] = Vt
    profile = namedtuple('profile', ('volume', 'porosity'))
    profile.volume = volume
    profile.porosity = porosity
    return profile
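# Usage sketch (not from the original source): the function only needs a
# boolean ND image where True marks void space, so a random synthetic image is
# enough to exercise it.  The image size, porosity and npoints below are
# arbitrary illustrative choices.
def _demo_representative_elementary_volume():
    im = sp.rand(100, 100, 100) < 0.6      # synthetic image, ~60% porosity
    profile = representative_elementary_volume(im, npoints=100)
    # The porosity scatter tightens towards the bulk value as volume grows.
    return profile.volume, profile.porosity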
def test_spatially_correlated_zero_weights(self):
    f = OpenPNM.Geometry.models.pore_seed.spatially_correlated
    self.geo.models.add(propname='pore.seed',
                        model=f,
                        weights=[0, 0, 0])
    # Seeds should remain strictly inside (0, 1)
    assert sp.amin(self.geo['pore.seed']) > 0
    assert sp.amax(self.geo['pore.seed']) < 1
def late_pore_filling(physics, phase, network, Pc, Swp_star=0.2, eta=3,
                      wetting_phase=False,
                      pore_occupancy='pore.occupancy',
                      throat_capillary_pressure='throat.capillary_pressure',
                      **kwargs):
    r'''
    Applies a late pore filling model to calculate fractional pore filling
    as a function of applied capillary pressure.

    Parameters
    ----------
    Pc : float
        The capillary pressure in the non-wetting phase (Pc > 0)

    Swp_star : float
        The residual wetting phase in an invaded pore immediately after
        nonwetting phase invasion

    eta : float
        Exponent to control the rate at which wetting phase is displaced

    wetting_phase : boolean
        Indicates whether supplied phase is the wetting or non-wetting phase
    '''
    pores = phase.pores(physics.name)
    prop = phase[throat_capillary_pressure]
    neighborTs = network.find_neighbor_throats(pores, flatten=False)
    Pc_star = sp.array([sp.amin(prop[row]) for row in neighborTs])
    Swp = Swp_star*(Pc_star/Pc)**eta
    if wetting_phase:
        values = Swp*phase[pore_occupancy]*(Pc_star < Pc)
    else:
        values = (1-Swp)*(1-phase[pore_occupancy])*(Pc_star < Pc)
    return values
def domain_length(self, face_1, face_2):
    r'''
    Calculate the distance between two faces of the network

    Parameters
    ----------
    face_1 and face_2 : array_like
        Lists of pores belonging to opposite faces of the network

    Returns
    -------
    The length of the domain in the specified direction

    Notes
    -----
    - Does not yet check if input faces are perpendicular to each other
    '''
    # Ensure given points are coplanar before proceeding
    if misc.iscoplanar(self['pore.coords'][face_1]) and \
            misc.iscoplanar(self['pore.coords'][face_2]):
        # Find distance between given faces
        x = self['pore.coords'][face_1]
        y = self['pore.coords'][face_2]
        Ds = misc.dist(x, y)
        L = sp.median(sp.amin(Ds, axis=0))
    else:
        self._logger.warning('The supplied pores are not coplanar. '
                             'Length will be approximate.')
        f1 = self['pore.coords'][face_1]
        f2 = self['pore.coords'][face_2]
        distavg = [0, 0, 0]
        distavg[0] = sp.absolute(sp.average(f1[:, 0]) - sp.average(f2[:, 0]))
        distavg[1] = sp.absolute(sp.average(f1[:, 1]) - sp.average(f2[:, 1]))
        distavg[2] = sp.absolute(sp.average(f1[:, 2]) - sp.average(f2[:, 2]))
        L = max(distavg)
    return L
def find_acceleromoeter_scale_bias_priors(measures):
    maxZ = sp.amax(measures)
    minZ = sp.amin(measures)
    bias = minZ + (maxZ - minZ) / 2.0
    scale = 1 / (maxZ - bias)
    return scale, bias
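# Usage sketch (not from the original source): with one accelerometer axis
# sampled through both the +1 g and -1 g orientations, the extreme readings
# bracket the bias, and the prior scale maps the positive span back to ~1 g.
# The raw readings below are invented for illustration.
def _demo_accelerometer_priors():
    z_readings = sp.array([1.02, 0.98, -0.95, -1.05, 0.01])
    scale, bias = find_acceleromoeter_scale_bias_priors(z_readings)
    return scale, bias   # roughly 0.97 and -0.015 for these values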
def neighbor(geometry, network, pore_prop='pore.seed', mode='min', **kwargs):
    r"""
    Adopt a value based on the values in the neighboring pores

    Parameters
    ----------
    mode : string
        Indicates how to select the values from the neighboring pores.  The
        options are:

        - min : (Default) Uses the minimum of the value found in the neighbors
        - max : Uses the maximum of the values found in the neighbors
        - mean : Uses an average of the neighbor values

    pore_prop : string
        The dictionary key containing the pore property to be used.
    """
    throats = network.throats(geometry.name)
    P12 = network.find_connected_pores(throats)
    pvalues = network[pore_prop][P12]
    if mode == 'min':
        value = _sp.amin(pvalues, axis=1)
    if mode == 'max':
        value = _sp.amax(pvalues, axis=1)
    if mode == 'mean':
        value = _sp.mean(pvalues, axis=1)
    return value
def scale(self, x, M=None, m=None):
    # TODO: DO IN PLACE SCALING
    """!@brief Function that standardizes the data

        Input:
            x: the data
            M: the Max vector
            m: the Min vector
        Output:
            x: the standardized data
            M: the Max vector
            m: the Min vector
    """
    [n, d] = x.shape
    if not sp.issubdtype(x.dtype, float):
        x = x.astype('float')

    # Initialization of the output
    xs = sp.empty_like(x)

    # Get the parameters of the scaling
    if M is None:
        M, m = sp.amax(x, axis=0), sp.amin(x, axis=0)

    den = M - m
    for i in range(d):
        if den[i] != 0:
            xs[:, i] = 2 * (x[:, i] - m[i]) / den[i] - 1
        else:
            xs[:, i] = x[:, i]

    return xs
def run(self, N=100):
    r'''
    '''
    im = self.image
    # Create a list of N random points to use as box centers
    pad = [0.1, 0.1, 0.45]  # Ensure points are near middle
    Cx = sp.random.randint(pad[0]*sp.shape(im)[0],
                           (1-pad[0])*sp.shape(im)[0], N)
    Cy = sp.random.randint(pad[1]*sp.shape(im)[1],
                           (1-pad[1])*sp.shape(im)[1], N)
    Cz = sp.random.randint(pad[2]*sp.shape(im)[2],
                           (1-pad[2])*sp.shape(im)[2], N)
    C = sp.vstack((Cx, Cy, Cz)).T
    # Find maximum radius allowable for each point
    Rmax = sp.array(C > sp.array(sp.shape(im))/2)
    Rlim = sp.zeros(sp.shape(Rmax))
    Rlim[Rmax[:, 0], 0] = sp.shape(im)[0]
    Rlim[Rmax[:, 1], 1] = sp.shape(im)[1]
    Rlim[Rmax[:, 2], 2] = sp.shape(im)[2]
    R = sp.absolute(C - Rlim)
    R = R.astype(sp.int_)
    Rmin = sp.amin(R, axis=1)

    vol = []
    size = []
    porosity = []
    for i in range(0, N):
        for r in sp.arange(Rmin[i], 1, -10):
            imtemp = im[C[i, 0]-150:C[i, 0]+150,
                        C[i, 1]-150:C[i, 1]+150,
                        C[i, 2]-r:C[i, 2]+r]
            vol.append(sp.size(imtemp))
            size.append(2*r)
            porosity.append(sp.sum(imtemp == 1)/(sp.size(imtemp)))

    vals = namedtuple('REV', ('porosity', 'size'))
    vals.porosity = porosity
    vals.size = size
    return vals
def overf_power_spectrum(amp, index, f0, dt, n, cut_off=0):
    """Calculates the theoretical f**`index` power spectrum.
    """
    if cut_off < 0:
        raise ValueError("Low frequency cut off must not be negative.")
    # Sometimes the fitting routines do something weird that causes
    # an overflow from a ridiculous index.  Limit the index.
    index = max(index, -20)
    # Get the frequencies represented in the FFT.
    df = 1.0/dt/n
    freq = sp.arange(n, dtype=float)
    freq[n//2+1:] -= freq[-1] + 1
    freq = abs(freq)*df
    # The 0th (mean) mode is meaningless and has an IR divergence.  Deal with
    # it later (in the cut off).
    freq[0] = 1
    # Make the power spectrum.
    power = (freq/f0)**index
    power *= amp
    # Restore frequency of mean mode.
    freq[0] = 0
    # Find the power just above the cut off frequency.
    p_cut = power[sp.amin(sp.where(freq > cut_off)[0])]
    # Flatten off the power spectrum.
    power[freq <= cut_off] = p_cut
    return power
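# Usage sketch (not from the original source): the parameters below are
# illustrative only (1 kHz sampling, 1024-point window, 1/f spectrum with a
# knee frequency f0 = 10 Hz and a 0.5 Hz low-frequency cut off).
def _demo_overf_power_spectrum():
    power = overf_power_spectrum(amp=1.0, index=-1.0, f0=10.0,
                                 dt=1e-3, n=1024, cut_off=0.5)
    # power falls as (f/f0)**index above cut_off and is flattened below it;
    # the mean-mode entry takes the flattened value as well.
    return power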
def compX(self, xtrue):
    n = self.X.shape[0]
    R = sp.empty([2, n])
    for i in range(n):
        R[0, i] = spl.norm(self.X[i, :] - xtrue)
        R[1, i] = sp.amin(R[0, :i+1])
    return R
def domain_length(self, face_1, face_2):
    r'''
    Calculate the distance between two faces of the network

    Parameters
    ----------
    face_1 and face_2 : array_like
        Lists of pores belonging to opposite faces of the network

    Returns
    -------
    The length of the domain in the specified direction

    Notes
    -----
    - Does not yet check if input faces are perpendicular to each other
    '''
    # Ensure given points are coplanar before proceeding
    if misc.iscoplanar(self['pore.coords'][face_1]) and \
            misc.iscoplanar(self['pore.coords'][face_2]):
        # Find distance between given faces
        x = self['pore.coords'][face_1]
        y = self['pore.coords'][face_2]
        Ds = misc.dist(x, y)
        L = sp.median(sp.amin(Ds, axis=0))
    else:
        logger.warning('The supplied pores are not coplanar. '
                       'Length will be approximate.')
        f1 = self['pore.coords'][face_1]
        f2 = self['pore.coords'][face_2]
        distavg = [0, 0, 0]
        distavg[0] = sp.absolute(sp.average(f1[:, 0]) - sp.average(f2[:, 0]))
        distavg[1] = sp.absolute(sp.average(f1[:, 1]) - sp.average(f2[:, 1]))
        distavg[2] = sp.absolute(sp.average(f1[:, 2]) - sp.average(f2[:, 2]))
        L = max(distavg)
    return L
def plot_pairwise_velocities_r(case, color, all_radial_distances,
                               all_radial_velocities):
    dr = 0.3  # Mpc/h
    rmin, rmax = sp.amin(all_radial_distances), sp.amax(all_radial_distances)
    rrange = rmax - rmin
    N = int(sp.ceil(rrange/dr))
    rs = sp.linspace(rmin, rmax, N)
    v12_of_r = [[] for index in range(N)]

    for r, v12 in zip(all_radial_distances, all_radial_velocities):
        index = int(sp.floor((r-rmin)/dr))
        v12_of_r[index].append(v12)

    sigma_12s = sp.zeros(N)
    v12_means = sp.zeros(N)
    for index in range(len(sigma_12s)):
        v12_of_r_index = sp.array(v12_of_r[index])
        print("number of counts in the", index, "th bin:",
              len(v12_of_r_index))
        sigma_12 = sp.sqrt(sp.mean(v12_of_r_index**2))
        v12_mean = -sp.mean(v12_of_r_index)
        sigma_12s[index] = sigma_12
        v12_means[index] = v12_mean

    plt.plot(rs, sigma_12s, color=color, label=r'$\sigma_{12}$')
    plt.plot(rs, v12_means, color=color, label=r'$|v_{12}|$')
    plt.xlabel('r [Mpc/h]')
    plt.ylabel('[km/s]')
    plt.xscale('log')
    plt.axis([0.5, 100, 0, 600])
def print_all_stats(ctx, series):
    ftime = get_ftime(series)
    start = 0
    end = ctx.interval
    print('start-time, samples, min, avg, median, 90%, 95%, 99%, max')
    while (start < ftime):  # for each time interval
        end = ftime if ftime < end else end
        sample_arrays = [s.get_samples(start, end) for s in series]
        samplevalue_arrays = []
        for sample_array in sample_arrays:
            samplevalue_arrays.append(
                [sample.value for sample in sample_array])
        # print('samplevalue_arrays len: %d' % len(samplevalue_arrays))
        # print('samplevalue_arrays elements len: ' +
        #       str(map(lambda l: len(l), samplevalue_arrays)))
        # collapse list of lists of sample values into list of sample values
        samplevalues = reduce(array_collapser, samplevalue_arrays, [])
        # print('samplevalues: ' + str(sorted(samplevalues)))
        # compute all stats and print them
        myarray = scipy.fromiter(samplevalues, float)
        mymin = scipy.amin(myarray)
        myavg = scipy.average(myarray)
        mymedian = scipy.median(myarray)
        my90th = scipy.percentile(myarray, 90)
        my95th = scipy.percentile(myarray, 95)
        my99th = scipy.percentile(myarray, 99)
        mymax = scipy.amax(myarray)
        print('%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
            start, len(samplevalues), mymin, myavg, mymedian,
            my90th, my95th, my99th, mymax))
        # advance to next interval
        start += ctx.interval
        end += ctx.interval
def test_random(self):
    self.geo.models.add(propname='throat.seed',
                        model=OpenPNM.Geometry.models.throat_seed.random,
                        seed=0,
                        num_range=[0.1, 2])
    assert sp.amax(self.geo['throat.seed']) > 1.9
    assert sp.amin(self.geo['throat.seed']) > 0.1
def test_random_with_range(self):
    mod = gm.throat_misc.random
    self.geo.models.add(model=mod,
                        propname='throat.seed',
                        num_range=[0.1, 0.9])
    assert sp.amax(self.geo['throat.seed']) <= 0.9
    assert sp.amin(self.geo['throat.seed']) >= 0.1
def GetStat(filename, Nsamp=1):
    hfile = []
    if type(filename) == list:
        for i, f in enumerate(filename):
            hfile.append(h5py.File(f, 'r'))
    elif type(filename) == str:
        hfile.append(h5py.File(filename, 'r'))
        filename = [filename]
    stats = []
    datapath = []
    for ih, h in enumerate(hfile):
        for r in h:
            for d in h[r]:
                try:
                    stats.append(int(h[r][d].attrs['statistics'][0]))
                except KeyError as err:
                    stats.append(1)
                datapath.append((filename[ih], "/{0}/{1}".format(r, d)))
    bunches, args = vln.bunch(stats, Nsamp, indices=True)
    addstat = sc.array([sum(bunches[i]) for i in range(Nsamp)])
    print("Average statistics of {0} (min: {1}, max: {2})"
          .format(sc.mean(addstat), sc.amin(addstat), sc.amax(addstat)))
    for f in hfile:
        f.close()
    return datapath, args
def _do_one_outer_iteration(self, **kwargs):
    r"""
    One iteration of an outer iteration loop for an algorithm
    (e.g. time or parametric study)
    """
    # Checking for the necessary values in Picard algorithm
    nan_tol = sp.isnan(self['pore.source_tol'])
    nan_max = sp.isnan(self['pore.source_maxiter'])
    self._tol_for_all = sp.amin(self['pore.source_tol'][~nan_tol])
    self._maxiter_for_all = sp.amax(self['pore.source_maxiter'][~nan_max])
    if self._guess is None:
        self._guess = sp.zeros(self._coeff_dimension)
    t = 1
    step = 0
    # The main Picard loop
    while t > self._tol_for_all and step <= self._maxiter_for_all:
        X, t, A, b = self._do_inner_iteration_stage(guess=self._guess,
                                                    **kwargs)
        logger.info('tol for Picard source_algorithm in step ' + str(step) +
                    ' : ' + str(t))
        self._guess = X
        step += 1
    # Check for divergence
    self._steps = step
    if t >= self._tol_for_all and step > self._maxiter_for_all:
        raise Exception('Iterative algorithm for the source term reached '
                        'the maxiter: ' + str(self._maxiter_for_all) +
                        ' without achieving tol: ' + str(self._tol_for_all))
    logger.info('Picard algorithm for source term converged!')
    self.A = A
    self.b = b
    self._tol_reached = t
    return X
def __init__(self, mat, cmap=None, pixelspervalue=20, minvalue=None,
             maxvalue=None):
    """ Make a colormap image of a matrix or sequence of Matrix/Connection
    objects

    :key mat: the matrix to be used for the colormap.
    """
    if isinstance(mat, (ParameterContainer, Connection)):
        mat = reshape(mat.params, (mat.outdim, mat.indim))
    if not isinstance(mat, ndarray):
        raise ValueError("Don't know how to display a ColorMap for a "
                         "matrix of type {}".format(type(mat)))
    if minvalue is None:
        minvalue = amin(mat)
    if maxvalue is None:
        maxvalue = amax(mat)
    if isinstance(cmap, basestring) and cmap.strip():
        cmap = getattr(cm, cmap.lower().strip())
    if not cmap:
        cmap = cm.hot
    figsize = (array(mat.shape) / 100. * pixelspervalue)[::-1]
    self.fig = figure(figsize=figsize)
    axes([0, 0, 1, 1])  # Make the plot occupy the whole canvas
    axis('off')
    self.fig.set_size_inches(figsize)
    imshow(mat, cmap=cmap, clim=(minvalue, maxvalue), interpolation='nearest')
def neighbor(geometry, pore_prop='pore.seed', mode='min', **kwargs):
    r"""
    Adopt a value based on the values in neighboring pores

    Parameters
    ----------
    geometry : OpenPNM Geometry Object
        The object containing the ``pore_prop`` to be used.

    pore_prop : string
        The dictionary key to the array containing the pore property to be
        used in the calculation.  Default is 'pore.seed'.

    mode : string
        Controls how the throat property is calculated.  Options are 'min',
        'max' and 'mean'.
    """
    network = geometry._net
    throats = network.throats(geometry.name)
    P12 = network.find_connected_pores(throats)
    pvalues = network[pore_prop][P12]
    if mode == 'min':
        value = _sp.amin(pvalues, axis=1)
    if mode == 'max':
        value = _sp.amax(pvalues, axis=1)
    if mode == 'mean':
        value = _sp.mean(pvalues, axis=1)
    return value
def plotDigit(self, adigit, nvals=False):
    """Plots an image specified by adigit.  If nvals is true, then it uses a
    colormap to distinguish negative and positive values, as opposed to
    plotting a monochrome image."""
    # Set max/min of scale equal to +/- of largest value found in adigit
    scale = max(abs(scipy.amax(adigit)), abs(scipy.amin(adigit)))
    # Reshape adigit to square image
    dims = int(math.sqrt(len(adigit)))
    adigit = adigit.reshape(dims, dims)
    # Plot using interpolation
    if nvals:
        # show image in red-blue colorscale
        plt.imshow(adigit, cmap='RdBu_r', vmin=-scale, vmax=scale,
                   interpolation='nearest')
    # Plot grey if nvals is false
    else:
        plt.imshow(adigit, cmap='Greys_r')  # show image in greyscale
    # Add colorbar and show plot
    plt.colorbar()
    plt.show()
def test_generic(self):
    import OpenPNM.Geometry.models.pore_diameter as mods
    func = spst.gamma(a=2, loc=0.001, scale=0.0001)
    self.geo.models.add(propname="throat.diameter",
                        model=mods.generic,
                        func=func,
                        seeds="throat.seed")
    assert sp.amin(self.geo["throat.diameter"]) > 0.001
    del self.geo["throat.diameter"]
def averageLogDistribution(values, log_step=0.2, first_point=None,
                           last_point=None):
    """
    Calculates the average of <values> vs. the x variable on a log scale

    Parameters:
    ---------------
    values : dict or ndarray
        Either a dictionary where the keys are the xValues and each element
        contains an array-like sequence of data, e.g.
        {1: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
         2: array([1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2]), ...}
        or a two-column array of xValues and yValues, to be rearranged as
        above

    Returns:
        center point of the bin, average value within the bin
    """
    # Check the list of Values
    if not checkIfVoid(values):
        print("Error")
    if isinstance(values, dict):
        xValues = np.asarray(list(values.keys()))
        yValues = np.asarray(list(values.values()))
    elif isinstance(values, np.ndarray):
        xValues = np.unique(values[:, 0])
        yValues = []
        for xVal in xValues:
            index = values[:, 0] == xVal
            yValues.append(values[index, 1])
        yValues = scipy.array(yValues)
    else:
        print("Values shape not recognized")
        return
    if not first_point:
        first_point = scipy.amin(xValues) * 0.99
    if not last_point:
        last_point = scipy.amax(xValues) * 1.01
    xbins, bins = getLogBins(first_point, last_point, log_step)
    yAverage = []
    for i, j in zip(bins[:-1], bins[1:]):
        q1, q2 = np.greater_equal(xValues, i), np.less(xValues, j)
        q = np.logical_and(q1, q2)
        if sum(q) == 0:
            averageValue = np.NaN
        else:
            allElements = [val for val in itertools.chain(*yValues[q])]
            averageValue = sum(allElements) / float(len(allElements))
            # print averageValue, allElements
        yAverage.append(averageValue)
    yAverage = np.asanyarray(yAverage)
    # Check if there are NaN values
    iNan = np.isnan(yAverage)
    x = xbins[~iNan]
    y = yAverage[~iNan]
    return x, y
def scale(x, M=None, m=None, REVERSE=None):
    """ Function that standardizes the data

        Input:
            x: the data
            M: the Max vector
            m: the Min vector
        Output:
            x: the standardized data
            M: the Max vector
            m: the Min vector
    """
    if not sp.issubdtype(x.dtype, float):
        do_convert = 1
    else:
        do_convert = 0
    if REVERSE is None:
        if M is None:
            M = sp.amax(x, axis=0)
            m = sp.amin(x, axis=0)
            if do_convert:
                xs = 2 * (x.astype("float") - m) / (M - m) - 1
            else:
                xs = 2 * (x - m) / (M - m) - 1
            return xs, M, m
        else:
            if do_convert:
                xs = 2 * (x.astype("float") - m) / (M - m) - 1
            else:
                xs = 2 * (x - m) / (M - m) - 1
            return xs
    else:
        return (1 + x) / 2 * (M - m) + m
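# Usage sketch (not from the original source): the forward call rescales each
# column of an array to [-1, 1] and returns the per-column extrema, while the
# REVERSE call maps scaled data back.  The small array below is illustrative.
def _demo_scale_roundtrip():
    X = sp.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]])
    Xs, M, m = scale(X)                          # columns now span [-1, 1]
    X_back = scale(Xs, M=M, m=m, REVERSE=True)   # recovers the original X
    return Xs, X_back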
def test_late_pore_and_throat_filling():
    phys.models.add(propname='pore.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_pore_filling,
                    Pc=0,
                    Swp_star=0.2,
                    eta=1)
    mod = OpenPNM.Physics.models.multiphase.late_throat_filling
    phys.models.add(propname='throat.fractional_filling',
                    model=mod,
                    Pc=0,
                    Swp_star=0.2,
                    eta=1)
    phys.regenerate()
    drainage.setup(invading_phase=water, defending_phase=air,
                   pore_filling='pore.fractional_filling',
                   throat_filling='throat.fractional_filling')
    drainage.set_inlets(pores=pn.pores('boundary_top'))
    drainage.run()
    data = drainage.get_drainage_data()
    assert sp.amin(data['invading_phase_saturation']) == 0.0
    assert sp.amax(data['invading_phase_saturation']) < 1.0
    drainage.return_results(Pc=5000)
    assert 'pore.occupancy' in water.keys()
    assert 'throat.occupancy' in water.keys()
    assert 'pore.partial_occupancy' in water.keys()
    assert 'throat.partial_occupancy' in water.keys()
def _add_labels(self):
    pind = self.get_pore_indices('all')
    Tn = self.find_neighbor_throats(pnums=pind, flatten=False)
    Tmax = sp.amax(self.num_neighbors(pnums=pind, flatten=False))
    for i in sp.arange(0, sp.shape(Tn)[0]):
        if sp.shape(Tn[i])[0] < Tmax:
            self.set_pore_info(label='surface', locations=i)
        else:
            self.set_pore_info(label='internal', locations=i)
    coords = self.get_pore_data(prop='coords')
    self.set_pore_info(label='left',
                       locations=coords[:, 0] <= (sp.amin(coords[:, 0])))
    self.set_pore_info(label='right',
                       locations=coords[:, 0] >= (sp.amax(coords[:, 0])))
    self.set_pore_info(label='front',
                       locations=coords[:, 1] <= (sp.amin(coords[:, 1])))
    self.set_pore_info(label='back',
                       locations=coords[:, 1] >= (sp.amax(coords[:, 1])))
    self.set_pore_info(label='bottom',
                       locations=coords[:, 2] <= (sp.amin(coords[:, 2])))
    self.set_pore_info(label='top',
                       locations=coords[:, 2] >= (sp.amax(coords[:, 2])))
def test_residual_and_lpf():
    phys.models.add(propname='pore.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_pore_filling,
                    Pc=0,
                    Swp_star=0.2,
                    eta=1)
    phys.models.add(propname='throat.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_throat_filling,
                    Pc=0,
                    Swp_star=0.2,
                    eta=1)
    phys.regenerate()
    drainage.setup(invading_phase=water, defending_phase=air,
                   pore_filling='pore.fractional_filling',
                   throat_filling='throat.fractional_filling')
    drainage.set_inlets(pores=pn.pores('boundary_top'))
    resPs = pn.pores('internal')[
        sp.random.random(len(pn.pores('internal'))) < 0.1]
    resTs = pn.throats('internal')[
        sp.random.random(len(pn.throats('internal'))) < 0.1]
    drainage.set_residual(pores=resPs, throats=resTs)
    drainage.run()
    drainage.return_results(Pc=5000)
    data = drainage.get_drainage_data()
    assert sp.all(water["pore.partial_occupancy"][resPs] == 1.0)
    assert sp.all(water["throat.partial_occupancy"][resTs] == 1.0)
    assert sp.amin(data['invading_phase_saturation']) > 0.0
    assert sp.amax(data['invading_phase_saturation']) < 1.0
    assert sp.all(water["pore.occupancy"] + air["pore.occupancy"] == 1.0)
    total_pp = water["pore.partial_occupancy"] + air["pore.partial_occupancy"]
    assert sp.all(total_pp == 1.0)
    assert sp.all(water["throat.occupancy"] + air["throat.occupancy"] == 1.0)
    total_pt = water["throat.partial_occupancy"] + \
        air["throat.partial_occupancy"]
    assert sp.all(total_pt == 1.0)
def main(database):
    # Commits per committer limited to the 30 first with the highest
    # accumulated activity
    query = "select count(*) from scmlog group by committer_id order by count(*) desc limit 40"
    # Connecting to the database and retrieving data
    connector = connect(database)
    results = int(connector.execute(query))
    if results > 0:
        results_aux = connector.fetchall()
    else:
        print("Error when retrieving data")
        return
    # Moving data to a list
    commits = []
    for commit in results_aux[5:]:
        # for commits in results_aux:
        commits.append(int(commit[0]))
    # Calculating basic statistics
    print("max: " + str(sp.amax(commits)))
    print("min: " + str(sp.amin(commits)))
    print("mean: " + str(sp.mean(commits)))
    print("median: " + str(sp.median(commits)))
    print("std: " + str(sp.std(commits)))
    print(".25 quartile: " + str(sp.percentile(commits, 25)))
    print(".50 quartile: " + str(sp.percentile(commits, 50)))
    print(".75 quartile: " + str(sp.percentile(commits, 75)))
def main(database):
    # Commits per committer limited to the 30 first with the highest
    # accumulated activity
    query = "select count(*) from scmlog group by committer_id order by count(*) desc limit 40"
    # Connecting to the database and retrieving data
    connector = connect(database)
    results = int(connector.execute(query))
    if results > 0:
        results_aux = connector.fetchall()
    else:
        print("Error when retrieving data")
        return
    # Moving data to a list
    commits = []
    for commit in results_aux[5:]:
        # for commits in results_aux:
        commits.append(int(commit[0]))
    # Calculating basic statistics
    print("max: " + str(sp.amax(commits)))
    print("min: " + str(sp.amin(commits)))
    print("mean: " + str(sp.mean(commits)))
    print("median: " + str(sp.median(commits)))
    print("std: " + str(sp.std(commits)))
    print(".25 quartile: " + str(sp.percentile(commits, 25)))
    print(".50 quartile: " + str(sp.percentile(commits, 50)))
    print(".75 quartile: " + str(sp.percentile(commits, 75)))
def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
    r'''
    Parameters
    ----------
    npts : int (default = 25)
        The number of pressure points to apply.  The list of pressures is
        logarithmically spaced between the lowest and highest throat entry
        pressures in the network.

    inv_points : array_like, optional
        A list of specific pressure point(s) to apply.
    '''
    if 'inlets' in kwargs.keys():
        logger.info('Inlets received, passing to set_inlets')
        self.set_inlets(pores=kwargs['inlets'])
    if 'outlets' in kwargs.keys():
        logger.info('Outlets received, passing to set_outlets')
        self.set_outlets(pores=kwargs['outlets'])
    self._AL = access_limited
    if inv_points is None:
        logger.info('Generating list of invasion pressures')
        min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
        max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
        inv_points = sp.logspace(sp.log10(min_p), sp.log10(max_p), npts)
    self._npts = sp.size(inv_points)
    # Execute calculation
    self._do_outer_iteration_stage(inv_points)
def test_calculates_frequencies(self):
    self.Data.calc_freq()
    self.assertTrue(hasattr(self.Data, 'freq'))
    self.assertEqual(len(self.Data.freq), self.nfreq)
    self.assertAlmostEqual(self.Data.field['BANDWID'],
                           sp.amax(self.Data.freq) - sp.amin(self.Data.freq),
                           -5)
def neighbor(geometry, throat_prop='throat.seed', mode='min', **kwargs):
    r"""
    Adopt a value from the values found in neighboring throats

    Parameters
    ----------
    geometry : OpenPNM Geometry Object
        The object containing the ``throat_prop`` to be used.

    throat_prop : string
        The dictionary key of the array containing the throat property to be
        used in the calculation.  The default is 'throat.seed'.

    mode : string
        Controls how the pore property is calculated.  Options are 'min',
        'max' and 'mean'.
    """
    network = geometry._net
    Ps = geometry.pores()
    data = geometry[throat_prop]
    neighborTs = network.find_neighbor_throats(pores=Ps,
                                               flatten=False,
                                               mode='intersection')
    values = _sp.ones((_sp.shape(Ps)[0], )) * _sp.nan
    if mode == 'min':
        for pore in Ps:
            values[pore] = _sp.amin(data[neighborTs[pore]])
    if mode == 'max':
        for pore in Ps:
            values[pore] = _sp.amax(data[neighborTs[pore]])
    if mode == 'mean':
        for pore in Ps:
            values[pore] = _sp.mean(data[neighborTs[pore]])
    return values
def __init__(self, mat, cmap=None, pixelspervalue=20, minvalue=None,
             maxvalue=None):
    """ Make a colormap image of a matrix

    :key mat: the matrix to be used for the colormap.
    """
    if minvalue is None:
        minvalue = amin(mat)
    if maxvalue is None:
        maxvalue = amax(mat)
    if not cmap:
        cmap = cm.hot
    figsize = (array(mat.shape) / 100. * pixelspervalue)[::-1]
    self.fig = figure(figsize=figsize)
    axes([0, 0, 1, 1])  # Make the plot occupy the whole canvas
    axis('off')
    self.fig.set_size_inches(figsize)
    imshow(mat, cmap=cmap, clim=(minvalue, maxvalue), interpolation='nearest')