def initTau(self, pa, pb, qa, qb, qE):
    # Method to initialise the precision of the noise
    # Inputs:
    #  pa (float): 'a' parameter of the prior distribution
    #  pb (float): 'b' parameter of the prior distribution
    #  qa (float): initialisation of the 'a' parameter of the variational distribution
    #  qb (float): initialisation of the 'b' parameter of the variational distribution
    #  qE (float): initial expectation of the variational distribution
    tau_list = [None] * self.M
    for m in range(self.M):
        if self.lik[m] == "poisson":
            tmp = 0.25 + 0.17 * s.amax(self.data[m], axis=0)
            tau_list[m] = Constant_Node(dim=(self.D[m],), value=tmp)
        elif self.lik[m] == "bernoulli":
            # Seeger bound:
            # tau_list[m] = Constant_Node(dim=(self.D[m],), value=0.25)
            # Jaakkola bound:
            tau_list[m] = Tau_Jaakkola(dim=(self.N, self.D[m]), value=1.)
        elif self.lik[m] == "binomial":
            tmp = 0.25 * s.amax(self.data["tot"][m], axis=0)
            tau_list[m] = Constant_Node(dim=(self.D[m],), value=tmp)
        elif self.lik[m] == "gaussian":
            tau_list[m] = Tau_Node(dim=(self.D[m],), pa=pa[m], pb=pb[m],
                                   qa=qa[m], qb=qb[m], qE=qE[m])
    self.Tau = Multiview_Mixed_Node(self.M, *tau_list)
    self.nodes["Tau"] = self.Tau
def get_xs(self):
    """Retrieve xs information to populate self.xscurves.
    Also create and populate self.max_q_query with the maximum q value
    for querying interpolated rating curves."""
    prof = []
    disch = []
    stage = []
    self.max_q_query = 0
    self.max_disch = 0
    self.max_h_query = 0
    self.max_stage = 0
    # Retrieve xs information and populate self.xscurves
    stations = self.xs['RiverStation'].unique()
    # a = self.xs[self.xs['RiverStation'].isin(stations)]
    for i, rs in enumerate(stations):
        # stage-height values for RiverStation rs
        h = self.xs[self.xs['RiverStation'] == rs]['Stage_Height_ft_'].values
        # If the stage heights contain repeated zeroes (i.e. multiple xs
        # datasets exist for this RiverStation), ignore it and proceed to the next
        repeats = [item for item, count in Counter(h).items() if count > 1]
        if repeats:
            continue
        # Process xs data
        current = self.xs[self.xs['RiverStation'] == rs]
        prof.append(current['ProfileM'].unique()[0])  # xs location along reach
        disch.append(list(map(float, current['Discharge_cfs_'].values)))  # disch vals
        stage.append(list(map(float, current['Stage_Height_ft_'].values)))  # stage vals
        # Find max q value for querying interpolations and
        # max disch value for plotting the x-axis
        max_disch = int(scipy.amax(disch[-1]))
        if self.max_q_query == 0:
            self.max_q_query = max_disch
            self.max_disch = max_disch
        elif max_disch < self.max_q_query:
            self.max_q_query = max_disch
        elif max_disch > self.max_disch:
            self.max_disch = max_disch
        # Find max h value for querying interpolations and
        # max stage value for plotting the y-axis
        max_stage = int(scipy.amax(stage[-1]))
        if self.max_h_query == 0:
            self.max_h_query = max_stage
            self.max_stage = max_stage
        elif max_stage < self.max_h_query:
            self.max_h_query = max_stage
        elif max_stage > self.max_stage:
            self.max_stage = max_stage
    if len(disch) != 0:
        xs_profs = scipy.array(prof).astype(float)
        self.xs_profs = scipy.unique(xs_profs)  # remove repeats
        self.xs_disch = scipy.array(disch)
        self.xs_stage = scipy.array(stage)
    return 1
def plot(self):
    ex = self.ex
    hy = self.hy
    ngridx = self.ngridx
    nSteps = self.numSteps
    x = np.linspace(0, ngridx, ngridx)
    ymin1 = S.amin(ex)
    ymin2 = S.amin(hy)
    ymax1 = S.amax(ex)
    ymax2 = S.amax(hy)
    yminimum = min(ymin1, ymin2)
    ymaximum = max(ymax1, ymax2)
    title1 = 'Ex and Hy fields in a 1D FDTD simulation.'
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.set_xlabel('FDTD Cells', fontsize=12)
    ax1.plot(x, ex, 'tab:blue', label='Ex (Normalized)')
    ax1.set_xlim([0, ngridx])
    ax1.legend(loc='best', shadow=True, ncol=2)
    # ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), shadow=True, ncol=2)
    ax2 = fig.add_subplot(122)
    ax2.set_xlabel('FDTD Cells', fontsize=12)
    ax2.plot(x, hy, 'tab:red', label='Hy')
    ax2.set_xlim([0, ngridx])
    ax2.legend(loc='best', shadow=True, ncol=2)
    # ax2.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), shadow=True, ncol=2)
    plt.suptitle(title1, fontsize=20)
    plt.savefig('Figure.png')
    plt.show()
def integrate(self, t, u, v):
    # Integrate only one step in time (classic RK4).
    # Assumes the same grid spacing in x and y.
    maxu = amax(u)
    maxv = amax(v)
    maxVel = amax((maxu, maxv))
    dt = self.cflConstant * self.dx / maxVel
    print('Time step selected: ', dt)
    k1 = self.dudt(t, u, v)
    l1 = self.dvdt(t, u, v)
    k2 = self.dudt(t + dt/2, u + (dt*k1/2), v + (dt*l1/2))
    l2 = self.dvdt(t + dt/2, u + (dt*k1/2), v + (dt*l1/2))
    k3 = self.dudt(t + dt/2, u + (dt*k2/2), v + (dt*l2/2))
    l3 = self.dvdt(t + dt/2, u + (dt*k2/2), v + (dt*l2/2))
    k4 = self.dudt(t + dt, u + (dt*k3), v + (dt*l3))
    l4 = self.dvdt(t + dt, u + (dt*k3), v + (dt*l3))
    k = (k1 + 2*k2 + 2*k3 + k4) / 6
    l = (l1 + 2*l2 + 2*l3 + l4) / 6
    un = u + dt*k
    vn = v + dt*l  # was dt*k, which advanced v with u's increment
    tn = t + dt
    return (tn, un, vn)
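# A minimal, self-contained sketch of the same RK4 update applied to a scalar
# ODE du/dt = f(t, u); the function f and the fixed step dt here are
# illustrative assumptions, not part of the solver above.
import numpy as np

def rk4_step(f, t, u, dt):
    # Four slope evaluations, then the weighted average (1, 2, 2, 1)/6
    k1 = f(t, u)
    k2 = f(t + dt/2, u + dt*k1/2)
    k3 = f(t + dt/2, u + dt*k2/2)
    k4 = f(t + dt, u + dt*k3)
    return t + dt, u + dt * (k1 + 2*k2 + 2*k3 + k4) / 6

# Example: du/dt = -u has the exact solution exp(-t)
t, u = 0.0, 1.0
for _ in range(10):
    t, u = rk4_step(lambda t, u: -u, t, u, 0.1)
print(u, np.exp(-1.0))  # RK4 result vs exact value; agreement is 4th-order in dt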
def line_segment(X0, X1):
    r"""
    Calculate the voxel coordinates of a straight line between the two given
    end points

    Parameters
    ----------
    X0 and X1 : array_like
        The [x, y] or [x, y, z] coordinates of the start and end points of
        the line.

    Returns
    -------
    coords : list of lists
        A list of lists containing the X, Y, and Z coordinates of all voxels
        that should be drawn between the start and end points to create a
        solid line.
    """
    X0 = sp.around(X0).astype(int)
    X1 = sp.around(X1).astype(int)
    if len(X0) == 3:
        L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]], [X1[2]-X0[2]]])) + 1
        x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
        y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
        z = sp.rint(sp.linspace(X0[2], X1[2], L)).astype(int)
        return [x, y, z]
    else:
        L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]]])) + 1
        x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
        y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
        return [x, y]
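# A minimal usage sketch for line_segment, assuming scipy is imported as sp
# (as the function above expects); the endpoints are made-up values. The
# number of voxels equals the largest axis-aligned extent plus one, so the
# returned indices paint a gap-free line into a boolean image:
x, y, z = line_segment([0, 0, 0], [5, 3, 2])
im = sp.zeros([6, 6, 6], dtype=bool)
im[x, y, z] = True  # marks 6 voxels from (0, 0, 0) to (5, 3, 2)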
def plot_drainage_curve(self, data=None,
                        x_values='capillary_pressure',
                        y_values='invading_phase_saturation'):
    r"""
    Plot the drainage curve as the non-wetting phase saturation vs the
    applied capillary pressure.

    Parameters
    ----------
    data : dictionary of arrays
        This dictionary should be obtained from the ``get_drainage_data``
        method.
    x_values and y_values : string
        The dictionary keys of the arrays containing the x-values and
        y-values
    """
    # Begin creating nicely formatted plot
    if data is None:
        data = self.get_drainage_data()
    xdata = data[x_values]
    ydata = data[y_values]
    fig = plt.figure()
    plt.plot(xdata, ydata, 'ko-')
    plt.ylabel(y_values)
    plt.xlabel(x_values)
    plt.grid(True)
    if sp.amax(xdata) <= 1:
        plt.xlim(xmin=0, xmax=1)
    if sp.amax(ydata) <= 1:
        plt.ylim(ymin=0, ymax=1)
    return fig
def merged_event_breakpoint_stats(mev):
    bp1d, bp2d = [], []
    bend1 = bend2 = None
    reads = []
    quals = []
    for ev in mev.events:
        bp1d.append(ev.bp1.pos)
        bp2d.append(ev.bp2.pos)
        reads.append(ev.reads)
        quals.append(ev.qual)
        bend1 = ev.bp1.breakend
        bend2 = ev.bp2.breakend
    bp1d = np.array(bp1d)
    bp2d = np.array(bp2d)
    if bend1 == "+":
        bp1limit = scipy.amin(bp1d)
    else:
        bp1limit = scipy.amax(bp1d)
    if bend2 == "+":
        bp2limit = scipy.amin(bp2d)
    else:
        bp2limit = scipy.amax(bp2d)
    reads_median = int(scipy.median(reads))
    qual_median = int(scipy.median(quals))
    return (int(bp1limit), int(bp2limit), int(bp2limit - bp1limit),
            scipy.mean(bp1d), scipy.amax(bp1d) - scipy.amin(bp1d), scipy.std(bp1d),
            scipy.mean(bp2d), scipy.amax(bp2d) - scipy.amin(bp2d), scipy.std(bp2d),
            reads_median, qual_median)
def get_xs(self):
    """Retrieve xs information to populate self.xscurves.
    Also create and populate self.max_q_query with the maximum q value
    for querying interpolated rating curves."""
    xscurves = []
    self.max_q_query = 0
    self.max_disch = 0
    self.max_h_query = 0
    self.max_stage = 0
    # Retrieve xs information and populate self.xscurves
    stations = self.xs['RiverStation'].unique()
    # a = self.xs[self.xs['RiverStation'].isin(stations)]
    for i, rs in enumerate(stations):
        # stage-height values for RiverStation rs
        a = self.xs[self.xs['RiverStation'] == rs]['Stage_Height_ft_'].values
        # If the stage heights contain repeated zeroes (meaning multiple xs
        # datasets exist for this RiverStation), ignore it and proceed to the next
        s = [item for item, count in Counter(a).items() if count > 1]
        if s:
            continue
        # Process xs data
        current = self.xs[self.xs['RiverStation'] == rs]
        prof = current['ProfileM'].unique()[0]  # location of xs relative to river reach
        disch = list(map(float, current['Discharge_cfs_'].values))  # xs disch vals
        stage = list(map(float, current['Stage_Height_ft_'].values))  # xs stage vals
        # Find max q value for querying interpolations and
        # max disch value for plotting the x-axis
        max_disch = int(scipy.amax(disch))
        if self.max_q_query == 0:
            self.max_q_query = max_disch
            self.max_disch = max_disch
        elif max_disch < self.max_q_query:
            self.max_q_query = max_disch
        elif max_disch > self.max_disch:
            self.max_disch = max_disch
        # Find max h value for querying interpolations and
        # max stage value for plotting the y-axis
        max_stage = int(scipy.amax(stage))
        if self.max_h_query == 0:
            self.max_h_query = max_stage
            self.max_stage = max_stage
        elif max_stage < self.max_h_query:
            self.max_h_query = max_stage
        elif max_stage > self.max_stage:
            self.max_stage = max_stage
        # pack xs profile name with disch & stage vals
        pack = (prof, list(zip(disch, stage)))
        xscurves.append(pack)
    if len(xscurves) != 0:
        self.xscurves = xscurves
    return 1
def amalgamate_throat_data(self, fluids='all'):
    r"""
    Returns a dictionary containing ALL throat data from all fluids, physics
    and geometry objects
    """
    self._throat_data_amalgamate = {}
    if type(fluids) != sp.ndarray and fluids == 'all':
        fluids = self._fluids
    elif type(fluids) != sp.ndarray:
        fluids = sp.array(fluids, ndmin=1)
    # Add fluid data
    for item in fluids:
        if type(item) == sp.str_:
            item = self.find_object_by_name(item)
        for key in item._throat_data.keys():
            if sp.amax(item._throat_data[key]) < sp.inf:
                dict_name = item.name + '_throat_' + key
                self._throat_data_amalgamate.update({dict_name: item._throat_data[key]})
        for key in item._throat_info.keys():
            if sp.amax(item._throat_info[key]) < sp.inf:
                dict_name = item.name + '_throat_label_' + key
                self._throat_data_amalgamate.update({dict_name: item._throat_info[key]})
    # Add geometry data
    for key in self._throat_data.keys():
        if sp.amax(self._throat_data[key]) < sp.inf:
            dict_name = 'throat' + '_' + key
            self._throat_data_amalgamate.update({dict_name: self._throat_data[key]})
    for key in self._throat_info.keys():
        if sp.amax(self._throat_info[key]) < sp.inf:
            dict_name = 'throat' + '_label_' + key
            self._throat_data_amalgamate.update({dict_name: self._throat_info[key]})
    return self._throat_data_amalgamate
def test_distance_center():
    shape = sp.array([7, 5, 9])
    spacing = sp.array([2, 1, 0.5])
    pn = OpenPNM.Network.Cubic(shape=shape, spacing=spacing)
    sx, sy, sz = spacing
    center_coord = sp.around(topology.find_centroid(pn['pore.coords']), 7)
    cx, cy, cz = center_coord
    coords = pn['pore.coords']
    x, y, z = coords.T
    coords = sp.concatenate((coords, center_coord.reshape((1, 3))))
    pn['pore.center'] = False
    mask1 = (x <= (cx + sx/2)) * (y <= (cy + sy/2)) * (z <= (cz + sz/2))
    mask2 = (x >= (cx - sx/2)) * (y >= (cy - sy/2)) * (z >= (cz - sz/2))
    center_pores_mask = pn.Ps[mask1 * mask2]
    pn['pore.center'][center_pores_mask] = True
    center = pn.Ps[pn['pore.center']]
    L1 = sp.amax(topology.find_pores_distance(network=pn, pores1=center, pores2=pn.Ps))
    L2 = sp.amax(topology.find_pores_distance(network=pn, pores1=pn.Ps, pores2=pn.Ps))
    l1 = ((shape[0] - 1) * sx)**2
    l2 = ((shape[1] - 1) * sy)**2
    l3 = ((shape[2] - 1) * sz)**2
    L3 = sp.sqrt(l1 + l2 + l3)
    assert sp.around(L1 * 2, 7) == sp.around(L2, 7)
    assert sp.around(L2, 7) == sp.around(L3, 7)
def loadfile(self, skiprows=2):
    self.d.efld = np.loadtxt(self.efile, skiprows=skiprows)
    self.d.hfld = np.loadtxt(self.hfile, skiprows=skiprows)
    erows, ecols = np.shape(self.d.efld)
    hrows, hcols = np.shape(self.d.hfld)
    if (erows != hrows) or (ecols != hcols):
        raise TypeError('Input file size of E and H is inconsistent.')
    exl = np.unique(self.d.efld[:, cx])
    eyl = np.unique(self.d.efld[:, cy])
    ezl = np.unique(self.d.efld[:, cz])
    hxl = np.unique(self.d.hfld[:, cx])
    hyl = np.unique(self.d.hfld[:, cy])
    hzl = np.unique(self.d.hfld[:, cz])
    # The y check was repeated and z was never compared; check all three axes
    if any(exl != hxl) or any(eyl != hyl) or any(ezl != hzl):
        raise TypeError('Input data grid of E and H is inconsistent.')
    self.d.xmin = np.amin(exl)
    self.d.xmax = np.amax(exl)
    self.d.ymin = np.amin(eyl)
    self.d.ymax = np.amax(eyl)
    self.d.zmin = np.amin(ezl)
    self.d.zmax = np.amax(ezl)
    self.d.dsize = erows
    self.d.xsize = len(exl) - 1
    self.d.ysize = len(eyl) - 1
    self.d.zsize = len(ezl) - 1
    self.d.dx = (self.d.xmax - self.d.xmin) / float(self.d.xsize)
    self.d.dy = (self.d.ymax - self.d.ymin) / float(self.d.ysize)
    self.d.dz = (self.d.zmax - self.d.zmin) / float(self.d.zsize)
    self.zyxsort()
def __or__(self, other):
    priority_normalize = 'first'
    import copy
    new = self.deepcopy()
    new.params.update(copy.deepcopy(other.params))
    for key in ['los', 'ndim']:
        assert getattr(self, key) == getattr(other, key)
    if scipy.amax(self.s) <= scipy.amax(other.s):
        first = self
        second = other
    else:
        first = other
        second = self
    #assert (first.index(first.zero) == 0) and (second.index(second.zero) == 0)
    if self.ndim == 1:
        firsts = [first.s]
        seconds = [second.s]
    else:
        firsts = first.s
        seconds = second.s
    firstshape = scipy.asarray(first.window.shape[1:])
    firstipoles = [ipole for ipole, pole in enumerate(first.poles) if pole in second.poles]
    secondipoles = [second.poles.index(first.poles[ipole]) for ipole in firstipoles]
    new.poles = [first.poles[ipole] for ipole in firstipoles]
    secondmask = [s2 >= s1[-1] for s1, s2 in zip(firsts, seconds)]
    new.s = [scipy.concatenate([s1, s2[mask2]], axis=-1)
             for s1, s2, mask2 in zip(firsts, seconds, secondmask)]
    if self.ndim == 1:
        new.s = new.s[0]
    overlaps = [s2[~mask2] for s2, mask2 in zip(seconds, secondmask)]
    ratio = first(overlaps, first.zero, kind_interpol='linear') \
        / second(overlaps, second.zero, kind_interpol='linear')
    #norm = scipy.mean(ratio)
    slices = (slice(1, None),) * self.ndim
    norm = scipy.mean(ratio[slices])  # do not take the (imprecise or padded) first point

    def normalize_first_second(first, second, norm, priority_normalize):
        firstnorm = secondnorm = 1.
        if priority_normalize == 'second':
            norm = 1. / norm
        if getattr(first, 'norm', None) is not None:
            new.norm = first.norm
        if getattr(second, 'norm', None) is None:
            secondnorm = norm
            self.logger.info('Rescaling {} part of the window function by {:.3f}.'
                             .format(priority_normalize, secondnorm))
        elif getattr(first, 'norm', None) is None:
            firstnorm = 1 / norm
            new.norm = second.norm
            self.logger.info('Rescaling {} part of the window function by {:.3f}.'
                             .format('second' if priority_normalize == 'first' else 'first', firstnorm))
        else:
            self.logger.info('No rescaling, as both window functions are normalized '
                             '(first over second ratio found: {:.3f}).'.format(norm))
        return firstnorm, secondnorm

    if priority_normalize == 'first':
        firstnorm, secondnorm = normalize_first_second(first, second, norm, priority_normalize)
    else:
        secondnorm, firstnorm = normalize_first_second(second, first, norm, priority_normalize)
    new.window = secondnorm * second(new.s, new.poles, kind_interpol='linear')
    slices = (slice(None),) + tuple(slice(0, end) for end in firstshape)
    new.window[slices] = firstnorm * first.window[...]
    if hasattr(second, 'error'):
        new.error = secondnorm * second.poisson_error(new.s, kind_interpol='linear')
    if hasattr(first, 'error'):
        new.error[slices[1:]] = firstnorm * first.error[...]
    return new
def test_get_coords(self):
    f = OpenPNM.Network.models.pore_topology.adjust_spacing
    self.net.models.add(propname='pore.coords2', model=f, new_spacing=2)
    assert 'pore.coords2' in self.net.keys()
    a = sp.amax(self.net['pore.coords'])
    assert sp.amax(self.net['pore.coords2']) == 2*a
def porosity_profile(network, fig=None, axis=2):
    r'''
    Compute and plot the porosity profile in all three dimensions

    Parameters
    ----------
    network : OpenPNM Network object
    axis : integer type
        0 for x-axis, 1 for y-axis, 2 for z-axis

    Notes
    -----
    The area of the porous medium at any position is calculated from the
    maximum pore coordinates in each direction.
    '''
    if fig is None:
        fig = _plt.figure()
    # Equivalent pore radius from volume: rp = (3V/(4*pi))**(1/3), with pi ~ 22/7
    L_x = _sp.amax(network['pore.coords'][:, 0]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_y = _sp.amax(network['pore.coords'][:, 1]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_z = _sp.amax(network['pore.coords'][:, 2]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    if axis == 0:  # 'is 0' compared identity, not value
        xlab = 'x-direction'
        area = L_y*L_z
    elif axis == 1:
        xlab = 'y-direction'
        area = L_x*L_z
    else:
        axis = 2
        xlab = 'z-direction'
        area = L_x*L_y
    n_max = _sp.amax(network['pore.coords'][:, axis]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    steps = _sp.linspace(0, n_max, 100, endpoint=True)
    vals = _sp.zeros_like(steps)
    p_area = _sp.zeros_like(steps)
    t_area = _sp.zeros_like(steps)
    rp = ((21/88.0)*network['pore.volume'])**(1/3.0)
    p_upper = network['pore.coords'][:, axis] + rp
    p_lower = network['pore.coords'][:, axis] - rp
    TC1 = network['throat.conns'][:, 0]
    TC2 = network['throat.conns'][:, 1]
    t_upper = network['pore.coords'][:, axis][TC1]
    t_lower = network['pore.coords'][:, axis][TC2]
    for i in range(0, len(steps)):
        p_temp = (p_upper > steps[i])*(p_lower < steps[i])
        t_temp = (t_upper > steps[i])*(t_lower < steps[i])
        p_area[i] = sum((22/7.0)*(rp[p_temp]**2 - (network['pore.coords'][:, axis][p_temp]-steps[i])**2))
        t_area[i] = sum(network['throat.area'][t_temp])
        vals[i] = (p_area[i]+t_area[i])/area
    yaxis = vals
    xaxis = steps/n_max
    _plt.plot(xaxis, yaxis, 'bo-')
    _plt.xlabel(xlab)
    _plt.ylabel('Porosity')
    fig.show()
def standardizeImage(im):
    # Scales the image down so that conf.imSize is the height, preserving aspect ratio
    im = array(im, 'float32')
    if im.shape[0] > conf.imSize:
        resize_factor = float(conf.imSize) / im.shape[0]  # don't remove trailing .0 to avoid integer division
        im = imresize(im, resize_factor)
    if amax(im) > 1.1:
        im = im / 255.0
    assert((amax(im) > 0.01) & (amax(im) <= 1))
    assert((amin(im) >= 0.00))
    return im
def standarizeImage(im):
    im = array(im, 'float32')
    if np.shape(im)[0] > 480:
        resize_factor = 480.0 / np.shape(im)[0]
        im = imresize(im, resize_factor)
    if amax(im) > 1.1:
        im = im / 255.0
    assert ((amax(im) > 0.01) & (amax(im) <= 1))
    assert ((amin(im) >= 0.00))
    return im
def standarizeImage(im):
    im = array(im, 'float32')
    if im.shape[0] > 480:
        resize_factor = 480.0 / im.shape[0]  # don't remove trailing .0 to avoid integer division
        im = imresize(im, resize_factor)
    if amax(im) > 1.1:
        im = im / 255.0
    assert((amax(im) > 0.01) & (amax(im) <= 1))
    assert((amin(im) >= 0.00))
    return im
def interpolate(self, canvas, status=None):
    # Clear the interpolated canvas
    canvas.interpolated = sp.zeros_like(canvas.fringes_image) - 1024.0
    if status is not None:
        status.set("Performing the interpolation", 70)
    else:
        print("Performing the interpolation")
    # Iterate over all the triangles in the triangulation
    for triangle in self.triangles:
        # Create a shortcut to the triangle's vertices
        co = triangle.vert_coordinates
        # Calculate a few constants for the Barycentric Coordinates
        # More info: https://codeplea.com/triangular-interpolation
        div = (co[1, 0] - co[2, 0]) * (co[0, 1] - co[2, 1]) + \
              (co[2, 1] - co[1, 1]) * (co[0, 0] - co[2, 0])
        a0 = (co[1, 0] - co[2, 0])
        a1 = (co[2, 1] - co[1, 1])
        a2 = (co[2, 0] - co[0, 0])
        a3 = (co[0, 1] - co[2, 1])
        # Calculate the bounds of a rectangle that fully encloses
        # the current triangle
        xmin = int(sp.amin(triangle.vert_coordinates[:, 1]))
        xmax = int(sp.amax(triangle.vert_coordinates[:, 1])) + 1
        ymin = int(sp.amin(triangle.vert_coordinates[:, 0]))
        ymax = int(sp.amax(triangle.vert_coordinates[:, 0])) + 1
        # Take out slices of the x and y arrays,
        # containing the points' coordinates
        x_slice = canvas.x[ymin:ymax, xmin:xmax]
        y_slice = canvas.y[ymin:ymax, xmin:xmax]
        # Use Barycentric Coordinates and the magic of numpy (scipy in this
        # case) to perform the calculations with the C backend, instead
        # of iterating on pixels with Python loops.
        # If you have not worked with numpy arrays before, dear reader,
        # the idea is that if x = [[0 1]
        #                          [2 3]],
        # then x*3+1 is a completely valid operation, returning
        #     [[1  4]
        #      [7 10]]
        # Basically, we can do maths on arrays as if they were variables.
        # Convenient, and really fast!
        w0 = (a0 * (x_slice - co[2, 1]) + a1 * (y_slice - co[2, 0])) / div
        w1 = (a2 * (x_slice - co[2, 1]) + a3 * (y_slice - co[2, 0])) / div
        w2 = sp.round_(1 - w0 - w1, 10)
        # Calculate the values for a rectangle enclosing our triangle
        # (named 'values' here; the original 'slice' shadowed the built-in)
        values = (self.values[triangle.vertices[0]] * w0 +
                  self.values[triangle.vertices[1]] * w1 +
                  self.values[triangle.vertices[2]] * w2)
        # Make a mask (so that we only touch the points
        # inside of the triangle).
        # In Barycentric Coordinates the points outside of the triangle
        # have at least one of the coefficients negative, so we use that
        mask = sp.logical_and(sp.logical_and(w0 >= 0, w1 >= 0), w2 >= 0)
        # Change the points in the actual canvas
        canvas.interpolated[ymin:ymax, xmin:xmax][mask] = values[mask]
    canvas.interpolation_done = True
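# A small self-contained check of the barycentric weights used above, on one
# made-up triangle and query point (scipy imported as sp to match the code):
import scipy as sp
co = sp.array([[0., 0.], [4., 0.], [0., 4.]])  # vertices as (y, x) rows
px, py = 1.0, 1.0                              # query point inside the triangle
div = (co[1, 0] - co[2, 0]) * (co[0, 1] - co[2, 1]) + \
      (co[2, 1] - co[1, 1]) * (co[0, 0] - co[2, 0])
w0 = ((co[1, 0] - co[2, 0]) * (px - co[2, 1]) + (co[2, 1] - co[1, 1]) * (py - co[2, 0])) / div
w1 = ((co[2, 0] - co[0, 0]) * (px - co[2, 1]) + (co[0, 1] - co[2, 1]) * (py - co[2, 0])) / div
w2 = 1 - w0 - w1
print(w0, w1, w2)  # 0.5, 0.25, 0.25: all in [0, 1] and summing to 1 for an interior point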
def __init__(self, spike_count_range, train_count_range, num_units_range,
             firing_rate=50 * pq.Hz):
    self.spike_count_range = spike_count_range
    self.train_count_range = train_count_range
    self.num_units_range = num_units_range
    self.num_trains_per_spike_count = \
        sp.amax(num_units_range) * sp.amax(train_count_range)
    self.trains = [
        [stg.gen_homogeneous_poisson(firing_rate, max_spikes=num_spikes)
         for i in range(self.num_trains_per_spike_count)]
        for num_spikes in spike_count_range]
def test_respects_refractory_period(self):
    refractory = 100 * pq.ms
    st = self.invoke_gen_func(
        self.highRate, max_spikes=1000, refractory=refractory)
    self.assertGreater(
        sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
        refractory.rescale(pq.s).magnitude)
    st = self.invoke_gen_func(
        self.highRate, t_stop=10 * pq.s, refractory=refractory)
    self.assertGreater(
        sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
        refractory.rescale(pq.s).magnitude)
def test_from_neighbor_throats_min(self):
    self.geo.pop('pore.seed', None)
    self.geo.models.pop('pore.seed', None)
    self.geo.models.pop('throat.seed', None)
    self.geo['throat.seed'] = sp.rand(self.net.Nt)
    self.geo.add_model(model=mods.from_neighbor_throats,
                       propname='pore.seed',
                       throat_prop='throat.seed',
                       mode='min')
    assert sp.all(sp.in1d(self.geo['pore.seed'], self.geo['throat.seed']))
    pmax = sp.amax(self.geo['pore.seed'])
    tmax = sp.amax(self.geo['throat.seed'])
    assert pmax <= tmax
def expectation_prop_inner(m0, V0, Y, Z, F, z, needed):
    # Expectation propagation on a multivariate Gaussian for soft inequality constraints.
    # m0, V0 are the mean vector and covariance before EP.
    # Y is the inequality value, Z is the sign (1 for geq, -1 for leq), F is the softness variance.
    # z is the number of EP rounds to run.
    # Returns mt, Vt: the value and variance for the observations created by EP.
    m0 = sp.array(m0).flatten()
    V0 = sp.array(V0)
    n = V0.shape[0]
    print("expectation propagation running on " + str(n) +
          " dimensions for " + str(z) + " loops:")
    mt = sp.zeros(n)
    Vt = sp.eye(n) * float(1e10)
    m = sp.empty(n)
    V = sp.empty([n, n])
    conv = sp.empty(z)
    for i in range(z):
        # compute the m, V given the EP observations
        m, V = gaussian_fusion(m0, mt, V0, Vt)
        mtprev = mt.copy()
        Vtprev = Vt.copy()
        for j in [k for k in range(n) if needed[k]]:
            print([i, j])
            # the cavity distribution at index j
            tmp = 1. / (Vt[j, j] - V[j, j])
            v_ = (V[j, j] * Vt[j, j]) * tmp
            m_ = tmp * (m[j] * Vt[j, j] - mt[j] * V[j, j])
            alpha = sp.sign(Z[j]) * (m_ - Y[j]) / (sp.sqrt(v_ + F[j]))
            pr = PhiR(alpha)
            if sp.isnan(pr):
                pr = -alpha
            beta = pr * (pr + alpha) / (v_ + F[j])
            kappa = sp.sign(Z[j]) * (pr + alpha) / (sp.sqrt(v_ + F[j]))
            #print [alpha,beta,kappa,pr]
            mt[j] = m_ + 1. / kappa
            #mt[j] = min(abs(mt[j]),1e5)*sp.sign(mt[j])
            Vt[j, j] = min(1e10, 1. / beta - v_)
        #print sp.amax(mtprev-mt)
        #print sp.amax(sp.diagonal(Vtprev)-sp.diagonal(Vt))
        # TODO: make this a ratio instead of an absolute delta
        delta = max(sp.amax(mtprev - mt),
                    sp.amax(sp.diagonal(Vtprev) - sp.diagonal(Vt)))
        conv[i] = delta
    print("EP finished with final max deltas " + str(conv[-3:]))
    V = V0.dot(spl.solve(V0 + Vt, Vt))
    m = V.dot((spl.solve(V0, m0) + spl.solve(Vt, mt)).T)
    return mt, Vt
def standardizeImage(im):
    # Scales image down to 640x480
    im = array(im, 'float32')
    if im.shape[0] > conf.imSize:
        resize_factor = float(conf.imSize) / im.shape[0]  # don't remove trailing .0 to avoid integer division
        im = imresize(im, resize_factor)
    if amax(im) > 1.1:
        im = im / 255.0
    assert((amax(im) > 0.01) & (amax(im) <= 1))
    assert((amin(im) >= 0.00))
    # r = 480.0 / im.shape[1]
    # dim = (480, int(im.shape[0] * r))
    # im = cv2.resize(im, dim, interpolation=cv2.INTER_AREA)
    return im
def test_neighbor_min(self):
    catch = self.geo.pop('pore.seed', None)
    catch = self.geo.models.pop('pore.seed', None)
    catch = self.geo.models.pop('throat.seed', None)
    mod = gm.pore_misc.neighbor
    self.geo['throat.seed'] = sp.rand(self.net.Nt)
    self.geo.models.add(model=mod, propname='pore.seed',
                        throat_prop='throat.seed', mode='min')
    assert sp.all(sp.in1d(self.geo['pore.seed'], self.geo['throat.seed']))
    pmax = sp.amax(self.geo['pore.seed'])
    tmax = sp.amax(self.geo['throat.seed'])
    assert pmax <= tmax
def SetTimeStep(CFL, space, fluid):
    # Hyperbolic (convective) limit; falls back to dx when the velocity field is all zero
    if space.u.any():
        dt_hyper = CFL / max(sci.amax(space.u) / space.dx,
                             sci.amax(space.v) / space.dy)
    else:
        dt_hyper = CFL * space.dx
    # Parabolic (diffusive) limits for momentum, temperature, and concentration
    dt_para = min(space.dx**2 / (2 * fluid.mu), space.dy**2 / (2 * fluid.mu))
    dt_temp = min(space.dx**2 / (2 * fluid.alpha), space.dy**2 / (2 * fluid.alpha))
    dt_conc = min(space.dx**2 / (2 * fluid.D), space.dy**2 / (2 * fluid.D))
    dt_min = min(dt_hyper, dt_para, dt_temp, dt_conc)
    space.dt = dt_min
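# A toy check of the CFL-style step selection above, using made-up numbers
# (numpy stands in for the 'sci' alias; values are purely illustrative):
import numpy as np
CFL, dx, dy = 0.5, 0.01, 0.01
u = np.array([0.0, 1.0, 2.0])
v = np.array([0.0, 0.5, 1.0])
dt_hyper = CFL / max(np.amax(u) / dx, np.amax(v) / dy)
print(dt_hyper)  # 0.5 / (2.0 / 0.01) = 0.0025: faster flow forces a smaller step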
def patch_color_labels(s, freq=[1], cmap='Paired', shuffle=True):
    '''Color vertices by the frequency of their labels.'''
    s.vColor = sp.zeros(s.vertices.shape)
    _, labels = sp.unique(s.labels, return_inverse=True)
    labels += 1
    colr = get_cmap(sp.amax(labels) + 1, cmap=cmap)
    s.vColor = s.vColor + 1
    perm1 = sp.mod(3511 * sp.arange(sp.amax(labels) + 1), sp.amax(labels) + 1)
    freq = sp.reshape(freq, (len(freq), 1))
    if shuffle:
        s.vColor = (1 - freq) + freq * sp.array(colr(perm1[labels])[:, :3])
    else:
        s.vColor = (1 - freq) + freq * sp.array(colr(labels)[:, :3])
    return s
def array_factor(number_of_elements, scan_angle, element_spacing, frequency,
                 theta, window_type, side_lobe_level):
    """
    Calculate the array factor for a linear array with the selected
    amplitude window.
    :param window_type: The string name of the window.
    :param side_lobe_level: The sidelobe level for Tschebyscheff window (dB).
    :param number_of_elements: The number of elements in the array.
    :param scan_angle: The angle to which the main beam is scanned (rad).
    :param element_spacing: The distance between elements.
    :param frequency: The operating frequency (Hz).
    :param theta: The angle at which to evaluate the array factor (rad).
    :return: The array factor as a function of angle.
    """
    # Calculate the wavenumber
    k = 2.0 * pi * frequency / c
    # Calculate the phase
    psi = k * element_spacing * (cos(theta) - cos(scan_angle))
    # Calculate the coefficients
    if window_type == 'Uniform':
        coefficients = ones(number_of_elements)
    elif window_type == 'Binomial':
        coefficients = binom(number_of_elements - 1, range(0, number_of_elements))
    elif window_type == 'Tschebyscheff':
        warnings.simplefilter("ignore", UserWarning)
        coefficients = chebwin(number_of_elements, at=side_lobe_level, sym=True)
    elif window_type == 'Kaiser':
        coefficients = kaiser(number_of_elements, 6, True)
    elif window_type == 'Blackman-Harris':
        coefficients = blackmanharris(number_of_elements, True)
    elif window_type == 'Hanning':
        coefficients = hanning(number_of_elements, True)
    elif window_type == 'Hamming':
        coefficients = hamming(number_of_elements, True)
    else:
        # Previously fell through with 'coefficients' undefined
        raise ValueError('Unknown window type: {}'.format(window_type))

    # Calculate the offset for even/odd
    offset = int(floor(number_of_elements / 2))

    # Odd case
    if number_of_elements & 1:
        coefficients = roll(coefficients, offset + 1)
        coefficients[0] *= 0.5
        af = sum(coefficients[i] * cos(i * psi) for i in range(offset + 1))
        return af / amax(abs(af))
    # Even case
    else:
        coefficients = roll(coefficients, offset)
        af = sum(coefficients[i] * cos((i + 0.5) * psi) for i in range(offset))
        return af / amax(abs(af))
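# A minimal usage sketch, assuming the bare scipy/numpy names used above
# (pi, cos, linspace, and c from scipy.constants) are in scope; the element
# count, spacing, and frequency are made-up values:
theta = linspace(0.01, pi - 0.01, 721)           # evaluation angles (rad)
af = array_factor(number_of_elements=16,
                  scan_angle=0.5 * pi,           # broadside
                  element_spacing=0.5 * c / 1e9, # half-wavelength at 1 GHz
                  frequency=1e9,
                  theta=theta,
                  window_type='Hanning',
                  side_lobe_level=30)
# af is normalized to a peak of 1; plot 20*log10(abs(af)) for the pattern in dB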
def genParams(self, df):
    data = []
    ys = sp.array(df.filter(like='Ids')).T
    fits = sp.array(df.filter(like='tcfit')).T
    cols = [i.replace('_Ids', '') for i in df.filter(like='Ids').columns]
    for y, f in zip(ys, fits):
        on = sp.amax(y)
        off = sp.amin(y)
        data.append([off, on, on / off, sp.amax(f)])
    datadf = pd.DataFrame(sp.array(data).T,
                          index=['off', 'on', 'onoff', 'maxtc'],
                          columns=cols)
    return datadf
def newtonRaphson(sys):
    eps = 1e-9  # termination criterion
    relDif = np.amax(np.absolute(sys.b)) * eps
    imax = CircuitAnalysis.MAX_NEWTON_ITERATIONS
    i = 0  # iteration number
    ungenau = True            # "inaccurate": residual still above tolerance
    wenig_iterationen = True  # "few iterations": iteration budget not exhausted
    d = 10
    movelen = 10
    sys.curNewtonIteration = 0
    x_backup = np.copy(sys.x)
    Vmax = np.amax(np.absolute(sys.b))
    while (ungenau and wenig_iterationen):
        xvorher = sys.x  # x from the previous iteration
        i += 1
        sys.curNewtonIteration = i
        CircuitAnalysis.nonlin(sys)
        if (sys.n > 1000):
            A = sys.A + sys.J
            b = sys.J.dot(sys.x) - sys.g + sys.b
            sys.x = spsolve(A, b, permc_spec="NATURAL")
        else:
            sys.x = np.linalg.solve(sys.A + sys.J,
                                    np.dot(sys.J, sys.x) - sys.g + sys.b)
        movelen, d = CircuitAnalysis.subNewtonRaphson(xvorher, sys, d, movelen, Vmax)
        dif = np.amax(np.absolute(sys.x - xvorher))
        wenig_iterationen = (i < imax)
        ungenau = d > relDif or (dif > relDif)
        if (i > 50):
            print("Newton: Iteration: " + str(i) + " ", end='\r')
    if i == imax:
        sys.x = np.copy(x_backup)
        print("Newton-Raphson convergence failure")
        #raise NRConvergenceException()
    print(" ", end='\r')
    return [sys.A, sys.x, sys.J, i]
def output_percentile_set(data_field, args):
    r"""
    Does three sets of percentiles and stacks them as columns: raw data,
    absolute value data, normalized+absolute value
    """
    data = {}
    # outputting percentiles of initial subtraction to screen
    field = data_field.clone()
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['raw'] = pctle.processed_data
    # normalizing data
    field = data_field.clone()
    field.data_map = field.data_map / sp.amax(sp.absolute(field.data_map))
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['norm'] = pctle.processed_data
    # taking absolute value of data
    field = data_field.clone()
    field.data_map = sp.absolute(field.data_map)
    field.data_vector = sp.absolute(field.data_vector)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs'] = pctle.processed_data
    # absolute value + normed
    field.data_map = field.data_map / sp.amax(field.data_map)
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs+norm'] = pctle.processed_data
    # outputting stacked percentiles
    fmt = ' {:>6.2f}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\n'
    content = 'Percentile\tRaw Data\tAbsolute\tNormalized\tNorm+abs\n'
    data = zip(args.perc, data['raw'].values(), data['abs'].values(),
               data['norm'].values(), data['abs+norm'].values())
    for row in data:
        content += fmt.format(*row)
    content += '\n'
    print(content)
def sem(im, direction='X'):
    r"""
    Simulates an SEM photograph looking into the porous material in the
    specified direction. Features are colored according to their depth into
    the image, so darker features are further away.

    Parameters
    ----------
    im : array_like
        ND-image of the porous material with the solid phase marked as 1 or
        True

    direction : string
        Specify the axis along which the camera will point. Options are
        'X', 'Y', and 'Z'.

    Returns
    -------
    A 2D greyscale image suitable for use in matplotlib's ``imshow``
    function.
    """
    im = sp.array(~im, dtype=int)
    if direction in ['Y', 'y']:
        im = sp.transpose(im, axes=[1, 0, 2])
    if direction in ['Z', 'z']:
        im = sp.transpose(im, axes=[2, 1, 0])
    t = im.shape[0]
    depth = sp.reshape(sp.arange(0, t), [t, 1, 1])
    im = im * depth
    im = sp.amax(im, axis=0)
    return im
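# A minimal usage sketch for sem, on a random synthetic image
# (scipy imported as sp to match the function; matplotlib for display):
import scipy as sp
import matplotlib.pyplot as plt
im3d = sp.rand(50, 60, 70) < 0.5   # boolean image, True = solid
photo = sem(im3d, direction='X')
plt.imshow(photo, cmap='gray')     # darker features lie deeper along X
plt.show()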
def GetStat(filename, Nsamp=1):
    hfile = []
    if type(filename) == list:
        for i, f in enumerate(filename):
            hfile.append(h5py.File(f, 'r'))
    elif type(filename) == str:
        hfile.append(h5py.File(filename, 'r'))
        filename = [filename]
    stats = []
    datapath = []
    for ih, h in enumerate(hfile):
        for r in h:
            for d in h[r]:
                try:
                    stats.append(int(h[r][d].attrs['statistics'][0]))
                except KeyError as err:
                    stats.append(1)
                datapath.append((filename[ih], "/{0}/{1}".format(r, d)))
    bunches, args = vln.bunch(stats, Nsamp, indices=True)
    addstat = sc.array([sum(bunches[i]) for i in range(Nsamp)])
    print("Average statistics of {0} (min: {1}, max: {2})"
          .format(sc.mean(addstat), sc.amin(addstat), sc.amax(addstat)))
    for f in hfile:
        f.close()
    return datapath, args
def test_random(self):
    self.geo.models.add(propname='throat.seed',
                        model=OpenPNM.Geometry.models.throat_seed.random,
                        seed=0, num_range=[0.1, 2])
    assert sp.amax(self.geo['throat.seed']) > 1.9
    assert sp.amin(self.geo['throat.seed']) > 0.1
def neighbor(geometry, network, pore_prop='pore.seed', mode='min', **kwargs):
    r"""
    Adopt a value based on the values in the neighboring pores

    Parameters
    ----------
    mode : string
        Indicates how to select the values from the neighboring pores. The
        options are:

        - min : (Default) Uses the minimum of the value found in the neighbors
        - max : Uses the maximum of the values found in the neighbors
        - mean : Uses an average of the neighbor values

    pore_prop : string
        The dictionary key containing the pore property to be used.
    """
    throats = network.throats(geometry.name)
    P12 = network.find_connected_pores(throats)
    pvalues = network[pore_prop][P12]
    if mode == 'min':
        value = _sp.amin(pvalues, axis=1)
    if mode == 'max':
        value = _sp.amax(pvalues, axis=1)
    if mode == 'mean':
        value = _sp.mean(pvalues, axis=1)
    return value
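# A self-contained sketch of the 'min' mode above with plain arrays: each
# throat connects two pores, and the throat adopts the smaller pore value
# (made-up connectivity; _sp is scipy, as in the model function):
import scipy as _sp
pore_seed = _sp.array([0.2, 0.9, 0.5, 0.1])
conns = _sp.array([[0, 1], [1, 2], [2, 3]])  # throat -> (pore1, pore2)
pvalues = pore_seed[conns]
print(_sp.amin(pvalues, axis=1))  # [0.2, 0.5, 0.1]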
def test_random_with_range(self):
    mod = gm.throat_misc.random
    self.geo.models.add(model=mod, propname='throat.seed',
                        num_range=[0.1, 0.9])
    assert sp.amax(self.geo['throat.seed']) <= 0.9
    assert sp.amin(self.geo['throat.seed']) >= 0.1
def plot_delta():
    beta = 0.99
    N = 1000
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0, 1, N)
    X, Y = sp.meshgrid(W, W)
    Wdiff = sp.transpose(X - Y)
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    Vprime = sp.zeros((N, 1))
    delta = sp.ones(1)
    tol = 10**-9
    it = 0
    max_iter = 500
    while (delta[-1] >= tol) and (it < max_iter):
        V = Vprime
        it += 1
        print(it)
        val = util_grid + beta * sp.transpose(V)
        Vprime = sp.amax(val, axis=1)
        Vprime = Vprime.reshape((N, 1))
        delta = sp.append(delta, sp.dot(sp.transpose(Vprime - V), Vprime - V))
    plt.figure()
    plt.plot(delta[1:])
    plt.ylabel(r'$\delta_k$')
    plt.xlabel('iteration')
    plt.savefig('convergence.pdf')
def Problem3Real():
    beta = 0.9
    N = 1000
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0, 1, N)
    X, Y = sp.meshgrid(W, W)
    Wdiff = sp.transpose(X - Y)
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    Vprime = sp.zeros((N, 1))
    psi = sp.zeros((N, 1))
    delta = 1.0
    tol = 10**-9
    it = 0
    max_iter = 500
    while (delta >= tol) and (it < max_iter):
        V = Vprime
        it += 1
        #print(it)
        val = util_grid + beta * sp.transpose(V)
        Vprime = sp.amax(val, axis=1)
        Vprime = Vprime.reshape((N, 1))
        psi_ind = sp.argmax(val, axis=1)
        psi = W[psi_ind]
        delta = sp.dot(sp.transpose(Vprime - V), Vprime - V)
    return psi
def Problem1Real():
    beta = 0.9
    T = 10
    N = 100
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0, 1, N)
    X, Y = sp.meshgrid(W, W)
    Wdiff = Y - X
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    V = sp.zeros((N, T + 2))
    psi = sp.zeros((N, T + 1))
    for k in range(T, -1, -1):
        val = util_grid + beta * sp.tile(sp.transpose(V[:, k + 1]), (N, 1))
        vt = sp.amax(val, axis=1)
        psi_ind = sp.argmax(val, axis=1)
        V[:, k] = vt
        psi[:, k] = W[psi_ind]
    return V, psi
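# A tiny self-contained version of the Bellman backup used in the three
# cake-eating solvers above (plot_delta, Problem3Real, Problem1Real),
# shrunk to N = 5 grid points so the arrays can be inspected by hand:
import scipy as sp
beta, N = 0.9, 5
W = sp.linspace(0, 1, N)
X, Y = sp.meshgrid(W, W)
Wdiff = sp.transpose(X - Y)      # consumption W - W'; negative = infeasible
index = Wdiff < 0
Wdiff[index] = 0
util = sp.sqrt(Wdiff)
util[index] = -10**10            # rule out infeasible choices
V = sp.zeros((N, 1))
for _ in range(200):             # value iteration to (near) convergence
    V = sp.amax(util + beta * sp.transpose(V), axis=1).reshape((N, 1))
print(V.flatten())               # increasing in wealth, as expected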
def test_late_pore_and_throat_filling():
    phys.models.add(propname='pore.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_pore_filling,
                    Pc=0, Swp_star=0.2, eta=1)
    mod = OpenPNM.Physics.models.multiphase.late_throat_filling
    phys.models.add(propname='throat.fractional_filling', model=mod,
                    Pc=0, Swp_star=0.2, eta=1)
    phys.regenerate()
    drainage.setup(invading_phase=water, defending_phase=air,
                   pore_filling='pore.fractional_filling',
                   throat_filling='throat.fractional_filling')
    drainage.set_inlets(pores=pn.pores('boundary_top'))
    drainage.run()
    data = drainage.get_drainage_data()
    assert sp.amin(data['invading_phase_saturation']) == 0.0
    assert sp.amax(data['invading_phase_saturation']) < 1.0
    drainage.return_results(Pc=5000)
    assert 'pore.occupancy' in water.keys()
    assert 'throat.occupancy' in water.keys()
    assert 'pore.partial_occupancy' in water.keys()
    assert 'throat.partial_occupancy' in water.keys()
def __MR_get_adj_loop(self, labels):
    s = sp.amax(labels) + 1
    adj = np.ones((s, s), np.bool)
    for i in range(labels.shape[0] - 1):
        for j in range(labels.shape[1] - 1):
            # '<>' was Python 2-only syntax; '!=' is equivalent
            if labels[i, j] != labels[i + 1, j]:
                adj[labels[i, j], labels[i + 1, j]] = False
                adj[labels[i + 1, j], labels[i, j]] = False
            if labels[i, j] != labels[i, j + 1]:
                adj[labels[i, j], labels[i, j + 1]] = False
                adj[labels[i, j + 1], labels[i, j]] = False
            if labels[i, j] != labels[i + 1, j + 1]:
                adj[labels[i, j], labels[i + 1, j + 1]] = False
                adj[labels[i + 1, j + 1], labels[i, j]] = False
            if labels[i + 1, j] != labels[i, j + 1]:
                adj[labels[i + 1, j], labels[i, j + 1]] = False
                adj[labels[i, j + 1], labels[i + 1, j]] = False
    upper_ids = sp.unique(labels[0, :]).astype(int)
    right_ids = sp.unique(labels[:, labels.shape[1] - 1]).astype(int)
    low_ids = sp.unique(labels[labels.shape[0] - 1, :]).astype(int)
    left_ids = sp.unique(labels[:, 0]).astype(int)
    bd = np.append(upper_ids, right_ids)
    bd = np.append(bd, low_ids)
    bd = sp.unique(np.append(bd, left_ids))
    for i in range(len(bd)):
        for j in range(i + 1, len(bd)):
            adj[bd[i], bd[j]] = False
            adj[bd[j], bd[i]] = False
    return adj
def plot_optimal_uncertainty_reduction(results_for_exp, results_for_exp_inftau):
    """ Plot the percentage of uncertainty reduction of the optimal
    classifiers.

    :param results_for_exp: The results of one experiment as 4-D array of the
        shape (metrics, z-values, tau-values, experimental repetitions).
    :type results_for_exp: 4-D array
    :param results_for_exp_inftau: The results of one experiment for
        `tau = inf` as 3-D array of the shape (metrics, z-values,
        experimental repetitions).
    :type results_for_exp_inftau: 3-D array.
    """
    plt.ylim(0, 1)
    plot_param_per_metric_and_z(
        sp.mean(sp.amax(results_for_exp, axis=2), axis=2),
        sp.std(sp.amax(results_for_exp, axis=2), axis=2))
    plot_param_per_metric_and_z(sp.mean(results_for_exp_inftau, axis=2), c='g')
def print_all_stats(ctx, series):
    ftime = get_ftime(series)
    start = 0
    end = ctx.interval
    print('start-time, samples, min, avg, median, 90%, 95%, 99%, max')
    while (start < ftime):  # for each time interval
        end = ftime if ftime < end else end
        sample_arrays = [s.get_samples(start, end) for s in series]
        samplevalue_arrays = []
        for sample_array in sample_arrays:
            samplevalue_arrays.append(
                [sample.value for sample in sample_array])
        # collapse list of lists of sample values into list of sample values
        samplevalues = reduce(array_collapser, samplevalue_arrays, [])
        # compute all stats and print them
        myarray = scipy.fromiter(samplevalues, float)
        mymin = scipy.amin(myarray)
        myavg = scipy.average(myarray)
        mymedian = scipy.median(myarray)
        my90th = scipy.percentile(myarray, 90)
        my95th = scipy.percentile(myarray, 95)
        my99th = scipy.percentile(myarray, 99)
        mymax = scipy.amax(myarray)
        print('%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
            start, len(samplevalues), mymin, myavg, mymedian,
            my90th, my95th, my99th, mymax))
        # advance to next interval
        start += ctx.interval
        end += ctx.interval
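# A short self-contained check of the percentile summary above, on fake
# latency samples (names and values are illustrative only):
import scipy
samples = [1.0, 2.0, 2.5, 3.0, 10.0]
arr = scipy.fromiter(samples, float)
print(scipy.amin(arr), scipy.median(arr),
      scipy.percentile(arr, 90), scipy.amax(arr))
# 1.0 2.5 7.2 10.0 -- the 90th percentile interpolates between 3.0 and 10.0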
def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
    r"""
    Parameters
    ----------
    npts : int (default = 25)
        The number of pressure points to apply. The list of pressures is
        logarithmically spaced between the lowest and highest throat entry
        pressures in the network.
    inv_points : array_like, optional
        A list of specific pressure point(s) to apply.
    """
    if 'inlets' in kwargs.keys():
        logger.info('Inlets received, passing to set_inlets')
        self.set_inlets(pores=kwargs['inlets'])
    if 'outlets' in kwargs.keys():
        logger.info('Outlets received, passing to set_outlets')
        self.set_outlets(pores=kwargs['outlets'])
    self._AL = access_limited
    if inv_points is None:
        logger.info('Generating list of invasion pressures')
        min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
        max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
        inv_points = sp.logspace(sp.log10(min_p), sp.log10(max_p), npts)
    self._npts = sp.size(inv_points)
    # Execute calculation
    self._do_outer_iteration_stage(inv_points)
def scale(x, M=None, m=None, REVERSE=None):
    """ Function that standardizes the data

        Input:
            x: the data
            M: the Max vector
            m: the Min vector

        Output:
            x: the standardized data
            M: the Max vector
            m: the Min vector
    """
    if not sp.issubdtype(x.dtype, float):
        do_convert = 1
    else:
        do_convert = 0
    if REVERSE is None:
        if M is None:
            M = sp.amax(x, axis=0)
            m = sp.amin(x, axis=0)
            if do_convert:
                xs = 2 * (x.astype("float") - m) / (M - m) - 1
            else:
                xs = 2 * (x - m) / (M - m) - 1
            return xs, M, m
        else:
            if do_convert:
                xs = 2 * (x.astype("float") - m) / (M - m) - 1
            else:
                xs = 2 * (x - m) / (M - m) - 1
            return xs
    else:
        return (1 + x) / 2 * (M - m) + m
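# A round-trip sketch for scale: the forward pass maps each column into
# [-1, 1], and REVERSE undoes it (made-up data; scipy imported as sp, as
# the function above expects):
import scipy as sp
x = sp.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]])
xs, M, m = scale(x)                      # each column now spans [-1, 1]
x_back = scale(xs, M=M, m=m, REVERSE=True)
print(sp.allclose(x, x_back))            # True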
def add_boundaries(self):
    r'''
    This method uses ``clone`` to clone the surface pores (labeled 'left',
    'right', etc), then shifts them to the periphery of the domain, and
    gives them the label 'right_face', 'left_face', etc.
    '''
    x, y, z = self['pore.coords'].T
    Lc = sp.amax(sp.diff(x))  # this currently works but is very fragile
    offset = {}
    offset['front'] = offset['left'] = offset['bottom'] = [0, 0, 0]
    offset['back'] = [x.max() + Lc / 2, 0, 0]
    offset['right'] = [0, y.max() + Lc / 2, 0]
    offset['top'] = [0, 0, z.max() + Lc / 2]
    scale = {}
    scale['front'] = scale['back'] = [0, 1, 1]
    scale['left'] = scale['right'] = [1, 0, 1]
    scale['bottom'] = scale['top'] = [1, 1, 0]
    for label in ['front', 'back', 'left', 'right', 'bottom', 'top']:
        ps = self.pores(label)
        self.clone(pores=ps, apply_label=[label + '_boundary', 'boundary'])
        # Translate cloned pores
        ind = self.pores(label + '_boundary')
        coords = self['pore.coords'][ind]
        coords = coords * scale[label] + offset[label]
        self['pore.coords'][ind] = coords
def RREFscaled(mymat):
    # Scale each row by its largest absolute entry before elimination
    scalevect = scipy.amax(abs(mymat), 1)
    scaledrows = []
    for sf, row in zip(scalevect, mymat):
        row = row / sf
        scaledrows.append(row)
    scaledmat = scipy.vstack(scaledrows)
    nc = scipy.shape(scaledmat)[1]
    nr = scipy.shape(scaledmat)[0]
    for j in range(nr - 1):
        # Partial pivoting: pick the largest entry in column j at or below row j
        # (was scaledmat[j:-1, j], which wrongly excluded the last row)
        pivrow = scipy.argmax(abs(scaledmat[j:, j]))
        pivrow = pivrow + j
        if pivrow != j:
            temprow = copy.copy(scaledmat[j, :])
            scaledmat[j, :] = scaledmat[pivrow, :]
            scaledmat[pivrow, :] = temprow
        for i in range(j + 1, nr):
            scaledmat[i, :] -= scaledmat[j, :] * (scaledmat[i, j] / scaledmat[j, j])
    return scaledmat, scalevect
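# A quick sanity check of RREFscaled on a made-up 3x3 system (scipy and copy
# imported as the function above assumes); despite the name, the routine
# performs scaled forward elimination, so the result is upper triangular:
import scipy
A = scipy.array([[2.0, 1.0, -1.0],
                 [-3.0, -1.0, 2.0],
                 [-2.0, 1.0, 2.0]])
tri, scales = RREFscaled(A)
print(tri)     # zeros below the diagonal (up to floating-point round-off)
print(scales)  # the per-row scale factors that were divided out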
def test_residual_and_lpf():
    phys.models.add(propname='pore.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_pore_filling,
                    Pc=0, Swp_star=0.2, eta=1)
    phys.models.add(propname='throat.fractional_filling',
                    model=OpenPNM.Physics.models.multiphase.late_throat_filling,
                    Pc=0, Swp_star=0.2, eta=1)
    phys.regenerate()
    drainage.setup(invading_phase=water, defending_phase=air,
                   pore_filling='pore.fractional_filling',
                   throat_filling='throat.fractional_filling')
    drainage.set_inlets(pores=pn.pores('boundary_top'))
    resPs = pn.pores('internal')[sp.random.random(len(pn.pores('internal'))) < 0.1]
    resTs = pn.throats('internal')[sp.random.random(len(pn.throats('internal'))) < 0.1]
    drainage.set_residual(pores=resPs, throats=resTs)
    drainage.run()
    drainage.return_results(Pc=5000)
    data = drainage.get_drainage_data()
    assert sp.all(water["pore.partial_occupancy"][resPs] == 1.0)
    assert sp.all(water["throat.partial_occupancy"][resTs] == 1.0)
    assert sp.amin(data['invading_phase_saturation']) > 0.0
    assert sp.amax(data['invading_phase_saturation']) < 1.0
    assert sp.all(water["pore.occupancy"] + air["pore.occupancy"] == 1.0)
    total_pp = water["pore.partial_occupancy"] + air["pore.partial_occupancy"]
    assert sp.all(total_pp == 1.0)
    assert sp.all(water["throat.occupancy"] + air["throat.occupancy"] == 1.0)
    total_pt = water["throat.partial_occupancy"] + air["throat.partial_occupancy"]
    assert sp.all(total_pt == 1.0)
def main(database):
    # Commits per committer, limited to the 40 committers with the highest
    # accumulated activity (the top five are then skipped below)
    query = "select count(*) from scmlog group by committer_id order by count(*) desc limit 40"
    # Connecting to the database and retrieving data
    connector = connect(database)
    results = int(connector.execute(query))
    if results > 0:
        results_aux = connector.fetchall()
    else:
        print("Error when retrieving data")
        return
    # Moving data to a list
    commits = []
    for commit in results_aux[5:]:
        commits.append(int(commit[0]))
    # Calculating basic statistics
    print("max: " + str(sp.amax(commits)))
    print("min: " + str(sp.amin(commits)))
    print("mean: " + str(sp.mean(commits)))
    print("median: " + str(sp.median(commits)))
    print("std: " + str(sp.std(commits)))
    print(".25 quartile: " + str(sp.percentile(commits, 25)))
    print(".50 quartile: " + str(sp.percentile(commits, 50)))
    print(".75 quartile: " + str(sp.percentile(commits, 75)))
def process_maps(aper_map, data_map1, data_map2, args):
    r"""
    Subtracts the data maps and then calculates percentiles of the result
    before outputting a final map to file.
    """
    # creating resultant map from clone of aperture map
    result = aper_map.clone()
    result.data_map = data_map1 - data_map2
    result.data_vector = sp.ravel(result.data_map)
    result.infile = args.out_name
    result.outfile = args.out_name
    print('Percentiles of data_map1 - data_map2')
    output_percentile_set(result, args)
    # checking if data is to be normalized and/or absolute
    if args.post_abs:
        result.data_map = sp.absolute(result.data_map)
        result.data_vector = sp.absolute(result.data_vector)
    if args.post_normalize:
        result.data_map = result.data_map / sp.amax(sp.absolute(result.data_map))
        result.data_vector = sp.ravel(result.data_map)
    return result
def _do_one_outer_iteration(self, **kwargs):
    r"""
    One iteration of an outer iteration loop for an algorithm
    (e.g. time or parametric study)
    """
    # Checking for the necessary values in Picard algorithm
    nan_tol = sp.isnan(self['pore.source_tol'])
    nan_max = sp.isnan(self['pore.source_maxiter'])
    self._tol_for_all = sp.amin(self['pore.source_tol'][~nan_tol])
    self._maxiter_for_all = sp.amax(self['pore.source_maxiter'][~nan_max])
    if self._guess is None:
        self._guess = sp.zeros(self._coeff_dimension)
    t = 1
    step = 0
    # The main Picard loop
    while t > self._tol_for_all and step <= self._maxiter_for_all:
        X, t, A, b = self._do_inner_iteration_stage(guess=self._guess, **kwargs)
        logger.info('tol for Picard source_algorithm in step ' + str(step) +
                    ' : ' + str(t))
        self._guess = X
        step += 1
    # Check for divergence
    self._steps = step
    if t >= self._tol_for_all and step > self._maxiter_for_all:
        raise Exception('Iterative algorithm for the source term reached '
                        'maxiter: ' + str(self._maxiter_for_all) +
                        ' without achieving tol: ' + str(self._tol_for_all))
    logger.info('Picard algorithm for source term converged!')
    self.A = A
    self.b = b
    self._tol_reached = t
    return X