def test_neighbor_throats_with_nans(self):
    net = op.network.Cubic(shape=[2, 2, 2])
    net['throat.values'] = 1.0
    net['throat.values'][0] = sp.nan
    f = mods.from_neighbor_throats
    with_nans = f(target=net, throat_prop='throat.values',
                  ignore_nans=False, mode='min')
    assert sp.any(sp.isnan(with_nans))
    no_nans = f(target=net, throat_prop='throat.values',
                ignore_nans=True, mode='min')
    assert sp.all(~sp.isnan(no_nans))
    with_nans = f(target=net, throat_prop='throat.values',
                  ignore_nans=False, mode='max')
    assert sp.any(sp.isnan(with_nans))
    no_nans = f(target=net, throat_prop='throat.values',
                ignore_nans=True, mode='max')
    assert sp.all(~sp.isnan(no_nans))
    with_nans = f(target=net, throat_prop='throat.values',
                  ignore_nans=False, mode='mean')
    assert sp.any(sp.isnan(with_nans))
    no_nans = f(target=net, throat_prop='throat.values',
                ignore_nans=True, mode='mean')
    assert sp.all(~sp.isnan(no_nans))
def test_RSA_mask_edge_3d(self):
    im = sp.zeros([50, 50, 50], dtype=int)
    im = ps.generators.RSA(im, radius=5, volume_fraction=0.5,
                           mode='contained')
    coords = sp.argwhere(im == 2)
    assert ~sp.any(coords < 5)
    assert ~sp.any(coords > 45)
def _check_bounds(self, x_new):
    # If self.bounds_error = 1, we raise an error if any x_new values
    # fall outside the range of x. Otherwise, we return an array
    # indicating which values are outside the boundary region.
    # !! Needs some work for multi-dimensional x !!
    below_bounds = less(x_new, self.x[0])
    above_bounds = greater(x_new, self.x[-1])
    # Note: sometrue has been redefined to handle length 0 arrays
    # !! Could provide more information about which values are out of
    # !! bounds.
    # RHC -- Changed these ValueErrors to PyDSTool_BoundsErrors
    if self.bounds_error and any(sometrue(below_bounds)):
        raise PyDSTool_BoundsError("A value in x_new is below the"
                                   " interpolation range.")
    if self.bounds_error and any(sometrue(above_bounds)):
        raise PyDSTool_BoundsError("A value in x_new is above the"
                                   " interpolation range.")
    # !! Should we emit a warning if some values are out of bounds?
    # !! matlab does not.
    out_of_bounds = logical_or(below_bounds, above_bounds)
    return out_of_bounds
def crop_pts(coords, box):
    r'''
    Drop all points lying outside the box

    Parameters
    ----------
    coords : array_like
        An Np x ndims array of [x, y, z] coordinates

    box : array_like
        A 2 x ndims array of diametrically opposed corner coordinates

    Returns
    -------
    coords : array_like
        Input coordinates with outliers removed

    Notes
    -----
    This needs to be made more general so that an arbitrary cuboid with
    any orientation can be supplied, using Np x 8 points
    '''
    # Keep only the points inside the box; the masks of outliers must be
    # negated, otherwise the outliers themselves would be kept
    coords = coords[~_sp.any(coords < box[0], axis=1)]
    coords = coords[~_sp.any(coords > box[1], axis=1)]
    return coords
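# Hedged usage sketch (not part of the original source): exercises the
# crop_pts function above against a unit box, assuming the module's legacy
# "scipy as _sp" alias is in scope.  Only the interior point survives.
def _demo_crop_pts():
    pts = _sp.array([[0.5, 0.5, 0.5],    # inside the box
                     [1.5, 0.5, 0.5],    # above the upper corner in x
                     [-0.1, 0.2, 0.3]])  # below the lower corner in x
    box = _sp.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    print(crop_pts(pts, box))  # expected: [[0.5, 0.5, 0.5]]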
def test_RSA_mask_edge_2d(self):
    im = sp.zeros([100, 100], dtype=int)
    im = ps.generators.RSA(im, radius=10, volume_fraction=0.5,
                           mode='contained')
    coords = sp.argwhere(im == 2)
    assert ~sp.any(coords < 10)
    assert ~sp.any(coords > 90)
def generate_simulation_data(n_classes=2, n_samples=[20, 20], n_features=3,
                             seed=0, scales=None):
    # initial checks
    assert n_classes > 0, "n_classes has to be larger than 0"
    assert n_features > 2, "n_features has to be larger than 2"
    assert n_classes == len(n_samples), \
        "n_samples has to be an array with as many elements as n_classes"
    if scales is None:
        scales = sp.ones(n_features)
    else:
        assert len(scales) == n_features, \
            "scales has to be an array with as many elements as features"
    # set seed
    sp.random.seed(seed)
    feature_matrix = None
    class_labels = None
    # generate data for each class
    for i in range(n_classes):
        # generate random data for class i drawn from a multivariate
        # gaussian distribution
        class_matrix = sp.random.multivariate_normal(
            sp.ones(n_features) * (i + 1) * sp.random.randn(n_features),
            sp.eye(n_features), n_samples[i])
        # store data; explicit "is None" checks replace the broken
        # sp.any(...) == None comparisons, which misbehave on arrays
        if class_labels is None:
            class_labels = sp.ones(n_samples[i]) * (i + 1)
        else:
            class_labels = sp.concatenate(
                [class_labels, sp.ones(n_samples[i]) * (i + 1)])
        if feature_matrix is None:
            feature_matrix = class_matrix
        else:
            feature_matrix = sp.vstack([feature_matrix, class_matrix])
    # scale features (scales is always set at this point)
    feature_matrix *= scales
    # generate data dict
    data = Data(data=feature_matrix, target=class_labels,
                n_samples=feature_matrix.shape[0],
                n_features=feature_matrix.shape[1])
    return data
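# Hedged usage sketch (not part of the original source): assumes the Data
# container used by generate_simulation_data above is importable here.
def _demo_generate_simulation_data():
    data = generate_simulation_data(n_classes=2, n_samples=[20, 30],
                                    n_features=3, seed=1)
    print(data.n_samples, data.n_features)  # 50 3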
def _apply_percolation(self, inv_val):
    r"""
    Determine which pores and throats are invaded at a given applied
    capillary pressure.  This method is called by ``run``.
    """
    # Generate a list containing boolean values for throat state
    Tinvaded = self['throat.entry_pressure'] <= inv_val
    # Add residual throats, if any, to list of invaded throats
    Tinvaded = Tinvaded + self['throat.residual']
    # Find all pores that can be invaded at specified pressure
    [pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,
                                                      t_labels=True)
    # Identify clusters connected to inlet sites
    inv_clusters = sp.unique(pclusters[self['pore.inlets']])
    inv_clusters = inv_clusters[inv_clusters >= 0]
    # Find pores on the invading clusters
    pmask = np.in1d(pclusters, inv_clusters)
    # Store current applied pressure in newly invaded pores
    pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)
    self['pore.inv_Pc'][pinds] = inv_val
    # Find throats on the invading clusters
    tmask = np.in1d(tclusters, inv_clusters)
    # Store current applied pressure in newly invaded throats
    tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)
    self['throat.inv_Pc'][tinds] = inv_val
    # Set residual pores and throats, if any, to invaded
    if sp.any(self['pore.residual']):
        self['pore.inv_Pc'][self['pore.residual']] = 0
    if sp.any(self['throat.residual']):
        self['throat.inv_Pc'][self['throat.residual']] = 0
def computePCsPython(out_dir, k, bfile, ffile):
    """ reading in """
    RV = plink_reader.readBED(bfile, useMAFencoding=True)
    X = np.ascontiguousarray(RV['snps'])

    """ normalizing markers """
    print('Normalizing SNPs...')
    p_ref = X.mean(axis=0) / 2.
    X -= 2 * p_ref
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        X /= sp.sqrt(2 * p_ref * (1 - p_ref))
    hasNan = sp.any(sp.isnan(X), axis=0)
    if sp.any(hasNan):
        print('%d SNPs have a nan entry. Excluding them for computing '
              'the covariance matrix.' % hasNan.sum())
    X = X[:, ~hasNan]

    """ computing principal components """
    U, S, Vt = ssl.svds(X, k=k)
    U -= U.mean(0)
    U /= U.std(0)
    U = U[:, ::-1]

    """ saving to output """
    np.savetxt(ffile, U, delimiter='\t', fmt='%.6f')
def test_snow_partitioning_n(self):
    im = self.im
    snow = ps.filters.snow_partitioning_n(im + 1, r_max=4, sigma=0.4,
                                          return_all=True, mask=True,
                                          randomize=False, alias=None)
    assert sp.amax(snow.regions) == 44
    assert not sp.any(sp.isnan(snow.regions))
    assert not sp.any(sp.isnan(snow.dt))
    assert not sp.any(sp.isnan(snow.im))
def cylinders(shape: List[int], radius: int, nfibers: int,
              phi_max: float = 0, theta_max: float = 90):
    r"""
    Generates a binary image of overlapping cylinders.  This is a good
    approximation of a fibrous mat.

    Parameters
    ----------
    shape : list
        The size of the image to generate in [Nx, Ny, Nz] voxels.
    radius : scalar
        The radius of the cylinders in voxels.
    nfibers : scalar
        The number of cylinders to add to the domain.
    phi_max : scalar
        A value between 0 and 90 that controls the amount that the fibers
        lie out of the XY plane, with 0 meaning all fibers lie in the XY
        plane, and 90 meaning that fibers are randomly oriented out of the
        plane by as much as +/- 90 degrees.
    theta_max : scalar
        A value between 0 and 90 that controls the amount of rotation in
        the XY plane, with 0 meaning all fibers point in the X-direction,
        and 90 meaning they are randomly rotated about the Z axis by as
        much as +/- 90 degrees.

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        shape = sp.full((3, ), int(shape))
    elif sp.size(shape) == 2:
        raise Exception("2D fibers don't make sense")
    im = sp.zeros(shape)
    R = sp.sqrt(sp.sum(sp.square(shape)))
    n = 0
    while n < nfibers:
        x = sp.rand(3) * shape
        phi = sp.deg2rad(90 + 90 * (0.5 - sp.rand()) * phi_max / 90)
        theta = sp.deg2rad(180 - 90 * (0.5 - sp.rand()) * 2 * theta_max / 90)
        X0 = R * sp.array([sp.sin(theta) * sp.cos(phi),
                           sp.sin(theta) * sp.sin(phi),
                           sp.cos(theta)])
        [X0, X1] = [X0 + x, -X0 + x]
        crds = line_segment(X0, X1)
        lower = ~sp.any(sp.vstack(crds).T < [0, 0, 0], axis=1)
        upper = ~sp.any(sp.vstack(crds).T >= shape, axis=1)
        valid = upper * lower
        if sp.any(valid):
            im[crds[0][valid], crds[1][valid], crds[2][valid]] = 1
            n += 1
    im = sp.array(im, dtype=bool)
    dt = spim.distance_transform_edt(~im) < radius
    return ~dt
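# Hedged usage sketch (not part of the original source): generates a small
# mat of mostly in-plane fibers and reports the pore-space fraction.
# Assumes the helpers used above (line_segment, spim, legacy sp alias)
# are importable in this scope.
def _demo_cylinders():
    im = cylinders(shape=[64, 64, 64], radius=2, nfibers=20,
                   phi_max=5, theta_max=90)
    print(im.dtype, im.sum() / im.size)  # bool image, pore fraction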
def process_file(self, file_ind):
    params = self.params
    file_middle = params['file_middles'][file_ind]
    input_fname = (params['input_root'] + file_middle +
                   params['input_end'])
    sub_input_fname = (params['subtracted_input_root'] + file_middle +
                       params['input_end'])
    output_fname = (params['output_root'] + file_middle +
                    params['output_end'])
    sub_output_fname = (params['subtracted_output_root'] + file_middle +
                        params['output_end'])
    Writer = fitsGBT.Writer(feedback=self.feedback)
    SubWriter = fitsGBT.Writer(feedback=self.feedback)

    # Read in the data, and loop over data blocks.
    Reader = fitsGBT.Reader(input_fname, feedback=self.feedback)
    SubReader = fitsGBT.Reader(sub_input_fname, feedback=self.feedback)
    if (sp.any(Reader.scan_set != SubReader.scan_set) or
            sp.any(Reader.IF_set != SubReader.IF_set)):
        raise ce.DataError("IFs and scans don't match signal subtracted"
                           " data.")
    # Get the number of scans if asked for all of them.
    scan_inds = params['scans']
    if len(scan_inds) == 0 or scan_inds is None:
        scan_inds = range(len(Reader.scan_set))
    if_inds = params['IFs']
    if len(if_inds) == 0 or if_inds is None:
        if_inds = range(len(Reader.IF_set))
    if self.feedback > 1:
        print("New flags each block:", end=' ')
    # Loop over scans and IFs
    for thisscan in scan_inds:
        for thisIF in if_inds:
            Data = Reader.read(thisscan, thisIF)
            SubData = SubReader.read(thisscan, thisIF)
            n_flags = ma.count_masked(Data.data)
            # Now do the flagging.
            flag(Data, SubData, params['thres'])
            Data.add_history("Reflaged for outliers.",
                             ("Used file: " +
                              utils.abbreviate_file_path(sub_input_fname),))
            SubData.add_history("Reflaged for outliers.")
            Writer.add_data(Data)
            SubWriter.add_data(SubData)
            # Report the number of new flags.
            n_flags = ma.count_masked(Data.data) - n_flags
            if self.feedback > 1:
                print(n_flags, end=' ')
    if self.feedback > 1:
        print('')
    # Finally write the data back to file.
    utils.mkparents(output_fname)
    utils.mkparents(sub_output_fname)
    Writer.write(output_fname)
    SubWriter.write(sub_output_fname)
def updateData(self, array=Array(sp.zeros((0, 2)), scale=[0, 0], Type=0),
               action=1, Type=None):
    """Write array data to the temporary data store
    action = {-1, 0, 1}
        -1 : undo
         0 : reset
         1 : add
    """
    if Type is None:
        if sp.any(array):
            Type = array.Type
    emit = False
    # Append to history
    if action == 1:
        if sp.any(array) and sp.shape(array)[1] == 2 \
                and sp.shape(array)[0] > 1:
            self.dataStack[Type].append(array)
            emit = True
        else:
            print('updateData: arrayError', sp.any(array),
                  sp.shape(array)[1] == 2, sp.shape(array)[0] > 1)
    # Remove the last history entry
    elif action == -1 and len(self.dataStack[Type]) >= 2:
        self.dataStack[Type].pop()
        emit = True
        # self.setActiveLogScale(Type)
    # Reset the history, or write its first element
    elif action == 0:
        print(0)
        if sp.any(array) and sp.shape(array)[1] == 2 \
                and sp.shape(array)[0] > 1 \
                and len(self.dataStack[Type]) >= 1:
            self.dataStack[Type][0:] = []
            self.dataStack[Type].append(array)
            emit = True
        if not sp.any(array) and len(self.dataStack[Type]) >= 2:
            self.dataStack[Type][1:] = []
            emit = True
        # self.setActiveLogScale(Type)
    else:
        print("updateData: Error0", len(self.dataStack[Type]))
        print(sp.shape(self.getData(Type)))
        try:
            for i in self.dataStack[Type]:
                print(i.scaleX, i.scaleY, i.shape)
        except Exception:
            pass
    # Emit a data-changed notification
    if emit:
        self.data_signal.emit(Type, action)
        self.Plot(self.getData(Type))
def test_ransohoff_snapoff_verts(self):
    ws = op.Workspace()
    ws.clear()
    bp = sp.array([[0.25, 0.25, 0.25], [0.25, 0.75, 0.25],
                   [0.75, 0.25, 0.25], [0.75, 0.75, 0.25],
                   [0.25, 0.25, 0.75], [0.25, 0.75, 0.75],
                   [0.75, 0.25, 0.75], [0.75, 0.75, 0.75]])
    scale = 1e-4
    sp.random.seed(1)
    p = (sp.random.random([len(bp), 3]) - 0.5) / 1000
    bp += p
    fiber_rad = 2e-6
    bp = op.topotools.reflect_base_points(bp, domain_size=[1, 1, 1])
    prj = op.materials.VoronoiFibers(fiber_rad=fiber_rad,
                                     resolution=1e-6,
                                     shape=[scale, scale, scale],
                                     points=bp * scale,
                                     name='test')
    net = prj.network
    del_geom = prj.geometries()['test_del']
    vor_geom = prj.geometries()['test_vor']
    f = op.models.physics.capillary_pressure.ransohoff_snap_off
    water = op.phases.GenericPhase(network=net)
    water['pore.surface_tension'] = 0.072
    water['pore.contact_angle'] = 45
    phys1 = op.physics.GenericPhysics(network=net,
                                      geometry=del_geom,
                                      phase=water)
    phys1.add_model(propname='throat.snap_off',
                    model=f,
                    wavelength=fiber_rad)
    phys1.add_model(propname='throat.snap_off_pair',
                    model=f,
                    wavelength=fiber_rad,
                    require_pair=True)
    phys2 = op.physics.GenericPhysics(network=net,
                                      geometry=vor_geom,
                                      phase=water)
    phys2.add_model(propname='throat.snap_off',
                    model=f,
                    wavelength=fiber_rad)
    phys2.add_model(propname='throat.snap_off_pair',
                    model=f,
                    wavelength=fiber_rad,
                    require_pair=True)
    ts = ~net['throat.interconnect']
    assert ~sp.any(sp.isnan(water['throat.snap_off'][ts]))
    assert sp.any(sp.isnan(water['throat.snap_off_pair'][ts]))
    assert sp.any(~sp.isnan(water['throat.snap_off_pair'][ts]))
def affine_transform(input, matrix, shift=None, offset=None,
                     interptype=InterpolationType.CATMULL_ROM_CUBIC_SPLINE,
                     fill=None):
    """
    Applies an affine transformation to an image.
    This is the forward transform so that (conceptually)::

       idx = scipy.array((i,j,k), dtype="int32")
       tidx = matrix.dot((idx-offset).T) + offset + shift
       out[tuple(tidx)] = input[tuple(idx)]

    This differs from the :func:`scipy.ndimage.interpolation.affine_transform`
    function which does (conceptually, ignoring shift)::

       idx = scipy.array((i,j,k), dtype="int32")
       tidx = matrix.dot((idx-offset).T) + offset
       out[tuple(idx)] = input[tuple(tidx)]

    :type input: :obj:`mango.Dds`
    :param input: Image to be transformed.
    :type matrix: :obj:`numpy.ndarray`
    :param matrix: A :samp:`(3,3)` shaped affine transformation matrix.
    :type shift: 3-sequence
    :param shift: The translation (number of voxels), can be :obj:`float`
       elements.
    :type offset: 3-sequence
    :param offset: The centre-point of the affine transformation (relative
       to input.origin). If :samp:`None`, the centre of the image is used
       as the centre of affine transformation. Elements can be :obj:`float`.
    :type interptype: :obj:`mango.image.InterpolationType`
    :param interptype: Interpolation type.
    :type fill: numeric
    :param fill: The value used for elements outside the image-domain.
       If :samp:`None` uses the :samp:`input.mtype.maskValue()` or
       :samp:`0` if there is no :samp:`input.mtype` attribute.

    :rtype: :obj:`mango.Dds`
    :return: Affine-transformed :obj:`mango.Dds` image.
    """
    if (_mango_reg_core is None):
        raise Exception("This mango build has not been compiled with "
                        "registration support.")
    if (sp.any(input.md.getVoxelSize() <= 0)):
        raise Exception("Non-positive voxel size (%s) found in input, "
                        "affine_transform requires positive voxel size to "
                        "be set." % (input.md.getVoxelSize(),))
    if (fill is None):
        fill = 0
        if (hasattr(input, "mtype") and (input.mtype is not None)):
            fill = input.mtype.maskValue()
    if (offset is None):
        # Set the default offset to be the centre of the image.
        offset = sp.array(input.shape, dtype="float64") * 0.5
    # Convert from relative offset value to absolute global coordinate.
    centre = sp.array(offset, dtype="float64") + input.origin
    mangoFilt = _mango_reg_core._TransformApplier(matrix, centre, shift,
                                                  interptype, fill)
    filt = _DdsMangoFilterApplier(mangoFilt)
    trnsDds = filt(input, mode="constant", cval=fill)
    return trnsDds
def geodesic(x, v, tmax, func, jacobian, Avv, args=(), lam=0, dtd=None,
             rtol=1e-6, atol=1e-6, maxsteps=500, callback=None):
    N = len(x)
    y = scipy.empty((2 * N,))
    y[:N] = x[:]
    y[N:] = v[:]
    if dtd is None:
        dtd = scipy.eye(N)
    j = jacobian(x, *args)
    M, N = j.shape
    Acc = scipy.empty((M,))
    r = ode(geodesic_rhs_ode, jac=None)
    r.set_f_params(lam, dtd, func, jacobian, Avv, args, j, Acc)
    r.set_integrator('vode', atol=atol, rtol=rtol)
    r.set_initial_value(y, 0.0)
    steps = 0
    xs = []
    vs = []
    ts = []
    stop = False
    while (r.successful() and steps < maxsteps and r.t < tmax and
           not scipy.any(scipy.isnan(r.y)) and not stop):
        try:
            r.integrate(tmax, step=1)
            xs.append(r.y[:N])
            vs.append(r.y[N:])
            ts.append(r.t)
            steps += 1
            if callback is not None:
                stop = callback(r.y[:N], r.y[N:], r.t, j, Acc, dtd)
        except Exception:
            stop = True
    return scipy.array(xs), scipy.array(vs), scipy.array(ts)
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # load input image
    data_input, header_input = load(args.input)

    # transform to uint8
    data_input = data_input.astype(scipy.uint8)

    # reduce to 3D, if larger dimensionality
    if data_input.ndim > 3:
        for _ in range(data_input.ndim - 3):
            data_input = data_input[..., 0]

    # iterate over slices (2D) until the first one with content is detected
    for plane in data_input:
        if scipy.any(plane):
            # set pixel spacing
            spacing = list(header.get_pixel_spacing(header_input))
            spacing = spacing[1:3]
            __update_header_from_array_nibabel(header_input, plane)
            header.set_pixel_spacing(header_input, spacing)
            # save image
            save(plane, args.output, header_input, args.force)
            break

    logger.info("Successfully terminated.")
def execute(self):
    self.power_mat, self.thermal_expectation = self.full_calculation()
    n_chan = self.power_mat.shape[1]
    n_freq = self.power_mat.shape[0]
    # Calculate the mean channel correlations at low frequencies.
    low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1, :, :], 0).real
    # Factorize it into principal components.
    e, v = linalg.eigh(low_f_mat)
    self.low_f_mode_values = e
    # Make sure the eigenvalues are sorted.
    if sp.any(sp.diff(e) < 0):
        raise RuntimeError("Eigenvalues not sorted.")
    self.low_f_modes = v
    # Now subtract out the noisiest channel modes and see what is left.
    n_modes_subtract = 10
    mode_subtracted_power_mat = sp.copy(self.power_mat.real)
    mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
    for ii in range(n_modes_subtract):
        mode = v[:, -ii]
        amp = sp.sum(mode[:, None] * mode_subtracted_power_mat, 1)
        amp = sp.sum(amp * mode, 1)
        to_subtract = amp[:, None, None] * mode[:, None] * mode
        mode_subtracted_power_mat -= to_subtract
        auto_power = mode_subtracted_power_mat.view()
        auto_power.shape = (n_freq, n_chan**2)
        auto_power = auto_power[:, ::n_chan + 1]
        mode_subtracted_auto_power[ii, :] = sp.mean(auto_power, -1)
    self.subtracted_auto_power = mode_subtracted_auto_power
def generate_matrices(model, col_map, c, r, m, t):
    noBrPointsPerEpoch = model.nbreakpoints
    nleaves = model.nleaves
    all_time_breakpoints, time_breakpoints = default_bps(model, c, r, t)
    M = []
    for e in range(len(noBrPointsPerEpoch)):
        newM = identity(nleaves)
        newM[:] = m[e]
        M.append(newM)
    pi, T, E = model.run(r, c, time_breakpoints, M, col_map=col_map)
    assert not any(isnan(pi))
    assert not any(isnan(T))
    assert not any(isnan(E))
    return pi, T, array(E)
def _initMean(self, Y, F=None, tol=1e-6):
    """
    initialize the mean term

    Args:
        F: sample design of the fixed effect
    """
    if F is not None:
        R = LA.qr(F, mode='r')[0][:F.shape[1], :]
        I = (abs(R.diagonal()) > tol)
        if SP.any(~I):
            warnings.warn('cols ' + str(SP.where(~I)[0]) +
                          ' have been removed because linearly dependent '
                          'on the others')
        self.F = F[:, I]
    else:
        self.F = None
    # dimensions
    self.N, self.P = Y.shape
    # get F and Y
    self.Y = Y
    # build mean
    self.mean = mean(Y)
    if F is not None:
        A = SP.eye(self.P)
        self.mean.addFixedEffect(F=self.F, A=A)
def makehist(testpath, npulses):
    """
    Create histograms from the data in testpath.
    Inputs
        testpath - The path where the data is located.
        npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    pvals = [1e11, 2.1e3, 1.1e3, 0.]
    histlims = [[4e10, 2e11], [1200., 3000.], [300., 1900.], [-250., 250.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800.], [-250., 250.]]
    erperlims = [[-100., 100.]] * 4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']
    sig1 = sp.sqrt(1. / npulses)

    # Two-dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos)) / 2.))
    (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows * 6),
                                    facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2],
                             figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate + '_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)

    # One-dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            histhand = sns.distplot(errdict[ierr][iparam], bins=bins,
                                    kde=True, rug=False)
            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername + ' Pulses: {0}'.format(npulses),
                         fontsize=20)
        fname = filetemplate + '_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
def has_complex(arr):
    try:
        imag = arr.imag
    except AttributeError:
        return False
    else:
        return sp.any(imag != 0)
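# Hedged sketch (not part of the original source) showing the three paths
# through has_complex: a real array (imag part all zero), a complex array
# with a non-zero imaginary entry, and a plain list with no .imag attribute.
def _demo_has_complex():
    print(has_complex(sp.array([1.0, 2.0])))        # False
    print(has_complex(sp.array([1 + 0j, 2 + 1j])))  # True
    print(has_complex([1, 2, 3]))                   # False (AttributeError path)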
def refractory_correct_SpikeTrain(SpikeTrain, ref_per=2 * pq.ms):
    """
    checks for spike duplets in the SpikeTrain, removes the latter if both
    spikes are closer in time than a refractory period.

    Args:
        SpikeTrain (neo.core.SpikeTrain): the SpikeTrain
        ref_per (quantities.Quantity): the refractory period -
            minimum time between two spikes

    Returns:
        neo.core.SpikeTrain: the corrected SpikeTrain
    """
    ind = 0
    next_ind = 0
    good_inds = []
    try:
        while sp.any(sp.argmax(SpikeTrain.times - SpikeTrain.times[ind]
                               > ref_per)):
            next_ind = sp.argmax(SpikeTrain.times - SpikeTrain.times[ind]
                                 > ref_per)
            good_inds.append(next_ind)
            ind = next_ind
    except IndexError:
        # when empty
        return SpikeTrain
    return SpikeTrain[good_inds]
def have_same_subd_decomp(dds0, dds1):
    """
    Returns :samp:`True` if pairs of non-halo-sub-domains on all processes
    have the same (global) non-halo-sub-domain origin index and same
    non-halo-sub-domain shape.  Note: performs an MPI *allreduce*
    operation.

    :type dds0: :obj:`mango.Dds`
    :param dds0: Array.
    :type dds1: :obj:`mango.Dds`
    :param dds1: Array.
    :rtype: :obj:`bool`
    :return: :samp:`True` if MPI non-halo-subdomain decomposition is the
       same for :samp:`dds0` and :samp:`dds1`.
    """
    numDiff = 0
    if (sp.any(sp.logical_or(dds0.subd.origin != dds1.subd.origin,
                             (dds0.subd.shape != dds1.subd.shape)))):
        numDiff = 1
    mpiComm = None
    if (hasattr(dds0, "mpi") and hasattr(dds0.mpi, "comm")
            and (dds0.mpi.comm is not None)):
        mpiComm = dds0.mpi.comm
        numDiff = mpiComm.allreduce(numDiff, op=mango.mpi.SUM)
    return (numDiff == 0)
def is_symmetric(a, rtol=1e-10):
    r"""
    Is ``a`` a symmetric matrix?

    Parameters
    ----------
    a : ndarray, sparse matrix
        Object to check for being a symmetric matrix.
    rtol : float
        Relative tolerance with respect to the smallest entry in ``a``
        that is used to determine if ``a`` is symmetric.

    Returns
    -------
    bool
        ``True`` if ``a`` is a symmetric matrix, ``False`` otherwise.
    """
    if type(a) != _sp.ndarray and not _sp.sparse.issparse(a):
        raise Exception("'a' must be either a sparse matrix or an ndarray.")
    if a.shape[0] != a.shape[1]:
        raise Exception("'a' must be a square matrix.")
    atol = _sp.amin(_sp.absolute(a.data)) * rtol
    if _sp.sparse.issparse(a):
        issym = False if ((a - a.T) > atol).nnz else True
    elif type(a) == _sp.ndarray:
        issym = False if _sp.any((a - a.T) > atol) else True
    return issym
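# Hedged sketch (not part of the original source): is_symmetric on dense
# and sparse inputs, then on a slightly perturbed matrix.  Assumes the
# module's legacy "scipy as _sp" alias and scipy.sparse are in scope.
def _demo_is_symmetric():
    a = _sp.array([[2.0, 1.0], [1.0, 2.0]])
    print(is_symmetric(a))                         # True
    print(is_symmetric(_sp.sparse.csr_matrix(a)))  # True
    a[0, 1] += 1e-3
    print(is_symmetric(a))                         # False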
def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990.  Always gives positive real or negative imaginary.
    """

    if kx.shape != ky.shape or c.size != 3:
        raise ValueError('kx and ky must have the same length and c must '
                         'have 3 components')

    kz = S.empty_like(kx)

    for ii in range(0, kx.size):
        alpha = nE**2 - nO**2
        beta = kx[ii] / k * c[0] + ky[ii] / k * c[1]

        # coeffs
        C = S.array([nO**2 + c[2]**2 * alpha,
                     2. * c[2] * beta * alpha,
                     nO**2 * (kx[ii]**2 + ky[ii]**2) / k**2 +
                     alpha * beta**2 - nO**2 * nE**2])

        # two solutions of type +x or -x, purely real or purely imag
        tmp_kz = k * S.roots(C)

        # get the negative imaginary part or the positive real one
        if S.any(S.isreal(tmp_kz)):
            kz[ii] = S.absolute(tmp_kz[0])
        else:
            kz[ii] = -1j * S.absolute(tmp_kz[0])

    return kz
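# For reference (reconstructed from the coefficient array C above, not from
# the original source): with alpha = nE**2 - nO**2 and
# beta = (kx*c[0] + ky*c[1])/k, S.roots solves the quadratic in (kz/k)
#
#   (nO**2 + c[2]**2 * alpha) * (kz/k)**2
#       + 2 * c[2] * beta * alpha * (kz/k)
#       + nO**2 * (kx**2 + ky**2)/k**2 + alpha * beta**2 - nO**2 * nE**2 = 0
#
# i.e. the eq. 16 of the Glytsis paper cited in the docstring; the roots
# are then rescaled by k to recover kz.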
def main(testpath, npulse=1400,
         functlist=['spectrums', 'radardata', 'fitting', 'analysis']):
    """ This function will call other functions to create the input data,
        config file and run the radar data sim. The path for the simulation
        will be created in the Testdata directory in the SimISR module. The
        new folder will be called BasicTest. The simulation is a long pulse
        simulation with the desired number of pulses from the user.
        Inputs
            testpath - Path where the simulation data will be created.
            npulse - Number of pulses for the integration period,
                default==1400.
            functlist - The list of functions for the SimISR to do.
    """
    curloc = os.path.dirname(os.path.abspath(inspect.getfile(
        inspect.currentframe())))
    if not os.path.isdir(testpath):
        os.mkdir(testpath)
    functlist_default = ['spectrums', 'radardata', 'fitting']
    check_list = sp.array([i in functlist for i in functlist_default])
    check_run = sp.any(check_list)
    functlist_red = sp.array(functlist_default)[check_list].tolist()
    configfilesetup(testpath, npulse)
    config = os.path.join(testpath, 'stats.ini')
    (sensdict, simparams) = readconfigfile(config)
    makedata(testpath, simparams['Tint'])
    if check_run:
        runsim(functlist_red, testpath, config, True)
    if 'analysis' in functlist:
        analysisdump(testpath, config)
def dataListener(self, Type, action):
    """Handle data changes"""
    Buttons = (('cUndo', 'cReset'),
               ('sUndo', 'sReset'),
               ('rUndo', 'rReset'))
    Types = ['c', 's', 'r']
    active = self.getData(Type)
    self.mprint("dataChanged: scaleX : %d, scaleY : %d, type : %d, "
                "len : %d, action : %d" %
                (active.scaleX, active.scaleY, active.Type,
                 sp.shape(active)[0], action))
    if sp.any(active):
        b_splineSCheck = ['cAutoB_splineS', 'sAutoB_splineS',
                          'rAutoB_splineS']
        b_splineSObj = self.findChild(QtGui.QCheckBox, b_splineSCheck[Type])
        if getattr(self.ui,
                   Types[Type] + 'AllSliceConcat').currentIndex() == 0:
            getattr(self.ui,
                    Types[Type] + 'Start').setValue(active[:, 0].min())
            getattr(self.ui,
                    Types[Type] + 'End').setValue(active[:, 0].max())
        self.AutoB_splineS(b_splineSObj.checkState(), isSignal=False,
                           senderType=Type)
    # Undo/Reset
    hist = self.dataStack[Type]
    state = False
    if len(hist) >= 2:
        state = True
    buttons = self.findChilds(QtGui.QPushButton, Buttons[Type])
    buttons[0].setEnabled(state)
    buttons[1].setEnabled(state)
def main(npulse=100,
         functlist=['spectrums', 'radardata', 'fitting', 'analysis']):
    """ This function will call other functions to create the input data,
        config file and run the radar data sim. The path for the simulation
        will be created in the Testdata directory in the SimISR module. The
        new folder will be called BasicTest. The simulation is a long pulse
        simulation with the desired number of pulses from the user.
        Inputs
            npulse - Number of pulses for the integration period,
                default==100.
            functlist - The list of functions for the SimISR to do.
    """
    curloc = Path(__file__).resolve()
    testpath = curloc.parent.parent / 'Testdata' / 'BasicTest'
    if not testpath.is_dir():
        testpath.mkdir(parents=True)
    functlist_default = ['spectrums', 'radardata', 'fitting']
    check_list = sp.array([i in functlist for i in functlist_default])
    check_run = sp.any(check_list)
    functlist_red = sp.array(functlist_default)[check_list].tolist()
    configfilesetup(str(testpath), npulse)
    config = testpath.joinpath('stats.ini')
    (sensdict, simparams) = readconfigfile(str(config))
    makedata(testpath, simparams['Tint'])
    if check_run:
        runsim(functlist_red, str(testpath), config, True)
    if 'analysis' in functlist:
        analysisdump(str(testpath), config)
def delayedsignalF(x, t0_pts):
    """
    Delay a signal by a non-integer number of samples
    (computation in the frequency domain)

    Synopsis: y = delayedsignalF(x, t0_pts)

    Inputs: x vector of length N
            t0_pts is a REAL delay expressed wrt the sampling time Ts=1:
                t0_pts = 1 corresponds to one time dot
                t0_pts may be positive, negative, non integer
                t0_pts > 0: shift to the right
                t0_pts < 0: shift to the left
    Rk: the length of the FFT is 2^(nextpow2(N)+1)
    """
    #
    # M. Charbit, Jan. 2010
    N = len(x)
    p = ceil(log2(N)) + 1
    Lfft = int(2.0**p)
    Lffts2 = Lfft // 2  # integer division: Lffts2 is used as an index
    fftx = fft(x, Lfft)
    ind = concatenate((range(Lffts2 + 1), range(Lffts2 + 1 - Lfft, 0)),
                      axis=0)
    fftdelay = exp(-2j * pi * t0_pts * ind / Lfft)
    fftdelay[Lffts2] = real(fftdelay[Lffts2])
    ifftdelay = ifft(fftx * fftdelay)
    y = ifftdelay[range(N)]
    # keep the output real when every input sample is real
    if all(isreal(x)):
        y = real(y)
    return y
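# Hedged sketch (not part of the original source): fractionally delay a
# sampled sine by half a sample and compare with the analytically shifted
# signal away from the window edges.  Assumes the numpy names used above
# (arange, sin, pi, ...) are in scope via the same star import.
def _demo_delayedsignalF():
    n = arange(64)
    x = sin(2 * pi * 0.05 * n)
    y = delayedsignalF(x, 0.5)
    err = abs(y[8:-8] - sin(2 * pi * 0.05 * (n[8:-8] - 0.5))).max()
    print(err)  # small relative to the unit signal amplitude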
def computeCovarianceMatrixPython(out_dir, bfile, cfile, sim_type='RRM'):
    print('Using python to create covariance matrix. This might be slow. '
          'We recommend using plink instead.')
    if sim_type != 'RRM':
        raise Exception('sim_type %s is not known' % sim_type)

    """ loading data """
    data = plink_reader.readBED(bfile, useMAFencoding=True)
    iid = data['iid']
    X = data['snps']
    N = X.shape[1]
    print('%d variants loaded.' % N)
    print('%d people loaded.' % X.shape[0])

    """ normalizing markers """
    print('Normalizing SNPs...')
    p_ref = X.mean(axis=0) / 2.
    X -= 2 * p_ref
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        X /= sp.sqrt(2 * p_ref * (1 - p_ref))
    hasNan = sp.any(sp.isnan(X), axis=0)
    print('%d SNPs have a nan entry. Excluding them for computing the '
          'covariance matrix.' % hasNan.sum())

    """ computing covariance matrix """
    print('Computing relationship matrix...')
    K = sp.dot(X[:, ~hasNan], X[:, ~hasNan].T)
    K /= 1. * N
    print('Relationship matrix calculation complete')
    print('Relationship matrix written to %s.cov.' % cfile)
    print('IDs written to %s.cov.id.' % cfile)

    """ saving to output """
    np.savetxt(cfile + '.cov', K, delimiter='\t', fmt='%.6f')
    np.savetxt(cfile + '.cov.id', iid, delimiter=' ', fmt='%s')
def _get_Voronoi_edges(vor):
    r"""
    Given a Voronoi object as produced by the scipy.spatial.Voronoi class,
    this function calculates the start and end points of each edge in the
    Voronoi diagram, in terms of the vertex indices used by the received
    Voronoi object.

    Parameters
    ----------
    vor : scipy.spatial.Voronoi object

    Returns
    -------
    A 2-by-N array of vertex indices, indicating the start and end points
    of each vertex in the Voronoi diagram.  These vertex indices can be
    used to index straight into the ``vor.vertices`` array to get spatial
    positions.
    """
    edges = [[], []]
    for facet in vor.ridge_vertices:
        # Create a closed cycle of vertices that define the facet
        edges[0].extend(facet[:-1] + [facet[-1]])
        edges[1].extend(facet[1:] + [facet[0]])
    edges = sp.vstack(edges).T  # Convert to scipy-friendly format
    mask = sp.any(edges == -1, axis=1)  # Identify edges at infinity
    edges = edges[~mask]  # Remove edges at infinity
    edges = sp.sort(edges, axis=1)  # Move all points to upper triangle
    # Remove duplicate pairs
    edges = edges[:, 0] + 1j * edges[:, 1]  # Convert to imaginary
    edges = sp.unique(edges)  # Remove duplicates
    edges = sp.vstack((sp.real(edges), sp.imag(edges))).T  # Back to real
    edges = sp.array(edges, dtype=int)
    return edges
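# Hedged sketch (not part of the original source): feed a random point
# cloud through scipy.spatial.Voronoi and inspect the returned edge list.
# Assumes the module's legacy "scipy as sp" alias (sp.rand, sp.all).
def _demo_get_Voronoi_edges():
    from scipy.spatial import Voronoi
    vor = Voronoi(sp.rand(20, 3))
    edges = _get_Voronoi_edges(vor)
    print(edges.shape)         # (N, 2): unique vertex-index pairs
    print(sp.all(edges >= 0))  # True: ridges touching infinity dropped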
def isherm(Q):
    """Determines if given operator is Hermitian.

    Parameters
    ----------
    Q : qobj
        Quantum object

    Returns
    -------
    isherm : bool
        True if operator is Hermitian, False otherwise.

    Examples
    --------
    >>> a = destroy(4)
    >>> isherm(a)
    False

    """
    if Q.dims[0] != Q.dims[1]:
        return False
    else:
        dat = Q.data
        elems = (dat.transpose().conj() - dat).data
        if any(abs(elems) > 1e-15):
            return False
        else:
            return True
def ismember(element, array, rows=False):
    """Check if element is member of array"""
    if rows:
        return sp.any([sp.all(array[x, :] == element)
                       for x in range(array.shape[0])])
    else:
        # iterate over the entries of element
        return sp.all([element[i] in array
                       for i in range(element.shape[0])])
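# Hedged sketch (not part of the original source) for the rows=True branch
# of ismember, which tests whole-row membership.
def _demo_ismember():
    arr = sp.array([[1, 2], [3, 4]])
    print(ismember(sp.array([3, 4]), arr, rows=True))  # True
    print(ismember(sp.array([2, 3]), arr, rows=True))  # False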
def main(npulse=100,
         functlist=['spectrums', 'radardata', 'fitting', 'analysis'],
         radar='pfisr'):
    """ This function will call other functions to create the input data,
        config file and run the radar data sim. The path for the simulation
        will be created in the Testdata directory in the SimISR module. The
        new folder will be called BasicTest. The simulation is a long pulse
        simulation with the desired number of pulses from the user.
        Inputs
            npulse - Number of pulses for the integration period,
                default==100.
            functlist - The list of functions for the SimISR to do.
    """
    curloc = Path(__file__).resolve()
    testpath = curloc.parent.parent / 'Testdata' / 'BasicTest'
    if not testpath.is_dir():
        testpath.mkdir(parents=True)
    functlist_default = ['spectrums', 'radardata', 'fitting']
    check_list = sp.array([i in functlist for i in functlist_default])
    check_run = sp.any(check_list)
    functlist_red = sp.array(functlist_default)[check_list].tolist()
    config = testpath.joinpath('stats.yml')
    if not config.exists():
        configfilesetup(str(testpath), npulse, radar)
    (_, simparams) = readconfigfile(str(config))
    makedata(testpath, simparams['Tint'])
    if check_run:
        runsim(functlist_red, str(testpath), config, True)
    if 'analysis' in functlist:
        analysisdump(str(testpath), config)
def compactness(target, throat_perimeter='throat.perimeter',
                throat_area='throat.area'):
    r"""
    Mortensen et al. have shown that the Hagen-Poiseuille hydraulic
    resistance is linearly dependent on the compactness, defined as
    perimeter^2/area.  The dependence is not universal as shapes with
    sharp corners provide more resistance than those that are more
    elliptical.  Count the number of vertices and apply the right
    correction.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    throat_perimeter : string
        The dictionary key of the array containing the throat perimeter
        values.

    throat_area : string
        The dictionary key of the array containing the throat area values.

    Returns
    -------
    alpha : NumPy ndarray
        Array containing throat compactness values.

    References
    ----------
    Mortensen N.A, Okkels F., and Bruus H. Reexamination of Hagen-
    Poiseuille flow: Shape dependence of the hydraulic resistance in
    microchannels. Physical Review E, v.71, pp.057301 (2005).
    """
    # Only apply to throats with an area
    ts = target.throats()[target[throat_area] > 0]
    P = target[throat_perimeter]
    A = target[throat_area]
    C = _sp.ones(target.num_throats())
    C[ts] = P[ts]**2 / A[ts]
    alpha = _sp.ones_like(C) * 8 * _sp.pi
    if 'throat.offset_vertices' in target.props():
        verts = target['throat.offset_vertices']
        for i in ts:
            if ~_sp.any(_sp.isnan(verts[i])):
                if len(verts[i]) == 3:
                    # Triangular Correction
                    alpha[i] = C[i] * (25 / 17) + (40 * _sp.sqrt(3) / 17)
                elif len(verts[i]) == 4:
                    # Rectangular Correction
                    alpha[i] = C[i] * (22 / 7) - (65 / 3)
                elif len(verts[i]) > 4:
                    # Approximate Elliptical Correction
                    alpha[i] = C[i] * (8 / 3) - (8 * _sp.pi / 3)
    # For a perfect circle alpha = 8*pi so normalize by this
    alpha /= 8 * _sp.pi
    # Very small throats could have values less than one
    alpha[alpha < 1.0] = 1.0
    return alpha
def isequal(A, B, tol=1e-15):
    """Determines if two qobj objects are equal to within given tolerance.

    Parameters
    ----------
    A : qobj
        Qobj one
    B : qobj
        Qobj two
    tol : float
        Tolerance for equality to be valid

    Returns
    -------
    isequal : bool
        True if qobjs are equal, False otherwise.

    """
    if A.dims != B.dims:
        return False
    else:
        Adat = A.data
        Bdat = B.data
        elems = (Adat - Bdat).data
        if any(abs(elems) > tol):
            return False
        else:
            return True
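# Hedged sketch against QuTiP's API (not part of the original source;
# assumes qutip is installed and destroy() returns a Qobj as used above).
def _demo_isequal():
    from qutip import destroy
    a = destroy(4)
    print(isequal(a, a))      # True
    print(isequal(a, 2 * a))  # False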
def straight(network, geometry, pore_diameter='pore.diameter',
             L_negative=1e-9, **kwargs):
    r"""
    Calculate throat length

    Parameters
    ----------
    L_negative : float
        The default throat length to use when negative lengths are found.
        The default is 1 nm.  To accept negative throat lengths, set this
        value to ``None``.
    """
    # Initialize throat_property['length']
    throats = network.throats(geometry.name)
    pore1 = network['throat.conns'][:, 0]
    pore2 = network['throat.conns'][:, 1]
    C1 = network['pore.coords'][pore1]
    C2 = network['pore.coords'][pore2]
    # Euclidean distance between pores
    E = _sp.sqrt(_sp.sum((C1 - C2)**2, axis=1))
    D1 = network[pore_diameter][pore1]
    D2 = network[pore_diameter][pore2]
    value = E - (D1 + D2) / 2.
    value = value[throats]
    if _sp.any(value < 0) and (L_negative is not None):
        print('Negative throat lengths are calculated. Arbitrary positive '
              'length assigned: ' + str(L_negative))
        Ts = _sp.where(value < 0)[0]
        value[Ts] = L_negative
    return value
def test_late_pore_and_throat_filling(self):
    mip = op.algorithms.Porosimetry(network=self.net)
    mip.setup(phase=self.hg)
    mip.set_inlets(pores=self.net.pores('left'))
    # Run without late pore filling
    mip.run()
    data_no_lpf = mip.get_intrusion_data()
    # Now run with late pore filling
    self.phys['pore.pc_star'] = 2 / self.net['pore.diameter']
    self.phys.add_model(propname='pore.partial_filling',
                        pressure='pore.pressure',
                        Pc_star='pore.pc_star',
                        model=op.models.physics.multiphase.late_filling)
    mip.reset()
    mip.set_inlets(pores=self.net.pores('left'))
    mip.set_partial_filling(propname='pore.partial_filling')
    mip.run()
    self.phys.regenerate_models()
    data_w_lpf = mip.get_intrusion_data()
    assert sp.all(sp.array(data_w_lpf.Snwp) < sp.array(data_no_lpf.Snwp))
    # Now run with late throat filling
    self.phys['throat.pc_star'] = 2 / self.net['throat.diameter']
    self.phys.add_model(propname='throat.partial_filling',
                        pressure='throat.pressure',
                        Pc_star='throat.pc_star',
                        model=op.models.physics.multiphase.late_filling)
    mip.reset()
    mip.set_inlets(pores=self.net.pores('left'))
    mip.set_partial_filling(propname='throat.partial_filling')
    mip.run()
    data_w_ltf = mip.get_intrusion_data()
    assert sp.any(sp.array(data_w_ltf.Snwp) < sp.array(data_w_lpf.Snwp))
def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
    """Evaluate the covariance between points `Xi` and `Xj` with
    derivative order `ni`, `nj`.

    Parameters
    ----------
    Xi : :py:class:`Matrix` or other Array-like, (`M`, `D`)
        `M` inputs with dimension `D`.
    Xj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
        `M` inputs with dimension `D`.
    ni : :py:class:`Matrix` or other Array-like, (`M`, `D`)
        `M` derivative orders for set `i`.
    nj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
        `M` derivative orders for set `j`.
    hyper_deriv : Non-negative int or None, optional
        The index of the hyperparameter to compute the first derivative
        with respect to. If None, no derivatives are taken.
        Hyperparameter derivatives are not supported at this point.
        Default is None.
    symmetric : bool
        Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
        Default is False.

    Returns
    -------
    Kij : :py:class:`Array`, (`M`,)
        Covariances for each of the `M` `Xi`, `Xj` pairs.

    Raises
    ------
    NotImplementedError
        If the `hyper_deriv` keyword is not None.
    """
    if hyper_deriv is not None:
        raise NotImplementedError(
            "Hyperparameter derivatives have not been implemented!")
    if scipy.any(scipy.sum(ni, axis=1) > 1) or scipy.any(
            scipy.sum(nj, axis=1) > 1):
        raise ValueError(
            "Matern52Kernel only supports 0th and 1st order derivatives")
    Xi = scipy.asarray(Xi, dtype=scipy.float64)
    Xj = scipy.asarray(Xj, dtype=scipy.float64)
    ni = scipy.array(ni, dtype=scipy.int32)
    nj = scipy.array(nj, dtype=scipy.int32)
    var = scipy.square(self.params[-self.num_dim:])
    value = _matern52(Xi, Xj, ni, nj, var)
    return self.params[0]**2 * value
def readSRI_h5(fn, params, timelims=None):
    '''This will read the SRI formatted h5 files for RISR and PFISR.'''
    assert isinstance(params, (tuple, list))
    h5fn = Path(fn).expanduser()

    coordnames = 'Spherical'
    # Set up the dictionary to find the data
    pathdict = {'Ne': ('/FittedParams/Ne', None),
                'dNe': ('/FittedParams/Ne', None),
                'Vi': ('/FittedParams/Fits', (0, 3)),
                'dVi': ('/FittedParams/Errors', (0, 3)),
                'Ti': ('/FittedParams/Fits', (0, 1)),
                'dTi': ('/FittedParams/Errors', (0, 1)),
                'Te': ('/FittedParams/Fits', (-1, 1)),
                'dTe': ('/FittedParams/Errors', (-1, 1))}

    with h5py.File(str(h5fn), 'r', libver='latest') as f:
        # Get the times and time lims
        times = f['/Time/UnixTime'].value
        # get the sensor location
        sensorloc = np.array([f['/Site/Latitude'].value,
                              f['/Site/Longitude'].value,
                              f['/Site/Altitude'].value])
        # Get the locations of the data points
        rng = f['/FittedParams/Range'].value / 1e3
        angles = f['/BeamCodes'][:, 1:3]

    nt = times.shape[0]
    if timelims is not None:
        times = times[(times[:, 0] >= timelims[0]) &
                      (times[:, 1] < timelims[1]), :]
        nt = times.shape[0]
    # allaz, allel corresponds to rng.ravel()
    allaz = np.tile(angles[:, 0], rng.shape[1])
    allel = np.tile(angles[:, 1], rng.shape[1])
    dataloc = np.vstack((rng.ravel(), allaz, allel)).T
    # Read in the data
    data = {}
    with h5py.File(str(h5fn), 'r', libver='latest') as f:
        for istr in params:
            if istr not in pathdict.keys():
                logging.error('{} is not a valid parameter name.'
                              .format(istr))
                continue
            curpath = pathdict[istr][0]
            curint = pathdict[istr][-1]
            if curint is None:
                # 3-D data
                tempdata = f[curpath]
            else:
                # 5-D data -> 3-D data
                tempdata = f[curpath][:, :, :, curint[0], curint[1]]
            data[istr] = np.array([tempdata[iT, :, :].ravel()
                                   for iT in range(nt)]).T
    # remove nans from SRI file
    nanlog = sp.any(sp.isnan(dataloc), 1)
    keeplog = sp.logical_not(nanlog)
    dataloc = dataloc[keeplog]
    for ikey in data.keys():
        data[ikey] = data[ikey][keeplog]
    return (data, coordnames, dataloc, sensorloc, times)
def test_dump_and_fetch_data(self):
    proj = self.ws.copy_project(self.proj)
    proj._dump_data()
    # Ensure only pore.coords and throat.conns are found
    assert sum([len(item.props()) for item in proj]) == 2
    proj._fetch_data()
    assert sp.any([len(item.props()) for item in proj])
    os.remove(proj.name + '.hdf5')
def test_add_boundary_pores(self):
    net = op.Network.CubicDual(shape=[5, 5, 5],
                               label_1='primary', label_2='secondary')
    Ps = net.pores(labels=['surface', 'bottom'], mode='intersection')
    net.add_boundary_pores(pores=Ps, offset=[0, 0, -0.5])
    Ps2 = net.pores(labels=['boundary'], mode='intersection')
    assert Ps.size == Ps2.size
    assert ~sp.any(sp.in1d(Ps, Ps2))
def preProcess(self, periodF0=0.06, deltaF_div_F0=True,
               max_threshold=None, min_threshold=None,
               nan_to_zeros=True, detrend=False,
               gaussian_filter=None, f1=None, f2=None, **kargs):
    images = self.images
    if deltaF_div_F0:
        ind = self.t() <= self.t_start + periodF0
        m0 = mean(images[ind, :, :], axis=0)
        images = (images - m0) / m0 * 1000.
    if max_threshold is not None:
        images[images > max_threshold] = nan
    if min_threshold is not None:
        images[images < min_threshold] = nan
    if nan_to_zeros:
        images[isnan(images)] = 0.
    if detrend and not nan_to_zeros:
        m = any(isnan(images), axis=0)
        images[isnan(images)] = 0.
        images = signal.detrend(images, axis=0)
        images[:, m] = nan
    elif detrend and nan_to_zeros:
        images = signal.detrend(images, axis=0)
    if gaussian_filter is not None:
        images = ndimage.gaussian_filter(
            images, (0, gaussian_filter, gaussian_filter))
    if f1 is not None or f2 is not None:
        from ..computing.filter import fft_passband_filter
        if f1 is None:
            f1 = 0.
        if f2 is None:
            f2 = inf  # open upper band edge
        nq = self.sampling_rate / 2.
        images = fft_passband_filter(images, f_low=f1 / nq, f_high=f2 / nq,
                                     axis=0)
    return images
def ex3(exclude=sc.array([1, 2, 3, 4]), plotfilename='ex3.png',
        bovyprintargs={}):
    """ex3: solve exercise 3
    Input:
       exclude      - ID numbers to exclude from the analysis
       plotfilename - filename for the output plot
    Output:
       plot
    History:
       2009-05-27 - Written - Bovy (NYU)
    """
    # Read the data
    data = read_data('data_yerr.dat')
    ndata = len(data)
    nsample = ndata - len(exclude)
    # Put the data in the appropriate arrays and matrices
    Y = sc.zeros(nsample)
    A = sc.ones((nsample, 3))
    C = sc.zeros((nsample, nsample))
    yerr = sc.zeros(nsample)
    jj = 0
    for ii in range(ndata):
        if sc.any(exclude == data[ii][0]):
            pass
        else:
            Y[jj] = data[ii][1][1]
            A[jj, 1] = data[ii][1][0]
            A[jj, 2] = data[ii][1][0]**2.
            C[jj, jj] = data[ii][2]**2.
            yerr[jj] = data[ii][2]
            jj = jj + 1
    # Now compute the best fit and the uncertainties
    bestfit = sc.dot(linalg.inv(C), Y.T)
    bestfit = sc.dot(A.T, bestfit)
    bestfitvar = sc.dot(linalg.inv(C), A)
    bestfitvar = sc.dot(A.T, bestfitvar)
    bestfitvar = linalg.inv(bestfitvar)
    bestfit = sc.dot(bestfitvar, bestfit)
    # Now plot the solution
    plot.bovy_print(**bovyprintargs)
    # plot bestfit
    xrange = [0, 300]
    yrange = [0, 700]
    nsamples = 1001
    xs = sc.linspace(xrange[0], xrange[1], nsamples)
    ys = sc.zeros(nsamples)
    for ii in range(nsamples):
        ys[ii] = bestfit[0] + bestfit[1] * xs[ii] + bestfit[2] * xs[ii]**2.
    plot.bovy_plot(xs, ys, 'k-', xrange=xrange, yrange=yrange,
                   xlabel=r'$x$', ylabel=r'$y$', zorder=2)
    # Plot data
    errorbar(A[:, 1], Y, yerr, marker='o', color='k', linestyle='None',
             zorder=1)
    # Put in a label with the best fit
    text(5, 30, r'$y = (' +
         '%4.4f \pm %4.4f)\,x^2 + ( %4.2f \pm %4.2f )\,x+ ( %4.0f\pm %4.0f' %
         (bestfit[2], m.sqrt(bestfitvar[2, 2]), bestfit[1],
          m.sqrt(bestfitvar[1, 1]), bestfit[0],
          m.sqrt(bestfitvar[0, 0])) + r')$')
    plot.bovy_end_print(plotfilename)
    return 0
def SRIparams2iono(filename):
    fullfile = h5file(filename)
    fullfiledict = fullfile.readWholeh5file()

    # Size = Nrecords x Nbeams x Nranges x Nions+1 x 4
    # (fraction, temperature, collision frequency, LOS speed)
    fits = fullfiledict['/FittedParams']['Fits']
    (nt, nbeams, nrng, nspecs, nstuff) = fits.shape
    nlocs = nbeams * nrng
    fits = fits.transpose((1, 2, 0, 3, 4))
    fits = fits.reshape((nlocs, nt, nspecs, nstuff))
    # Nrecords x Nbeams x Nranges
    Ne = fullfiledict['/FittedParams']['Ne']
    Ne = Ne.transpose((1, 2, 0))
    Ne = Ne.reshape((nlocs, nt))

    param_lists = sp.zeros((nlocs, nt, nspecs, 2))
    param_lists[:, :, :, 0] = fits[:, :, :, 0]
    param_lists[:, :, :, 1] = fits[:, :, :, 1]
    param_lists[:, :, -1, 0] = Ne
    Velocity = fits[:, :, 0, 3]

    if fullfiledict['/FittedParams']['IonMass'] == 16:
        species = ['O+', 'e-']
        pnames = sp.array([['Ni', 'Ti'], ['Ne', 'Te']])

    time = fullfiledict['/Time']['UnixTime']
    rng = fullfiledict['/FittedParams']['Range']
    bco = fullfiledict['/']['BeamCodes']
    angles = bco[:, 1:3]
    (nang, nrg) = rng.shape

    allang = sp.tile(angles[:, sp.newaxis], (1, nrg, 1))
    all_loc = sp.column_stack((rng.flatten(),
                               allang.reshape(nang * nrg, 2)))
    lkeep = ~sp.any(sp.isnan(all_loc), 1)
    all_loc = all_loc[lkeep]
    Velocity = Velocity[lkeep]
    param_lists = param_lists[lkeep]
    all_loc[:, 0] = all_loc[:, 0] * 1e-3
    iono1 = IonoContainer(all_loc, param_lists, times=time, ver=1,
                          coordvecs=['r', 'theta', 'phi'],
                          paramnames=pnames, species=species,
                          velocity=Velocity)

    # MSIS
    tn = fullfiledict['/MSIS']['Tn']
    tn = tn.transpose((1, 2, 0))
    tn = tn.reshape((nlocs, nt))

    startparams = sp.ones((nlocs, nt, 2, 2))
    startparams[:, :, 0, 1] = tn
    startparams[:, :, 1, 1] = tn
    startparams = startparams[lkeep]
    ionoS = IonoContainer(all_loc, startparams, times=time, ver=1,
                          coordvecs=['r', 'theta', 'phi'],
                          paramnames=pnames, species=species)
    return iono1, ionoS
def isOrthogonal(A, tol=1e-13):
    '''
    Test whether matrix A is orthogonal, up to numerical tolerance.
    If A is orthogonal then sp.dot(A.T, A) will be the identity matrix.
    '''
    (_, p) = A.shape
    Ix = sp.dot(A.T, A) - sp.eye(p)
    # compare magnitudes of the deviation from identity, so that large
    # negative deviations also fail the test
    return not sp.any(sp.absolute(Ix) > tol)
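# Hedged sketch (not part of the original source): a 2-D rotation matrix
# is orthogonal; a scaled copy is not.  Assumes the legacy "scipy as sp"
# alias used above.
def _demo_isOrthogonal():
    theta = 0.3
    R = sp.array([[sp.cos(theta), -sp.sin(theta)],
                  [sp.sin(theta), sp.cos(theta)]])
    print(isOrthogonal(R))      # True
    print(isOrthogonal(2 * R))  # False: A.T @ A == 4I, not I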