def gen_crit_sub_num_rad(rad_sampler,
                         jet_type='quark',
                         obs_accuracy='LL',
                         splitfn_accuracy='LL',
                         beta=2.,
                         epsilon=1e-15,
                         bin_space='log',
                         fixed_coupling=True,
                         num_bins=1000):
    """Takes a sampler with generated data and returns the associated
    numerically integrated radiator, evaluated over the sampled phase
    space and over the angles theta of the associated critical emissions.

    Saves the radiator and the associated interpolation function.

    Parameters
    ----------
    rad_sampler : sampler
        A sampler class which has sampled over the phase space
        of a certain type of emission.
    jet_type : str
        The type of jet. Must be 'quark' or 'gluon'.
    obs_accuracy : str
        The accuracy at which we calculate the relevant observable.
        Must be 'LL' or 'MLL'.
    splitfn_accuracy : str
        The accuracy of the splitting function. Must be 'LL' or 'MLL'.
    fixed_coupling : bool
        Determines whether the radiator is calculated using
        fixed coupling (True) or running coupling (False).

    Returns
    -------
    function
        A 2d interpolating function for the critical-subsequent radiator.
    """
    # Preparing lists to hold all radiator and angle data
    rads_all = []
    rad_error_all = []
    xs_all = []
    thetas_all = []

    # Getting information from sampler
    samples = rad_sampler.getSamples()

    # Preparing a list of theta_crits on which our radiator will depend:
    theta_calc_list = lin_log_mixed_list(epsilon, 1., num_bins)

    # Preparing observables from the subsequent sampler
    z_em = samples[:, 0]
    theta_samp = samples[:, 1]

    for i, theta_crit in enumerate(theta_calc_list):
        # Setting up an integrator
        rad_integrator = integrator()

        # Integral is positive, and is zero at the last bin
        rad_integrator.setLastBinBndCondition([0., 'plus'])

        # Preparing to integrate over the sampled phase space
        jacs = rad_sampler.jacobians
        area = rad_sampler.area

        # Rescaling the emission angles relative to the sampled angles:
        theta_em = theta_samp * theta_crit

        if bin_space == 'lin':
            area = area * theta_crit
        if bin_space == 'log':
            jacs = np.array(jacs) * theta_crit

        obs = C_ungroomed(z_em, theta_em, beta, acc=obs_accuracy)

        # Weights, binned observables, and area
        rad_integrator.setBins(num_bins, obs, bin_space,
                               min_log_bin=theta_crit**beta * 1e-15)
        weights = radiatorWeight(z_em, theta_em, jet_type,
                                 fixedcoupling=fixed_coupling,
                                 acc=splitfn_accuracy)

        # Performing integration
        rad_integrator.setDensity(obs, weights * jacs, area)
        rad_integrator.integrate()

        # Radiator, given a maximum angle of theta
        radiator = rad_integrator.integral
        # radiator_error = rad_integrator.integralErr
        xs = rad_integrator.bins[:-1]

        radiator = np.append(radiator, 0)
        # radiator_error = np.append(radiator_error, 0)
        xs = np.append(xs, C_ungroomed_max(beta, radius=theta_crit,
                                           acc=obs_accuracy))

        # Saving the function/radiator values, bin edges, and theta value
        rads_all.append(np.array(radiator))
        # rad_error_all.append(np.array(radiator_error))
        xs_all.append(np.array(xs))
        thetas_all.append(np.ones(len(xs)) * theta_crit)

    xs_all = np.array(xs_all)
    thetas_all = np.array(thetas_all)
    rads_all = np.array(rads_all)

    points = np.array([xs_all.flatten(), thetas_all.flatten()]).T
    unbounded_rad_fn = NearestNDInterpolator(points, rads_all.flatten())

    def bounded_rad_fn(x, theta):
        # Subsequent emission boundaries
        bnds = (x <= C_ungroomed_max(beta, radius=theta, acc=obs_accuracy))
        rad = ((0 <= x) * bnds * (0 <= theta)
               * unbounded_rad_fn(x, theta))
        return rad

    return bounded_rad_fn
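# A minimal, self-contained sketch of the masked-interpolator pattern used
# above (hypothetical data; assumes only numpy and scipy): build a
# NearestNDInterpolator over scattered (x, theta) points and zero out
# queries that fall outside the physical boundary, since nearest-neighbour
# interpolation otherwise returns the closest in-domain value everywhere.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

rng = np.random.default_rng(0)
pts = rng.random((500, 2))                    # columns: (x, theta)
vals = np.exp(-pts[:, 0] / (pts[:, 1] + 1e-3))
unbounded = NearestNDInterpolator(pts, vals)

def bounded(x, theta, x_max=1.0):
    # Mask out-of-range queries to zero instead of extrapolating.
    inside = (0 <= x) & (x <= x_max) & (0 <= theta)
    return inside * unbounded(x, theta)

print(bounded(np.array([0.2, 5.0]), np.array([0.5, 0.5])))  # second entry -> 0.0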
def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest",
                     fill_value=np.nan):
    """
    Get active indices from topography

    Parameters
    ----------

    :param TensorMesh mesh: TensorMesh object on which to discretize the
        topography
    :param numpy.ndarray topo: [X,Y,Z] topographic data
    :param str gridLoc: 'CC' or 'N'. Default is 'CC'.
        Discretize the topography on cell centers 'CC' or nodes 'N'
    :param str method: 'nearest' or 'linear' or 'cubic'. Default is 'nearest'.
        Interpolation method for the topographic data
    :param float fill_value: default is np.nan. Filling value for extrapolation

    Returns
    -------

    :returns: actind, index vector for the active cells on the mesh
        below the topography
    :rtype: numpy.ndarray
    """
    if mesh._meshType == "TENSOR":

        if mesh.dim == 3:
            # Check if topo points are inside of the mesh
            xmin, xmax = mesh.vectorNx.min(), mesh.vectorNx.max()
            xminTopo, xmaxTopo = topo[:, 0].min(), topo[:, 0].max()
            ymin, ymax = mesh.vectorNy.min(), mesh.vectorNy.max()
            yminTopo, ymaxTopo = topo[:, 1].min(), topo[:, 1].max()
            if ((xminTopo > xmin) or (xmaxTopo < xmax) or
                    (yminTopo > ymin) or (ymaxTopo < ymax)):
                # If not, use nearest neighbor to extrapolate them
                Ftopo = NearestNDInterpolator(topo[:, :2], topo[:, 2])
                xinds = np.logical_or(xminTopo < mesh.vectorNx,
                                      xmaxTopo > mesh.vectorNx)
                yinds = np.logical_or(yminTopo < mesh.vectorNy,
                                      ymaxTopo > mesh.vectorNy)
                XYOut = ndgrid(mesh.vectorNx[xinds], mesh.vectorNy[yinds])
                topoOut = Ftopo(XYOut)
                topo = np.vstack((topo, np.c_[XYOut, topoOut]))

            if gridLoc == "CC":
                XY = ndgrid(mesh.vectorCCx, mesh.vectorCCy)
                Zcc = mesh.gridCC[:, 2].reshape(
                    (np.prod(mesh.vnC[:2]), mesh.nCz), order="F")
                gridTopo = griddata(topo[:, :2], topo[:, 2], XY,
                                    method=method, fill_value=fill_value)
                actind = [gridTopo >= Zcc[:, ixy]
                          for ixy in range(mesh.vnC[2])]
                actind = np.hstack(actind)

            elif gridLoc == "N":
                XY = ndgrid(mesh.vectorNx, mesh.vectorNy)
                gridTopo = griddata(topo[:, :2], topo[:, 2], XY,
                                    method=method, fill_value=fill_value)
                gridTopo = gridTopo.reshape(mesh.vnN[:2], order="F")

                if mesh._meshType not in ["TENSOR", "CYL", "BASETENSOR"]:
                    raise NotImplementedError(
                        "Nodal surface2ind_topo not implemented for "
                        "{0!s} mesh".format(mesh._meshType))

                # TODO: this will only work for tensor meshes
                Nz = mesh.vectorNz[1:]
                actind = np.array([False] * mesh.nC).reshape(mesh.vnC,
                                                             order="F")

                for ii in range(mesh.nCx):
                    for jj in range(mesh.nCy):
                        actind[ii, jj, :] = [
                            np.all(gridTopo[ii:ii + 2, jj:jj + 2] >= Nz[kk])
                            for kk in range(len(Nz))
                        ]

        elif mesh.dim == 2:
            # Check if topo points are inside of the mesh
            xmin, xmax = mesh.vectorNx.min(), mesh.vectorNx.max()
            xminTopo, xmaxTopo = topo[:, 0].min(), topo[:, 0].max()
            if (xminTopo > xmin) or (xmaxTopo < xmax):
                fill_value = "extrapolate"

            Ftopo = interp1d(topo[:, 0], topo[:, 1],
                             fill_value=fill_value, kind=method)

            if gridLoc == "CC":
                gridTopo = Ftopo(mesh.gridCC[:, 0])
                actind = mesh.gridCC[:, 1] <= gridTopo

            elif gridLoc == "N":
                gridTopo = Ftopo(mesh.vectorNx)
                if mesh._meshType not in ["TENSOR", "CYL", "BASETENSOR"]:
                    raise NotImplementedError(
                        "Nodal surface2ind_topo not implemented for "
                        "{0!s} mesh".format(mesh._meshType))

                # TODO: this will only work for tensor meshes
                Ny = mesh.vectorNy[1:]
                actind = np.array([False] * mesh.nC).reshape(mesh.vnC,
                                                             order="F")

                for ii in range(mesh.nCx):
                    actind[ii, :] = [
                        np.all(gridTopo[ii:ii + 2] > Ny[kk])
                        for kk in range(len(Ny))
                    ]

        else:
            raise NotImplementedError(
                "surface2ind_topo not implemented for 1D mesh")

    elif mesh._meshType == "TREE":
        if mesh.dim == 3:
            if gridLoc == "CC":
                # Compute unique XY location
                uniqXY = uniqueRows(mesh.gridCC[:, :2])

                if method == "nearest":
                    Ftopo = NearestNDInterpolator(topo[:, :2], topo[:, 2])
                elif method == "linear":
                    # Check if topo points are inside of the mesh
                    xmin, xmax = mesh.x0[0], mesh.hx.sum() + mesh.x0[0]
                    xminTopo, xmaxTopo = topo[:, 0].min(), topo[:, 0].max()
                    ymin, ymax = mesh.x0[1], mesh.hy.sum() + mesh.x0[1]
                    yminTopo, ymaxTopo = topo[:, 1].min(), topo[:, 1].max()
                    if ((xminTopo > xmin) or (xmaxTopo < xmax) or
                            (yminTopo > ymin) or (ymaxTopo < ymax)):
                        # If not, use nearest neighbor to extrapolate them
                        Ftopo = NearestNDInterpolator(topo[:, :2],
                                                      topo[:, 2])
                        xinds = np.logical_or(xminTopo < uniqXY[0][:, 0],
                                              xmaxTopo > uniqXY[0][:, 0])
                        yinds = np.logical_or(yminTopo < uniqXY[0][:, 1],
                                              ymaxTopo > uniqXY[0][:, 1])
                        inds = np.logical_or(xinds, yinds)
                        XYOut = uniqXY[0][inds, :]
                        topoOut = Ftopo(XYOut)
                        topo = np.vstack((topo, np.c_[XYOut, topoOut]))
                    Ftopo = LinearNDInterpolator(topo[:, :2], topo[:, 2])
                else:
                    raise NotImplementedError(
                        "Only nearest and linear method are available "
                        "for TREE mesh")

                actind = np.zeros(mesh.nC, dtype="bool")
                npts = uniqXY[0].shape[0]
                for i in range(npts):
                    z = Ftopo(uniqXY[0][i, :])
                    inds = uniqXY[2] == i
                    actind[inds] = mesh.gridCC[inds, 2] < z[0]

            # Need to implement
            elif gridLoc == "N":
                raise NotImplementedError(
                    "gridLoc=N is not implemented for TREE mesh")
            else:
                raise Exception("gridLoc must be either CC or N")

        elif mesh.dim == 2:
            if gridLoc == "CC":
                # Compute unique X location
                uniqX = np.unique(mesh.gridCC[:, 0], return_index=True,
                                  return_inverse=True)

                if method == "nearest":
                    Ftopo = interp1d(topo[:, 0], topo[:, -1],
                                     kind="nearest")
                elif method == "linear":
                    # Check if topo points are inside of the mesh
                    xmin, xmax = mesh.x0[0], mesh.hx.sum() + mesh.x0[0]
                    xminTopo, xmaxTopo = topo[:, 0].min(), topo[:, 0].max()
                    if (xminTopo > xmin) or (xmaxTopo < xmax):
                        # If not, use nearest neighbor to extrapolate them.
                        # uniqX[0] is one-dimensional, so the unique x
                        # locations are indexed directly (the original
                        # indexed it as a 2D array, which raises).
                        Ftopo = interp1d(topo[:, 0], topo[:, -1],
                                         kind="nearest")
                        xinds = np.logical_or(xminTopo < uniqX[0],
                                              xmaxTopo > uniqX[0])
                        XOut = uniqX[0][xinds]
                        topoOut = Ftopo(XOut)
                        topo = np.vstack((topo, np.c_[XOut, topoOut]))
                    # use a linear interpolant here (the original rebuilt
                    # a nearest one despite method == "linear")
                    Ftopo = interp1d(topo[:, 0], topo[:, -1],
                                     kind="linear")
                else:
                    raise NotImplementedError(
                        "Only nearest and linear method are available "
                        "for TREE mesh")

                actind = np.zeros(mesh.nC, dtype="bool")
                npts = uniqX[0].shape[0]
                for i in range(npts):
                    z = Ftopo(uniqX[0][i])
                    inds = uniqX[2] == i
                    actind[inds] = mesh.gridCC[inds, 1] < z

            # Need to implement
            elif gridLoc == "N":
                raise NotImplementedError(
                    "gridLoc=N is not implemented for TREE mesh")
            else:
                raise Exception("gridLoc must be either CC or N")

        else:
            raise NotImplementedError(
                "surface2ind_topo not implemented for 1D mesh")

    return mkvc(actind)
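# Hedged sketch of the core idea in surface2ind_topo (toy grid, numpy/scipy
# only, no SimPEG): interpolate scattered topography onto cell centers with
# a nearest-neighbour interpolant and flag the cells whose center lies
# below the surface.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

rng = np.random.default_rng(42)
topo_pts = rng.random((200, 3)) * [100.0, 100.0, 10.0]   # scattered [X, Y, Z]
f_topo = NearestNDInterpolator(topo_pts[:, :2], topo_pts[:, 2])

xc = yc = np.linspace(0, 100, 20)
zc = np.linspace(-20, 20, 10)
X, Y, Z = np.meshgrid(xc, yc, zc, indexing="ij")
actind = Z.ravel() <= f_topo(X.ravel(), Y.ravel())       # active = below topo
print(actind.sum(), "active cells of", actind.size)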
def __init__(self, geo_in, geo_out, cache=None, distance_check=True,
             distance_limit=3):
    from scipy.interpolate import NearestNDInterpolator

    if not geo_in.can_interpolate:
        raise Exception("The input geometry can not be interpolated")

    cached_interpolator = None
    if cache is not None:
        cached_interpolator = cache.get_interpolator("nearest", geo_in,
                                                     geo_out)

    if cached_interpolator is not None:
        print("Using cached interpolator")
        self.type = cached_interpolator.type
        if self.type != "nearest":
            raise Exception("Mismatch in interpolators")
        self.index = cached_interpolator.index
        self.nx = cached_interpolator.nx
        self.ny = cached_interpolator.ny
        self.var_lons = cached_interpolator.var_lons
        self.var_lats = cached_interpolator.var_lats
    else:
        interpolated_lons = geo_out.lonlist
        interpolated_lats = geo_out.latlist
        var_lons = geo_in.lons
        var_lats = geo_in.lats
        nx = var_lons.shape[0]
        ny = var_lats.shape[1]

        dim_x = var_lons.shape[0]
        dim_y = var_lats.shape[1]

        lons_vec = np.reshape(var_lons, dim_x * dim_y)
        lats_vec = np.reshape(var_lats, dim_x * dim_y)

        points = np.empty([dim_x * dim_y, 2])
        points[:, 0] = lons_vec
        points[:, 1] = lats_vec

        values_vec = np.arange(dim_x * dim_y)
        x = np.floor_divide(values_vec, dim_y)
        y = np.mod(values_vec, dim_y)

        # extract subdomain for faster interpolation
        llv = (np.min(interpolated_lons) - 1, np.min(interpolated_lats) - 1)
        urv = (np.max(interpolated_lons) + 1, np.max(interpolated_lats) + 1)
        test1 = points > llv
        test2 = points < urv
        subdom = test1[:, 0] * test1[:, 1] * test2[:, 0] * test2[:, 1]

        surfex.util.info("Interpolating..." + str(len(interpolated_lons)) +
                         " points")
        # nn = NearestNDInterpolator(points[subdom, :], values_vec[subdom])
        nn = NearestNDInterpolator(points[:, :], values_vec[:])
        surfex.util.info("Interpolation finished")

        # print(interpolated_lons, interpolated_lats)
        ii = nn(interpolated_lons, interpolated_lats)
        i = x[ii]
        j = y[ii]

        self.distances = self.distance(interpolated_lons, interpolated_lats,
                                       lons_vec[ii], lats_vec[ii])

        # Set max distance as sanity check
        if distance_check:
            if len(lons_vec) > 1 and len(lats_vec) > 1:
                max_distance = distance_limit * self.distance(
                    lons_vec[0], lats_vec[0], lons_vec[1], lats_vec[1])
            else:
                raise Exception(
                    "You only have one point in your input field!")

            # dist = self.distance(interpolated_lons, interpolated_lats, lons_vec[ii], lats_vec[ii])
            if self.distances.max() > max_distance:
                raise Exception(
                    "Point is too far away from nearest point: " +
                    str(self.distances.max()) + " Max distance=" +
                    str(max_distance))

        grid_points = np.column_stack((i, j))
        self.index = grid_points

        Interpolation.__init__(self, "nearest", nx, ny, var_lons, var_lats)

        if cache is not None:
            cache.update_interpolator("nearest", geo_in, geo_out, self)
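# Minimal sketch (hypothetical grids, numpy/scipy only) of the index-lookup
# trick used above: instead of interpolating field values, interpolate the
# flat index of each source grid point, which yields, for every target
# point, the (i, j) of its nearest source neighbour.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

lon2d, lat2d = np.meshgrid(np.linspace(0, 10, 50), np.linspace(40, 50, 40),
                           indexing="ij")
flat_idx = np.arange(lon2d.size)
nn = NearestNDInterpolator(
    np.column_stack((lon2d.ravel(), lat2d.ravel())), flat_idx)

ii = nn(np.array([3.3, 7.1]), np.array([42.0, 48.5]))
i, j = np.unravel_index(ii.astype(int), lon2d.shape)
print(i, j)   # grid indices of the nearest source points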
def __init__(self, network, lgn_on, lgn_off, target, parameters, name):
    from numpy import random
    random.seed(1023)
    BaseComponent.__init__(self, network, parameters)
    self.name = name
    t_size = target.size_in_degrees()

    or_map = None
    if self.parameters.or_map:
        f = open(self.parameters.or_map_location, 'rb')
        or_map = pickle.load(f) * numpy.pi
        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(or_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(or_map)[1])
        print(min(coords_x), max(coords_x))
        print(min(coords_y), max(coords_y))
        X, Y = numpy.meshgrid(coords_x, coords_y)
        or_map = NearestNDInterpolator(list(zip(X.flatten(), Y.flatten())),
                                       or_map.flatten())

    phase_map = None
    if self.parameters.phase_map:
        f = open(self.parameters.phase_map_location, 'rb')
        phase_map = pickle.load(f)
        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(phase_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(phase_map)[1])
        X, Y = numpy.meshgrid(coords_x, coords_y)
        phase_map = NearestNDInterpolator(
            list(zip(X.flatten(), Y.flatten())), phase_map.flatten())

    print(min(target.pop.positions[0]), max(target.pop.positions[0]))
    print(min(target.pop.positions[1]), max(target.pop.positions[1]))

    for (j, neuron2) in enumerate(target.pop.all()):
        if or_map:
            orientation = or_map(target.pop.positions[0][j],
                                 target.pop.positions[1][j])
        else:
            orientation = parameters.orientation_preference.next()[0]

        if phase_map:
            phase = phase_map(target.pop.positions[0][j],
                              target.pop.positions[1][j])
        else:
            phase = parameters.phase.next()[0]

        aspect_ratio = parameters.aspect_ratio.next()[0]
        frequency = parameters.frequency.next()[0]
        size = parameters.size.next()[0]

        assert orientation < numpy.pi

        target.add_neuron_annotation(j, 'LGNAfferentOrientation',
                                     orientation, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentAspectRatio',
                                     aspect_ratio, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentFrequency',
                                     frequency, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentSize', size,
                                     protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentPhase', phase,
                                     protected=True)

        if self.parameters.topological:
            target.add_neuron_annotation(j, 'LGNAfferentX',
                                         target.pop.positions[0][j],
                                         protected=True)
            target.add_neuron_annotation(j, 'LGNAfferentY',
                                         target.pop.positions[1][j],
                                         protected=True)
        else:
            target.add_neuron_annotation(j, 'LGNAfferentX', 0,
                                         protected=True)
            target.add_neuron_annotation(j, 'LGNAfferentY', 0,
                                         protected=True)

    ps = ParameterSet({
        'target_synapses': 'excitatory',
        'weight_functions': {
            'f1': {
                'component': 'mozaik.connectors.vision.GaborArborization',
                'params': {
                    'ON': True,
                }
            }
        },
        'delay_functions': {},
        # a python expression that can use variables f1..fn where n is the
        # number of functions in weight_functions, and fi corresponds to the
        # name given to a ModularConnectorFunction in the weight_functions
        # ParameterSet. It determines how the weight functions are combined
        # to obtain the weights
        'weight_expression': 'f1',
        'delay_expression': str(self.parameters.delay),
        'short_term_plasticity': self.parameters.short_term_plasticity,
        'base_weight': self.parameters.base_weight,
        'num_samples': self.parameters.num_samples,
        'fan_in': self.parameters.fan_in,
    })

    ModularSamplingProbabilisticConnector(network, name + 'On', lgn_on,
                                          target, ps).connect()
    ps['weight_functions.f1.params.ON'] = False
    ModularSamplingProbabilisticConnector(network, name + 'Off', lgn_off,
                                          target, ps).connect()
def _mkandsav_lookup(self, sat, BB=None, BBname=''):
    '''
    Generates the lookup table for a pair sat and grid.

    Usage: first generate the grid object
        gg = geosat.GeoGrid(gridtype)
    where gridtype is one of the allowed grids,
    then run the generator for this grid and a given satellite
    (it takes a while)
        gg._mkandsav_lookup(sat)
    sat can take values among himawari, msg1, msg3

    BB is a bounding box in the following format [lat1,lat2,lon1,lon2]
    lat1 and lat2 are the first and the last latitude retained
    lon1 and lon2 are the first and the last longitude retained
    BBname is a box name used to build the name of the output file
    This option is useful if the input results from the processing of a
    part of the satellite images.

    Very important point: The lonlat file must be a masked array equipped
    with the same mask that will be used for all the images of the
    satellite. Failure to enforce strictly this rule will generate
    irrecoverable errors. For MSG and Himawari, this mask is read in
    read_mask_MSG and read_mask_himawari. It is not read here but it is
    assumed that lonlat.pkl has been generated with it.
    TO DO: add a check here that reads the mask and compares it to that in
    the lonlat.pkl file to be sure they match.

    For Full_AMA_SAF:
    msg1:     BB = [340,1856,306,3350]
    himawari: BB = [474,2750,444,3793]
    '''
    # get the lon lat grid from the satellite
    try:
        if sat == 'msg1':
            satin = 'msg3'
        else:
            satin = sat
        print(os.path.join(root_dir, satin, 'lonlat.pkl'))
        lonlat = pickle.load(
            gzip.open(os.path.join(root_dir, satin, 'lonlat.pkl'), 'rb'))
        # add 360 to avoid discontinuity at 180 for Himawari
        if sat == 'himawari':
            lonlat['lon'][lonlat['lon'] < 0] += 360
        # add 41.5 degree to msg3 lon to get msg1 lon
        if sat == 'msg1':
            lonlat['lon'] += 41.5
        # extract the bounding box if required
        # if BB is not None:
        try:
            lonlat['lon'] = lonlat['lon'][BB[0]:BB[1] + 1, BB[2]:BB[3] + 1]
            lonlat['lat'] = lonlat['lat'][BB[0]:BB[1] + 1, BB[2]:BB[3] + 1]
            lonlat['BB'] = BB
        except:
            print('ERROR WHILE BOUNDING THE LATITUDE AND LONGITUDE GRIDS, '
                  'CHECK BB')
            return
        # Flatten the grid and select only the non masked pixels
        lonlat_c = np.asarray(
            [lonlat['lon'].compressed(), lonlat['lat'].compressed()])
    except:
        print('sat or lonlat undefined')
        return

    # Calculate interpolator index
    idx = np.arange(lonlat_c.shape[1], dtype=int)
    print('NearestNDInterpolator start')
    interp = NearestNDInterpolator(lonlat_c.T, idx)
    print('NearestNDInterpolator done')

    # Building the lookup table for the grid
    lookup = np.empty(shape=(len(self.ycent), len(self.xcent)), dtype=int)
    for j in range(len(self.ycent)):
        lookup[j, :] = interp(
            np.asarray([self.xcent,
                        np.repeat(self.ycent[j], len(self.xcent))]).T)

    self.lookup_dict = {}
    self.lookup_dist = {}
    self.lookup_dict['lat_g'] = self.ycent
    self.lookup_dict['lon_g'] = self.xcent
    self.lookup_dict['lookup_f'] = lookup.flatten()

    # Calculate actual distances between pixels on the regular grid and the
    # closest neighbour on the Himawari grid that can be used to generate a
    # mask to disclose meshes which are too far from their nearest
    # neighbour; distance is in a regular lon lat space
    self.lookup_dist['distx'] = abs(
        np.repeat([self.xcent], len(self.ycent), axis=0).flatten() -
        (lonlat['lon'].compressed())[self.lookup_dict['lookup_f']])
    self.lookup_dist['disty'] = abs(
        (np.repeat([self.ycent], len(self.xcent), axis=0).T).flatten() -
        (lonlat['lat'].compressed())[self.lookup_dict['lookup_f']])

    # Calculate a mask with distance less than 0.2 in longitude or latitude
    offset = 0.2
    self.lookup_dict['mask'] = ((self.lookup_dist['distx'] > offset) |
                                (self.lookup_dist['disty'] > offset))
    # Add lonlat mask to the dist as this is useful to process the data
    # (highly compressible)
    #self.lookup_dict['in_mask'] = lonlat['lon'].mask

    # Store the lookup table and distances separately.
    # Note: the pickle protocol is passed to pickle.dump; the original
    # passed it as the third argument of gzip.open, where it was silently
    # interpreted as a compression level.
    if BBname != '':
        BBname = '_' + BBname
    pickle.dump(
        self.lookup_dict,
        gzip.open(
            os.path.join(root_dir, sat,
                         'lookup_' + sat + '_' + self.gridtype + BBname +
                         '.pkl'), 'wb'),
        pickle.HIGHEST_PROTOCOL)
    pickle.dump(
        self.lookup_dist,
        gzip.open(
            os.path.join(root_dir, sat,
                         'lookup_dist_' + sat + '_' + self.gridtype +
                         BBname + '.pkl'), 'wb'),
        pickle.HIGHEST_PROTOCOL)
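# Self-contained sketch (toy grids, numpy/scipy only) of the distance-mask
# idea above: interpolate source-pixel indices onto a regular grid, then
# compare each grid point with its matched source pixel and mask matches
# that are farther than a chosen offset in lon/lat.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

rng = np.random.default_rng(1)
src = rng.random((300, 2)) * [40.0, 20.0]          # irregular source pixels
idx = np.arange(len(src))
interp = NearestNDInterpolator(src, idx)

lon_g, lat_g = np.meshgrid(np.linspace(0, 40, 80), np.linspace(0, 20, 40))
lookup = interp(lon_g, lat_g).astype(int)

distx = np.abs(lon_g - src[lookup, 0])
disty = np.abs(lat_g - src[lookup, 1])
mask = (distx > 0.2) | (disty > 0.2)               # too far from any pixel
print(mask.mean(), "of grid cells masked")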
def drapeTopo(self, mesh, actind, option='top', topography=None):
    if self.a_locations is None:
        self.getABMN_locations()

    # 2D
    if mesh.dim == 2:
        if self.survey_geometry == "surface":
            if self.electrodes_info is None:
                self.electrodes_info = SimPEG.Utils.uniqueRows(
                    np.hstack((
                        self.a_locations[:, 0],
                        self.b_locations[:, 0],
                        self.m_locations[:, 0],
                        self.n_locations[:, 0],
                    )).reshape([-1, 1])
                )
            self.electrode_locations = SimPEG.EM.Static.Utils.drapeTopotoLoc(
                mesh, self.electrodes_info[0].flatten(),
                actind=actind, option=option
            )
            temp = (
                self.electrode_locations[self.electrodes_info[2], 1]
            ).reshape((self.a_locations.shape[0], 4), order="F")

            self.a_locations = np.c_[self.a_locations[:, 0], temp[:, 0]]
            self.b_locations = np.c_[self.b_locations[:, 0], temp[:, 1]]
            self.m_locations = np.c_[self.m_locations[:, 0], temp[:, 2]]
            self.n_locations = np.c_[self.n_locations[:, 0], temp[:, 3]]

            # Make interpolation function
            self.topo_function = interp1d(
                self.electrode_locations[:, 0],
                self.electrode_locations[:, 1]
            )

            # Loop over all Src and Rx locs and drape topo
            for src in self.srcList:
                # Pole Src
                if isinstance(src, Src.Pole):
                    locA = src.loc.flatten()
                    z_SrcA = self.topo_function(locA[0])
                    src.loc = np.array([locA[0], z_SrcA])
                    for rx in src.rxList:
                        # Pole Rx
                        if isinstance(rx, (Rx.Pole, Rx.Pole_ky)):
                            locM = rx.locs.copy()
                            z_RxM = self.topo_function(locM[:, 0])
                            rx.locs = np.c_[locM[:, 0], z_RxM]
                        # Dipole Rx
                        elif isinstance(rx, (Rx.Dipole, Rx.Dipole_ky)):
                            locM = rx.locs[0].copy()
                            locN = rx.locs[1].copy()
                            z_RxM = self.topo_function(locM[:, 0])
                            z_RxN = self.topo_function(locN[:, 0])
                            rx.locs[0] = np.c_[locM[:, 0], z_RxM]
                            rx.locs[1] = np.c_[locN[:, 0], z_RxN]
                        else:
                            raise Exception("Unsupported receiver type")

                # Dipole Src
                elif isinstance(src, Src.Dipole):
                    locA = src.loc[0].flatten()
                    locB = src.loc[1].flatten()
                    z_SrcA = self.topo_function(locA[0])
                    z_SrcB = self.topo_function(locB[0])
                    src.loc[0] = np.array([locA[0], z_SrcA])
                    src.loc[1] = np.array([locB[0], z_SrcB])
                    for rx in src.rxList:
                        # Pole Rx
                        if isinstance(rx, (Rx.Pole, Rx.Pole_ky)):
                            locM = rx.locs.copy()
                            z_RxM = self.topo_function(locM[:, 0])
                            rx.locs = np.c_[locM[:, 0], z_RxM]
                        # Dipole Rx
                        elif isinstance(rx, (Rx.Dipole, Rx.Dipole_ky)):
                            locM = rx.locs[0].copy()
                            locN = rx.locs[1].copy()
                            z_RxM = self.topo_function(locM[:, 0])
                            z_RxN = self.topo_function(locN[:, 0])
                            rx.locs[0] = np.c_[locM[:, 0], z_RxM]
                            rx.locs[1] = np.c_[locN[:, 0], z_RxN]
                        else:
                            raise Exception("Unsupported receiver type")

        elif self.survey_geometry == "borehole":
            raise Exception(
                "Not implemented yet for borehole survey_geometry"
            )
        else:
            raise Exception(
                "Input valid survey survey_geometry: surface or borehole"
            )

    if mesh.dim == 3:
        if self.survey_geometry == "surface":
            if self.electrodes_info is None:
                self.electrodes_info = SimPEG.Utils.uniqueRows(
                    np.vstack((
                        self.a_locations[:, :2],
                        self.b_locations[:, :2],
                        self.m_locations[:, :2],
                        self.n_locations[:, :2],
                    ))
                )
            self.electrode_locations = SimPEG.EM.Static.Utils.drapeTopotoLoc(
                mesh, self.electrodes_info[0], actind=actind,
                topo=topography
            )
            temp = (
                self.electrode_locations[self.electrodes_info[2], 1]
            ).reshape((self.a_locations.shape[0], 4), order="F")

            self.a_locations = np.c_[self.a_locations[:, :2], temp[:, 0]]
            self.b_locations = np.c_[self.b_locations[:, :2], temp[:, 1]]
            self.m_locations = np.c_[self.m_locations[:, :2], temp[:, 2]]
            self.n_locations = np.c_[self.n_locations[:, :2], temp[:, 3]]

            # Make interpolation function
            self.topo_function = NearestNDInterpolator(
                self.electrode_locations[:, :2],
                self.electrode_locations[:, 2]
            )

            # Loop over all Src and Rx locs and drape topo
            for src in self.srcList:
                # Pole Src
                if isinstance(src, Src.Pole):
                    locA = src.loc.reshape([1, -1])
                    z_SrcA = self.topo_function(locA[0, :2])
                    src.loc = np.r_[locA[0, :2].flatten(), z_SrcA]
                    for rx in src.rxList:
                        # Pole Rx
                        if isinstance(rx, Rx.Pole):
                            locM = rx.locs.copy()
                            z_RxM = self.topo_function(locM[:, :2])
                            # keep both horizontal coordinates
                            # (the original dropped y here)
                            rx.locs = np.c_[locM[:, :2], z_RxM]
                        # Dipole Rx
                        elif isinstance(rx, Rx.Dipole):
                            locM = rx.locs[0].copy()
                            locN = rx.locs[1].copy()
                            z_RxM = self.topo_function(locM[:, :2])
                            z_RxN = self.topo_function(locN[:, :2])
                            rx.locs[0] = np.c_[locM[:, :2], z_RxM]
                            rx.locs[1] = np.c_[locN[:, :2], z_RxN]
                        else:
                            raise Exception("Unsupported receiver type")

                # Dipole Src
                elif isinstance(src, Src.Dipole):
                    locA = src.loc[0].reshape([1, -1])
                    locB = src.loc[1].reshape([1, -1])
                    z_SrcA = self.topo_function(locA[0, :2])
                    z_SrcB = self.topo_function(locB[0, :2])
                    src.loc[0] = np.r_[locA[0, :2].flatten(), z_SrcA]
                    src.loc[1] = np.r_[locB[0, :2].flatten(), z_SrcB]
                    for rx in src.rxList:
                        # Pole Rx
                        if isinstance(rx, Rx.Pole):
                            locM = rx.locs.copy()
                            z_RxM = self.topo_function(locM[:, :2])
                            rx.locs = np.c_[locM[:, :2], z_RxM]
                        # Dipole Rx
                        elif isinstance(rx, Rx.Dipole):
                            locM = rx.locs[0].copy()
                            locN = rx.locs[1].copy()
                            z_RxM = self.topo_function(locM[:, :2])
                            z_RxN = self.topo_function(locN[:, :2])
                            rx.locs[0] = np.c_[locM[:, :2], z_RxM]
                            rx.locs[1] = np.c_[locN[:, :2], z_RxN]
                        else:
                            raise Exception("Unsupported receiver type")

        elif self.survey_geometry == "borehole":
            raise Exception(
                "Not implemented yet for borehole survey_geometry"
            )
        else:
            raise Exception(
                "Input valid survey survey_geometry: surface or borehole"
            )
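# Sketch of the draping step (hypothetical data, numpy/scipy only): snap
# planimetric (x, y) electrode positions onto a topographic surface with a
# nearest-neighbour interpolant, then append the interpolated elevation as
# the z coordinate.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

rng = np.random.default_rng(2)
topo = rng.random((500, 3)) * [1000.0, 1000.0, 50.0]   # scattered [x, y, z]
topo_fun = NearestNDInterpolator(topo[:, :2], topo[:, 2])

electrodes_xy = np.array([[100.0, 200.0], [300.0, 400.0]])
electrodes_xyz = np.c_[electrodes_xy, topo_fun(electrodes_xy)]
print(electrodes_xyz)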
def quads_from_corner_lookup(lon, lat, corner_points, pixel_lon, pixel_lat,
                             nadir_lon=0.0, inflate=1.0, extrapolate=True):
    """
    Given corner offset data in corner_points located at ctr_lon, ctr_lat
    return interpolated corner offsets

    Arguments
    lon, lat: arrays, shape (N,M), of longitude and latitude giving the
        locations of the corresponding offsets in corner_points
    corner_points: array, shape (N,M,4,2)
        Corners of the pixel quadrilateral are given in order along the
        third dimension. Longitude and latitudes are indexes 0 and 1 in
        the trailing dimension, respectively.
    pixel_lon, pixel_lat: arrays, shape (P,), of longitudes and latitudes
    nadir_lon: geostationary satellite longitude. Added to lon and lat
        (or subtracted from pixel locations) so as to shift the lookup
        table to the correct earth-relative position.
    inflate: multiply the corner point delta by this amount.
    extrapolate: if True (default) and pixel is outside the domain of lon
        and lat, use the nearest neighbor in corner_points instead

    Returns
    quads: array, shape (P,4,2) of corner locations for each pixel.
    """
    did_extrap = False

    n_corners = corner_points.shape[-2]
    n_coords = corner_points.shape[-1]
    lon_shift = lon + nadir_lon

    pixel_loc = np.vstack((pixel_lon, pixel_lat)).T
    grid_loc = (lon_shift.flatten(), lat.flatten())
    quads = np.empty((pixel_lon.shape[0], n_corners, n_coords))
    for ci in range(n_corners):
        corner_interp_lon = LinearNDInterpolator(
            grid_loc, corner_points[:, :, ci, 0].flatten())
        # , bounds_error=True)
        corner_interp_lat = LinearNDInterpolator(
            grid_loc, corner_points[:, :, ci, 1].flatten())
        # , bounds_error=True)
        dlon = corner_interp_lon(pixel_loc)
        dlat = corner_interp_lat(pixel_loc)
        if extrapolate:
            out_lon = np.isnan(dlon)
            out_lat = np.isnan(dlat)
            if out_lon.sum() > 0:
                did_extrap = True
                corner_extrap_lon = NearestNDInterpolator(
                    grid_loc, corner_points[:, :, ci, 0].flatten())
                dlon[out_lon] = corner_extrap_lon(pixel_loc[out_lon])
            if out_lat.sum() > 0:
                did_extrap = True
                corner_extrap_lat = NearestNDInterpolator(
                    grid_loc, corner_points[:, :, ci, 1].flatten())
                dlat[out_lat] = corner_extrap_lat(pixel_loc[out_lat])
        quads[:, ci, 0] = pixel_lon + dlon * inflate
        quads[:, ci, 1] = pixel_lat + dlat * inflate
    if did_extrap:
        log.warning(extrap_warning)
    return quads
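# Self-contained sketch (toy data, numpy/scipy only) of the fallback
# pattern above: LinearNDInterpolator returns NaN outside the convex hull
# of the data, so those points are filled in with a NearestNDInterpolator
# built on the same samples.
import numpy as np
from scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator

rng = np.random.default_rng(5)
pts = rng.random((100, 2))
vals = np.sin(pts[:, 0]) * np.cos(pts[:, 1])

lin = LinearNDInterpolator(pts, vals)
near = NearestNDInterpolator(pts, vals)

q = np.array([[0.5, 0.5], [2.0, 2.0]])    # second point lies outside the hull
out = lin(q)
nan_mask = np.isnan(out)
out[nan_mask] = near(q[nan_mask])
print(out)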
from math import exp

import numpy as np
from scipy.interpolate import NearestNDInterpolator
from scipy.optimize import least_squares
from decimal import Decimal

# Read csv data for tip and stalk cells at t=0.2s
TCdata = np.loadtxt('Tip_Cell_ABM_Data_t2.csv', delimiter=',')
ECdata = np.loadtxt('Stalk_Cell_ABM_Data_t2.csv', delimiter=',')

# Store the data you have read in arrays, and use this to create an
# interpolant function on a 2D grid
X, Y, TCValues = TCdata[:, 0], TCdata[:, 1], TCdata[:, 2]
ECValues = ECdata[:, 2]

print('Finding interpolant function for TC data...')
TCinterpolant = NearestNDInterpolator((X, Y), TCValues)
print('Done!')
print('Finding interpolant function for EC data...')
ECinterpolant = NearestNDInterpolator((X, Y), ECValues)
print('Done!')


def y_fun(*args, **kwargs):
    TCdata = np.loadtxt('Tip_Cell_ABM_Data_t2.csv', delimiter=',')
    # the original reloaded the tip-cell file here for the stalk-cell data;
    # presumably the stalk-cell file was intended
    ECdata = np.loadtxt('Stalk_Cell_ABM_Data_t2.csv', delimiter=',')
    TCdata = TCdata[:, 2]
    ECdata = ECdata[:, 2]
    TCdata = np.reshape(np.transpose(np.reshape(TCdata, (201, 201))),
                        201**2)
    # the original overwrote TCdata a second time here instead of ECdata
    ECdata = np.reshape(np.transpose(np.reshape(ECdata, (201, 201))),
                        201**2)

    for i in [4, 6, 8, 10, 12, 14, 16, 18, 20]:
        TC_name = 'ABM_Data_t' + str(i) + '.csv'
def setUp(self):

    np.random.seed(0)

    # First we need to define the direction of the inducing field
    # As a simple case, we pick a vertical inducing field of magnitude
    # 50,000nT.
    # From old convention, field orientation is given as an
    # azimuth from North (positive clockwise)
    # and dip from the horizontal (positive downward).
    H0 = (50000., 90., 0.)

    # Create a mesh
    h = [5, 5, 5]
    padDist = np.ones((3, 2)) * 100
    nCpad = [2, 4, 2]

    # Create grid of points for topography
    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(np.linspace(-200., 200., 50),
                           np.linspace(-200., 200., 50))

    b = 100
    A = 50
    zz = A * np.exp(-0.5 * ((xx / b)**2. + (yy / b)**2.))

    # We would usually load a topofile
    topo = np.c_[Utils.mkvc(xx), Utils.mkvc(yy), Utils.mkvc(zz)]

    # Create an array of observation points
    xr = np.linspace(-100., 100., 20)
    yr = np.linspace(-100., 100., 20)
    X, Y = np.meshgrid(xr, yr)
    Z = A * np.exp(-0.5 * ((X / b)**2. + (Y / b)**2.)) + 5

    # Create a MAGsurvey
    xyzLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]
    rxLoc = PF.BaseMag.RxObs(xyzLoc)
    srcField = PF.BaseMag.SrcField([rxLoc], param=H0)
    survey = PF.BaseMag.LinearSurvey(srcField)

    # Get extent of points
    limx = np.r_[topo[:, 0].max(), topo[:, 0].min()]
    limy = np.r_[topo[:, 1].max(), topo[:, 1].min()]
    limz = np.r_[topo[:, 2].max(), topo[:, 2].min()]

    # Get center of the mesh
    midX = np.mean(limx)
    midY = np.mean(limy)
    midZ = np.mean(limz)

    nCx = int(limx[0] - limx[1]) / h[0]
    nCy = int(limy[0] - limy[1]) / h[1]
    nCz = int(limz[0] - limz[1] + int(np.min(np.r_[nCx, nCy]) / 3)) / h[2]

    # Figure out full extent required from input
    extent = np.max(np.r_[nCx * h[0] + padDist[0, :].sum(),
                          nCy * h[1] + padDist[1, :].sum(),
                          nCz * h[2] + padDist[2, :].sum()])

    maxLevel = int(np.log2(extent / h[0])) + 1

    # Number of cells at the small octree level
    # For now equal in 3D
    nCx, nCy, nCz = 2**(maxLevel), 2**(maxLevel), 2**(maxLevel)
    # nCy = 2**(int(np.log2(extent/h[1]))+1)
    # nCz = 2**(int(np.log2(extent/h[2]))+1)

    # Define the mesh and origin
    # For now cubic cells
    self.mesh = Mesh.TreeMesh([np.ones(nCx) * h[0],
                               np.ones(nCx) * h[1],
                               np.ones(nCx) * h[2]])

    # Set origin
    self.mesh.x0 = np.r_[-nCx * h[0] / 2. + midX,
                         -nCy * h[1] / 2. + midY,
                         -nCz * h[2] / 2. + midZ]

    # Refine the mesh around topography
    # Get extent of points
    F = NearestNDInterpolator(topo[:, :2], topo[:, 2])
    zOffset = 0
    # Cycle through the first 3 octree levels
    for ii in range(3):
        dx = self.mesh.hx.min() * 2**ii
        nCx = int((limx[0] - limx[1]) / dx)
        nCy = int((limy[0] - limy[1]) / dx)

        # Create a grid at the octree level in xy
        CCx, CCy = np.meshgrid(np.linspace(limx[1], limx[0], nCx),
                               np.linspace(limy[1], limy[0], nCy))

        z = F(mkvc(CCx), mkvc(CCy))

        # level means number of layers in current OcTree level
        for _ in range(int(nCpad[ii])):
            self.mesh.insert_cells(np.c_[mkvc(CCx), mkvc(CCy), z - zOffset],
                                   np.ones_like(z) * maxLevel - ii,
                                   finalize=False)
            zOffset += dx

    self.mesh.finalize()

    # Define active cells from topo
    actv = Utils.surface2ind_topo(self.mesh, topo)
    nC = int(actv.sum())

    # We can now create a susceptibility model and generate data
    # Lets start with a simple block in half-space
    self.model = Utils.ModelBuilder.addBlock(self.mesh.gridCC,
                                             np.zeros(self.mesh.nC),
                                             np.r_[-20, -20, -5],
                                             np.r_[20, 20, 30],
                                             0.05)[actv]

    # Create active map to go from reduced set to full
    self.actvMap = Maps.InjectActiveCells(self.mesh, actv, np.nan)

    # Create reduced identity map
    idenMap = Maps.IdentityMap(nP=nC)

    # Create the forward model operator
    prob = PF.Magnetics.MagneticIntegral(self.mesh, chiMap=idenMap,
                                         actInd=actv)

    # Pair the survey and problem
    survey.pair(prob)

    # Compute linear forward operator and compute some data
    data = prob.fields(self.model)

    # Add noise and uncertainties (1nT)
    noise = np.random.randn(len(data))
    data += noise
    wd = np.ones(len(data)) * 1.

    survey.dobs = data
    survey.std = wd

    # Create sensitivity weights from our linear forward operator
    rxLoc = survey.srcField.rxList[0].locs
    wr = np.zeros(prob.G.shape[1])
    for ii in range(survey.nD):
        wr += (prob.G[ii, :] / survey.std[ii])**2.

    # wr = (wr/np.max(wr))
    wr = wr**0.5

    # Create a regularization
    reg = Regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)
    reg.norms = np.c_[0, 0, 0, 0]
    reg.cell_weights = wr
    reg.mref = np.zeros(nC)

    # Data misfit function
    dmis = DataMisfit.l2_DataMisfit(survey)
    dmis.W = 1. / survey.std

    # Add directives to the inversion
    opt = Optimization.ProjectedGNCG(maxIter=20, lower=0., upper=10.,
                                     maxIterLS=20, maxIterCG=20,
                                     tolCG=1e-4, stepOffBoundsFact=1e-4)

    invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=1e+4)

    # Here is where the norms are applied
    # Pick a threshold parameter empirically based on the distribution of
    # model parameters
    IRLS = Directives.Update_IRLS(f_min_change=1e-3, maxIRLSiter=20,
                                  beta_tol=5e-1)
    update_Jacobi = Directives.UpdatePreconditioner()

    # saveOuput = Directives.SaveOutputEveryIteration()
    # saveModel.fileName = work_dir + out_dir + 'ModelSus'
    self.inv = Inversion.BaseInversion(invProb,
                                       directiveList=[IRLS, update_Jacobi])
model = getModel_mag()
model = model[actv]

# Here you can visualize the current model
m_true = actvMap * model
Mesh.TensorMesh.writeModelUBC(mesh, "model_mag.sus", m_true)
airc = m_true == ndv
m_true[airc] = np.nan
print('exported mag model. Size: ', m_true.shape)

# **Forward system:**
# We create a synthetic survey with observations in cell centers.
X, Y = np.meshgrid(mesh.vectorCCx[npad:-npad:2],
                   mesh.vectorCCy[npad:-npad:2])

# Using our topography, drape the survey and shift it up by the flight
# height
Ftopo = NearestNDInterpolator(topo[:, :2], topo[:, 2])
Z = Ftopo(Utils.mkvc(X.T), Utils.mkvc(Y.T)) + Z_bird

rxLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]
rxLoc = PF.BaseGrav.RxObs(rxLoc)
print('number of data: ', rxLoc.locs.shape[0])

# The field parameters at TKC are [H:60,308 nT, I:83.8 d D:25.4 d ]
H0 = (60308., 83.8, 25.4)
srcField = PF.BaseMag.SrcField([rxLoc])
srcField.param = H0
survey = PF.BaseMag.LinearSurvey(srcField)

# Now that we have a model and a survey we can build the linear system ...
nactv = int(np.sum(actv))

# Create reduced identity map
def interpolated_model(self, plot=False):
    """
    Generate an interpolated model from the completeness curves.
    Waves have to be uniformly spaced
    """
    if plot:
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        mpl.use("tkagg")

    # bins to interpolate all completeness curves to;
    # in these coordinates 50% completeness is always
    # at 1.0. This should also, in combination with
    # fill_value, ensure 0 returns zero completeness
    # in the RectBivariateSpline
    fluxes_f50_units = linspace(0, 50, 5000)
    c_all = []
    fgrid_for_mask = []
    wgrid_for_mask = []

    # Offset by half a bin brighter, so the mask kicks in
    # at the center location, not 1/2 a bin away. The
    # 0.999 factor is to ensure the actual value itself
    # is not in the mask
    fbsizediv2 = 0.999 * (self.fluxes[1] - self.fluxes[0]) / 2.0

    for twave, tf50, c in zip(self.waves, self.f50, self.compl_curves):

        if plot:
            plt.plot(self.fluxes / tf50, c, linestyle="--")

        # Shift grid to center of bin, so don't
        # interpolate past value
        fgrid_for_mask.append((self.fluxes + fbsizediv2) / tf50)
        wgrid_for_mask.append(ones(len(self.fluxes)) * twave)

        # divide by f50, so 50% completeness sits at 1.0,
        # to convert to flux units of per f50
        interpolator = interp1d(self.fluxes / tf50, c,
                                bounds_error=False,
                                fill_value=(0.0, c[-1]))

        # interpolate to the coordinates where 50% is at 1.0
        c_all.append(interpolator(fluxes_f50_units))

    c_all = array(c_all)

    # Make a combined model
    if self._wl_collapse:
        cmean = mean(c_all, axis=0)
        completeness_model = interp1d(fluxes_f50_units, cmean,
                                      fill_value=(0.0, cmean[-1]),
                                      bounds_error=False)

        if plot:
            vals_to_plot = completeness_model(fluxes_f50_units)
    else:
        # waves have to be uniformly spaced for this to work
        # (? don't think so?)
        interp = RectBivariateSpline(self.waves, fluxes_f50_units, c_all,
                                     kx=3, ky=3)

        if self.dont_interp_to_zero:
            # Use this as a mask to not extrapolate toward 0.0
            # if nearest point is zero
            compl_mask = zeros(self.compl_curves.shape)
            compl_mask[self.compl_curves > 0.0] = 1.0

            interp_mask = NearestNDInterpolator(
                list(zip(array(wgrid_for_mask).ravel(),
                         array(fgrid_for_mask).ravel())),
                compl_mask.ravel())

            completeness_model = lambda x, y: (interp_mask(x, y) *
                                               interp(x, y, grid=False))
        else:
            completeness_model = lambda x, y: interp(x, y, grid=False)

        if plot:
            vals_to_plot = completeness_model(
                self.waves[2] * ones(len(fluxes_f50_units)),
                fluxes_f50_units)

    if plot:
        plt.plot(fluxes_f50_units, vals_to_plot, "k.", lw=2.0)
        plt.xlim(0, 12.0)
        plt.xlabel("Flux/(50% flux) [erg/s/cm^2]")
        plt.ylabel("Normalized Completeness")
        plt.show()

    return completeness_model
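# Sketch of the masking trick above (toy data, numpy/scipy only): multiply
# a smooth 2-D spline by a nearest-neighbour 0/1 mask so the model is not
# extrapolated into regions where the measured curves were exactly zero.
import numpy as np
from scipy.interpolate import RectBivariateSpline, NearestNDInterpolator

x = np.linspace(0, 1, 20)
y = np.linspace(0, 1, 30)
z = np.clip(x - 0.3, 0.0, None)[:, None] * np.ones((x.size, y.size))

spline = RectBivariateSpline(x, y, z, kx=3, ky=3)
X, Y = np.meshgrid(x, y, indexing="ij")
mask = NearestNDInterpolator(
    np.column_stack((X.ravel(), Y.ravel())),
    (z > 0).astype(float).ravel())

model = lambda xq, yq: mask(xq, yq) * spline(xq, yq, grid=False)
print(model(np.array([0.1, 0.9]), np.array([0.5, 0.5])))  # first entry -> 0.0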
def read_nubeam(filename, grid, e_range=(), p_range=(), btipsign=-1,
                species=1):
    """
    #+#read_nubeam
    #+Reads NUBEAM fast-ion distribution function
    #+***
    #+##Arguments
    #+    **filename**: NUBEAM guiding center fast-ion distribution
    #+        function file e.g. 159245H01_fi_1.cdf
    #+
    #+    **grid**: Interpolation grid
    #+
    #+##Keyword Arguments
    #+    **btipsign**: Sign of the dot product of the magnetic field and
    #+        plasma current
    #+
    #+    **e_range**: Energy range to consider
    #+
    #+    **p_range**: Pitch range to consider
    #+
    #+    **species**: Fast-ion species number. Defaults to 1
    #+
    #+##Return Value
    #+Distribution structure
    #+
    #+##Example Usage
    #+```python
    #+>>> dist = read_nubeam("./159245H02_fi_1.cdf", grid, btipsign=-1)
    #+```
    """
    species_var = "SPECIES_{}".format(species)
    sstr = read_ncdf(
        filename, vars=[species_var])[species_var].tobytes().decode('UTF-8')
    print("Species: " + sstr)
    var = read_ncdf(filename, vars=["TIME", "R2D", "Z2D", "E_" + sstr,
                                    "A_" + sstr, "F_" + sstr, "RSURF",
                                    "ZSURF", "BMVOL"])

    ngrid = len(var["R2D"])

    try:
        time = var["TIME"][0]
    except (IndexError, TypeError):
        time = var["TIME"]

    r2d = var["R2D"]
    z2d = var["Z2D"]
    rsurf = var["RSURF"].T
    zsurf = var["ZSURF"].T
    bmvol = var["BMVOL"]
    pitch = var["A_" + sstr]
    energy = var["E_" + sstr] * 1e-3
    fbm = var["F_" + sstr].T * 1e3
    # 0.5 to convert to pitch instead of solid angle d_omega/4pi
    fbm = np.where(fbm > 0.0, 0.5 * fbm, 0.0)

    if btipsign < 0:
        fbm = fbm[:, ::-1, :]  # reverse pitch elements

    if not e_range:
        e_range = (np.min(energy), np.max(energy))

    if not p_range:
        p_range = (np.min(pitch), np.max(pitch))

    # Trim distribution according to e/p_range
    we = np.logical_and(energy >= e_range[0], energy <= e_range[1])
    wp = np.logical_and(pitch >= p_range[0], pitch <= p_range[1])

    energy = energy[we]
    nenergy = len(energy)
    pitch = pitch[wp]
    npitch = len(pitch)
    fbm = fbm[we, :, :]
    fbm = fbm[:, wp, :]

    dE = np.abs(energy[1] - energy[0])
    dp = np.abs(pitch[1] - pitch[0])
    emin, emax = (np.maximum(np.min(energy) - 0.5 * dE, 0.0),
                  np.max(energy) + 0.5 * dE)
    pmin, pmax = (np.maximum(np.min(pitch) - 0.5 * dp, -1.0),
                  np.minimum(np.max(pitch) + 0.5 * dp, 1.0))

    print('Energy min/max: ', emin, emax)
    print('Pitch min/max: ', pmin, pmax)

    nr = grid["nr"]
    nz = grid["nz"]
    r = grid["r"]
    z = grid["z"]
    rgrid = grid["r2d"]
    zgrid = grid["z2d"]
    dr = np.abs(r[1] - r[0])
    dz = np.abs(z[1] - z[0])

    fdens = np.sum(fbm, axis=(0, 1)) * dE * dp
    ntot = np.sum(fdens * bmvol)
    print('Ntotal in phase space: ', ntot)

    # Triangulation for barycentric interpolation
    tri = Delaunay(np.vstack((r2d, z2d)).T)
    pts = np.array([xx for xx in zip(r2d, z2d)])
    # to find indices outside simplices
    itp = NearestNDInterpolator(pts, np.arange(ngrid))

    points = np.array([xx for xx in zip(rgrid.flatten(), zgrid.flatten())])
    t = tri.find_simplex(points)

    denf = np.zeros((nr, nz))
    fbm_grid = np.zeros((nenergy, npitch, nr, nz))
    for (ind, tt) in enumerate(t):
        i, j = np.unravel_index(ind, (nr, nz))
        if tt == -1:
            ii = int(itp(r[i], z[j]))
            denf[i, j] = fdens[ii]
            fbm_grid[:, :, i, j] = fbm[:, :, ii]
        else:
            b = tri.transform[tt, :2].dot(
                np.transpose(points[ind] - tri.transform[tt, 2]))
            s = tri.simplices[tt, :]
            # perform barycentric linear interpolation
            denf[i, j] = (b[0] * fdens[s[0]] + b[1] * fdens[s[1]] +
                          (1 - np.sum(b)) * fdens[s[2]])
            fbm_grid[:, :, i, j] = (b[0] * fbm[:, :, s[0]] +
                                    b[1] * fbm[:, :, s[1]] +
                                    (1 - np.sum(b)) * fbm[:, :, s[2]])

    denf[denf < 0] = 0

    # Correct for points outside of separatrix
    rmaxis = np.mean(rsurf[:, 0])
    zmaxis = np.mean(zsurf[:, 0])
    r_sep = rsurf[:, -1]
    z_sep = zsurf[:, -1]

    # plt.triplot(r2d, z2d, tri.simplices.copy())
    # plt.plot(r2d, z2d, 'o')
    # plt.plot(r_sep, z_sep)
    # plt.show()

    x_bdry = r_sep - rmaxis
    y_bdry = z_sep - zmaxis
    r_bdry = np.sqrt(x_bdry**2 + y_bdry**2)
    theta_bdry = np.arctan2(y_bdry, x_bdry)
    theta_bdry = np.where(theta_bdry < 0.0, theta_bdry + 2 * np.pi,
                          theta_bdry)  # [0,2pi]
    w = np.argsort(theta_bdry)
    theta_bdry = theta_bdry[w]
    r_bdry = r_bdry[w]
    theta_bdry, w = np.unique(theta_bdry, return_index=True)
    r_bdry = r_bdry[w]
    itp = interp1d(theta_bdry, r_bdry, 'cubic', fill_value='extrapolate')

    x_pts = grid["r2d"] - rmaxis
    y_pts = grid["z2d"] - zmaxis
    r_pts = np.sqrt(x_pts**2 + y_pts**2)
    theta_pts = np.arctan2(y_pts, x_pts)
    theta_pts = np.where(theta_pts < 0.0, theta_pts + 2 * np.pi,
                         theta_pts)  # [0,2pi]
    r_bdry_itp = itp(theta_pts)
    w = r_pts >= r_bdry_itp + 2
    denf[w] = 0.0
    fbm_grid[:, :, w] = 0.0

    # enforce correct normalization
    ntot_denf = 2 * np.pi * dr * dz * np.sum(r * np.sum(denf, axis=1))
    denf = denf * (ntot / ntot_denf)
    ntot_fbm = (2 * np.pi * dE * dp * dr * dz) * np.sum(
        r * np.sum(fbm_grid, axis=(0, 1, 3)))
    # the original rescaled fbm_grid by ntot/ntot_denf; the fbm total
    # computed on the line above is the intended divisor
    fbm_grid = fbm_grid * (ntot / ntot_fbm)

    fbm_dict = {"type": 1, "time": time, "nenergy": nenergy,
                "energy": energy, "npitch": npitch, "pitch": pitch,
                "f": fbm_grid, "denf": denf,
                "data_source": os.path.abspath(filename)}

    return fbm_dict
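# Self-contained sketch (toy data, numpy/scipy only) of the interpolation
# scheme above: barycentric (linear) interpolation inside the Delaunay
# triangulation, nearest-neighbour lookup for query points that fall
# outside every simplex.
import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import NearestNDInterpolator

rng = np.random.default_rng(3)
pts = rng.random((200, 2))
vals = pts[:, 0]**2 + pts[:, 1]

tri = Delaunay(pts)
nearest_idx = NearestNDInterpolator(pts, np.arange(len(pts)))

q = np.array([[0.4, 0.6], [1.5, 1.5]])     # second point is outside the hull
simplex = tri.find_simplex(q)
out = np.empty(len(q))
for k, t in enumerate(simplex):
    if t == -1:
        out[k] = vals[int(nearest_idx(q[k, 0], q[k, 1]))]
    else:
        b = tri.transform[t, :2].dot(q[k] - tri.transform[t, 2])
        w = np.append(b, 1.0 - b.sum())    # barycentric weights
        out[k] = w.dot(vals[tri.simplices[t]])
print(out)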
def create_interpolating_functions(self):
    print('creating interpolating functions...')
    points = np.vstack((self.x, self.y, self.z)).T
    self.frho = NearestNDInterpolator(points, self.rho)
    self.fT = NearestNDInterpolator(points, self.T)
    # named fnH2 for consistency with frho/fT (the original assigned the
    # interpolator back onto the nH2 data attribute itself)
    self.fnH2 = NearestNDInterpolator(points, self.nH2)
def get_v_panel_from_i(cell_param=np.array([
    6.48000332e+00, 6.37762333e-10, 8.45318984e-04, 1.65194938e+03,
    3.14194723e-02
]),
                       I_min: float = -4,
                       I_max: float = 7,
                       L_max: float = 1.2) -> Callable:
    """Returns a function to calculate voltages of a pv-panel at given
    current. Includes reverse bias diode. Requires physical cell parameters.

    - I_ph  photo current
    - I_0   reverse saturation current
    - R_s   series resistance
    - R_sh  shunt resistance
    - v_th  thermal voltage kT/e

    Args:
        cell_param (tuplelike): Physical cell parameters. Defaults to
            np.array([6.48000332e+00, 6.37762333e-10, 8.45318984e-04,
            1.65194938e+03, 3.14194723e-02]).
        I_min (float): Minimum current to be considered. Defaults to -4.
        I_max (float): Maximum current to be considered. Defaults to 7.
        L_max (float): Maximum photocurrent to be considered in A.
            Defaults to 1.2.
    """

    def single2v_from_i_with_nan(arg0=np.array([
        6.48000332e+00, 6.37762333e-10, 8.45318984e-04, 1.65194938e+03,
        3.14194723e-02
    ])) -> Callable:
        """Return function to calculate voltage from current of a single
        diode. Might include NaN.

        Args:
            arg0 (np.ndarray): Physical cell parameters
                (I_ph, I_0, R_s, R_sh, v_th). Defaults to
                np.array([6.48000332e+00, 6.37762333e-10, 8.45318984e-04,
                1.65194938e+03, 3.14194723e-02]).

        Returns:
            Callable: Calculate voltage from current of a single diode.
        """
        (I_ph, I_0, R_s, R_sh, v_th) = arg0

        def v_from_i(I: np.ndarray, L: np.ndarray, t_cell: float):
            """Return diode voltage for a single pv-cell (diode).

            Given the physical cell parameters (I_ph, I_0, R_s, R_sh, v_th)
            and the arguments.

            Args:
                I (np.ndarray): Current through the cell in A.
                L (np.ndarray): Photocurrent in A.
                    (Is considered proportional to the irradiance)
                t_cell (float): Cell temperature.

            Returns:
                np.ndarray: Voltage at given current without NaN catch.
            """
            v_pn = pvlib.pvsystem.v_from_i(R_sh, R_s,
                                           v_th * (t_cell + 273) / 298.5,
                                           np.array(I, ndmin=2).T, I_0,
                                           I_ph * np.asarray(L))
            return v_pn

        return v_from_i

    # Generate interpolation function to guarantee non-NaN values
    v_i_with_nan = single2v_from_i_with_nan(arg0=cell_param)
    I_arr = np.linspace(I_min, I_max, 110)
    L_arr = np.linspace(0, L_max, 100)
    T_arr = np.linspace(-20, 80, 100)
    data = v_i_with_nan(*np.meshgrid(I_arr, L_arr, T_arr))
    I_ = np.meshgrid(I_arr, L_arr, T_arr)[0].flatten()
    L_ = np.meshgrid(I_arr, L_arr, T_arr)[1].flatten()
    T_ = np.meshgrid(I_arr, L_arr, T_arr)[2].flatten()
    not_nans = np.argwhere(np.logical_not(np.isnan(
        data.flatten()))).reshape(-1)
    v_i_interpolate = NearestNDInterpolator(
        (I_.flatten()[not_nans], L_.flatten()[not_nans],
         T_.flatten()[not_nans]), data.flatten()[not_nans])

    def single2v_from_i(arg0=np.array([
        6.48000332e+00, 6.37762333e-10, 8.45318984e-04, 1.65194938e+03,
        3.14194723e-02
    ])) -> Callable:
        """Return function to calculate voltage from current of a single
        diode. Includes NaN catch.

        Args:
            arg0 (np.ndarray): Physical cell parameters
                (I_ph, I_0, R_s, R_sh, v_th). Defaults to
                np.array([6.48000332e+00, 6.37762333e-10, 8.45318984e-04,
                1.65194938e+03, 3.14194723e-02]).

        Returns:
            Voltage at given current with NaN catch
        """
        (I_ph, I_0, R_s, R_sh, v_th) = arg0
        pvlib_v_from_i = pvlib.pvsystem.v_from_i

        @functools.lru_cache(maxsize=2048 * 16)
        def v_from_i(I_cells, Iph, t_cell):
            """Return diode voltage for a single pv-cell (diode).

            Given the physical cell parameters (I_ph, I_0, R_s, R_sh, v_th)
            and the arguments. Includes NaN catch.

            Args:
                I_cells (tuple): Current through the cell in A.
                Iph (tuple): Photocurrent in A.
                    (Is considered proportional to the irradiance)
                t_cell (float): Cell temperature.

            Returns:
                np.ndarray: Voltage at given current with NaN catch.
            """
            v_pn = pvlib_v_from_i(R_sh, R_s, v_th * (t_cell + 273) / 298.5,
                                  np.array(I_cells, ndmin=2).T, I_0,
                                  I_ph * np.asarray(Iph))
            if np.isnan(v_pn).any():
                return v_i_interpolate(
                    (np.array(I_cells, ndmin=2).T, np.asarray(Iph), t_cell))
            else:
                return v_pn

        return v_from_i

    v_from_i = single2v_from_i(cell_param)

    def calc_t_cell(L: np.ndarray, T_am: float, W_10: float,
                    model: str = 'roof_mount_cell_glassback'):
        """Wrapper function for cell temperature calculation

        Args:
            L (np.ndarray): Irradiance in kW.
            T_am (float): Ambient temperature.
            W_10 (float): Windspeed at 10 meters.
            model (str): Defaults to 'roof_mount_cell_glassback'.

        Returns:
            float: Cell temperature in Kelvin.
        """
        return pvsystem.sapm_celltemp(
            np.sum(np.hstack(L)) / np.size(np.hstack(L)) * 1e3,
            W_10,
            T_am,
        )['temp_cell'][0]

    @functools.lru_cache(maxsize=2048 * 16)
    def substr_v_P(I_substr: tuple, Iph_substr: tuple, t_cell: float = 0,
                   v_rb: float = -.5):
        """Returns voltages of a substring in a panel at given currents.

        Returns voltages of a pv-panel including reverse bias diode given
        the physical cell parameters (I_ph, I_0, R_s, R_sh, v_th) and the
        arguments.

        Args:
            I_substr (tuple): Current through the cell in A.
            Iph_substr (tuple): Photocurrent in A.
            t_cell (float, optional): Cell temperature. Defaults to 0.
            v_rb (float, optional): Bypass diode breakthrough voltage in V
                (reverse bias). Defaults to -.5.

        Returns:
            np.ndarray: Voltages at given currents through the substring.
        """
        return np.maximum(
            np.sum(v_from_i(I_substr, Iph_substr, t_cell), axis=1),
            v_rb * np.exp(np.asarray(I_substr) / 20))

    def v_from_i_panel(args):
        """Returns voltages of a pv-panel at given currents.

        Args:
            args (tuple): (
                I_pan : current through the cell in A
                Iph_panel : list of photocurrents in A
                    (considered proportional to the irradiance)
                T_am : ambient temperature in Celsius
                W_10 : windspeed at 10 m
            )
        """
        (I_pan, Iph_panel, T_am, W_10, _) = args
        t_cell = calc_t_cell(Iph_panel, T_am, W_10)
        return np.asarray(
            sum(
                substr_v_P(tuple(I_pan), Iph_substr=tuple(Iph_substring),
                           t_cell=t_cell)
                for Iph_substring in Iph_panel))

    return v_from_i_panel
photTable = aperture_photometry(data, apertures)
contam[i + 1] = max(photTable['aperture_sum']) / target_flux

contam = np.array(contam)  # convert to numpy array else sphinx complains
I = interp1d(rad, contam, fill_value=min(contam), bounds_error=False)
with open(pfile, 'wb') as fp:
    pickle.dump(I, fp)

# Visibility calculator for instrument.py and make_xml_files
pfile = path.join(cache_path, 'visibility_interpolator.p')
if not path.isfile(pfile):
    vfile = path.join(data_path, 'VisibilityTable.csv')
    visTable = Table.read(vfile)
    ra_ = visTable['RA'] * 180 / np.pi
    dec_ = visTable['Dec'] * 180 / np.pi
    vis = visTable['Efficiency']
    I = NearestNDInterpolator((np.array([ra_, dec_])).T, vis)
    with open(pfile, 'wb') as fp:
        pickle.dump(I, fp)

# T_eff v. G_BP-G_RP colour from
# http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt
# Version 2019.3.22
pfile = path.join(cache_path, 'Teff_BP_RP_interpolator.p')
if not path.isfile(pfile):
    fT = path.join(here, 'data', 'EEM_dwarf_UBVIJHK_colors_Teff',
                   'EEM_dwarf_UBVIJHK_colors_Teff.txt')
    T = Table.read(fT, format='ascii', header_start=-1,
                   fill_values=('...', np.nan))
    b_p = np.array(T['Bp-Rp'])  # convert to numpy array else sphinx complains
def create_seed_harvest_geoGrid_interpolator_and_read_data(
        path_to_csv_file, worldGeodeticSys84, geoTargetGrid,
        ilr_seed_harvest_data):
    "read seed/harvest dates and assign climate stations"

    wintercrop = {
        "WW": True, "SW": False, "WR": True, "WRa": True, "WB": True,
        "SM": False, "GM": False, "SBee": False, "SU": False, "SB": False,
        "SWR": True, "CLALF": False, "PO": False
    }

    with open(path_to_csv_file) as _:
        reader = csv.reader(_)
        #print "reading:", path_to_csv_file

        # skip header line
        next(reader)

        # climate station position (lat, long transformed to a
        # geoTargetGrid, e.g. gk5)
        points = []
        values = []  # climate station ids

        transformer = Transformer.from_crs(worldGeodeticSys84,
                                           geoTargetGrid, always_xy=True)
        prev_cs = None
        prev_lat_lon = [None, None]
        #data_at_cs = defaultdict()
        for row in reader:
            # first column, climate station
            cs = int(row[0])

            # if new climate station, store the data of the old climate
            # station
            if prev_cs is not None and cs != prev_cs:
                llat, llon = prev_lat_lon
                #r_geoTargetGrid, h_geoTargetGrid = transform(worldGeodeticSys84, geoTargetGrid, llon, llat)
                r_geoTargetGrid, h_geoTargetGrid = transformer.transform(
                    llon, llat)
                points.append([r_geoTargetGrid, h_geoTargetGrid])
                values.append(prev_cs)

            crop_id = row[3]
            is_wintercrop = wintercrop[crop_id]
            ilr_seed_harvest_data[crop_id]["is-winter-crop"] = is_wintercrop

            base_date = date(2001, 1, 1)

            sdoy = int(float(row[4]))
            ilr_seed_harvest_data[crop_id]["data"][cs]["sowing-doy"] = sdoy
            sd = base_date + timedelta(days=sdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs]["sowing-date"] = \
                "0000-{:02d}-{:02d}".format(sd.month, sd.day)

            esdoy = int(float(row[8]))
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "earliest-sowing-doy"] = esdoy
            esd = base_date + timedelta(days=esdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "earliest-sowing-date"] = "0000-{:02d}-{:02d}".format(
                    esd.month, esd.day)

            lsdoy = int(float(row[9]))
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "latest-sowing-doy"] = lsdoy
            lsd = base_date + timedelta(days=lsdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "latest-sowing-date"] = "0000-{:02d}-{:02d}".format(
                    lsd.month, lsd.day)

            digit = 1 if is_wintercrop else 0
            if crop_id == 'CLALF':
                digit = 2

            hdoy = int(float(row[6]))
            ilr_seed_harvest_data[crop_id]["data"][cs]["harvest-doy"] = hdoy
            hd = base_date + timedelta(days=hdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs]["harvest-date"] = \
                "000{}-{:02d}-{:02d}".format(digit, hd.month, hd.day)

            ehdoy = int(float(row[10]))
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "earliest-harvest-doy"] = ehdoy
            ehd = base_date + timedelta(days=ehdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "earliest-harvest-date"] = "000{}-{:02d}-{:02d}".format(
                    digit, ehd.month, ehd.day)

            lhdoy = int(float(row[11]))
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "latest-harvest-doy"] = lhdoy
            lhd = base_date + timedelta(days=lhdoy - 1)
            ilr_seed_harvest_data[crop_id]["data"][cs][
                "latest-harvest-date"] = "000{}-{:02d}-{:02d}".format(
                    digit, lhd.month, lhd.day)

            lat = float(row[1])
            lon = float(row[2])
            prev_lat_lon = (lat, lon)
            prev_cs = cs

        ilr_seed_harvest_data[crop_id]["interpolate"] = \
            NearestNDInterpolator(np.array(points), np.array(values))
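# Minimal sketch (hypothetical station data, numpy/scipy only) of the
# station-assignment idea: interpolating station ids with a
# NearestNDInterpolator partitions the plane into Voronoi cells, so any
# query point returns the id of its closest station.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

station_xy = np.array([[10.0, 10.0], [90.0, 10.0], [50.0, 90.0]])
station_ids = np.array([101, 102, 103])
assign = NearestNDInterpolator(station_xy, station_ids)

print(assign(np.array([12.0, 80.0]), np.array([15.0, 85.0])))  # -> [101 103]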
def __init__(self, network, lgn_on, lgn_off, target, parameters, name):
    from numpy import random
    random.seed(1023)
    BaseComponent.__init__(self, network, parameters)
    self.name = name
    t_size = target.size_in_degrees()

    or_map = None
    if self.parameters.or_map:
        f = open(self.parameters.or_map_location, 'rb')
        or_map = pickle.load(f, encoding="latin1") * numpy.pi
        #or_map = pickle.load(f)*numpy.pi*2
        #or_map = numpy.cos(or_map) + 1j*numpy.sin(or_map)

        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(or_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(or_map)[1])
        X, Y = numpy.meshgrid(coords_x, coords_y)
        or_map = NearestNDInterpolator(list(zip(X.flatten(), Y.flatten())),
                                       or_map.flatten())

    phase_map = None
    if self.parameters.phase_map:
        f = open(self.parameters.phase_map_location, 'rb')
        phase_map = pickle.load(f, encoding="latin1")
        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(phase_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(phase_map)[1])
        X, Y = numpy.meshgrid(coords_x, coords_y)
        # note: the original misplaced the closing parenthesis, passing
        # phase_map.flatten() as a second argument to list()
        phase_map = NearestNDInterpolator(
            list(zip(X.flatten(), Y.flatten())), phase_map.flatten())

    for (j, neuron2) in enumerate(target.pop.all()):
        if or_map:
            orientation = or_map(target.pop.positions[0][j],
                                 target.pop.positions[1][j])
        else:
            orientation = parameters.orientation_preference.next()

        if phase_map:
            phase = phase_map(target.pop.positions[0][j],
                              target.pop.positions[1][j])
        else:
            phase = parameters.phase.next()

        aspect_ratio = parameters.aspect_ratio.next()
        frequency = parameters.frequency.next()
        size = parameters.size.next()

        assert orientation < numpy.pi

        target.add_neuron_annotation(j, 'LGNAfferentOrientation',
                                     orientation, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentAspectRatio',
                                     aspect_ratio, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentFrequency', frequency,
                                     protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentSize', size,
                                     protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentPhase', phase,
                                     protected=True)
        target.add_neuron_annotation(j, 'aff_samples',
                                     self.parameters.num_samples.next(),
                                     protected=True)

        if self.parameters.topological:
            target.add_neuron_annotation(
                j, 'LGNAfferentX',
                target.pop.positions[0][j] + parameters.rf_jitter.next(),
                protected=True)
            target.add_neuron_annotation(
                j, 'LGNAfferentY',
                target.pop.positions[1][j] + parameters.rf_jitter.next(),
                protected=True)
        else:
            target.add_neuron_annotation(j, 'LGNAfferentX',
                                         parameters.rf_jitter.next(),
                                         protected=True)
            target.add_neuron_annotation(j, 'LGNAfferentY',
                                         parameters.rf_jitter.next(),
                                         protected=True)

    ps = ParameterSet({
        'target_synapses': 'excitatory',
        'weight_functions': {
            'f1': {
                'component': 'mozaik.connectors.vision.GaborArborization',
                'params': {
                    'ON': True,
                }
            }
        },
        'delay_functions': self.parameters.delay_functions,
        # a python expression that can use variables f1..fn where n is the
        # number of functions in weight_functions, and fi corresponds to the
        # name given to a ModularConnectorFunction in the weight_functions
        # ParameterSet. It determines how the weight functions are combined
        # to obtain the weights
        'weight_expression': 'f1',
        'delay_expression': self.parameters.delay_expression,
        'short_term_plasticity': self.parameters.short_term_plasticity,
        'base_weight': self.parameters.base_weight,
        'num_samples': 0,
        'annotation_reference_name': 'aff_samples',
    })

    ModularSamplingProbabilisticConnectorAnnotationSamplesCount(
        network, name + 'On', lgn_on, target, ps).connect()
    ps['weight_functions.f1.params.ON'] = False
    ps['base_weight'] = (self.parameters.base_weight *
                         self.parameters.off_bias)
    ModularSamplingProbabilisticConnectorAnnotationSamplesCount(
        network, name + 'Off', lgn_off, target, ps).connect()
def cosmo2radar_data(radar, cosmo_coord, cosmo_data, time_index=0,
                     slice_xy=True, slice_z=False,
                     field_names=['temperature']):
    """
    get the COSMO value corresponding to each radar gate using nearest
    neighbour interpolation

    Parameters
    ----------
    radar : Radar
        the radar object containing the information on the position of
        the radar gates
    cosmo_coord : dict
        dictionary containing the COSMO coordinates
    cosmo_data : dict
        dictionary containing the COSMO data
    time_index : int
        index of the forecasted data
    slice_xy : boolean
        if true the horizontal plane of the COSMO field is cut to the
        dimensions of the radar field
    slice_z : boolean
        if true the vertical plane of the COSMO field is cut to the
        dimensions of the radar field
    field_names : str
        names of COSMO fields to convert (default temperature)

    Returns
    -------
    cosmo_fields : list of dict
        list of dictionary with the COSMO fields and metadata
    """
    # debugging
    # start_time = time.time()

    x_radar, y_radar, z_radar = _put_radar_in_swiss_coord(radar)

    (x_cosmo, y_cosmo, z_cosmo, ind_xmin, ind_ymin, ind_zmin, ind_xmax,
     ind_ymax, ind_zmax) = _prepare_for_interpolation(
         x_radar, y_radar, z_radar, cosmo_coord, slice_xy=slice_xy,
         slice_z=slice_z)

    cosmo_fields = []
    for field in field_names:
        if field not in cosmo_data:
            warn('COSMO field ' + field + ' data not available')
        else:
            values = cosmo_data[field]['data'][
                time_index, ind_zmin:ind_zmax + 1, ind_ymin:ind_ymax + 1,
                ind_xmin:ind_xmax + 1].flatten()

            # find interpolation function
            interp_func = NearestNDInterpolator((z_cosmo, y_cosmo, x_cosmo),
                                                values)
            del values

            # interpolate
            data_interp = interp_func((z_radar, y_radar, x_radar))

            # put field
            field_dict = get_metadata(field)
            field_dict['data'] = data_interp.astype(float)
            cosmo_fields.append({field: field_dict})

            del data_interp

    if not cosmo_fields:
        warn('COSMO data not available')
        return None

    return cosmo_fields
else:
    # Interpolating the mesh coordinates field (which is a vector function
    # space) into the vector function space equivalent of our solution space
    # gets us global DOF values (stored in the dat) which are the coordinates
    # of the global DOFs of our solution space. This is the necessary
    # coordinates field X.
    print('Getting coordinates field X')
    Vc = firedrake.VectorFunctionSpace(mesh, V.ufl_element())
    X = firedrake.interpolate(mesh.coordinates, Vc).dat.data_ro[:]

    # Pick the appropriate "interpolate" method needed to create
    # u_interpolated given the chosen method
    print(f'Creating {method} interpolator')
    if method == 'nearest':
        interpolator = NearestNDInterpolator(xs, u_obs_vals)
    elif method == 'linear':
        interpolator = LinearNDInterpolator(xs, u_obs_vals, fill_value=0.0)
    elif method == 'clough-tocher':
        interpolator = CloughTocher2DInterpolator(xs, u_obs_vals,
                                                  fill_value=0.0)
    elif method == 'gaussian':
        interpolator = Rbf(xs[:, 0], xs[:, 1], u_obs_vals,
                           function='gaussian')

    print('Interpolating to create u_interpolated')
    u_interpolated = firedrake.Function(
        V, name=f'u_interpolated_{method}_{num_points}')
    u_interpolated.dat.data[:] = interpolator(X[:, 0], X[:, 1])

    # Two terms in the functional - note the difference in the misfit term!
    misfit_expr = 0.5 * ((u_interpolated - u) / σ)**2
    α = firedrake.Constant(0.5)
    regularisation_expr = 0.5 * α**2 * inner(grad(q), grad(q))
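# The method dispatch above is easy to exercise outside Firedrake. A small
# sketch with synthetic scattered data (all names here are illustrative)
# showing how the four interpolator choices behave on the same point set.
import numpy as np
from scipy.interpolate import (NearestNDInterpolator, LinearNDInterpolator,
                               CloughTocher2DInterpolator, Rbf)

rng = np.random.default_rng(1)
xs = rng.uniform(0.0, 1.0, size=(200, 2))        # observation locations
u_obs_vals = np.sin(2 * np.pi * xs[:, 0]) * xs[:, 1]

interpolators = {
    'nearest': NearestNDInterpolator(xs, u_obs_vals),
    'linear': LinearNDInterpolator(xs, u_obs_vals, fill_value=0.0),
    'clough-tocher': CloughTocher2DInterpolator(xs, u_obs_vals,
                                                fill_value=0.0),
    'gaussian': Rbf(xs[:, 0], xs[:, 1], u_obs_vals, function='gaussian'),
}

X = rng.uniform(0.0, 1.0, size=(50, 2))          # stand-in for DOF coords
for name, interp in interpolators.items():
    vals = interp(X[:, 0], X[:, 1])
    print(name, float(np.nanmax(np.abs(vals))))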
# Assemble the global linear-system matrix
for K in Th.simplices:
    for i in range(len(K)):
        if K[i] not in Sigma_theta:
            for j in range(len(K)):
                if K[j] not in Sigma_theta:
                    G[K[i]][K[j]] += G_k(points[K])[i][j]
                else:
                    f[K[i]] -= G_k(points[K])[i][j] * f[K[j]]

print('...Solving the linear system...')
# solve the global linear system
theta = np.linalg.solve(G, f)

from scipy.interpolate import NearestNDInterpolator

interp_t = NearestNDInterpolator(points, theta)
Xm, Ym = np.meshgrid(z, rho)
theta_grid = interp_t(Xm, Ym)

# write the result to a file in the .mv2 format
with open('result.mv2', 'w') as f:
    f.write(str(len(points)) + ' ' + str(3) + ' ' + str(1) + ' theta \n')
    for i, p in enumerate(points):
        f.write(
            str(i + 1) + ' ' + str(p[1]) + ' ' + str(p[0]) + ' 0' + ' ' +
            str(theta[i]) + ' \n')
    f.write(
        str(len(Th.simplices)) + ' ' + str(3) + ' ' + str(3) +
        ' BC_id mat_id mat_id_Out \n')
    for i, K in enumerate(Th.simplices):
        f.write(
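# The grid-evaluation step above is independent of the FEM assembly. A
# self-contained sketch in which a random point cloud stands in for the
# triangulation Th and z, rho are assumed axis vectors.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

# Hypothetical nodal solution: scattered (z, rho) points with a value theta.
rng = np.random.default_rng(2)
points = rng.uniform(0.0, 1.0, size=(300, 2))
theta = np.exp(-10 * ((points[:, 0] - 0.5) ** 2 +
                      (points[:, 1] - 0.5) ** 2))

interp_t = NearestNDInterpolator(points, theta)

# Evaluate on a regular (z, rho) grid for export or plotting.
z = np.linspace(0.0, 1.0, 50)
rho = np.linspace(0.0, 1.0, 40)
Zm, Rm = np.meshgrid(z, rho)
theta_grid = interp_t(Zm, Rm)   # shape (40, 50), one value per grid node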
def _update(self):
    # Rebuild the nearest-neighbour interpolator from the current cache,
    # whose keys are coordinate tuples and whose values are cached samples.
    xy = list(self.cache.items())
    x = np.array([k for k, _ in xy])   # (n, d) coordinates
    y = np.array([v for _, v in xy])   # (n,) values
    self.interpolator = NearestNDInterpolator(x, y)
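# A sketch of how such a cache-backed interpolator might be used. The class
# scaffolding here is assumed; only the rebuild pattern in _update matches
# the snippet above.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

class CachedNearest:
    """Nearest-neighbour lookup rebuilt lazily from a point -> value cache."""

    def __init__(self):
        self.cache = {}           # maps coordinate tuples to observed values
        self.interpolator = None

    def observe(self, point, value):
        self.cache[tuple(point)] = value
        self._update()

    def _update(self):
        xy = list(self.cache.items())
        x = np.array([k for k, _ in xy])
        y = np.array([v for _, v in xy])
        self.interpolator = NearestNDInterpolator(x, y)

cn = CachedNearest()
cn.observe((0.0, 0.0), 1.0)
cn.observe((1.0, 1.0), 2.0)
print(cn.interpolator(0.2, 0.1))  # -> 1.0, the nearest cached observation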
def workflow(valid):
    """Adjust stage IV hourly precip so its 24 h total matches PRISM."""
    if valid.month == 1 and valid.day == 1:
        print("prism_adjust_stage4, sorry Jan 1 processing is a TODO!")
        return
    # read prism
    tidx = daily_offset(valid)
    nc = ncopen("/mesonet/data/prism/%s_daily.nc" % (valid.year, ), "r")
    ppt = nc.variables["ppt"][tidx, :, :]
    # missing as zero
    ppt = np.where(ppt.mask, 0, ppt)
    lons = nc.variables["lon"][:]
    lats = nc.variables["lat"][:]
    nc.close()
    (lons, lats) = np.meshgrid(lons, lats)
    (i, j) = prismutil.find_ij(DEBUGLON, DEBUGLAT)
    LOG.debug("prism debug point ppt: %.3f", ppt[j, i])
    # Interpolate this onto the stage4 grid
    nc = ncopen(
        ("/mesonet/data/stage4/%s_stage4_hourly.nc") % (valid.year, ),
        "a",
        timeout=300,
    )
    p01m = nc.variables["p01m"]
    p01m_status = nc.variables["p01m_status"]
    s4lons = nc.variables["lon"][:]
    s4lats = nc.variables["lat"][:]
    i, j = find_ij(s4lons, s4lats, DEBUGLON, DEBUGLAT)
    # Values are in hourly arrears, so start at -23 hours and run through
    # the current hour
    sts_tidx = hourly_offset(valid - datetime.timedelta(hours=23))
    ets_tidx = hourly_offset(valid + datetime.timedelta(hours=1))
    s4total = np.sum(p01m[sts_tidx:ets_tidx, :, :], axis=0)
    LOG.debug(
        "stage4 s4total: %.3f lon: %.2f (%.2f) lat: %.2f (%.2f)",
        s4total[i, j],
        s4lons[i, j],
        DEBUGLON,
        s4lats[i, j],
        DEBUGLAT,
    )
    # make sure the s4total does not have zeros
    s4total = np.where(s4total < 0.001, 0.001, s4total)

    nn = NearestNDInterpolator((lons.flatten(), lats.flatten()),
                               ppt.flatten())
    prism_on_s4grid = nn(s4lons, s4lats)
    LOG.debug(
        "shape of prism_on_s4grid: %s s4lons: %s ll: %.2f s4lats: %s ll: %.2f",
        np.shape(prism_on_s4grid),
        np.shape(s4lons),
        s4lons[0, 0],
        np.shape(s4lats),
        s4lats[0, 0],
    )
    multiplier = prism_on_s4grid / s4total
    LOG.debug(
        "prism avg: %.3f stageIV avg: %.3f prismons4grid avg: %.3f mul: %.3f",
        np.mean(ppt),
        np.mean(s4total),
        np.mean(prism_on_s4grid),
        np.mean(multiplier),
    )
    LOG.debug(
        "Boone IA0807 prism: %.3f stageIV: %.4f prismons4grid: %.3f mul: %.3f",
        ppt[431, 746],
        s4total[i, j],
        prism_on_s4grid[i, j],
        multiplier[i, j],
    )
    # Do the work now; we should not have to worry about the scale factor
    for tidx in range(sts_tidx, ets_tidx):
        oldval = p01m[tidx, :, :]
        # s4total is floored at 0.001 above, so zero hours get a floor of
        # 0.001 / 24 ~= 0.00004; a zero multiplier then still nets zero
        newval = np.where(oldval < 0.001, 0.00004, oldval) * multiplier
        nc.variables["p01m"][tidx, :, :] = newval
        LOG.debug(
            "adjust tidx: %s oldval: %.3f newval: %.3f",
            tidx,
            oldval[i, j],
            newval[i, j],
        )
        # make sure we have data
        if np.ma.max(newval) > 0:
            p01m_status[tidx] = 2
        else:
            print(("prism_adjust_stage4 NOOP for time %s[idx:%s]") % (
                (datetime.datetime(valid.year, 1, 1, 0) +
                 datetime.timedelta(hours=tidx)).strftime("%Y-%m-%dT%H"),
                tidx,
            ))
    nc.close()
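# The regridding at the heart of the adjustment above is a plain
# nearest-neighbour resampling from one lat/lon grid onto another, followed
# by a ratio multiply. A toy version with made-up grids and fields; the
# shapes and extents are assumptions for illustration only.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

# Hypothetical source grid (PRISM-like) with a daily total at each cell.
lons, lats = np.meshgrid(np.linspace(-104, -90, 30), np.linspace(38, 46, 20))
ppt = np.maximum(0.0, np.sin(lons / 3.0) + np.cos(lats / 2.0))

nn = NearestNDInterpolator((lons.flatten(), lats.flatten()), ppt.flatten())

# Hypothetical target grid (stage IV-like) and its 24 h sum.
s4lons, s4lats = np.meshgrid(np.linspace(-103, -91, 45),
                             np.linspace(39, 45, 35))
s4total = np.full(s4lons.shape, 0.5)
s4total = np.where(s4total < 0.001, 0.001, s4total)  # avoid divide-by-zero

prism_on_s4grid = nn(s4lons, s4lats)
multiplier = prism_on_s4grid / s4total   # scales each hourly field so the
                                         # 24 h sum matches the daily total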
def __init__(self, network, lgn_on, lgn_off, target, parameters, name):
    MozaikComponent.__init__(self, network, parameters)
    import pickle
    self.name = name
    on = lgn_on.pop
    off = lgn_off.pop
    on_weights = []
    off_weights = []
    t_size = target.size_in_degrees()

    or_map = None
    if self.parameters.or_map:
        # pickles must be opened in binary mode
        with open(self.parameters.or_map_location, 'rb') as f:
            or_map = pickle.load(f) * numpy.pi
        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(or_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(or_map)[1])
        X, Y = numpy.meshgrid(coords_x, coords_y)
        or_map = NearestNDInterpolator(list(zip(X.flatten(), Y.flatten())),
                                       or_map.flatten())

    phase_map = None
    if self.parameters.phase_map:
        with open(self.parameters.phase_map_location, 'rb') as f:
            phase_map = pickle.load(f)
        coords_x = numpy.linspace(-t_size[0] / 2.0, t_size[0] / 2.0,
                                  numpy.shape(phase_map)[0])
        coords_y = numpy.linspace(-t_size[1] / 2.0, t_size[1] / 2.0,
                                  numpy.shape(phase_map)[1])
        X, Y = numpy.meshgrid(coords_x, coords_y)
        phase_map = NearestNDInterpolator(
            list(zip(X.flatten(), Y.flatten())), phase_map.flatten())

    for (j, neuron2) in enumerate(target.pop.all()):
        if or_map is not None:
            orientation = or_map(target.pop.positions[0][j],
                                 target.pop.positions[1][j])
        else:
            orientation = parameters.orientation_preference.next()[0]

        if phase_map is not None:
            phase = phase_map(target.pop.positions[0][j],
                              target.pop.positions[1][j])
        else:
            phase = parameters.phase.next()[0]

        aspect_ratio = parameters.aspect_ratio.next()[0]
        frequency = parameters.frequency.next()[0]
        size = parameters.size.next()[0]

        if orientation > numpy.pi:
            print(orientation)

        target.add_neuron_annotation(j, 'LGNAfferentOrientation', orientation, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentAspectRatio', aspect_ratio, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentFrequency', frequency, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentSize', size, protected=True)
        target.add_neuron_annotation(j, 'LGNAfferentPhase', phase, protected=True)

        for (i, neuron1) in enumerate(on.all()):
            if parameters.topological:
                on_weights.append(
                    (i, j,
                     numpy.max((0, gabor(on.positions[0][i], on.positions[1][i],
                                         target.pop.positions[0][j],
                                         target.pop.positions[1][j],
                                         orientation + numpy.pi / 2, frequency,
                                         phase, size, aspect_ratio))),
                     parameters.delay))
                off_weights.append(
                    (i, j,
                     -numpy.min((0, gabor(off.positions[0][i], off.positions[1][i],
                                          target.pop.positions[0][j],
                                          target.pop.positions[1][j],
                                          orientation + numpy.pi / 2, frequency,
                                          phase, size, aspect_ratio))),
                     parameters.delay))
            else:
                on_weights.append(
                    (i, j,
                     numpy.max((0, gabor(on.positions[0][i], on.positions[1][i],
                                         0, 0, orientation + numpy.pi / 2,
                                         frequency, phase, size, aspect_ratio))),
                     parameters.delay))
                off_weights.append(
                    (i, j,
                     -numpy.min((0, gabor(off.positions[0][i], off.positions[1][i],
                                          0, 0, orientation + numpy.pi / 2,
                                          frequency, phase, size, aspect_ratio))),
                     parameters.delay))

    if parameters.probabilistic:
        on_proj = SpecificProbabilisticArborization(
            network, lgn_on, target, on_weights,
            parameters.specific_arborization, 'ON_to_[' + target.name + ']')
        off_proj = SpecificProbabilisticArborization(
            network, lgn_off, target, off_weights,
            parameters.specific_arborization, 'OFF_to_[' + target.name + ']')
    else:
        on_proj = SpecificArborization(
            network, lgn_on, target, on_weights,
            parameters.specific_arborization, 'ON_to_[' + target.name + ']')
        off_proj = SpecificArborization(
            network, lgn_off, target, off_weights,
            parameters.specific_arborization, 'OFF_to_[' + target.name + ']')

    on_proj.connect()
    off_proj.connect()
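# The ON/OFF weight construction above samples a Gabor profile at each LGN
# cell's position relative to the cortical receptive-field centre: positive
# lobes drive ON weights, negative lobes (sign-flipped) drive OFF weights.
# A sketch with a hypothetical gabor helper; the signature is inferred from
# the call sites above and the profile itself is an assumed standard form.
import numpy as np

def gabor(x1, y1, x2, y2, orientation, frequency, phase, size, aspect_ratio):
    """Hypothetical Gabor profile at (x1, y1) for an RF centred at (x2, y2)."""
    dx, dy = x1 - x2, y1 - y2
    # rotate into the Gabor's own coordinate frame
    u = dx * np.cos(orientation) + dy * np.sin(orientation)
    v = -dx * np.sin(orientation) + dy * np.cos(orientation)
    envelope = np.exp(-(u ** 2 + (aspect_ratio * v) ** 2) / (2 * size ** 2))
    return envelope * np.cos(2 * np.pi * frequency * u + phase)

g = gabor(0.1, 0.0, 0.0, 0.0, orientation=np.pi / 4, frequency=2.0,
          phase=0.0, size=0.3, aspect_ratio=1.0)
on_weight = max(0.0, g)     # ON pathway keeps the positive lobe
off_weight = -min(0.0, g)   # OFF pathway keeps the sign-flipped negative lobe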
def plot2Ddata(
    xyz,
    data,
    vec=False,
    nx=100,
    ny=100,
    ax=None,
    mask=None,
    level=False,
    figname=None,
    ncontour=10,
    dataloc=False,
    contourOpts={},
    levelOpts={},
    streamplotOpts={},
    scale="linear",
    clim=None,
    method="linear",
    shade=False,
    shade_ncontour=100,
    shade_azimuth=-45.0,
    shade_angle_altitude=45.0,
    shadeOpts={},
):
    """
    Take unstructured xy points, interpolate, then plot in 2D

    :param numpy.ndarray xyz: data locations
    :param numpy.ndarray data: data values
    :param bool vec: plot streamplot?
    :param int nx: number of x grid locations
    :param int ny: number of y grid locations
    :param matplotlib.axes ax: axes
    :param numpy.ndarray mask: boolean mask for the array
    :param bool level: if True, also plot :meth:`matplotlib.pyplot.contour` lines
    :param str figname: figure name
    :param int ncontour: number of :meth:`matplotlib.pyplot.contourf` contours
    :param bool dataloc: plot the data locations
    :param dict contourOpts: :meth:`matplotlib.pyplot.contourf` options
    :param dict levelOpts: :meth:`matplotlib.pyplot.contour` options
    :param numpy.ndarray clim: colorbar limits
    :param str method: interpolation method, either 'linear' or 'nearest'
    :param bool shade: add shading to the plot
    :param int shade_ncontour: number of :meth:`matplotlib.pyplot.contourf`
        contours for the shading
    :param float shade_azimuth: azimuth for the light source in shading
    :param float shade_angle_altitude: angle altitude for the light source
        in shading
    :param dict shadeOpts: :meth:`matplotlib.pyplot.contourf` options
    """
    # Copy so that popping keys below does not mutate the caller's dict
    # (or the shared mutable default argument).
    contourOpts = dict(contourOpts)

    # Error checking and set vmin, vmax
    vlimits = [None, None]
    if clim is not None:
        vlimits = [np.min(clim), np.max(clim)]
    for i, key in enumerate(["vmin", "vmax"]):
        if key in contourOpts.keys():
            if vlimits[i] is None:
                vlimits[i] = contourOpts.pop(key)
            else:
                if not np.isclose(contourOpts[key], vlimits[i]):
                    raise Exception(
                        "The value provided in the colorbar limit, clim {}, "
                        "does not match the value of {} provided in "
                        "contourOpts: {}. Only one value should be provided, "
                        "or the two values must be equal.".format(
                            vlimits[i], key, contourOpts[key]))
                contourOpts.pop(key)
    vmin, vmax = vlimits[0], vlimits[1]

    # create a figure if it doesn't exist
    if ax is None:
        fig = plt.figure()
        ax = plt.subplot(111)
    else:
        # fig was previously undefined when an axes was passed in, which
        # broke the figname branch below
        fig = ax.figure

    # interpolate data to grid locations
    xmin, xmax = xyz[:, 0].min(), xyz[:, 0].max()
    ymin, ymax = xyz[:, 1].min(), xyz[:, 1].max()
    x = np.linspace(xmin, xmax, nx)
    y = np.linspace(ymin, ymax, ny)
    X, Y = np.meshgrid(x, y)
    xy = np.c_[X.flatten(), Y.flatten()]

    if vec is False:
        if method == "nearest":
            F = NearestNDInterpolator(xyz[:, :2], data)
        else:
            F = LinearNDInterpolator(xyz[:, :2], data)
        DATA = F(xy)
        DATA = DATA.reshape(X.shape)

        # Levels definitions
        dataselection = np.logical_and(~np.isnan(DATA),
                                       np.abs(DATA) != np.inf)
        if scale == "log":
            DATA = np.abs(DATA)

        # set vmin, vmax if they are not already set
        vmin = DATA[dataselection].min() if vmin is None else vmin
        vmax = DATA[dataselection].max() if vmax is None else vmax

        if scale == "log":
            levels = np.logspace(np.log10(vmin), np.log10(vmax), ncontour + 1)
            norm = colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            levels = np.linspace(vmin, vmax, ncontour + 1)
            norm = colors.Normalize(vmin=vmin, vmax=vmax)

        if mask is not None:
            Fmask = NearestNDInterpolator(xyz[:, :2], mask)
            MASK = Fmask(xy)
            MASK = MASK.reshape(X.shape)
            DATA = np.ma.masked_array(DATA, mask=MASK)

        contourOpts = {"levels": levels, "norm": norm, "zorder": 1,
                       **contourOpts}
        cont = ax.contourf(X, Y, DATA, **contourOpts)

        if level:
            levelOpts = {"levels": levels, "zorder": 3, **levelOpts}
            CS = ax.contour(X, Y, DATA, **levelOpts)

    else:
        # Assume size of data is (N, 2)
        datax = data[:, 0]
        datay = data[:, 1]
        if method == "nearest":
            Fx = NearestNDInterpolator(xyz[:, :2], datax)
            Fy = NearestNDInterpolator(xyz[:, :2], datay)
        else:
            Fx = LinearNDInterpolator(xyz[:, :2], datax)
            Fy = LinearNDInterpolator(xyz[:, :2], datay)
        DATAx = Fx(xy)
        DATAy = Fy(xy)
        DATA = np.sqrt(DATAx**2 + DATAy**2).reshape(X.shape)
        DATAx = DATAx.reshape(X.shape)
        DATAy = DATAy.reshape(X.shape)
        if scale == "log":
            DATA = np.abs(DATA)

        # Levels definitions
        dataselection = np.logical_and(~np.isnan(DATA),
                                       np.abs(DATA) != np.inf)

        # set vmin, vmax
        vmin = DATA[dataselection].min() if vmin is None else vmin
        vmax = DATA[dataselection].max() if vmax is None else vmax

        if scale == "log":
            levels = np.logspace(np.log10(vmin), np.log10(vmax), ncontour + 1)
            norm = colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            levels = np.linspace(vmin, vmax, ncontour + 1)
            norm = colors.Normalize(vmin=vmin, vmax=vmax)

        if mask is not None:
            Fmask = NearestNDInterpolator(xyz[:, :2], mask)
            MASK = Fmask(xy)
            MASK = MASK.reshape(X.shape)
            DATA = np.ma.masked_array(DATA, mask=MASK)

        contourOpts = {"levels": levels, "norm": norm, "zorder": 1,
                       **contourOpts}
        cont = ax.contourf(X, Y, DATA, **contourOpts)

        streamplotOpts = {"zorder": 4, "color": "w", **streamplotOpts}
        ax.streamplot(X, Y, DATAx, DATAy, **streamplotOpts)

        if level:
            # levelOpts already carries levels and zorder; passing them again
            # as explicit keywords here raised a duplicate-argument TypeError
            levelOpts = {"levels": levels, "zorder": 3, **levelOpts}
            CS = ax.contour(X, Y, DATA, **levelOpts)

    if shade:

        def hillshade(array, azimuth, angle_altitude):
            """
            code copied from https://www.neonscience.org/create-hillshade-py
            """
            azimuth = 360.0 - azimuth
            x, y = np.gradient(array)
            slope = np.pi / 2.0 - np.arctan(np.sqrt(x * x + y * y))
            aspect = np.arctan2(-x, y)
            azimuthrad = azimuth * np.pi / 180.0
            altituderad = angle_altitude * np.pi / 180.0
            shaded = np.sin(altituderad) * np.sin(slope) + np.cos(
                altituderad) * np.cos(slope) * np.cos(
                    (azimuthrad - np.pi / 2.0) - aspect)
            return 255 * (shaded + 1) / 2

        shadeOpts = {
            "cmap": "Greys",
            "alpha": 0.35,
            "antialiased": True,
            "zorder": 2,
            **shadeOpts,
        }
        ax.contourf(X, Y,
                    hillshade(DATA, shade_azimuth, shade_angle_altitude),
                    shade_ncontour, **shadeOpts)

    if dataloc:
        ax.plot(xyz[:, 0], xyz[:, 1], "k.", ms=2)
    ax.set_aspect("equal", adjustable="box")
    if figname:
        plt.axis("off")
        fig.savefig(figname, dpi=200)
    if level:
        return cont, ax, CS
    else:
        return cont, ax
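# A minimal call pattern for the plotting helper above, exercised on the
# 'nearest' branch with synthetic scattered data. It assumes the module-level
# imports used by plot2Ddata (np, plt, colors, and the scipy interpolators)
# are already in scope.
import numpy as np
import matplotlib.pyplot as plt

# Scattered observations at random locations.
rng = np.random.default_rng(3)
xyz = rng.uniform(-1.0, 1.0, size=(500, 2))
data = np.exp(-(xyz[:, 0] ** 2 + xyz[:, 1] ** 2) / 0.2)

cont, ax = plot2Ddata(xyz, data, nx=80, ny=80, method="nearest",
                      ncontour=15, dataloc=True)
plt.colorbar(cont, ax=ax)
plt.show()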