def get_masked_data(self, bbox):
    """
    Apply rectangular mask to multipoint data.

    :param bbox:
        (lonmin, lonmax, latmin, latmax) tuple
    :return:
        instance of :class:`MultiPointData`, where lons, lats, values
        and labels are replaced with masked arrays
    """
    import numpy.ma as ma

    lonmin, lonmax, latmin, latmax = bbox
    lon_mask = ma.masked_outside(self.lons, lonmin, lonmax)
    lat_mask = ma.masked_outside(self.lats, latmin, latmax)
    # A point is dropped if either coordinate is out of range
    mask = lon_mask.mask + lat_mask.mask
    lons = ma.array(self.lons, mask=mask).compressed()
    lats = ma.array(self.lats, mask=mask).compressed()
    Z = ma.array(self.z, mask=mask).compressed()
    labels = ma.array(self.labels, mask=mask).compressed()
    if isinstance(self.values, dict):
        values = {}
        for key, val in self.values.items():
            values[key] = ma.array(val, mask=mask).compressed()
    else:
        values = ma.array(self.values, mask=mask).compressed()
    style_params = {}
    for key, val in self.style_params.items():
        style_params[key] = ma.array(val, mask=mask).compressed()
    return MultiPointData(lons, lats, z=Z, values=values, labels=labels,
                          style_params=style_params)
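# A minimal self-contained sketch of the bbox-masking pattern used by
# get_masked_data() above, on plain 1-D arrays instead of a MultiPointData
# instance (the arrays and bounds here are illustrative).
import numpy as np
import numpy.ma as ma

lons = np.array([10., 20., 30., 40.])
lats = np.array([45., 55., 65., 75.])
lonmin, lonmax, latmin, latmax = 15., 35., 50., 70.
mask = (ma.masked_outside(lons, lonmin, lonmax).mask
        | ma.masked_outside(lats, latmin, latmax).mask)
print(ma.array(lons, mask=mask).compressed())  # [20. 30.]
print(ma.array(lats, mask=mask).compressed())  # [55. 65.]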
def normalshow(self, icemap):
    icemap = icemap.reshape(448, 304)
    cmap = plt.cm.gist_rainbow  # plt.cm.jet
    cmap.set_bad('black', 0.8)
    map1 = ma.masked_outside(icemap, 200, 450)
    map2 = ma.masked_outside(icemap, -3, +2)
    fig = plt.figure(figsize=(8, 10))
    ax = fig.add_subplot(111)
    ax.set_title(self.melttype + ' Freeze Date')  # Earliest,Latest,Median Freeze Date
    cax = ax.imshow(map1, interpolation='nearest', vmin=263, vmax=426, cmap=cmap)
    cbar = fig.colorbar(cax, ticks=[274, 305, 335, 366, 397, 426])
    cbar.ax.set_yticklabels(['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar'])
    ax.imshow(map2, interpolation='nearest', vmin=-1, vmax=1, cmap=plt.cm.Greys)
    ax.set_xlabel('white = usually not icefree', fontsize=14)  # never,"usually not","not always" icefree
    ax.axes.get_yaxis().set_ticks([])
    ax.axes.get_xaxis().set_ticks([])
    ax.text(2, 8, r'Data: NSIDC', fontsize=10, color='black', fontweight='bold')
    ax.text(2, 18, r'Map: Nico Sun', fontsize=10, color='black', fontweight='bold')
    ax.text(-0.04, 0.48, 'https://sites.google.com/site/cryospherecomputing',
            transform=ax.transAxes, rotation='vertical', color='grey', fontsize=10)
    fig.tight_layout(pad=1)
    fig.subplots_adjust(left=0.05)
    fig.savefig('Special_Animation/' + self.melttype + '_Freeze_Date.png')
    plt.pause(0.01)
def water_mass_mixing(T, S, indices):
    """
    Computes the water mass mixing percentage based on water mass core
    indices. The calculations are based on the mixing triangle
    (Mamayev 1975).

    Parameters
    ----------
    T : array like
        Temperature.
    S : array like
        Salinity.
    indices : array like
        A list/array with the core thermohaline indices for each
        water mass.

    Returns
    -------
    m1, m2, m3 : array like
        Relative composition for water masses 1, 2 and 3.

    References
    ----------
    [1] Mamayev, O. I. (Ed.). Temperature -- Salinity Analysis of World
        Ocean Waters. Elsevier, 1975, 11.

    Notes
    -----
    This function is based upon code developed by Filipe Fernandes and
    available at https://ocefpaf.github.io/python4oceanographers/blog/
    2014/03/24/watermass/.
    """
    # Makes sure input parameters are numpy arrays
    T, S, indices = asarray(T), asarray(S), asarray(indices)

    # Creates linear system of three equations based on input parameters.
    a = r_[indices, ones((1, 3))]
    b = c_[T.ravel(), S.ravel(), ones(T.shape).ravel()].T
    m = linalg.solve(a, b)

    # The mixing indices
    m1 = m[0].reshape(T.shape)
    m2 = m[1].reshape(T.shape)
    m3 = m[2].reshape(T.shape)

    # Mask values outside the mixing triangle.
    m1 = ma.masked_outside(ma.masked_invalid(m1), 0, 1)
    m2 = ma.masked_outside(ma.masked_invalid(m2), 0, 1)
    m3 = ma.masked_outside(ma.masked_invalid(m3), 0, 1)

    return m1, m2, m3
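# Hedged usage sketch for water_mass_mixing(), assuming the module-level
# numpy imports the function relies on (asarray, r_, c_, ones, linalg, ma).
# `indices` carries one column per water mass: core temperatures in the
# first row, core salinities in the second. All numbers are illustrative.
import numpy as np

indices = np.array([[18.0, 4.0, 2.0],      # core temperatures
                    [36.0, 34.5, 34.9]])   # core salinities
T = np.array([[10.0, 6.0], [12.0, 8.0]])
S = np.array([[35.0, 34.8], [35.2, 34.7]])
m1, m2, m3 = water_mass_mixing(T, S, indices)
# m1 + m2 + m3 == 1 wherever the point lies inside the mixing triangle;
# compositions outside [0, 1] come back masked.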
def normalshow(self, icemap):
    icemap = icemap.reshape(332, 316)
    cmap = plt.cm.gist_rainbow  # plt.cm.jet
    cmap.set_bad('black', 0.8)
    map1 = ma.masked_outside(icemap, 55, 300)
    map2 = ma.masked_outside(icemap, -3, +2)
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.set_title(self.melttype + ' Freeze Date')  # Earliest,Latest,Median Freeze Date
    cax = ax.imshow(map1, interpolation='nearest', vmin=59, vmax=248, cmap=cmap)
    cbar = fig.colorbar(cax, ticks=[60, 91, 121, 152, 182, 213, 244])
    cbar.ax.set_yticklabels(['Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep'])
    ax.imshow(map2, interpolation='nearest', vmin=-1, vmax=1, cmap=plt.cm.Greys)
    ax.set_xlabel('white = usually not icefree', fontsize=14)  # never,"usually not","not always" icefree
    ax.axes.get_yaxis().set_ticks([])
    ax.axes.get_xaxis().set_ticks([])
    ax.text(2, 8, r'Data: NSIDC', fontsize=10, color='black', fontweight='bold')
    ax.text(2, 18, r'Map: Nico Sun', fontsize=10, color='black', fontweight='bold')
    ax.text(-0.04, 0.48, 'https://sites.google.com/site/cryospherecomputing',
            transform=ax.transAxes, rotation='vertical', color='grey', fontsize=10)
    fig.tight_layout(pad=1)
    fig.subplots_adjust(left=0.05)
    fig.savefig('tempgif/South_' + self.melttype + '_Freeze_Date.png')
    plt.pause(0.01)
def search_granules(self, srcDir, sDTime, eDTime,
                    BBox=[[-90,-180],[90,180]], verbose=True):
    '''
    BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                /* lon: -180 ~ 180 */
    '''
    srcPATH = get_path(srcDir, sDTime, eDTime)

    if len(srcPATH) == 0:
        print "!"*50
        print "Warning by %s" % (__file__.split("/")[-1])
        print "No file for the time [%s]-[%s]" % (sDTime, eDTime)
        print "in %s" % (srcDir)
        print "!"*50
        raise IOError

    '''
    gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
               for path in srcPATH]
    '''
    gtrkDim = [get_gtrack_dim(path, self.func_read_vs, self.cached,
                              self.cacheDir, verbose=verbose)
               for path in srcPATH]

    DTime, Lat, Lon = zip(*gtrkDim)

    Granule = deque([])
    for dtime, lat, lon, path in map(None, DTime, Lat, Lon, srcPATH):
        mskLat = ma.masked_outside(lat, BBox[0][0], BBox[1][0]).mask
        mskLon = ma.masked_outside(lon, BBox[0][1], BBox[1][1]).mask
        mskTime = ma.masked_outside(dtime, sDTime, eDTime).mask

        #mask = (mskLat + mskLon).all(1) + mskTime
        mask = (mskLat + mskLon).all(0) + mskTime

        if not mask.all():
            idx = ma.array(arange(dtime.size), "int", mask=mask).compressed()
            Granule.append([path, dtime[idx], lat[idx], lon[idx], idx])
            if verbose == True:
                print '* [V] ground track dimension (%s): %s' % (self.cached, path)
        else:
            if verbose == False:
                print '* [_] ground track dimension (%s): %s' % (self.cached, path)

    summary = '| [{}] granules intersects domain {} out of [{}] total between ({}-{}) |\n' \
              .format(len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime)
    line = '+' + '-'*len(summary[3:]) + '+\n'
    print line + summary + line
    return list(Granule)
def mask_array_I05(self, HIGH=273, LOW=230):
    if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
        new_I05_mask = npma.mask_or(
            self.I05_mask.mask,
            npma.masked_outside(self.__I05, LOW, HIGH).mask)
        new_I04_mask = npma.mask_or(
            self.I04_mask.mask,
            npma.masked_outside(self.__I05, LOW, HIGH).mask)
        self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
        self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
    else:
        self.I05_mask = npma.masked_outside(self.__I05, LOW, HIGH)
        self.I04_mask = npma.array(self.__I04, mask=self.I05_mask.mask)
def _Veggspretting(self):
    # Find the particles that are outside the box
    a = ma.masked_outside(self.pos, 0, self.L)
    # Reverse (180 degrees) the velocity of the particles outside the box
    self.hast += -2 * self.hast * a.mask
    # Find all particles that hit the hole
    b = ma.masked_inside(self.pos[:, 0], self.L / 4, 3 * self.L / 4)
    c = ma.masked_inside(self.pos[:, 1], self.L / 4, 3 * self.L / 4)
    e = ma.masked_outside(self.pos[:, 2], 0, self.L)
    #print(Sum(b.mask*c.mask * e.mask))
    self.hullteller = Sum(e.mask * b.mask * c.mask)
    self.bevegelsesmengde = abs(
        Sum(self.m_H * self.hast[:, 0] * b.mask * c.mask * e.mask))
def grididx(self, xpts, ypts):
    # Get the indices of the grid for each point
    iLon = np.floor(self.nx() * (xpts - self.x0) / (self.x1 - self.x0))
    iLon = iLon.astype(int)
    iLat = np.floor(self.ny() * (ypts - self.y0) / (self.y1 - self.y0))
    iLat = iLat.astype(int)
    # Mask indices that fall outside the grid
    iLat = ma.masked_outside(iLat, 0, self.ny() - 1)
    iLon = ma.masked_outside(iLon, 0, self.nx() - 1)
    return iLon, iLat
def search_granules(self, srcDir, sDTime, eDTime,
                    BBox=[[-90, -180], [90, 180]], thresh=0.001):
    '''
    BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                /* lon: -180 ~ 180 */
    '''
    srcPATH = get_path(srcDir, sDTime, eDTime)
    gtrkDim = [
        get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
        for path in srcPATH
    ]

    DTime, Lat, Lon = list(zip(*gtrkDim))

    Granule = []
    for dtime, lat, lon, path in zip(DTime, Lat, Lon, srcPATH):
        mskLat = ma.masked_outside(lat, BBox[0][0], BBox[1][0]).mask
        mskLon = ma.masked_outside(lon, BBox[0][1], BBox[1][1]).mask
        mskTime = ma.masked_outside(dtime, sDTime, eDTime).mask

        #mask = (mskLat + mskLon).any(1) + mskTime
        mask = (mskLat + mskLon).all(1) + mskTime

        if not mask.all():
            idx = ma.array(arange(dtime.size), 'int', mask=mask).compressed()
            Granule.append([path, dtime[idx], lat[idx], lon[idx], idx])
            print('* [V] ground track dimension (%s): %s' % (self.cached, path))
        else:
            print('* [_] ground track dimension (%s): %s' % (self.cached, path))

    summary = '| [{}] granules intersects domain {} out of [{}] total between ({}-{}) |\n' \
              .format(len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime)
    line = '+' + '-' * len(summary[3:]) + '+\n'
    print(line + summary + line)
    return Granule
def onselect(ymin, ymax):
    indmin = np.argmin(abs(eV_To_nm / self.wavelenght - ymin))
    indmax = np.argmin(abs(eV_To_nm / self.wavelenght - ymax))
    # ## thisx = x[indmin:indmax]
    if abs(indmax - indmin) < 1:
        return
    # indmin, indmax = np.sort((indmax,indmin))
    # thiswave = wavelenght[indmin:indmax]
    # mask = np.zeros(hypSpectrum.shape)
    # mask[:,:,indmin:indmax] = 1
    # hypimage=np.sum(hypSpectrum*mask,axis=2)
    # hypimage -= hypimage.min()
    # lumimage.set_array(hypimage)
    self.wavelenght = ma.masked_outside(self.wavelenght,
                                        eV_To_nm / ymin, eV_To_nm / ymax)
    hypmask = np.resize(self.wavelenght.mask, self.hypSpectrum.shape)
    self.hypSpectrum = ma.masked_array(self.hypSpectrum, mask=hypmask)
    self.hypimage = np.sum(self.hypSpectrum, axis=2)
    self.hypimage -= self.hypimage.min()
    self.lumimage.set_array(self.hypimage)
    self.cx.set_ylim(eV_To_nm / self.wavelenght.max(),
                     eV_To_nm / self.wavelenght.min())
    self.fig.canvas.draw_idle()
def makeFluxSigMask(flux=None, minThresh=2, maxThresh=5):
    """
    Compute the mean total integrated flux value and its standard deviation.
    Find all pixels with a flux in between min/max thresholds and mask them.

    Parameters
    ----------
    flux: array_like
        The 2D array of the total integrated fluxes.
    min/maxThresh: int
        Sigma limit thresholds for the min/max.

    Return
    ------
    Boolean mask.
    """
    sigma = ma.std(flux)
    ave = ma.mean(flux)

    if sigma > ave:
        intervalMin = ave
    else:
        intervalMin = ave - (minThresh * sigma)
    intervalMax = ave + (maxThresh * sigma)

    maskedOutside = ma.masked_outside(flux, intervalMin, intervalMax)
    maskedZeros = ma.masked_where(maskedOutside == 0, maskedOutside,
                                  copy=False)

    return ma.getmask(maskedZeros)
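# Hedged usage sketch for makeFluxSigMask() on a toy flux image; the values
# are illustrative.
import numpy as np
import numpy.ma as ma

flux = ma.masked_invalid(np.array([[0.0, 1.0, 2.0],
                                   [3.0, 4.0, 50.0]]))
mask = makeFluxSigMask(flux=flux, minThresh=2, maxThresh=5)
# True marks pixels outside the [mean - 2*sigma, mean + 5*sigma] window
# (or below the mean when sigma > mean), plus exact zeros.
print(mask)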
def integ(self, nu_min, nu_max, t, freqs, flux):
    '''This function returns the integrated light curve in the given
    frequency band [nu_min, nu_max]
    '''
    licur = np.zeros_like(t)
    if nu_min < freqs[0]:
        print("nu_min =", nu_min, "\nminimum frequency in array =", freqs[0])
        return licur
    if nu_max > freqs[-1]:
        print("nu_max =", nu_max, "\nmaximum frequency in array =", freqs[-1])
        return licur
    if nu_max == nu_min:
        for i in range(t.size):
            licur[i] = np.exp(
                np.interp(np.log(nu_max), np.log(freqs),
                          np.log(flux[i, :], where=(flux[i, :] != 0.0))))
    else:
        nu_mskd = ma.masked_outside(freqs, nu_min, nu_max)
        nus = nu_mskd.compressed()
        Fnu = flux[:, ~nu_mskd.mask] / nus
        for i in range(t.size):
            # NOTE: the integral is logarithmic, therefore the nus
            # multiplying Fnu
            licur[i] = sci_integ.simps(nus * Fnu[i, :], x=np.log(nus))
    return licur
def Fill2ThetaAzimuthMap(masks, TA, tam, image):
    'Needs a doc string'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]), ma.getdata(TA[0]), ma.getdata(TA[2])))    #azimuth, 2-theta, dist
    tax, tay, tad = np.dsplit(TA, 3)    #azimuth, 2-theta, dist**2/d0**2
    for tth, thick in rings:
        tam = ma.mask_or(
            tam.flatten(),
            ma.getmask(ma.masked_inside(tay.flatten(),
                                        max(0.01, tth - thick / 2.),
                                        tth + thick / 2.)))
    for tth, azm, thick in arcs:
        tamt = ma.getmask(ma.masked_inside(tay.flatten(),
                                           max(0.01, tth - thick / 2.),
                                           tth + thick / 2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(), azm[0], azm[1]))
        tam = ma.mask_or(tam.flatten(), tamt * tama)
    taz = ma.masked_outside(image.flatten(), int(Zlim[0]), Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(), ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))    #azimuth
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))    #2-theta
    taz = ma.compressed(ma.array(taz.flatten(), mask=tam))    #intensity
    tad = ma.compressed(ma.array(tad.flatten(), mask=tam))    #dist**2/d0**2
    tabs = ma.compressed(ma.array(tabs.flatten(), mask=tam))  #ones - later used for absorption corr.
    return tax, tay, taz, tad, tabs
def computeAveragesUsingNumpy():
    global sizeX, sizeY, sizeZ
    flattenedArrays = []
    for fileName in fileNames:
        fpath = os.path.join(basepath, fileName)
        print('processing %s' % fpath)
        year = fileName.split('_')[-1][:-4]
        dataset = gdal.Open(fpath)
        sumArray = ma.zeros((dataset.RasterYSize, dataset.RasterXSize))
        total = 0
        count = 0
        numBands = dataset.RasterCount
        for bandId in range(numBands):
            band = ma.masked_outside(
                dataset.GetRasterBand(bandId + 1).ReadAsArray(),
                VALUE_RANGE[0], VALUE_RANGE[1])
            sumArray += band
        sumArray /= numBands
        total = ma.sum(ma.sum(sumArray))
        count = sumArray.count()
        minCell = ma.min(sumArray)
        maxCell = ma.max(sumArray)
        sizeX = dataset.RasterXSize
        sizeY = dataset.RasterYSize
        flattenedArrays.append(
            np.ndarray.flatten(sumArray[::-1, :], 0).astype(np.dtype(np.int32)))
    sizeZ = len(flattenedArrays)
    return np.ma.concatenate(flattenedArrays)
def Fph(nu_min, nu_max, freqs, Fnu):
    '''Calculate the photon flux for the frequency band [nu_min, nu_max]
    from a given flux density.

    Input: nu_min, nu_max: scalars
           freqs: array
           Fnu: array
    Output: photon flux: scalar
            spectral indices: array
    '''
    if nu_min < freqs[0] or nu_max > freqs[-1]:
        print('Error: nu_min and nu_max outside frequencies array')
        return
    nu_mskd = ma.masked_outside(freqs, nu_min, nu_max)
    nus = nu_mskd.compressed()
    flux = Fnu[~nu_mskd.mask] / nus
    num_nus = len(nus)
    integral = 0.0
    pwli = pwlf.PwlInteg()
    for i in range(num_nus - 1):
        if (flux[i] > 1e-100) & (flux[i + 1] > 1e-100):
            s = -np.log(flux[i + 1] / flux[i]) / np.log(nus[i + 1] / nus[i])
            integral += flux[i] * nus[i] * pwli.P(nus[i + 1] / nus[i], s)
    return nus[:-1], flux[:-1], integral / mbs.hPlanck
def get_response(geojson, region, local=False):
    if local:
        config = util.get_config('config.local.yml')
    else:
        config = util.get_config()
    data_source = config['vrt'][region]
    dataset_names = util.get_dataset_names(config, region)
    with Env(GDAL_DISABLE_READDIR_ON_OPEN=True):
        with open(data_source) as src:
            for feature in geojson['features']:
                geom_latlng = deepcopy(feature['geometry'])
                proj_string = src.profile['crs'].to_proj4()
                transformer = Transformer.from_crs('epsg:4326', proj_string,
                                                   always_xy=True)
                geom_transformed = util.transform_geom(geom_latlng, transformer)
                geom = [geom_transformed]
                out_image, out_transform = mask(src, geom, pad=True, crop=True)
                # TODO this might need to be refactored
                arr = masked_outside(out_image, 0.0, 100.0)
                if 'properties' not in feature:
                    feature['properties'] = {}
                feature['properties']['mean'] = {}
                for i in range(0, len(arr)):
                    index_name = dataset_names[i]
                    mean = get_zonal_stat(arr[i])
                    feature['properties']['mean'][index_name] = mean
    return geojson
def mask_unphysical(self, data):
    """Mask data array where values are outside physically valid range."""
    try:
        return ma.masked_outside(data, self.valid_range[0],
                                 self.valid_range[1])
    except AttributeError:
        return data
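# Minimal usage sketch: mask_unphysical() works on any object exposing a
# (min, max) `valid_range`; without that attribute the AttributeError
# branch hands the data back unchanged. The Channel class is illustrative.
import numpy as np
import numpy.ma as ma

class Channel:
    def __init__(self, valid_range=None):
        if valid_range is not None:
            self.valid_range = valid_range

    def mask_unphysical(self, data):
        try:
            return ma.masked_outside(data, self.valid_range[0],
                                     self.valid_range[1])
        except AttributeError:
            return data

data = np.array([-5.0, 20.0, 250.0])
print(Channel((0.0, 100.0)).mask_unphysical(data))  # masks -5.0 and 250.0
print(Channel().mask_unphysical(data))              # no valid_range: unchanged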
def update(self, ymin=None, ymax=None):
    if (ymin is None) and (ymax is None):
        self.wavelenght = ma.masked_array(self.wavelenght.data, mask=False)
    else:
        self.wavelenght = ma.masked_outside(self.wavelenght.data,
                                            eV_To_nm / ymax, eV_To_nm / ymin)
    self.Emin = eV_To_nm / self.wavelenght.max()
    self.Emax = eV_To_nm / self.wavelenght.min()
    self.Spectrum.set_limitbar(self.wavelenght.min(), self.wavelenght.max())
    hypmask = np.resize(self.wavelenght.mask, self.hypSpectrum.shape)
    self.hypSpectrum = ma.masked_array(self.hypSpectrum.data, mask=hypmask)
    self.hypimage = np.nansum(self.hypSpectrum, axis=2)
    self.lumimage.set_array(self.hypimage)
    self.linescan = np.sum(self.hypSpectrum, axis=0)
    if self.normalize:
        self.linescan /= self.linescan.max()
    #self.im.set_array((self.linescan.T[:,:]).ravel())  # flat shading
    #self.im.set_array((self.linescan.T).ravel())  # gouraud shading
    self.linescanmap.set_clim(vmin=np.nanmin(self.linescan),
                              vmax=np.nanmax(self.linescan))
    self.hyperspectralmap.set_clim(vmin=np.nanmin(self.hypimage),
                                   vmax=np.nanmax(self.hypimage))
    self.cx.set_ylim(eV_To_nm / self.wavelenght.max(),
                     eV_To_nm / self.wavelenght.min())
    self.im.set_norm(self.linescanmap.norm)
    self.fig.canvas.draw_idle()
    self.Spectrum.update()
def calcBBScore(self, field, attempt=-1):
    tm = self.ClasData[attempt]
    mean = tm['BBVpp'].mean()
    lothresh = mean * self.MeasSettings['MinVppFracMean'][0]
    hithresh = mean * self.MeasSettings['MaxVppFracMean'][0]
    dat = []
    for r in range(0, len(self.meas_rows)):
        v = tm[tm['Row'] == r][field].values
        v_ma = ma.masked_outside(v, lothresh, hithresh)
        dat.append(v_ma)
    average = ma.masked_array(dat).mean(axis=0)
    bb = ma.compressed(average)
    mask = average.mask
    scores = []
    for c in self.Candidates:
        field = c + '_bbvpp'
        k = ma.compressed(
            ma.masked_array(self.bbvpp_kernels[field][0:len(average)],
                            mask=mask))
        # print('len(bb): ', len(bb), ' len(k): ', len(k))
        score = np.corrcoef(bb, k)[0, 1]
        scores.append(score)
    self.BBScores = scores
    #self.nBB = ma.count(average)
    return dat, average, scores
def corte_latitud(lat, dim, dimz, imin, imax):
    corte = np.zeros((dimz, dim))
    step = 1
    z0 = 0
    z1 = dimz - 1
    if tipo == 0:
        z0 = dimz - 1
        z1 = 0
        step = -1
    if variables[1][propiedades[0]] == -1:
        for i in range(z0, z1, step):
            aux = dataset.variables["R1"][propiedades[1], 1, i, :, lat]
            corte[i, :] = aux
    else:
        for i in range(z0, z1, step):
            aux = dataset.variables[variables[0][propiedades[0]]][propiedades[1], i, :, lat]
            corte[i, :] = aux
    v_m = np.nanmin(corte[:])
    try:
        corte[corte == v_m] = np.nan
    except:
        print("fallo")
    v1b = masked_inside(corte, imin, imax)
    v1a = masked_outside(corte, imin, imax)
    fig, ax = plt.subplots()
    fig.tight_layout()
    pa = ax.imshow(v1a, interpolation='nearest', cmap=matplotlib.cm.jet,
                   vmin=min_range.value, vmax=max_range.value)
    pb = ax.imshow(v1b, interpolation='nearest', cmap=matplotlib.cm.Pastel1,
                   vmax=3, vmin=3)
    cbar = plt.colorbar(pa, shrink=0.25)
def mask_tiles_to_bbox(self, min_lat, max_lat, min_lon, max_lon, tiles):
    for tile in tiles:
        tile.latitudes = ma.masked_outside(tile.latitudes, min_lat, max_lat)
        tile.longitudes = ma.masked_outside(tile.longitudes, min_lon, max_lon)

        # Or together the masks of the individual arrays to create the new mask
        data_mask = ma.getmaskarray(tile.times)[:, np.newaxis, np.newaxis] \
                    | ma.getmaskarray(tile.latitudes)[np.newaxis, :, np.newaxis] \
                    | ma.getmaskarray(tile.longitudes)[np.newaxis, np.newaxis, :]

        tile.data = ma.masked_where(data_mask, tile.data)

    return tiles
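# Sketch of the mask-broadcasting trick above: per-axis 1-D masks are
# promoted with np.newaxis and OR-ed into one (time, lat, lon) mask. The
# coordinate values here are illustrative.
import numpy as np
import numpy.ma as ma

times = ma.masked_outside(np.array([0, 5, 10]), 0, 6)
lats = ma.masked_outside(np.array([-80., 0., 80.]), -10., 10.)
lons = ma.masked_outside(np.array([100., 200.]), 0., 180.)
data_mask = (ma.getmaskarray(times)[:, np.newaxis, np.newaxis]
             | ma.getmaskarray(lats)[np.newaxis, :, np.newaxis]
             | ma.getmaskarray(lons)[np.newaxis, np.newaxis, :])
print(data_mask.shape)  # -> (3, 3, 2)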
def imshow_rango(v1, imin, imax):
    # Build one masked array with the values inside the range and another
    # with the values outside it, so the two can be drawn with different
    # color maps
    v1b = masked_inside(v1, imin, imax)
    v1a = masked_outside(v1, imin, imax)
    fig, ax = plt.subplots()
    fig.tight_layout()
    pa = ax.imshow(v1a, interpolation='nearest', cmap=matplotlib.cm.jet,
                   vmin=min_range.value, vmax=max_range.value)
    pb = ax.imshow(v1b, interpolation='nearest', cmap=matplotlib.cm.Pastel1,
                   vmax=3, vmin=3)
    cbar = plt.colorbar(pa, shrink=0.25)
    try:
        cbar.set_label(dataset.variables[variables[0][propiedades[0]]].units)
    except:
        cbar.set_label("Units not specified")
    plt.title(variables[0][propiedades[0]])
    plt.ylabel("Latitude")
    plt.xlabel("Longitude")
    plt.show()
    return fig
def mask_tiles_to_bbox_and_time(self, min_lat, max_lat, min_lon, max_lon,
                                start_time, end_time, tiles):
    for tile in tiles:
        tile.times = ma.masked_outside(tile.times, start_time, end_time)
        tile.latitudes = ma.masked_outside(tile.latitudes, min_lat, max_lat)
        tile.longitudes = ma.masked_outside(tile.longitudes, min_lon, max_lon)

        # Or together the masks of the individual arrays to create the new mask
        data_mask = ma.getmaskarray(tile.times)[:, np.newaxis, np.newaxis] \
                    | ma.getmaskarray(tile.latitudes)[np.newaxis, :, np.newaxis] \
                    | ma.getmaskarray(tile.longitudes)[np.newaxis, np.newaxis, :]

        tile.data = ma.masked_where(data_mask, tile.data)

    tiles[:] = [tile for tile in tiles if not tile.data.mask.all()]

    return tiles
def mixing(self, T, S, inds):
    self.T = T
    self.S = S
    self.inds = inds
    self.jumlah_masa_air = inds.shape[1]
    self.l = np.zeros((S.shape[0], S.shape[1], self.jumlah_masa_air))
    if self.jumlah_masa_air == 2:
        a = np.r_[self.inds]
        b = np.c_[self.T.ravel(), self.S.ravel()].T
    elif self.jumlah_masa_air == 3:
        a = np.r_[self.inds, np.ones((1, self.jumlah_masa_air))]
        b = np.c_[self.T.ravel(), self.S.ravel(),
                  np.ones(self.T.shape).ravel()].T
    else:
        print("------------------------------------------------------")
        print("-----------ERROR: NUMBER OF WATER MASSES--------------")
        print("-----------Please supply 2 or 3 water masses----------")
        print("------------------------------------------------------")
        import sys
        sys.exit()
    m = np.linalg.solve(a, b)
    for i in range(self.jumlah_masa_air):
        self.l[0:, 0:, i] = m[i].reshape(T.shape)
        self.l[0:, 0:, i] = ma.masked_outside(
            ma.masked_invalid(self.l[0:, 0:, i]), 0, 1)
        self.l[0:, 0:, i] = 100 * self.l[0:, 0:, i]
    return self.l
def assign_xcoords(self, xpts):
    """Assign coordinates on the x axis to a grid index. The first grid
    index is 0. Returns a list of indices as a masked array.
    """
    ix = np.floor(self.nx * (xpts - self.x0) / (self.x1 - self.x0))
    return ma.masked_outside(ix.astype(int), 0, self.nx - 1)
def search_granules(self, srcDir, sDTime, eDTime,
                    BBox=[[-90,-180],[90,180]], thresh=0.001):
    '''
    BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                /* lon: -180 ~ 180 */
    '''
    srcPATH = get_path(srcDir, sDTime, eDTime)
    gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
               for path in srcPATH]

    DTime, Lat, Lon = zip(*gtrkDim)

    Granule = []
    for dtime, lat, lon, path in map(None, DTime, Lat, Lon, srcPATH):
        mskLat = ma.masked_outside(lat, BBox[0][0], BBox[1][0]).mask
        mskLon = ma.masked_outside(lon, BBox[0][1], BBox[1][1]).mask
        mskTime = ma.masked_outside(dtime, sDTime, eDTime).mask

        #mask = (mskLat + mskLon).any(1) + mskTime
        mask = (mskLat + mskLon).all(1) + mskTime

        if not mask.all():
            idx = ma.array(arange(dtime.size), 'int', mask=mask).compressed()
            Granule.append([path, dtime[idx], lat[idx], lon[idx], idx])
            print '* [V] ground track dimension (%s): %s' % (self.cached, path)
        else:
            print '* [_] ground track dimension (%s): %s' % (self.cached, path)

    summary = '| [{}] granules intersects domain {} out of [{}] total between ({}-{}) |\n' \
              .format(len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime)
    line = '+' + '-'*len(summary[3:]) + '+\n'
    print line + summary + line
    return Granule
def eval(self, x, fill_value=0):
    """Return the histogram value at `x`."""
    mbins = ma.masked_outside(self.findbin(x), 0, self.hist.size - 1)
    value = ma.masked_where(mbins.mask, self.hist[mbins.filled(0)])

    if np.iterable(x):
        return value.filled(fill_value)
    else:
        return value.filled(fill_value).item()
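# Standalone sketch of the lookup pattern eval() uses: clamp out-of-range
# bin indices with masked_outside, look up with the mask-filled indices,
# then re-mask the looked-up values. The arrays here are illustrative,
# standing in for findbin() output and the histogram contents.
import numpy as np
import numpy.ma as ma

hist = np.array([5., 7., 9.])        # per-bin contents
bins = np.array([-1, 0, 2, 3])       # bin indices, some out of range
mbins = ma.masked_outside(bins, 0, hist.size - 1)
values = ma.masked_where(mbins.mask, hist[mbins.filled(0)])
print(values.filled(0))              # -> [0. 5. 9. 0.]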
def Find_k_nearest_neighbors(u, X_train, Y_train, K, Weights):
    distances = np.sqrt(np.dot((X_train - u)**2, Weights))
    indices_sort_distances = np.argsort(distances, axis=0)
    mask_k_nearest_neighbors = ma.masked_outside(indices_sort_distances,
                                                 K, float("Inf")).mask
    distances_k_nearest_neighbors = distances[mask_k_nearest_neighbors]
    Label_k_nearest_neighbors = Y_train[mask_k_nearest_neighbors]
    return distances_k_nearest_neighbors, Label_k_nearest_neighbors
def assign_ycoords(self, ypts):
    """Assign coordinates on the y-axis to a grid index. The first grid
    index is 0. Returns a list of indices as a numpy masked array,
    defining whether the points are inside the grid.
    """
    iy = np.floor(self.ny * (ypts - self.y0) / (self.y1 - self.y0))
    return ma.masked_outside(iy.astype(int), 0, self.ny - 1)
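# Toy run of the indexing pattern shared by grididx(), assign_xcoords()
# and assign_ycoords(): points are binned onto a grid, and indices that
# fall outside [0, n-1] come back masked. Grid bounds are illustrative.
import numpy as np
import numpy.ma as ma

x0, x1, nx = 0.0, 10.0, 5                 # grid from 0 to 10 with 5 cells
xpts = np.array([-1.0, 0.0, 4.9, 9.9, 10.0])
ix = np.floor(nx * (xpts - x0) / (x1 - x0)).astype(int)
print(ma.masked_outside(ix, 0, nx - 1))   # -> [-- 0 2 4 --]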
def _jprimes(x, i, x_bounds=None):
    r"""
    Helper function to return the j' indices for the master curve fit

    This function is a helper function for :py:func:`quality`. It is not
    supposed to be called directly.

    Parameters
    ----------
    x : mapping to ndarrays
        The x values.

    i : int
        The row index (finite size index)

    x_bounds : 2-tuple, optional
        bounds on x values

    Returns
    -------
    ret : mapping to ndarrays
        Has the same keys and shape as `x`. Its element ``ret[i'][j]`` is
        the j' such that :math:`x_{i'j'} \leq x_{ij} < x_{i'(j'+1)}`. If
        no such j' exists, the element is np.nan. Convert the element to
        int to use as an index.
    """
    j_primes = -np.ones_like(x)

    try:
        x_masked = ma.masked_outside(x, x_bounds[0], x_bounds[1])
    except (TypeError, IndexError):
        x_masked = ma.asanyarray(x)

    k, n = x.shape

    # indices of lower and upper bounds
    edges = ma.notmasked_edges(x_masked, axis=1)
    x_lower = np.zeros(k, dtype=int)
    x_upper = np.zeros(k, dtype=int)
    x_lower[edges[0][0]] = edges[0][-1]
    x_upper[edges[-1][0]] = edges[-1][-1]

    for i_prime in range(k):
        if i_prime == i:
            j_primes[i_prime][:] = np.nan
            continue

        jprimes = np.searchsorted(x[i_prime], x[i],
                                  side='right').astype(float) - 1
        jprimes[np.logical_or(jprimes < x_lower[i_prime],
                              jprimes >= x_upper[i_prime])] = np.nan
        j_primes[i_prime][:] = jprimes

    return j_primes
def ret_extract_a1mask(Lat=None, Lon=None, dtime=None, BBox=None,
                       sDTime=None, eDTime=None):
    if type(Lat) != bool:
        mskLat = ma.masked_outside(Lat, BBox[0][0], BBox[1][0]).mask
    else:
        mskLat = False

    if type(Lon) != bool:
        mskLon = ma.masked_outside(Lon, BBox[0][1], BBox[1][1]).mask
    else:
        mskLon = False

    if type(dtime) != bool:
        mskTime = ma.masked_outside(dtime, sDTime, eDTime).mask
    else:
        mskTime = False

    mask = (mskLat + mskLon).all(1) + mskTime
    return mask
def clip(self, value):
    """
    Remove outliers and replace them with NaN

    :param value: array of numbers
    :return: masked_array, out
    """
    # Masked array
    out = ma.masked_outside(value, self.ref_domain[0], self.ref_domain[1])
    # Replace outliers with NaN
    return ma.filled(out, np.nan)
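# Hedged sketch of the clip() logic on a (0.0, 1.0) reference domain
# (the domain and values are illustrative).
import numpy as np
import numpy.ma as ma

ref_domain = (0.0, 1.0)
value = np.array([-0.2, 0.5, 1.7])
out = ma.masked_outside(value, ref_domain[0], ref_domain[1])
print(ma.filled(out, np.nan))  # -> [nan 0.5 nan]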
def make_box(ra, dec, a, ralist, declist, col=False):
    decmin, decmax, ramin, ramax = dec - a, dec + a, ra - a, ra + a
    ra_range = ma.masked_outside(ralist, ramin, ramax)
    dec_range = ma.masked_outside(declist, decmin, decmax)
    declist = dec_range[np.where((ra_range.mask == False)
                                 & (dec_range.mask == False))]
    ralist = ra_range[np.where((ra_range.mask == False)
                               & (dec_range.mask == False))]
    if col:
        col = col[np.where((ra_range.mask == False)
                           & (dec_range.mask == False))]
        return (ralist, declist, col)
    else:
        return (ralist, declist)

# # Popen("echo Hello")
# plt.scatter([1,2,5],[2,3,3])
# plt.show()
def ueval(self, x, fill_value=0, fill_err=0):
    """Return the histogram value and uncertainty at `x`."""
    mbins = ma.masked_outside(self.findbin(x), 0, self.hist.size - 1)
    value, err = ma.masked_where(mbins.mask, self.hist[mbins.filled(0)]), \
                 ma.masked_where(mbins.mask, self.errs[mbins.filled(0)])

    if np.iterable(x):
        return uarray((value.filled(fill_value), err.filled(fill_err)))
    else:
        return ufloat((value.filled(fill_value).item(),
                       err.filled(fill_err).item()))
def apply_range(cube_coord):
    if isinstance(cube_coord, iris.cube.Cube):
        data = cube_coord.data.squeeze()
    elif isinstance(cube_coord, (iris.coords.AuxCoord, iris.coords.Coord)):
        data = cube_coord.points.squeeze()

    actual_range = cube_coord.attributes.get("actual_range")
    if actual_range is not None:
        vmin, vmax = actual_range
        data = ma.masked_outside(data, vmin, vmax)
    return data
def Fill2ThetaMap(data, TA, image):
    'Needs a doc string'
    import numpy.ma as ma
    Zmin = data['Zmin']
    Zmax = data['Zmax']
    tax, tay = TA    # 2-theta & yaxis
    taz = ma.masked_outside(image.flatten() - Zmin, 0, Zmax - Zmin)
    tam = ma.getmask(taz)
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))
    taz = ma.compressed(ma.array(taz.flatten(), mask=tam))
    del(tam)
    return tax, tay, taz
def mk_epc_id_25bins(a3epc, a2pc_edge):
    NEM_USE = 3
    ny, nx, nz = a3epc.shape
    a2id_db = np.zeros([ny, nx], np.int32)
    for iem in range(NEM_USE):
        a1bin = a2pc_edge[iem]
        a2idTmp = np.digitize(a3epc[:, :, iem], a1bin, right=False) - 1
        a2idTmp = ma.masked_outside(a2idTmp, 0, 24)
        a2id_db = a2id_db + a2idTmp * pow(25, NEM_USE - 1 - iem)
    a2id_db = a2id_db.filled(-9999)
    return a2id_db
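# Worked toy example of the base-25 ID encoding in mk_epc_id_25bins():
# each EPC component is digitized into one of 25 bins and the bin numbers
# become base-25 digits (25**2, 25**1, 25**0). Edges and values here are
# illustrative, not the real EPC database bins.
import numpy as np
import numpy.ma as ma

edges = np.linspace(0.0, 1.0, 26)       # 25 bins per component
epc = np.array([0.02, 0.50, 0.98])      # one pixel, three components
d = ma.masked_outside(np.digitize(epc, edges, right=False) - 1, 0, 24)
dbid = sum(int(d[i]) * 25 ** (2 - i) for i in range(3))
print(d.tolist(), dbid)                 # -> [0, 12, 24] 324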
def eucdistance_numexpr(self, f, coords, x_grid, y_grid, z_grid, xt, yt, zt):
    """ Euclidean distance between a point and the closest point of the
    surface """
    NS = 10    # Number of voxel subdivisions
    # coords is coordinates in the tree
    position = self.pos(coords)
    mindist = 1000
    minxpos = (0, 0, 0)
    # position is coordinates in the plane; the f_xyz values are the
    # field at the eight voxel corners
    f000 = f[position[0], position[2], position[4]]
    f001 = f[position[0], position[2], position[5]]
    f010 = f[position[0], position[3], position[4]]
    f011 = f[position[0], position[3], position[5]]
    f100 = f[position[1], position[2], position[4]]
    f101 = f[position[1], position[2], position[5]]
    f110 = f[position[1], position[3], position[4]]
    f111 = f[position[1], position[3], position[5]]
    fac0 = f000 - f100
    fac1 = f000 - f010 - f100 + f110
    fac2 = f000 - f001 - f100 + f101
    fac3 = f000 - f001 - f010 + f011 - f100 + f101 + f110 - f111
    fac4 = f000 + f001 + f010 - f011
    x, y = np.meshgrid(np.linspace(0.0, 1.0, NS), np.linspace(0.0, 1.0, NS))
    # numexpr
    z = ne.evaluate(
        "(fac0*x-(fac1*x-f000+f010)*y-f000)/(fac2*x-(fac3*x-fac4)*y-f000+f001)")
    scalx = x_grid[position[1]] - x_grid[position[0]]
    scaly = y_grid[position[3]] - y_grid[position[2]]
    scalz = z_grid[position[5]] - z_grid[position[4]]
    x0 = x_grid[position[0]]
    y0 = y_grid[position[2]]
    z0 = z_grid[position[4]]
    dist = ne.evaluate(
        '(x*scalx+x0-xt)**2+(y*scaly+y0-yt)**2+(z*scalz+z0-zt)**2')
    mdist = ma.masked_outside(dist, 0.0, 1.0)
    # Find minimum distance
    mindist = mdist.min()
    # ACHTUNG. These indices may be switched
    (miniy, minix) = np.unravel_index(mdist.argmin(), (NS, NS))
    minxpos = (x[miniy, minix] * scalx + x0,
               y[miniy, minix] * scaly + y0,
               z[miniy, minix] * scalz + z0)
    return (minxpos, mindist)
def clean_outliers(self):
    """
    Function to remove outliers.

    Parameters
    ----------
    self.outlier_perc : integer
        Percentile value for mstats.scoreatpercentile function.
        Mask all values greater than this value.
    """
    # Outliers using percentiles - num_rows * [min, max]
    outlier_all = ma.array(
        [[mstats.scoreatpercentile(self.xs[i, :], 100 - self.outlier_perc),
          mstats.scoreatpercentile(self.xs[i, :], self.outlier_perc)]
         for i in xrange(self.rows_N)])
    self.xs = ma.array(
        [ma.hstack((ma.masked_outside(self.xs[i, :-self.keep_n_values],
                                      outlier_all[i, 0], outlier_all[i, 1]),
                    self.xs[i, -self.keep_n_values:]))
         for i in xrange(self.rows_N)])
def hotspots(timestep, location, distance=5, zFilter_lt=1, minpixels=5,
             task_id=None, task_name=None):
    '''
    Run a hotspot analysis at a given timestep and location
    '''
    logging.info('Starting...')
    start = time.time()
    filename = stageData(timestep, location)
    raster = gdal.Open(filename)
    rarray = raster.ReadAsArray()
    xyz = pandas.DataFrame(gdal2points(filename))
    logging.info('Cleaning up files...')
    cleanup(filename)
    y = numpy.array(xyz.z)
    y_in = ma.masked_outside((10**((14.54 + y) / 10.0)), 60, 2500)
    ij = ma.masked_array(xyz.ij, mask=y_in.mask).compressed()
    data = numpy.array(ij.tolist())
    logging.info('Doing GetisOrd...')
    Z_array = applyResults(GetisOrd(y_in.compressed(), data, distance).Zs,
                           xyz.ij, y_in.mask, rarray.shape)
    Z_array_ma = ma.masked_where(Z_array < zFilter_lt, Z_array)
    group, idx = ndimage.measurements.label(ma.filled(Z_array_ma, 0))
    group = ma.masked_where(Z_array < zFilter_lt, group)
    orig_xy = numpy.array(xyz.xy).reshape(rarray.shape)
    outdata = []
    for i in range(idx):
        if idx > 0:
            points = ma.masked_where(group != i, orig_xy)
            values = ma.masked_where(group != i, rarray).compressed()
            zvalues = ma.masked_where(group != i, Z_array).compressed()
            if len(values) >= minpixels:
                stats = {
                    'n': int(len(values)),
                    'max': float(values.max()),
                    'min': float(values.min()),
                    'mean': float(values.mean()),
                    'std': float(values.std()),
                    'range': float(values.max() - values.min()),
                    'sum': float(values.sum()),
                    'median': float(numpy.median(values)),
                    'zmax': float(zvalues.max()),
                    'zmin': float(zvalues.min()),
                    'zmean': float(zvalues.mean()),
                    'zmedian': float(numpy.median(zvalues)),
                    'zstd': float(zvalues.std()),
                    'tsid': i,
                    'task_id': task_id
                }
                cvhull = MultiPoint(list(points.compressed())).convex_hull
                stats.update({'timestep': timestep, 'loc': location})
                outdoc = json.loads(geojson.dumps(geojson.Feature(
                    id=int(str(timestep.replace('.', '')) + str(i).zfill(3)),
                    geometry=json.loads(geojson.dumps(cvhull)),
                    properties=stats)))
                con['bioscatter']['pysal'].insert(outdoc)
    logging.info('It took us %s seconds to process GetisOrd*'
                 % (time.time() - start))
    return 'Success'
def eval(self, x, fill_value=0):
    """
    Return the histogram value at `x`. See findbin().
    """
    bins = self.findbin(x)
    mbins = [ma.masked_outside(bins[i], 0, self.hist[i].size - 1, False)
             for i in range(len(bins))]
    value = ma.array(
        self.hist[[mbins[i].filled(0) for i in range(len(mbins))]],
        mask=np.logical_or.reduce([ma.getmaskarray(mbins[i])
                                   for i in range(len(mbins))]))
    return value.filled(fill_value)
def filter_offscreen(data_wide, limit=0, fill=np.nan):
    """
    Off-screen data filter. Replaces invalid samples with /fill/
    @author: Raimondas Zemblys
    @email: [email protected]
    """
    data = np.copy(data_wide)
    for _dir in ['x', 'y']:
        interval = np.min(data['_'.join(('target_angle', _dir))]) - limit, \
                   np.max(data['_'.join(('target_angle', _dir))]) + limit
        for eye in parseTrackerMode(data['eyetracker_mode'][0]):
            mask = ma.getmask(ma.masked_outside(
                data['_'.join((eye, 'angle', _dir))],
                interval[0], interval[1]))
            data['_'.join((eye, 'angle', _dir))][mask] = fill
            data['_'.join((eye, 'gaze', _dir))][mask] = fill
    return data
def ueval(self, x, fill_value=0, fill_err=0):
    """
    Return the histogram value and uncertainty at `x`. See findbin().
    """
    bins = self.findbin(x)
    mbins = [ma.masked_outside(bins[i], 0, self.hist[i].size - 1, False)
             for i in range(len(bins))]
    valuemask = np.logical_or.reduce([ma.getmaskarray(mbins[i])
                                      for i in range(len(mbins))])
    filledbins = [mbins[i].filled(0) for i in range(len(mbins))]
    value, err = ma.array(self.hist[filledbins], mask=valuemask), \
                 ma.array(self.errs[filledbins], mask=valuemask)
    return uarray((value.filled(fill_value), err.filled(fill_err)))
def mask(x, xm, x0, x1):
    if numpy.ndim(xm) != 1:
        print "utility.mask(): array used to derive mask must be 1D"
        return(numpy.array([]))
    xmask = ma.masked_outside(xm, x0, x1)
    tmask = ma.getmask(xmask)
    if numpy.ndim(x) == 1:
        xnew = ma.array(x, mask=tmask)
        return(xnew.compressed())
    if numpy.ndim(x) == 2:
        for i in range(0, numpy.shape(x)[0]):
            xnew = ma.array(x[i, :], mask=tmask)
            xcmp = ma.compressed(xnew)
            if i == 0:
                print ma.shape(xcmp)[0]
                print numpy.shape(x)[0]
                xout = numpy.zeros((numpy.shape(x)[0], ma.shape(xcmp)[0]))
            xout[i, :] = xcmp
        return(xout)
    else:
        print "Utility.Mask: dimensions of input arrays are not acceptable"
        return(numpy.array([]))
def threshold_series(series, vmin=None, vmax=None):
    """
    Threshold a series by flagging with NaN values below `vmin` and above
    `vmax`.

    Examples
    --------
    >>> series = [0.1, 20, 30, 35.5, 34.9, 43.5, 34.6, 40]
    >>> threshold_series(series, vmin=30, vmax=40)
    masked_array(data = [-- -- 30.0 35.5 34.9 -- 34.6 40.0],
                 mask = [ True  True False False False  True False False],
           fill_value = 1e+20)
    <BLANKLINE>
    >>> from pandas import Series, date_range
    >>> series = Series(series, index=date_range('1980-01-19',
    ...                 periods=len(series)))
    >>> threshold_series(series, vmin=30, vmax=40)
    1980-01-19     NaN
    1980-01-20     NaN
    1980-01-21    30.0
    1980-01-22    35.5
    1980-01-23    34.9
    1980-01-24     NaN
    1980-01-25    34.6
    1980-01-26    40.0
    Freq: D, dtype: float64

    """
    if not vmin:
        vmin = min(series)
    if not vmax:
        vmax = max(series)

    masked = ma.masked_outside(series, vmin, vmax)
    if masked.mask.any():
        if isinstance(series, Series):
            series[masked.mask] = np.NaN
            return series
        return masked
def isOutlier(f):
    """
    Is Outlier

    Identifies single outliers based on a median & percentile filter.

    Parameters
    ----------
    f : Column to perform outlier rejection.

    Returns
    -------
    mask : Boolean array. True is outlier.
    """
    medf = nd.median_filter(f, size=4)
    resf = f - medf
    resf = ma.masked_invalid(resf)
    resfcomp = resf.compressed()
    lo, up = np.percentile(resfcomp, 0.1), np.percentile(resfcomp, 99.9)
    resf = ma.masked_outside(resf, lo, up, copy=True)
    mask = resf.mask
    return mask
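# Hedged usage sketch for isOutlier() on a light curve with one injected
# spike; assumes the imports the function relies on (numpy as np,
# numpy.ma as ma, scipy.ndimage as nd).
import numpy as np

f = np.sin(np.linspace(0, 10, 200))
f[100] += 5.0          # single-point outlier
mask = isOutlier(f)
print(mask[100])       # the spike falls outside the 0.1-99.9 percentile
                       # band of the filter residuals, so it is flagged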
def robust(data, f=1.5):
    """return a subset of the data with outliers robustly removed

    data - can be masked
    """
    if type(data) == np.ndarray:
        d = np.sort(data)
    else:
        d = np.sort(data).compressed()
    n = len(d)
    # integer division so the quartile positions are valid indices
    n1 = n // 4
    n2 = n // 2
    n3 = (3 * n) // 4
    q1 = d[n1]
    q2 = d[n2]
    q3 = d[n3]
    d = q3 - q1
    f1 = q1 - f * d
    f3 = q3 + f * d
    # median = np.median(d)
    # print "robust: Median: ", median
    # print "robust: n,Q1,2,3:",n,q1,q2,q3
    # print "robust: f1,f3:",f1,f3
    dm = ma.masked_outside(data, f1, f3)
    return dm
def decode(spec, result, mask=True, shape=True, unscale=True):
    """Create masked numpy array from the encoded data in a tile query's
    HTTP results."""
    t = gdal_numpy_mapping[spec['data_type']]
    s = base64.b64decode(result['data'])
    a = np.frombuffer(s, dtype=t)
    log.debug("decoding {ubid}:{x},{y}".format(**result))

    if mask and spec['data_fill']:
        log.debug("masking fill: {}".format(spec['data_fill']))
        a = ma.masked_equal(a, spec['data_fill'])

    if mask and spec['data_range']:
        v1, v2 = spec['data_range']
        log.debug("masking range: {}".format(spec['data_range']))
        a = ma.masked_outside(a, v1, v2)

    if shape and spec['data_shape']:
        log.debug("shaping: {}".format(spec['data_shape']))
        a = a.reshape(spec['data_shape'])

    if unscale and spec['data_scale']:
        log.debug("scaling: {}".format(spec['data_scale']))
        a = a * spec['data_scale']

    return a
def test_testOddFeatures(self):
    # Test of other odd features
    x = arange(20)
    x = x.reshape(4, 5)
    x.flat[5] = 12
    assert_(x[1, 0] == 12)
    z = x + 10j * x
    assert_(eq(z.real, x))
    assert_(eq(z.imag, 10 * x))
    assert_(eq((z * conjugate(z)).real, 101 * x * x))
    z.imag[...] = 0.0

    x = arange(10)
    x[3] = masked
    assert_(str(x[3]) == str(masked))
    c = x >= 8
    assert_(count(where(c, masked, masked)) == 0)
    assert_(shape(where(c, masked, masked)) == c.shape)
    z = where(c, x, masked)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is masked)
    assert_(z[7] is masked)
    assert_(z[8] is not masked)
    assert_(z[9] is not masked)
    assert_(eq(x, z))
    z = where(c, masked, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    z = masked_where(c, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    assert_(eq(x, z))
    x = array([1., 2., 3., 4., 5.])
    c = array([1, 1, 1, 0, 0])
    x[2] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    c[0] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)
    assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
    assert_(eq(masked_where(greater_equal(x, 2), x),
               masked_greater_equal(x, 2)))
    assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
    assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
    assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
    assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
    assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
    assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
    assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
    assert_(eq(masked_inside(array(list(range(5)),
                                   mask=[1, 0, 0, 0, 0]), 1, 3).mask,
               [1, 1, 1, 1, 0]))
    assert_(eq(masked_outside(array(list(range(5)),
                                    mask=[0, 1, 0, 0, 0]), 1, 3).mask,
               [1, 1, 0, 0, 1]))
    assert_(eq(masked_equal(array(list(range(5)),
                                  mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 0]))
    assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 1]))
    assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
               [99, 99, 3, 4, 5]))
    atest = ones((10, 10, 10), dtype=np.float32)
    btest = zeros(atest.shape, MaskType)
    ctest = masked_where(btest, atest)
    assert_(eq(atest, ctest))
    z = choose(c, (-x, x))
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)

    x = arange(6)
    x[5] = masked
    y = arange(6) * 10
    y[2] = masked
    c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
    cm = c.filled(1)
    z = where(c, x, y)
    zm = where(cm, x, y)
    assert_(eq(z, zm))
    assert_(getmask(zm) is nomask)
    assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
    z = where(c, masked, 1)
    assert_(eq(z, [99, 99, 99, 1, 1, 1]))
    z = where(c, 1, masked)
    assert_(eq(z, [99, 1, 1, 99, 99, 99]))
array = '_HF_Offset_Avg'
num_array = 6
shape_offset = 0
ymax, xmax = 60, 300
mean_length = 10
ylabel('Heat Flux (kW/m$^2$)', fontsize=20)
data_average = np.zeros(shape=(data_len, num_array))
for channel in data.columns[1:]:
    if any([substring in channel for substring in group]):
        if ('TC ' in channel) or ('HF ' in channel):
            if info['Replicate_Of'][test_name] == test_name:
                for i in range(0, int(info['Replicate_Num'][test_name])):
                    Num = int(test_name[-2:]) + i - 1
                    data2 = pd.read_csv(data_dir + info['Rep_Name'][Num] + '.csv')
                    data_sub[:, i] = data2[channel][:data_len]
                data_average[:, k] = ma.masked_outside(data_sub, -3000, 3000).mean(axis=1)
                k = k + 1
if 'TC Plume' in group:
    data_average = np.fliplr(data_average)
for i in range(data_average.shape[1] - shape_offset):
    y = pd.rolling_mean(data_average[:, i], mean_length, center=True)
    plot(time, y, color=colors[i], marker=markers[i], markevery=10,
         ms=8, label=labels[i])
ax1 = gca()
xlabel('Time (s)', fontsize=20)
xticks(fontsize=16)
yticks(fontsize=16)
axis([0, xmax, 0, ymax])
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.7, box.height])
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# legend(numpoints=1,loc=1,ncol=1,fontsize=16)
for record in curs:
    # Depending on buy/sell status,
    # append to the proper list pricing and volume
    if record[1] == False:
        sellprice.append(record[0])
        sellcount.append(record[2])
    else:
        buyprice.append(record[0])
        buycount.append(record[2])

# process the buy side
if len(buyprice) > 1:
    top = scoreatpercentile(buyprice, 99)
    bottom = scoreatpercentile(buyprice, 5)
    # mask out the bottom 5% of orders so we can try to eliminate the BS
    buy_masked = ma.masked_outside(buyprice, bottom, top)
    tempmask = buy_masked.mask
    buycount_masked = ma.array(buycount, mask=tempmask, fill_value=0)
    ma.fix_invalid(buy_masked, mask=0)
    ma.fix_invalid(buycount_masked, mask=0)
    buyavg = np.nan_to_num(ma.average(buy_masked, 0, buycount_masked))
    buymean = np.nan_to_num(ma.mean(buy_masked))
    buymedian = np.nan_to_num(ma.median(buy_masked))
    buy_std_dev = np.nan_to_num(np.std(buy_masked))
    buy_95_percentile = top
if len(buyprice) < 4:
    buyavg = np.nan_to_num(ma.average(buyprice))
    buymean = np.nan_to_num(ma.mean(buyprice))
buyprice.sort()
buy = buyprice.pop()
def worker(job_json):
    """
    For every incoming message, this worker function is called. Be extremely
    careful not to do anything CPU-intensive here, or you will see blocking.
    Sockets are async under gevent, so those are fair game.
    """
    # Receive raw market JSON strings.
    market_json = zlib.decompress(job_json)
    # Un-serialize the JSON data to a Python dict.
    market_data = simplejson.loads(market_json)
    # Save to your choice of DB here.
    global dbConn
    query = PySQLPool.getNewQuery(dbConn)
    if market_data['resultType'] == 'orders':
        rows = market_data['rowsets']
        try:
            for row in rows:
                if len(row['rows']) == 0:
                    pass
                genTime = dateutil.parser.parse(row['generatedAt'])
                genTime = int(time.mktime(genTime.timetuple()))
                typeID = row['typeID']
                regionID = row['regionID']
                buyCount = []
                sellCount = []
                buyPrice = []
                sellPrice = []
                tempMask = []
                buyAvg = 0
                buyMean = 0
                buyTotal = 0
                sellAvg = 0
                sellMean = 0
                sellTotal = 0
                buy = 0
                sell = 0
                set = 0
                stuff = row['rows']
                search = "SELECT * FROM prices WHERE uniquek = '%s' AND dateTime > '%s'" % (str(regionID) + str(typeID), genTime)
                query.Query(search)
                if (len(query.record) == 1) or (genTime > int(time.mktime(time.gmtime()))):
                    pass
                for data in stuff:
                    if data[6] == True:
                        buyPrice.append(data[0])
                        buyCount.append(data[4] - data[1])
                    elif data[6] == False:
                        sellPrice.append(data[0])
                        sellCount.append(data[4] - data[1])
                    else:
                        pass
                if len(buyPrice) > 1:
                    top = stats.scoreatpercentile(buyPrice, 90)
                    bottom = stats.scoreatpercentile(buyPrice, 10)
                    buyMasked = ma.masked_outside(buyPrice, bottom, top)
                    tempMask = buyMasked.mask
                    buyCountMasked = ma.array(buyCount, mask=tempMask, fill_value=0)
                    ma.fix_invalid(buyMasked, mask=0)
                    ma.fix_invalid(buyCountMasked, mask=0)
                    buyAvg = ma.average(buyMasked, 0, buyCountMasked)
                    buyMean = ma.mean(buyMasked)
                    buyTotal = ma.sum(buyCountMasked)
                    if buyTotal == 0:
                        buyAvg = 0
                        buyMean = 0
                        set = 1
                if len(buyPrice) < 4:
                    buyAvg = ma.average(buyPrice)
                    buyMean = ma.mean(buyPrice)
                buyPrice.sort()
                buy = buyPrice.pop()
                if len(sellPrice) > 3:
                    top = stats.scoreatpercentile(sellPrice, 90)
                    bottom = stats.scoreatpercentile(sellPrice, 1)
                    sellMasked = ma.masked_outside(sellPrice, bottom, top)
                    tempMask = sellMasked.mask
                    sellCountMasked = ma.array(sellCount, mask=tempMask, fill_value=0)
                    ma.fix_invalid(sellMasked, mask=0)
                    ma.fix_invalid(sellCountMasked, mask=0)
                    sellAvg = ma.average(sellMasked, 0, sellCountMasked)
                    sellMean = ma.mean(sellMasked)
                    sellTotal = ma.sum(sellCountMasked)
                    if sellTotal == 0:
                        sellAvg = 0
                        sellMean = 0
                        set = 1
                if len(sellPrice) < 4:
                    sellMean = ma.mean(sellPrice)
                    sellTotal = ma.sum(sellPrice)
                sellPrice.sort()
                sellPrice.reverse()
                sell = sellPrice.pop()
                data = "REPLACE INTO prices SET uniquek = '%s', region = '%i', itemid = '%i', buymean = '%.2f', buyavg = '%.2f', sellmean = '%.2f', sellavg = '%.2f', buycount = '%i', sellcount = '%i', buy = '%.2f', sell = '%.2f', dateTime = '%i'" % (str(regionID) + str(typeID), regionID, typeID, np.nan_to_num(buyMean), np.nan_to_num(buyAvg), np.nan_to_num(sellMean), np.nan_to_num(sellAvg), np.nan_to_num(buyTotal), np.nan_to_num(sellTotal), buy, sell, genTime)
                query.Query(data)
        except:
            pass
import numpy.ma as ma

x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
ma.masked_outside(x, -0.3, 0.3)
ma.masked_outside(x, 0.3, -0.3)
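# Expected output for both calls (values outside [-0.3, 0.3] are masked;
# per the numpy docs, the order of the two bounds does not matter):
# masked_array(data=[--, --, 0.01, 0.2, --, --],
#              mask=[ True,  True, False, False,  True,  True],
#              fill_value=1e+20)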
    if event.dblclick == True:
        print "double click detected"
        line.figure.canvas.mpl_disconnect(self.cid)
        exit()

#extract data from netcdf
f = Dataset(args.inf)
meta = extract(f, args.variable, args.time, args.vert)
ncvar = meta[0]
meta_trans = extract_vertical(f, args.variable, args.time)[0]
ze = extract(f, "zeta", args.time, args.vert)[0]
mask_rho = extract(f, "mask_rho", args.time, args.vert)[0]
zeta = ma.masked_outside(ze, -1e+36, 1e+36)
Cs_r = extract_vert(f, 'Cs_r')
s_rho = extract_vert(f, 's_rho')
Cs_w = extract_vert(f, 'Cs_w')
s_w = extract_vert(f, 's_w')
Vtransform = extract_vert(f, 'Vtransform')
#Vstretching = extract_vert(args.inf, 'Vstretching')
N = len(s_rho)
Np = len(s_w)
print Np, "Np", N, "N"
#Tcline = extract_vert(args.inf, 'Tcline')
#theta_s = extract_vert(args.inf, 'theta_s')
#theta_b = extract_vert(args.inf, 'theta_b')
hc = extract_vert(f, 'hc')
#print Vtransform, Vstretching, N,Tcline, hc, theta_s, theta_b
h_m = extract(f, "h", args.time, args.vert)[0]
data = pd.read_csv(data_dir + test_name + '.csv')
data_sub_TC = np.zeros(shape=(int(min(info['End_Time'])),
                              int(info['Replicate_Num'][test_name])))
data_average = np.zeros(shape=(int(min(info['End_Time'])), num_array))
time = np.arange(0, int(min(info['End_Time'])), 1)

# Generate subsets for each setup
for group in sensor_groups:
    for channel in data.columns[1:]:
        if any([substring in channel for substring in group]):
            if 'TC ' in channel:
                if info['Replicate_Of'][test_name] == test_name:
                    for i in range(0, int(info['Replicate_Num'][test_name])):
                        Num = int(test_name[-2:]) + i - 1
                        data2 = pd.read_csv(data_dir + info['Rep_Name'][Num] + '.csv')
                        data_sub_TC[:, i] = data2[channel][:int(min(info['End_Time']))]
                    data_average[:, int(channel[10:]) - 1] = \
                        ma.masked_outside(data_sub_TC, 0.001, 3000).mean(axis=1)
fig = figure()
for i in range(data_average.shape[1]):
    y = data_average[:, i]
    plot(time, y, color=colors[i], marker=markers[i], markevery=50,
         ms=8, label=data.columns[i + 1])
ax1 = gca()
xlabel('Time (s)', fontsize=20)
ylabel('Temperature ($^{\circ}$C)', fontsize=20)
xticks(fontsize=16)
yticks(fontsize=16)
legend(numpoints=1, loc=1, ncol=1, fontsize=16)
legend(bbox_to_anchor=(1.06, 1.05))
axis([0, 300, 0, 1000])
grid(True)
savefig(plot_dir + test_name[:-2] + '_TC_Plume_Avg.pdf', format='pdf')
close()
#print depth
f.close()
f = Dataset("/home/mitya/models/NorROMS/Apps/Common/Grid/arctic4km_grd.nc")
mask_rho = extract(f, "mask_rho", args.time, args.vert)[0]
lat, lon = extract_lat_lon(f)
f.close()
ncvar = meta[0]
fillval = 1e+36

import matplotlib.pyplot as plt
from pylab import *
from matplotlib import colors, cm

# masked data
mx = ma.masked_outside(ncvar, -1e+36, 1e+36)
mx_trans = ma.masked_outside(meta_trans, -1e+36, 1e+36)
print "mx_trans", mx_trans.shape
cmap = plt.cm.spectral

# event handler
file = open(args.segments, "r")
column, lat_vert, lon_vert, vert, absx, lx, ly = [], [], [], [], [], [], []
dy = []
z_dict = {}
zr_dict = {}
h_dict = {}
var_dict = {}
#for i in range(Np):
#    z_dict[str(i)] = []
            z_w[k] = zeta + (zeta + h) * z0
    elif Vtransform == 1:    # not tested
        for k in range(Np):
            z0 = hc * s_w[k] * np.ones(h.shape) \
                 + (h - hc * np.ones(h.shape)) * Cs_w[k]
            z_w[k] = z0 + zeta * (np.ones(h.shape) + z0 / h)
    return z_w

#extract data from netcdf
f = Dataset(args.inf)
meta = extract(f, args.variable, args.time, args.vert)
meta_trans = extract_vertical(f, args.variable, args.time)[0]
meta = extract(f, args.variable, args.time, args.vert)
ze = extract(f, "zeta", args.time, args.vert)[0]
mask_rho = extract(f, "mask_rho", args.time, args.vert)[0]
zeta = ma.masked_outside(ze, -1e+36, 1e+36)
Cs_r = extract_vert(f, 'Cs_r')
s_rho = extract_vert(f, 's_rho')
Cs_w = extract_vert(f, 'Cs_w')
s_w = extract_vert(f, 's_w')
Vtransform = extract_vert(f, 'Vtransform')
Vstretching = extract_vert(f, 'Vstretching')
N = len(s_rho)
Np = len(s_w)
print Np, "Np", N, "N"
#Tcline = extract_vert(args.inf, 'Tcline')
#theta_s = extract_vert(args.inf, 'theta_s')
#theta_b = extract_vert(args.inf, 'theta_b')
hc = extract_vert(f, 'hc')
#print Vtransform, Vstretching, N,Tcline, hc, theta_s, theta_b
h_m = extract(f, "h", args.time, args.vert)[0]