def set_UVC(self, U, V, C=None):
    # We need to ensure we have a copy, not a reference
    # to an array that might change before draw().
    U = ma.masked_invalid(U, copy=True).ravel()
    V = ma.masked_invalid(V, copy=True).ravel()
    if C is not None:
        C = ma.masked_invalid(C, copy=True).ravel()
    for name, var in zip(('U', 'V', 'C'), (U, V, C)):
        if not (var is None or var.size == self.N or var.size == 1):
            raise ValueError(f'Argument {name} has a size {var.size}'
                             f' which does not match {self.N},'
                             ' the number of arrow positions')

    mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
    if C is not None:
        mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
        if mask is ma.nomask:
            C = C.filled()
        else:
            C = ma.array(C, mask=mask, copy=False)
    self.U = U.filled(1)
    self.V = V.filled(1)
    self.Umask = mask
    if C is not None:
        self.set_array(C)
    self._new_UV = True
    self.stale = True
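# A minimal, hedged sketch (not from any of the projects above) of the
# ma.mask_or semantics that set_UVC relies on: masks combine element-wise
# with logical OR, and with shrink=True an all-False result collapses to
# ma.nomask, which is exactly what the `mask is ma.nomask` test checks.
import numpy as np
import numpy.ma as ma

u = ma.masked_invalid([1.0, np.nan, 3.0])        # mask: [False, True, False]
v = ma.array([4.0, 5.0, 6.0])                    # nothing masked: v.mask is ma.nomask
print(ma.mask_or(u.mask, v.mask, shrink=True))   # [False  True False]
print(ma.mask_or(np.zeros(3, dtype=bool), v.mask,
                 shrink=True) is ma.nomask)      # True: all-False masks shrink away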
def __setslice__(self, i, j, value):
    "Sets the slice described by [i,j] to `value`."
    _localdict = self.__dict__
    d = self._data
    m = _localdict['_fieldmask']
    names = self.dtype.names
    if value is masked:
        for n in names:
            m[i:j][n] = True
    elif not self._hardmask:
        fval = filled(value)
        mval = getmaskarray(value)
        for n in names:
            d[n][i:j] = fval
            m[n][i:j] = mval
    else:
        mindx = getmaskarray(self)[i:j]
        dval = np.asarray(value)
        valmask = getmask(value)
        if valmask is nomask:
            for n in names:
                mval = mask_or(m[n][i:j], valmask)
                d[n][i:j][~mval] = value
        elif valmask.size > 1:
            for n in names:
                mval = mask_or(m[n][i:j], valmask)
                d[n][i:j][~mval] = dval[~mval]
                m[n][i:j] = mask_or(m[n][i:j], mval)
    self._fieldmask = m
def Fill2ThetaAzimuthMap(masks, TA, tam, image):
    'Needs a doc string'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]), ma.getdata(TA[0]),
                    ma.getdata(TA[2])))    # azimuth, 2-theta, dist
    tax, tay, tad = np.dsplit(TA, 3)       # azimuth, 2-theta, dist**2/d0**2
    for tth, thick in rings:
        tam = ma.mask_or(
            tam.flatten(),
            ma.getmask(ma.masked_inside(tay.flatten(),
                                        max(0.01, tth - thick / 2.),
                                        tth + thick / 2.)))
    for tth, azm, thick in arcs:
        tamt = ma.getmask(ma.masked_inside(tay.flatten(),
                                           max(0.01, tth - thick / 2.),
                                           tth + thick / 2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(), azm[0], azm[1]))
        tam = ma.mask_or(tam.flatten(), tamt * tama)
    taz = ma.masked_outside(image.flatten(), int(Zlim[0]), Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(), ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))    # azimuth
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))    # 2-theta
    taz = ma.compressed(ma.array(taz.flatten(), mask=tam))    # intensity
    tad = ma.compressed(ma.array(tad.flatten(), mask=tam))    # dist**2/d0**2
    tabs = ma.compressed(ma.array(tabs.flatten(), mask=tam))  # ones - later used for absorption corr.
    return tax, tay, taz, tad, tabs
def mask(self, mask):
    if isinstance(mask, str):
        fitsmask = fits.getdata(mask)
        if np.median(fitsmask) == 0:
            self.__pixeldata.mask = fitsmask >= self.maskthresh
        else:
            self.__pixeldata.mask = fitsmask <= self.maskthresh
    # if the mask is a separated array
    elif isinstance(mask, np.ndarray):
        self.__pixeldata.mask = mask
    # if the mask is not given
    elif mask is None:
        # check the fits file and try to find it as an extension
        if self.attached_to == 'PrimaryHDU':
            self.__pixeldata = ma.masked_invalid(self.__img.data)
        elif self.attached_to == 'ndarray':
            self.__pixeldata = ma.masked_invalid(self.__img)
        elif self.attached_to == 'HDUList':
            if self.header['EXTEND']:
                fitsmask = self.__img[1].data
                if np.median(fitsmask) == 0:
                    self.__pixeldata.mask = fitsmask >= self.maskthresh
                else:
                    self.__pixeldata.mask = fitsmask <= self.maskthresh
            else:
                self.__pixeldata = ma.masked_invalid(self.__img[0].data)
        # if a path is given where we find a fits file search on extensions
        else:
            try:
                ff = fits.open(self.attached_to)
                if 'EXTEND' in ff[0].header.keys():
                    if ff[0].header['EXTEND']:
                        try:
                            fitsmask = ff[1].data
                            if np.median(fitsmask) == 0:
                                self.__pixeldata.mask = fitsmask >= \
                                    self.maskthresh
                            else:
                                self.__pixeldata.mask = fitsmask <= \
                                    self.maskthresh
                        except IndexError:
                            self.__pixeldata = ma.masked_invalid(
                                self.__pixeldata.data)
            except IOError:
                self.__pixeldata = ma.masked_invalid(self.__pixeldata)
    else:
        masked = ma.masked_greater(self.__pixeldata, 65000.)
        if not np.sum(~masked.mask) < 1000.:
            self.__pixeldata = masked

    mask_lower = ma.masked_less(self.__pixeldata, -50.)
    mask_greater = ma.masked_greater(self.__pixeldata, 65000.)

    self.__pixeldata.mask = ma.mask_or(self.__pixeldata.mask,
                                       mask_lower.mask)
    self.__pixeldata.mask = ma.mask_or(self.__pixeldata.mask,
                                       mask_greater.mask)
def mask_array_I05(self, HIGH=273, LOW=230):
    if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
        new_I05_mask = npma.mask_or(
            self.I05_mask.mask,
            npma.masked_outside(self.__I05, LOW, HIGH).mask)
        new_I04_mask = npma.mask_or(
            self.I04_mask.mask,
            npma.masked_outside(self.__I05, LOW, HIGH).mask)
        self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
        self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
    else:
        self.I05_mask = npma.masked_outside(self.__I05, LOW, HIGH)
        self.I04_mask = npma.array(self.__I04, mask=self.I05_mask.mask)
def mask_diff_sat_sun_zenith(self, threshold=61):
    data = np.abs(self.solar_zenith - self.satellite_azimuth)
    if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
        new_I04_mask = npma.mask_or(
            self.I04_mask.mask,
            npma.array(self.I04, mask=data >= threshold).mask)
        new_I05_mask = npma.mask_or(
            self.I05_mask.mask,
            npma.array(self.I05, mask=data >= threshold).mask)
        self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
        self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
    else:
        self.I04_mask = npma.array(self.I04, mask=data >= threshold)
        self.I05_mask = npma.array(self.I05, mask=data >= threshold)
def Make2ThetaAzimuthMap(data, masks, iLim, jLim, times):    # most expensive part of integration!
    'Needs a doc string'
    # transforms 2D image from x,y space to 2-theta,azimuth space based on detector orientation
    pixelSize = data['pixelSize']
    scalex = pixelSize[0] / 1000.
    scaley = pixelSize[1] / 1000.
    tay, tax = np.mgrid[iLim[0] + 0.5:iLim[1] + .5,
                        jLim[0] + .5:jLim[1] + .5]    # bin centers not corners
    tax = np.asfarray(tax * scalex, dtype=np.float32)
    tay = np.asfarray(tay * scaley, dtype=np.float32)
    nI = iLim[1] - iLim[0]
    nJ = jLim[1] - jLim[0]
    t0 = time.time()
    # make position masks here
    frame = masks['Frames']
    tam = ma.make_mask_none((nI, nJ))
    if frame:
        tamp = ma.make_mask_none((1024 * 1024))
        tamp = ma.make_mask(
            pm.polymask(nI * nJ, tax.flatten(), tay.flatten(),
                        len(frame), frame,
                        tamp)[:nI * nJ]) ^ True    # switch to exclude around frame
        tam = ma.mask_or(tam.flatten(), tamp)
    polygons = masks['Polygons']
    for polygon in polygons:
        if polygon:
            tamp = ma.make_mask_none((1024 * 1024))
            tamp = ma.make_mask(
                pm.polymask(nI * nJ, tax.flatten(), tay.flatten(),
                            len(polygon), polygon, tamp)[:nI * nJ])
            tam = ma.mask_or(tam.flatten(), tamp)
    if tam.shape:
        tam = np.reshape(tam, (nI, nJ))
    spots = masks['Points']
    for X, Y, diam in spots:
        tamp = ma.getmask(
            ma.masked_less((tax - X)**2 + (tay - Y)**2, (diam / 2.)**2))
        tam = ma.mask_or(tam, tamp)
    times[0] += time.time() - t0
    t0 = time.time()
    TA = np.array(GetTthAzmG(tax, tay, data))    # includes geom. corr. as dist**2/d0**2 - most expensive step
    times[1] += time.time() - t0
    TA[1] = np.where(TA[1] < 0, TA[1] + 360, TA[1])
    return np.array(TA), tam    # 2-theta, azimuth & geom. corr. arrays & position mask
def MakeFrameMask(data, frame):
    pixelSize = data['pixelSize']
    scalex = pixelSize[0] / 1000.
    scaley = pixelSize[1] / 1000.
    blkSize = 512
    Nx, Ny = data['size']
    nXBlks = (Nx - 1) // blkSize + 1
    nYBlks = (Ny - 1) // blkSize + 1
    tam = ma.make_mask_none(data['size'])
    for iBlk in range(nXBlks):
        iBeg = iBlk * blkSize
        iFin = min(iBeg + blkSize, Nx)
        for jBlk in range(nYBlks):
            jBeg = jBlk * blkSize
            jFin = min(jBeg + blkSize, Ny)
            nI = iFin - iBeg
            nJ = jFin - jBeg
            tax, tay = np.mgrid[iBeg + 0.5:iFin + .5,
                                jBeg + .5:jFin + .5]    # bin centers not corners
            tax = np.asfarray(tax * scalex, dtype=np.float32)
            tay = np.asfarray(tay * scaley, dtype=np.float32)
            tamp = ma.make_mask_none((1024 * 1024))
            tamp = ma.make_mask(
                pm.polymask(nI * nJ, tax.flatten(), tay.flatten(),
                            len(frame), frame,
                            tamp)[:nI * nJ]) ^ True    # switch to exclude around frame
            if tamp.shape:
                tamp = np.reshape(tamp[:nI * nJ], (nI, nJ))
                tam[iBeg:iFin, jBeg:jFin] = ma.mask_or(
                    tamp[0:nI, 0:nJ], tam[iBeg:iFin, jBeg:jFin])
            else:
                tam[iBeg:iFin, jBeg:jFin] = True
    return tam.T
def mask_by_state(state_index, file_in, file_out):
    fh_states = Dataset(os.path.join("Data", "US_States", "usa_states.nc"),
                        "r")
    states_array = fh_states.variables["states_flag"][:]
    mask_array = ma.getmaskarray(
        ma.masked_where(states_array != state_index, states_array))
    fh_in = Dataset(file_in, "r")
    fh_out = Dataset(file_out, "w")
    for name, dim in fh_in.dimensions.items():
        fh_out.createDimension(name, len(dim))
    for v_name, varin in fh_in.variables.items():
        if v_name == 'lat' or v_name == 'lon':
            outVar = fh_out.createVariable(v_name, varin.datatype,
                                           varin.dimensions)
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            outVar[:] = varin[:]
        else:
            outVar = fh_out.createVariable(v_name, varin.datatype,
                                           varin.dimensions)
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            origi_mask = ma.getmaskarray(varin[:])
            masked_values = ma.array(varin[:],
                                     mask=ma.mask_or(origi_mask, mask_array))
            outVar[:] = masked_values
            if v_name == "soil_moisture":
                if masked_values.count() < 2000:
                    # too few valid pixels: close the handles before
                    # discarding the output file
                    fh_out.close()
                    fh_in.close()
                    os.remove(file_out)
                    return file_out[-11:-3]
    fh_out.close()
    fh_in.close()
def mask_by_mask_array(mask_array, file_in, file_out, reverse=False):
    if reverse:
        mask_array = ~mask_array
    fh_in = Dataset(file_in, "r")
    fh_out = Dataset(file_out, "w")
    for name, dim in fh_in.dimensions.items():
        fh_out.createDimension(name, len(dim))
    for v_name, varin in fh_in.variables.items():
        if v_name == 'lat' or v_name == 'lon':
            outVar = fh_out.createVariable(v_name, varin.datatype,
                                           varin.dimensions)
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            outVar[:] = varin[:]
        else:
            outVar = fh_out.createVariable(v_name, varin.datatype,
                                           varin.dimensions)
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            origi_mask = ma.getmaskarray(varin[:])
            outVar[:] = ma.array(varin[:],
                                 mask=ma.mask_or(origi_mask, mask_array))
    fh_out.close()
    fh_in.close()
def apply_intersection_mask_to_two_arrays(array1, array2):
    """
    Ensure two (optionally) masked arrays have the same mask.
    If both arrays are masked, the union of the two masks is applied to
    both, so that only points valid in both arrays remain unmasked.
    If one array is masked and the other is not, the mask from the masked
    array is applied to the unmasked array.
    If neither array is masked then both arrays are returned as masked
    arrays with an empty mask.

    :param array1: An (optionally masked) array
    :param array2: Another (optionally masked) array
    :return: Two masked arrays with a common mask
    """
    import numpy.ma as ma
    if isinstance(array1, ma.MaskedArray):
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = ma.mask_or(array1.mask, array2.mask)
        else:
            intersection_mask = array1.mask
    else:
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = array2.mask
        else:
            intersection_mask = False

    array1 = ma.array(array1, mask=intersection_mask)
    array2 = ma.array(array2, mask=intersection_mask)

    return array1, array2
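# A hedged usage sketch (values assumed, not from the original project):
# two arrays with different gaps end up sharing one combined mask, so
# element-wise math between them only uses points valid in both.
import numpy as np
import numpy.ma as ma

a = ma.masked_invalid([1.0, np.nan, 3.0, 4.0])
b = ma.masked_values([9.0, 8.0, -999.0, 6.0], -999.0)
a, b = apply_intersection_mask_to_two_arrays(a, b)
print(a.mask)                  # [False  True  True False]
print((a + b).compressed())    # only the two positions valid in both arrays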
def subplot(self, axes, subfigure):
    """
    Generate the subplot on the figure.

    :param axes: :class:`matplotlib.axes` instance where the plot will
                 be added.
    :param tuple subfigure: A tuple that holds all the required elements
                            of the plot, including the data, x- and
                            y-grids, title, contour levels, colorbar label
                            and map keyword arguments.
    """
    from mpl_toolkits.basemap import maskoceans
    data, xgrid, ygrid, title, lvls, cbarlab, map_kwargs = subfigure
    mapobj, mx, my = self.createMap(axes, xgrid, ygrid, map_kwargs)

    dmask = data.mask
    masked_data = maskoceans(xgrid, ygrid, data, inlands=False)
    omask = ma.getmask(masked_data)
    nmask = ma.mask_or(dmask, omask)
    masked_data.mask = nmask

    cmap = selectColormap(lvls)
    CS = mapobj.contourf(mx, my, masked_data, levels=lvls,
                         extend='both', cmap=cmap)
    CB = self.colorbar(CS, ticks=lvls[::2], ax=axes, extend='both')
    CB.set_label(cbarlab)
    axes.set_title(title)
    self.labelAxes(axes)
    self.addGraticule(axes, mapobj)
    self.addCoastline(mapobj)
    self.fillContinents(mapobj, fillcolor="#EEEEEE")
    self.addMapScale(mapobj)
def generate_temporal_neighboring_regions(in_file, search_path, out_folder,
                                          variable, mode, n_hist=None,
                                          n_cohort=None):
    fh_in = Dataset(in_file, "r")
    in_doy = datetime.strptime(in_file.split("/")[-1][:-3], "%Y%m%d").date()
    in_mask = ma.getmaskarray(fh_in.variables[variable][:])
    out_folder = get_out_path(out_folder)
    temp_candidates = [
        nc_file for nc_file in os.listdir(search_path)
        if nc_file.endswith(".nc")
    ]
    temp_candidates = sorted(
        temp_candidates,
        key=lambda x: datetime.strptime(x[:-3], '%Y%m%d'))[::-1]
    for nc_file in temp_candidates:
        nc_doy = datetime.strptime(nc_file[:-3], "%Y%m%d").date()
        doy_diff = (in_doy - nc_doy).days
        if (mode == "window" and (0 <= doy_diff <= n_hist)) \
                or (mode == "most_recent" and doy_diff > 0) \
                or (mode == "cohort" and (doy_diff // 12 == (n_cohort - 1))):
            fh_doy = Dataset(os.path.join(search_path, nc_file), "r")
            doy_mask = ma.mask_or(
                ma.getmaskarray(fh_doy.variables[variable][:]), in_mask)
            if ma.any(~doy_mask):
                print(in_file, "--", nc_file, doy_diff)
                fh_out = Dataset(os.path.join(out_folder, nc_file), "w")
                for name, dim in fh_doy.dimensions.items():
                    fh_out.createDimension(name, len(dim))
                for v_name, varin in fh_doy.variables.items():
                    if v_name == 'lat' or v_name == 'lon':
                        outVar = fh_out.createVariable(v_name, varin.datatype,
                                                       varin.dimensions)
                        outVar.setncatts(
                            {k: varin.getncattr(k) for k in varin.ncattrs()})
                        outVar[:] = varin[:]
                    else:
                        outVar = fh_out.createVariable(v_name, varin.datatype,
                                                       varin.dimensions)
                        outVar.setncatts(
                            {k: varin.getncattr(k) for k in varin.ncattrs()})
                        outVar[:] = ma.array(varin[:], mask=doy_mask)
                fh_out.close()
                if mode == "most_recent":
                    fh_doy.close()
                    break
            fh_doy.close()
    fh_in.close()
def set_UVC(self, U, V, C=None):
    U = ma.masked_invalid(U, copy=False).ravel()
    V = ma.masked_invalid(V, copy=False).ravel()
    mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
    if C is not None:
        C = ma.masked_invalid(C, copy=False).ravel()
        mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
        if mask is ma.nomask:
            C = C.filled()
        else:
            C = ma.array(C, mask=mask, copy=False)
    self.U = U.filled(1)
    self.V = V.filled(1)
    self.Umask = mask
    if C is not None:
        self.set_array(C)
    self._new_UV = True
def belowhorizon(z):
    """Return masked z values that are below the horizon.
    Below the horizon means either that z is negative or that
    z has a nonzero imaginary part.
    """
    imagz_ma = ma.getmaskarray(ma.masked_not_equal(z.imag, 0.))
    negz_ma = ma.getmaskarray(ma.masked_less(z, .0))
    belowhrz = ma.mask_or(imagz_ma, negz_ma)
    return belowhrz
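# A hedged sketch of belowhorizon (values assumed, not from the source):
# complex direction values flag positions below the horizon either through
# a negative real part (NumPy orders complex arrays lexicographically, real
# part first) or a nonzero imaginary part; mask_or combines the two flags.
import numpy as np
import numpy.ma as ma

z = np.array([0.8 + 0.0j, -0.2 + 0.0j, 0.1 + 0.3j])
print(belowhorizon(z))   # [False  True  True]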
def mask_using_I01_percentile(self, percentile=10):
    if self.is_valid:
        if self.I01 is None:
            raise ValueError("No I1 data present")
        I01_percentile = np.percentile(self.I01, 100 - percentile)
        if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
            new_I04_mask = npma.mask_or(
                self.I04_mask.mask,
                npma.array(self.I04, mask=self.I01 <= I01_percentile).mask)
            new_I05_mask = npma.mask_or(
                self.I05_mask.mask,
                npma.array(self.I05, mask=self.I01 <= I01_percentile).mask)
            self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
            self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
        else:
            self.I04_mask = npma.array(self.I04,
                                       mask=self.I01 <= I01_percentile)
            self.I05_mask = npma.array(self.I05,
                                       mask=self.I01 <= I01_percentile)
def __init__(self, x, y):
    x = masked_array(x, copy=False, subok=True, dtype=float_,
                     order="F").ravel()
    y = masked_array(y, copy=False, subok=True, dtype=float_,
                     order="F").ravel()
    if x.size != y.size:
        msg = "Incompatible size between observations (%s) and response (%s)!"
        raise ValueError(msg % (x.size, y.size))
    idx = x.argsort()
    self._x = x[idx]
    self._y = y[idx]
    self._mask = mask_or(self._x._mask, self._y._mask, copy=False)
def set_UVC(self, U, V, C=None):
    # We need to ensure we have a copy, not a reference
    # to an array that might change before draw().
    U = ma.masked_invalid(U, copy=True).ravel()
    V = ma.masked_invalid(V, copy=True).ravel()
    mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
    if C is not None:
        C = ma.masked_invalid(C, copy=True).ravel()
        mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
        if mask is ma.nomask:
            C = C.filled()
        else:
            C = ma.array(C, mask=mask, copy=False)
    self.U = U.filled(1)
    self.V = V.filled(1)
    self.Umask = mask
    if C is not None:
        self.set_array(C)
    self._new_UV = True
def mask_using_I01(self, reflectance_cutoff=80):
    """
    Use I01 band (if present) to mask dimmer pixels below a certain
    reflectance.
    """
    if self.I01 is None:
        raise ValueError("No I1 data present")
    if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
        new_I04_mask = npma.mask_or(
            self.I04_mask.mask,
            npma.array(self.I04, mask=self.I01 <= reflectance_cutoff).mask)
        new_I05_mask = npma.mask_or(
            self.I05_mask.mask,
            npma.array(self.I05, mask=self.I01 <= reflectance_cutoff).mask)
        self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
        self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
    else:
        self.I04_mask = npma.array(self.I04,
                                   mask=self.I01 <= reflectance_cutoff)
        self.I05_mask = npma.array(self.I05,
                                   mask=self.I01 <= reflectance_cutoff)
def unifyMsks3Mtrx(aMetrcECPflld, aMskIsfrmsSmGn, aMskdExpr, minDgr):
    """
    Input: aMetrcECPflld is a matrix with ECP values, or nan where 0|null.
    aMskIsfrmsSmGn is a matrix with a mask (0,1) for isoforms coming from
    the same gene. aMskdExpr is a matrix with a mask for genes whose
    correlation in expression is of interest (positive, for instance).
    minDgr is a degree number.
    Output: aMskDegreeGrtrN is the unified mask with only pairs following
    the previous description.
    NOTE: Unifies masks from three different sources. The first one is a
    matrix with ECP values with nan in place of 0|null. The other two
    matrices are masks with binary data. It retrieves a unified mask for
    genes with degrees higher than n.
    """
    aMsk = ma.masked_invalid(aMetrcECPflld).mask
    aMsk = ma.mask_or(aMsk, aMskIsfrmsSmGn)
    aMsk = ma.mask_or(aMsk, aMskdExpr)
    # --------------------------
    # mask nodes with a degree lower than minDgr
    aMskDegreeGrtrN = rtrnMskDegreeGrtrN(aMsk, minDgr)
    return aMskDegreeGrtrN
def mask_half(self, half="right"): blank_mask = np.zeros_like(self.__I05) if half == "top": idx = np.arange(0, int(len(self.__I05) / 2)) blank_mask[idx, :] = 1 if half == "left": idx = np.arange(0, int(len(self.__I05) / 2)) blank_mask[:, idx] = 1 if half == "bottom": idx = np.arange(int(len(self.__I05) / 2), len(self.__I05)) blank_mask[idx, :] = 1 if half == "right": idx = np.arange(int(len(self.__I05) / 2), len(self.__I05)) blank_mask[:, idx] = 1 blank_mask = blank_mask.astype("bool") if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"): new_I05_mask = npma.mask_or(self.I05_mask.mask, blank_mask) new_I04_mask = npma.mask_or(self.I04_mask.mask, blank_mask) self.I05_mask = npma.array(self.__I05, mask=new_I05_mask) self.I04_mask = npma.array(self.__I04, mask=new_I04_mask) else: self.I05_mask = npma.array(self.__I05, mask=blank_mask) self.I04_mask = npma.array(self.__I04, mask=blank_mask)
def mask_thin_cirrus(self, reflectance_cutoff=50):
    """
    Use M9 band (if present) as a mask for the I04,I05 bands above a given
    threshold. Ice present in high thin cirrus clouds will reflect light at
    a high altitude. In the longer-range bands I04,I05 this will make the
    pixel appear colder than the cloud top actually is.

    :param reflectance_cutoff: Mask all pixels with a thin cirrus
        reflectance above this
    :return: None
    """
    if self.M09 is None:
        raise ValueError("No M9 data present")
    if hasattr(self, "I04_mask") or hasattr(self, "I05_mask"):
        new_I04_mask = npma.mask_or(
            self.I04_mask.mask,
            npma.array(self.I04, mask=self.M09 >= reflectance_cutoff).mask)
        new_I05_mask = npma.mask_or(
            self.I05_mask.mask,
            npma.array(self.I05, mask=self.M09 >= reflectance_cutoff).mask)
        self.I04_mask = npma.array(self.__I04, mask=new_I04_mask)
        self.I05_mask = npma.array(self.__I05, mask=new_I05_mask)
    else:
        self.I04_mask = npma.array(self.I04,
                                   mask=self.M09 >= reflectance_cutoff)
        self.I05_mask = npma.array(self.I05,
                                   mask=self.M09 >= reflectance_cutoff)
def __init__(self, bare_dir, dem_dir, outdir):
    # loads the GDAL derived DEMs to produce the crop height model
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    os.path.join(dem_dir, bare_dir)
    bare = None
    crop_model = None
    for bare_dem in os.listdir(bare_dir):
        os.chdir(bare_dir)
        bare = LoadImage(bare_dem)
        bare_filtered = filters.median_filter(bare.stacked, size=(9, 9))
        bare_spatial = bare.spatial
        for survey in os.listdir(dem_dir):
            im_path = os.path.join(dem_dir, survey)
            os.chdir(im_path)
            for image in os.listdir(im_path):
                crop = LoadImage(image)
                crop_filtered = filters.median_filter(crop.stacked,
                                                      size=(3, 3))
                crop_model = crop_filtered - bare_filtered

                bare_mask = ma.masked_where(bare_filtered == -999,
                                            bare_filtered)
                crop_mask = ma.masked_where(crop_filtered == -999,
                                            crop_filtered)
                # combine these into a single mask so that anything masked
                # in either dataset will be masked in the combined output
                combined_mask = ma.mask_or(bare_mask.mask, crop_mask.mask)
                # use this mask to mask the crop model
                crop_model = ma.array(crop_model, mask=combined_mask)
                # convert back to a bog-standard numpy array and fill the
                # masked values
                crop_model = crop_model.filled(-999)

                name = survey + '_CropHeightModel.tif'
                self.writeimage(outdir, name, crop_model.shape[0],
                                crop_model.shape[1], crop_model,
                                bare_spatial)
def __setattr__(self, attr, val):
    "Sets the attribute attr to the value val."
    # newattr = attr not in self.__dict__
    try:
        # Is attr a generic attribute ?
        ret = object.__setattr__(self, attr, val)
    except Exception:
        # Not a generic attribute: exit if it's not a valid field
        fielddict = self.dtype.names or {}
        if attr not in fielddict:
            raise
    else:
        if attr in ['_mask', 'fieldmask']:
            self.__setmask__(val)
            return
        # Get the list of names ......
        _names = self.dtype.names
        if _names is None:
            _names = []
        else:
            _names = list(_names)
        # Check the attribute
        self_dict = self.__dict__
        if attr not in _names + list(self_dict):
            return ret
        if attr not in self_dict:
            # We just added this one
            try:
                # or this setattr worked on an internal attribute.
                object.__delattr__(self, attr)
            except Exception:
                return ret
    # Case #1.: Basic field ............
    base_fmask = self._fieldmask
    _names = self.dtype.names or []
    if attr in _names:
        if val is masked:
            fval = self.fill_value[attr]
            mval = True
        else:
            fval = filled(val)
            mval = getmaskarray(val)
        if self._hardmask:
            mval = mask_or(mval, base_fmask.__getattr__(attr))
        self._data.__setattr__(attr, fval)
        base_fmask.__setattr__(attr, mval)
        return
def approx(a, b, fill_value=True, rtol=1.e-5, atol=1.e-8):
    """Returns true if all components of a and b are equal subject to
    given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.  The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must be also.
    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m),
               fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
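# A hedged sketch (values assumed, and relying on the module-level numpy.ma
# imports the function expects) of how approx treats masked entries: with
# fill_value=True a slot masked in either input compares equal regardless
# of the underlying data, mirroring the mask_or of the two input masks.
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
b = ma.array([1.0, 99.0, 3.0 + 1e-9], mask=[False, False, False])
print(approx(a, b))   # [ True  True  True]: the masked slot is treated as equal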
def __init__(self, vals, vals_dmin, vals_dmax, mask=ma.nomask):
    super(UncertContainer, self).__init__()

    # If input data are already masked arrays, extract the unmasked data
    if ma.isMaskedArray(vals):
        vals = vals.data
    if ma.isMaskedArray(vals_dmin):
        vals_dmin = vals_dmin.data
    if ma.isMaskedArray(vals_dmax):
        vals_dmax = vals_dmax.data

    # Adjust negative values
    ineg = np.where(vals_dmin <= 0.0)
    vals_dmin[ineg] = TOL * vals[ineg]

    # Calculate weight based on fractional uncertainty
    diff = vals_dmax - vals_dmin
    diff_m = ma.masked_where(vals_dmax == vals_dmin, diff)
    self.vals = ma.masked_where(vals == 0.0, vals)

    self.wt = (self.vals / diff_m)**2
    self.uncert = diff_m / self.vals

    self.wt.fill_value = np.inf
    self.uncert.fill_value = np.inf

    assert np.all(self.wt.mask == self.uncert.mask)

    # Mask data if uncertainty is not finite or if any of the inputs were
    # already masked
    mm = ma.mask_or(self.wt.mask, mask)
    self.vals.mask = mm
    self.wt.mask = mm
    self.uncert.mask = mm
    self.dmin = ma.array(vals_dmin, mask=mm, fill_value=np.inf)
    self.dmax = ma.array(vals_dmax, mask=mm, fill_value=np.inf)

    self.mask = ma.getmaskarray(self.vals)
def calcTBScore(self, attempt=-1):
    field1 = 'BottomThick'
    field2 = 'TBVpp'
    tm = self.ClasData[attempt]
    loBTthresh = self.MeasSettings['MinBotThickNs'][0] / 1000
    hiBTthresh = self.MeasSettings['MaxBotThickNs'][0] / 1000
    loVppthresh = self.MeasSettings['MinTBVppInclusionThreshold'][0]
    hiVppthresh = self.MeasSettings['MaxTBVppInclusionThreshold'][0]
    botthick = []
    # alld = []
    for r in range(0, len(self.meas_rows)):
        bt = tm[tm['Row'] == r][field1].values
        bt_ma = ma.masked_outside(bt, loBTthresh, hiBTthresh)
        tbvpp = tm[tm['Row'] == r][field2].values
        tbvpp_ma = ma.masked_outside(tbvpp, loVppthresh, hiVppthresh)
        thismask = ma.mask_or(bt_ma.mask, tbvpp_ma.mask)
        bt = ma.masked_array(bt, mask=thismask)
        # thisd = [bt_ma,tbvpp_ma]
        # alld.append(thisd)
        botthick.append(bt)
    # return botthick,alld
    average = ma.masked_array((botthick)).mean(axis=0)
    thickness = ma.compressed(average)
    self.nTB = len(thickness)
    # if self.nTB == 0 :
    #     return
    mask = average.mask
    scores = []
    for c in self.Candidates:
        field = c + '_tofdelta'
        k = ma.compressed(
            ma.masked_array(self.tof_kernels[field][0:len(average)],
                            mask=mask))
        # print('len(bb): ', len(bb), ' len(k): ', len(k))
        score = np.corrcoef(thickness, k)[0, 1]
        scores.append(score)
    # self.TBScores = scores
    # self.nTB = ma.count(average)
    return botthick, average, scores
def make_cube_movie(source_key, colorbar_title, frame_dir,
                    filetag_suffix="", saveslice=None, saveslice_file=None,
                    outputdir="./", sigmarange=6., ignore=None,
                    multiplier=1., transverse=False, title=None,
                    sigmacut=None, logscale=False, physical=False,
                    convolve=False, tag=None):
    """Make a stack of spatial slice maps and animate them
    transverse plots along RA and freq and image plane is in Dec
    First mask any points that exceed `sigmacut`, and then report the
    extent of `sigmarange` away from the mean
    """
    datapath_db = data_paths.DataPath()
    cosmology = Cosmology()
    littleh = (cosmology.H0 / 100.0)
    beam_data = np.array([0.316148488246, 0.306805630985, 0.293729620792,
                          0.281176247549, 0.270856788455, 0.26745856078,
                          0.258910010848, 0.249188429031])
    freq_data = np.array([695, 725, 755, 785, 815, 845, 875, 905],
                         dtype=float)
    beam_fwhm = interp1d(freq_data, beam_data)
    freq_data_Hz = freq_data * 1.0e6

    if tag is None:
        tag = '_'.join(source_key.split(";"))
        tag = '-'.join(tag.split(":"))
        # for a given path
        #tag = ".".join(source_key.split(".")[:-1])  # extract root name
        #tag = tag.split("/")[-1]

    if title is None:
        title = "%(tag)s (i = %(index)d, "
        title += "freq = %(freq)3.1f MHz, z = %(redshift)3.3f, "
        title += "Dc=%(distance)3.0f cMpc)"

    print(tag)
    fileprefix = frame_dir + tag
    nlevels = 500

    if transverse:
        orientation = "_freqRA"
    else:
        orientation = "_RADec"

    # prepare the data
    #cube = algebra.make_vect(algebra.load(source_key)) * multiplier
    cube = datapath_db.fetch_multi(source_key) * multiplier
    if logscale:
        cube[cube < 0] = np.min(np.abs(cube))
        cube = np.log10(cube)

    isnan = np.isnan(cube)
    isinf = np.isinf(cube)
    maskarray = ma.mask_or(isnan, isinf)

    if ignore is not None:
        maskarray = ma.mask_or(maskarray, (cube == ignore))

    convolved = ""
    if convolve:
        convolved = "_convolved"
        beamobj = beam.GaussianBeam(beam_data, freq_data_Hz)
        cube = beamobj.apply(cube)

    if sigmacut:
        #np.set_printoptions(threshold=np.nan, precision=4)
        deviation = np.abs((cube - np.mean(cube)) / np.std(cube))
        extend_maskarray = (cube > (sigmacut * deviation))
        maskarray = ma.mask_or(extend_maskarray, maskarray)

    mcube = ma.masked_array(cube, mask=maskarray)

    try:
        whmaskarray = np.where(maskarray)[0]
        mask_fraction = float(len(whmaskarray)) / float(cube.size)
    except:
        mask_fraction = 0.

    print("fraction of map clipped: %f" % mask_fraction)
    (cube_mean, cube_std) = (mcube.mean(), mcube.std())
    print("cube mean=%g std=%g" % (cube_mean, cube_std))

    try:
        len(sigmarange)
        color_axis = np.linspace(sigmarange[0], sigmarange[1],
                                 nlevels, endpoint=True)
    except TypeError:
        if (sigmarange > 0.):
            color_axis = np.linspace(cube_mean - sigmarange * cube_std,
                                     cube_mean + sigmarange * cube_std,
                                     nlevels, endpoint=True)
        else:
            if saveslice is not None:
                color_axis = np.linspace(ma.min(mcube[saveslice, :, :]),
                                         ma.max(mcube[saveslice, :, :]),
                                         nlevels, endpoint=True)
            else:
                color_axis = np.linspace(ma.min(mcube), ma.max(mcube),
                                         nlevels, endpoint=True)

    print("using range: [%g, %g]" % (np.min(color_axis),
                                     np.max(color_axis)))

    freq_axis = cube.get_axis('freq')
    (ra_axis, dec_axis) = (cube.get_axis('ra'), cube.get_axis('dec'))

    runlist = []
    # TODO: make transverse work with gnuplot
    if transverse:
        for decind in range(cube.shape[2]):
            fulltitle = tag + " (dec = %3.1f)" % (dec_axis[decind])
            runlist.append((decind, cube[:, :, decind], freq_axis, ra_axis,
                            color_axis, ["Freq", "Ra"], 20., fulltitle,
                            colorbar_title, fileprefix, 800.))
    else:
        for freqind in range(cube.shape[0]):
            outfilename = fileprefix + str('.%03d' % freqind) + '.jpeg'

            # if in angle, freq coordinates
            if not physical:
                freq_MHz = freq_axis[freqind] / 1.e6
                z_here = cc.freq_21cm_MHz / freq_MHz - 1.
                comoving_distance = cosmology.comoving_distance(z_here) / \
                    littleh
                proper_distance = cosmology.proper_distance(z_here) / \
                    littleh
                angular_scale = 20. / units.degree / proper_distance

                title_info = {"index": freqind,
                              "redshift": z_here,
                              "distance": comoving_distance,
                              "freq": freq_MHz,
                              "tag": tag}

                fulltitle = title % title_info

                if (freq_MHz <= freq_data.min()) or \
                   (freq_MHz >= freq_data.max()):
                    fwhm = 0.
                else:
                    fwhm = beam_fwhm(freq_MHz)

                FWHM_circle = {"primitive": "circle",
                               "center_x": 0.9,
                               "center_y": 0.15,
                               "radius": fwhm / 2.,
                               "width": 5,
                               "color": "purple"}

                region_scale = {"primitive": "rect",
                                "center_x": 0.9,
                                "center_y": 0.15,
                                "size_x": angular_scale,
                                "size_y": angular_scale,
                                "width": 3,
                                "color": "black"}

                draw_objects = [FWHM_circle, region_scale]

                runlist.append((outfilename, cube[freqind, :, :], ra_axis,
                                dec_axis, color_axis, ["RA", "Dec"], 1.,
                                fulltitle, colorbar_title, draw_objects))
            else:
                distance = freq_axis[freqind] / 1.e6
                fulltitle = "%s (i = %d, Dc = %10.3f cMpc)" % \
                            (title, freqind, distance)

                runlist.append((outfilename, cube[freqind, :, :], ra_axis,
                                dec_axis, color_axis,
                                ["x (RA, cMpc/h)", "y (Dec, cMpc/h)"], 1.,
                                fulltitle, colorbar_title, draw_objects))

    if saveslice is not None:
        print("saving just slice %d" % saveslice)
        (outfilename, cube_slice, xaxis, yaxis, vaxis, xylabels,
         aspect, fulltitle, cbar_title, draw_objects) = runlist[saveslice]

        plot_slice.gnuplot_2D(outfilename, cube_slice, xaxis, yaxis, vaxis,
                              xylabels, aspect, fulltitle, cbar_title,
                              eps_outfile=saveslice_file,
                              draw_objects=draw_objects)
    else:
        pool = multiprocessing.Pool(
            processes=(multiprocessing.cpu_count() - 4))
        pool.map(gnuplot_single_slice, runlist)
        #gnuplot_single_slice(runlist[0])  # for troubleshooting

        argument = frame_dir + tag + '.%03d.jpeg'
        outfile = outputdir + tag + filetag_suffix + orientation + \
            convolved + '.mp4'
        subprocess.check_call(('ffmpeg', '-vb', '2500000', '-r', '10',
                               '-y', '-i', argument, outfile))

        for fileindex in range(len(runlist)):
            os.remove(fileprefix + str('.%03d' % fileindex) + '.jpeg')
def ImageRecalibrate(self, data, masks):
    'Needs a doc string'
    import ImageCalibrants as calFile
    print('Image recalibration:')
    time0 = time.time()
    pixelSize = data['pixelSize']
    scalex = 1000. / pixelSize[0]
    scaley = 1000. / pixelSize[1]
    pixLimit = data['pixLimit']
    cutoff = data['cutoff']
    data['rings'] = []
    data['ellipses'] = []
    if not data['calibrant']:
        print('no calibration material selected')
        return True
    skip = data['calibskip']
    dmin = data['calibdmin']
    Bravais, SGs, Cells = calFile.Calibrants[data['calibrant']][:3]
    HKL = []
    for bravais, sg, cell in zip(Bravais, SGs, Cells):
        A = G2lat.cell2A(cell)
        if sg:
            SGData = G2spc.SpcGroup(sg)[1]
            hkl = G2pwd.getHKLpeak(dmin, SGData, A)
            HKL += hkl
        else:
            hkl = G2lat.GenHBravais(dmin, bravais, A)
            HKL += hkl
    HKL = G2lat.sortHKLd(HKL, True, False)
    varyList = [item for item in data['varyList'] if data['varyList'][item]]
    parmDict = {'dist': data['distance'],
                'det-X': data['center'][0],
                'det-Y': data['center'][1],
                'tilt': data['tilt'],
                'phi': data['rotation'],
                'wave': data['wavelength'],
                'dep': data['DetDepth']}
    Found = False
    wave = data['wavelength']
    frame = masks['Frames']
    tam = ma.make_mask_none(self.ImageZ.shape)
    if frame:
        tam = ma.mask_or(tam, MakeFrameMask(data, frame))
    for iH, H in enumerate(HKL):
        if debug:
            print(H)
        dsp = H[3]
        tth = 2.0 * asind(wave / (2. * dsp))
        if tth + abs(data['tilt']) > 90.:
            print('next line is a hyperbola - search stopped')
            break
        ellipse = GetEllipse(dsp, data)
        Ring = makeRing(dsp, ellipse, pixLimit, cutoff, scalex, scaley,
                        ma.array(self.ImageZ, mask=tam))
        if Ring:
            if iH >= skip:
                data['rings'].append(np.array(Ring))
            data['ellipses'].append(copy.deepcopy(ellipse + ('r',)))
            Found = True
        elif not Found:    # skipping inner rings, keep looking until ring found
            continue
        else:    # no more rings beyond edge of detector
            data['ellipses'].append([])
            continue
            # break
    rings = np.concatenate((data['rings']), axis=0)
    chisq = FitDetector(rings, varyList, parmDict)
    data['wavelength'] = parmDict['wave']
    data['distance'] = parmDict['dist']
    data['center'] = [parmDict['det-X'], parmDict['det-Y']]
    data['rotation'] = np.mod(parmDict['phi'], 360.0)
    data['tilt'] = parmDict['tilt']
    data['DetDepth'] = parmDict['dep']
    data['chisq'] = chisq
    N = len(data['ellipses'])
    data['ellipses'] = []    # clear away individual ellipse fits
    for H in HKL[:N]:
        ellipse = GetEllipse(H[3], data)
        data['ellipses'].append(copy.deepcopy(ellipse + ('b',)))
    print('calibration time = ', time.time() - time0)
    G2plt.PlotImage(self, newImage=True)
    return True
def interp(datain, xin, yin, xout, yout, checkbounds=False, masked=False,
           order=1):
    """This function is originally from matplotlib.toolkits.basemap.interp
    Copyright 2008, Jeffrey Whitaker

    Interpolate data (``datain``) on a rectilinear grid (with x = ``xin``
    y = ``yin``) to a grid with x = ``xout``, y = ``yout``.

    .. tabularcolumns:: |l|L|

    ==============   ====================================================
     Arguments        Description
    ==============   ====================================================
     datain           a rank-2 array with 1st dimension corresponding to
                      y, 2nd dimension x.
     xin, yin         rank-1 arrays containing x and y of
                      datain grid in increasing order.
     xout, yout       rank-2 arrays containing x and y of desired
                      output grid.
    ==============   ====================================================

    .. tabularcolumns:: |l|L|

    ==============   ====================================================
     Keywords         Description
    ==============   ====================================================
     checkbounds      If True, values of xout and yout are checked to see
                      that they lie within the range specified by xin
                      and yin.
                      If False, and xout,yout are outside xin,yin,
                      interpolated values will be clipped to values on
                      boundary of input grid (xin,yin)
                      Default is False.
     masked           If True, points outside the range of xin and yin
                      are masked (in a masked array).
                      If masked is set to a number, then
                      points outside the range of xin and yin will be
                      set to that number. Default False.
     order            0 for nearest-neighbor interpolation, 1 for
                      bilinear interpolation, 3 for cubic spline
                      (default 1). order=3 requires scipy.ndimage.
    ==============   ====================================================

    .. note::
     If datain is a masked array and order=1 (bilinear interpolation) is
     used, elements of dataout will be masked if any of the four
     surrounding points in datain are masked.  To avoid this, do the
     interpolation in two passes, first with order=1 (producing dataout1),
     then with order=0 (producing dataout2).  Then replace all the masked
     values in dataout1 with the corresponding elements in dataout2
     (using numpy.where).  This effectively uses nearest neighbor
     interpolation if any of the four surrounding points in datain are
     masked, and bilinear interpolation otherwise.

    Returns ``dataout``, the interpolated data on the grid
    ``xout, yout``.
    """
    # xin and yin must be monotonically increasing.
    if xin[-1] - xin[0] < 0 or yin[-1] - yin[0] < 0:
        raise ValueError('xin and yin must be increasing!')
    if xout.shape != yout.shape:
        raise ValueError('xout and yout must have same shape!')
    # check that xout,yout are
    # within region defined by xin,yin.
    if checkbounds:
        if xout.min() < xin.min() or \
           xout.max() > xin.max() or \
           yout.min() < yin.min() or \
           yout.max() > yin.max():
            raise ValueError('yout or xout outside range of yin or xin')
    # compute grid coordinates of output grid.
    delx = xin[1:] - xin[0:-1]
    dely = yin[1:] - yin[0:-1]
    if max(delx) - min(delx) < 1.e-4 and max(dely) - min(dely) < 1.e-4:
        # regular input grid.
        xcoords = (len(xin) - 1) * (xout - xin[0]) / (xin[-1] - xin[0])
        ycoords = (len(yin) - 1) * (yout - yin[0]) / (yin[-1] - yin[0])
    else:
        # irregular (but still rectilinear) input grid.
        xoutflat = xout.flatten()
        youtflat = yout.flatten()
        ix = (np.searchsorted(xin, xoutflat) - 1).tolist()
        iy = (np.searchsorted(yin, youtflat) - 1).tolist()
        xoutflat = xoutflat.tolist()
        xin = xin.tolist()
        youtflat = youtflat.tolist()
        yin = yin.tolist()
        xcoords = []
        ycoords = []
        for n, i in enumerate(ix):
            if i < 0:
                xcoords.append(-1)  # outside of range on xin (lower end)
            elif i >= len(xin) - 1:
                xcoords.append(len(xin))  # outside range on upper end.
            else:
                xcoords.append(
                    float(i) + (xoutflat[n] - xin[i]) / (xin[i + 1] - xin[i]))
        for m, j in enumerate(iy):
            if j < 0:
                ycoords.append(-1)  # outside of range of yin (on lower end)
            elif j >= len(yin) - 1:
                ycoords.append(len(yin))  # outside range on upper end
            else:
                ycoords.append(
                    float(j) + (youtflat[m] - yin[j]) / (yin[j + 1] - yin[j]))
        xcoords = np.reshape(xcoords, xout.shape)
        ycoords = np.reshape(ycoords, yout.shape)
    # data outside range xin,yin will be clipped to
    # values on boundary.
    if masked:
        xmask = np.logical_or(np.less(xcoords, 0),
                              np.greater(xcoords, len(xin) - 1))
        ymask = np.logical_or(np.less(ycoords, 0),
                              np.greater(ycoords, len(yin) - 1))
        xymask = np.logical_or(xmask, ymask)
    xcoords = np.clip(xcoords, 0, len(xin) - 1)
    ycoords = np.clip(ycoords, 0, len(yin) - 1)
    # interpolate to output grid using bilinear interpolation.
    if order == 1:
        xi = xcoords.astype(np.int32)
        yi = ycoords.astype(np.int32)
        xip1 = xi + 1
        yip1 = yi + 1
        xip1 = np.clip(xip1, 0, len(xin) - 1)
        yip1 = np.clip(yip1, 0, len(yin) - 1)
        delx = xcoords - xi.astype(np.float32)
        dely = ycoords - yi.astype(np.float32)
        dataout = (1. - delx) * (1. - dely) * datain[yi, xi] + \
                  delx * dely * datain[yip1, xip1] + \
                  (1. - delx) * dely * datain[yip1, xi] + \
                  delx * (1. - dely) * datain[yi, xip1]
    elif order == 0:
        xcoordsi = np.around(xcoords).astype(np.int32)
        ycoordsi = np.around(ycoords).astype(np.int32)
        dataout = datain[ycoordsi, xcoordsi]
    elif order == 3:
        try:
            from scipy.ndimage import map_coordinates
        except ImportError:
            raise ValueError('scipy.ndimage must be installed if order=3')
        coords = [ycoords, xcoords]
        dataout = map_coordinates(datain, coords, order=3, mode='nearest')
    else:
        raise ValueError('order keyword must be 0, 1 or 3')
    if masked and isinstance(masked, bool):
        dataout = ma.masked_array(dataout)
        newmask = ma.mask_or(ma.getmask(dataout), xymask)
        dataout = ma.masked_array(dataout, mask=newmask)
    elif masked and is_scalar(masked):
        dataout = np.where(xymask, masked, dataout)
    return dataout
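# A hedged sketch (grid and values assumed) of the two-pass recipe from the
# docstring above: run a bilinear and a nearest-neighbour pass, then use
# np.where to backfill the bilinear results that were masked near datain's
# masked cells with the nearest-neighbour values.
import numpy as np
import numpy.ma as ma

xin = np.linspace(0., 1., 5)
yin = np.linspace(0., 1., 5)
datain = ma.masked_invalid(np.outer(yin, xin))
datain[2, 2] = ma.masked                       # one bad input cell
xout, yout = np.meshgrid(np.linspace(0., 1., 9), np.linspace(0., 1., 9))
dataout1 = interp(datain, xin, yin, xout, yout, order=1)  # bilinear pass
dataout2 = interp(datain, xin, yin, xout, yout, order=0)  # nearest pass
dataout = np.where(ma.getmaskarray(dataout1), dataout2, dataout1)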
def read_avhrrgac(f, a, tim, cha, tsm_corr=None):
    from functools import reduce  # Python 3: reduce is no longer a builtin

    # get angle and geolocation
    sza, szanam = read_var(a, 'image1')
    lat, latnam = read_var(f, 'lat')
    lon, lonnam = read_var(f, 'lon')

    # get measurement
    # channel 1
    tar1, tarname1 = read_var(f, 'image1')
    tar1[:] = tar1 / 100.
    # channel 2
    tar2, tarname2 = read_var(f, 'image2')
    tar2[:] = tar2 / 100.
    # channel 3b
    tar3, tarname3 = read_var(f, 'image3')
    # channel 4
    tar4, tarname4 = read_var(f, 'image4')
    # channel 5
    tar5, tarname5 = read_var(f, 'image5')
    # channel 3a
    tar6, tarname6 = read_var(f, 'image6')
    tar6[:] = tar6 / 100.

    # --- START temporary scan motor issue correction
    if tsm_corr:
        # absolute difference because ch1 is very similar to ch2
        abs_d12 = abs(tar1 - tar2)
        # relative difference because ch4 and ch5 differ
        rel_d45 = 100.0 * (tar4 - tar5) / tar5

        # standard deviation of abs_d12 and rel_d45
        box_size = 3
        fill_value = -9999.0
        std_d12 = gridbox_std(abs_d12, box_size, fill_value)
        std_d45 = gridbox_std(rel_d45, box_size, fill_value)

        # using ch1, ch2, ch4, ch5 in combination;
        # all channels seem to be affected throughout the whole orbit,
        # independent of VIS and NIR or day and night
        ind1 = np.where((std_d12 > 0.02) & (std_d45 > 2.00))
        tar1[ind1] = -999.0
        tar2[ind1] = -999.0
        tar3[ind1] = -999.0
        tar4[ind1] = -999.0
        tar5[ind1] = -999.0
        tar6[ind1] = -999.0
    # --- END temporary scan motor issue correction

    if cha == 'ch1':
        tar = tar1
        tarname = tarname1
    elif cha == 'ch2':
        tar = tar2
        tarname = tarname2
    elif cha == 'ch3b':
        tar = tar3
        tarname = tarname3
    elif cha == 'ch4':
        tar = tar4
        tarname = tarname4
    elif cha == 'ch5':
        tar = tar5
        tarname = tarname5
    elif cha == 'ch3a':
        tar = tar6
        tarname = tarname6

    # some lat/lon fields are not fill_value although they should be:
    # lat/lon min/max outside realistic values, fixed here in read_var,
    # but then tar and lat/lon do not have the same masked elements, thus:
    all_masks = [lat < -90., lat > 90., lon < -180., lon > 180.]
    total_mask = reduce(np.logical_or, all_masks)
    lat = ma.masked_where(total_mask, lat)
    lon = ma.masked_where(total_mask, lon)
    tar = ma.masked_where(total_mask, tar)
    sza = ma.masked_where(total_mask, sza)

    # select time
    if tim == 'day_90sza':
        # consider only daytime, i.e. sza < 90
        lon = ma.masked_where(sza >= 90., lon)
        lat = ma.masked_where(sza >= 90., lat)
        tar = ma.masked_where(sza >= 90., tar)
    if tim == 'day':
        # consider only daytime, i.e. sza < 80
        lon = ma.masked_where(sza >= 80., lon)
        lat = ma.masked_where(sza >= 80., lat)
        tar = ma.masked_where(sza >= 80., tar)
    if tim == 'twilight':
        # consider only twilight, i.e. 80 <= sza < 90:
        # mask everything outside the current sza range
        omask = ma.mask_or(sza < 80, sza >= 90)
        lon = ma.masked_where(omask, lon)
        lat = ma.masked_where(omask, lat)
        tar = ma.masked_where(omask, tar)
    if tim == 'night':
        # consider only night, i.e. sza >= 90
        lon = ma.masked_where(sza < 90., lon)
        lat = ma.masked_where(sza < 90., lat)
        tar = ma.masked_where(sza < 90., tar)

    return lat, lon, tar
def expand_value_coverage(input_raster, expand_raster, output_raster,
                          union=False, compress='DEFLATE', verbose=False,
                          logger=None):
    """ Expand a raster based on occurrence of informative cells in another.

    Argument "union" can be used to define if only the mask of the expand
    raster should be used (False, default), or an union between masks of
    input and expand raster (True).

    :param input_raster: String path to input raster.
    :param expand_raster: String path to mask raster.
    :param output_raster: String path to output raster.
    :param union: Boolean should masks of input_raster and expand_raster
                  be unioned.
    :param compress: String compression level used for the output raster.
    :param verbose: Boolean indicating how much information is printed out.
    :param logger: logger object to be used.
    :return Boolean success.
    """
    # 1. Setup --------------------------------------------------------------

    all_start = timer()

    if not logger:
        logging.basicConfig()
        llogger = logging.getLogger('maskvalue_coverage')
        llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
    else:
        llogger = logger

    # 2. Read and process raster ----------------------------------------------

    # First, get the mask and dtype from the mask raster
    expand_raster = rasterio.open(expand_raster)
    expand_raster_src = expand_raster.read(1, masked=True)
    expand_mask = expand_raster_src.mask

    # Read raster bands directly to Numpy arrays.
    with rasterio.open(input_raster) as raster:
        llogger.info("Reading and processing raster {}".format(input_raster))
        input_nodata = raster.nodata

        # Read in the data
        src = raster.read(1, masked=True)
        src_dtype = src.dtype
        src_mask = src.mask

        # Perform a union on the masks if needed
        if union:
            llogger.info("[NOTE] Using union of masks")
            expand_mask = ma.mask_or(expand_mask, src_mask)

        llogger.debug("Number of informative cells in the data: {}".format(
            np.sum(~src_mask)))
        llogger.debug(
            "Number of informative cells in the expand mask: {}".format(
                np.sum(~expand_mask)))

        # Change the mask and the underlying values
        src.mask = expand_mask
        src.data[src.mask] = input_nodata
        # There might be some NoData values lurking around, replace them
        # with zero.
        src.data[src == input_nodata] = 0.0

        profile = raster.profile
        profile.update(dtype=src_dtype, count=1, compress=compress,
                       nodata=input_nodata)

        with rasterio.open(output_raster, 'w', **profile) as dst:
            llogger.info("Writing output raster {}".format(output_raster))
            dst.write(src.astype(src_dtype), 1)

    all_end = timer()
    all_elapsed = round(all_end - all_start, 2)
    llogger.info(" [TIME] Masking took {} sec".format(all_elapsed))
def interp(datain, xin, yin, xout, yout, checkbounds=False, masked=False,
           order=1):
    """This function is originally from matplotlib.toolkits.basemap.interp
    Copyright 2008, Jeffrey Whitaker

    Interpolate data (``datain``) on a rectilinear grid (with x = ``xin``,
    y = ``yin``) to a grid with x = ``xout``, y = ``yout``.

    .. tabularcolumns:: |l|L|

    ==============   ====================================================
     Arguments        Description
    ==============   ====================================================
     datain           a rank-2 array with 1st dimension corresponding to
                      y, 2nd dimension x.
     xin, yin         rank-1 arrays containing x and y of
                      datain grid in increasing order.
     xout, yout       rank-2 arrays containing x and y of desired
                      output grid.
    ==============   ====================================================

    .. tabularcolumns:: |l|L|

    ==============   ====================================================
     Keywords         Description
    ==============   ====================================================
     checkbounds      If True, values of xout and yout are checked to see
                      that they lie within the range specified by xin
                      and yin.  If False, and xout,yout are outside
                      xin,yin, interpolated values will be clipped to
                      values on boundary of input grid (xin,yin).
                      Default is False.
     masked           If True, points outside the range of xin and yin
                      are masked (in a masked array).
                      If masked is set to a number, then
                      points outside the range of xin and yin will be
                      set to that number. Default False.
     order            0 for nearest-neighbor interpolation, 1 for
                      bilinear interpolation, 3 for cubic spline
                      (default 1). order=3 requires scipy.ndimage.
    ==============   ====================================================

    .. note::
     If datain is a masked array and order=1 (bilinear interpolation) is
     used, elements of dataout will be masked if any of the four surrounding
     points in datain are masked.  To avoid this, do the interpolation in two
     passes, first with order=1 (producing dataout1), then with order=0
     (producing dataout2).  Then replace all the masked values in dataout1
     with the corresponding elements in dataout2 (using numpy.where).
     This effectively uses nearest neighbor interpolation if any of the
     four surrounding points in datain are masked, and bilinear interpolation
     otherwise.

    Returns ``dataout``, the interpolated data on the grid ``xout, yout``.
    """
    # xin and yin must be monotonically increasing.
    if xin[-1] - xin[0] < 0 or yin[-1] - yin[0] < 0:
        raise ValueError('xin and yin must be increasing!')
    if xout.shape != yout.shape:
        raise ValueError('xout and yout must have same shape!')
    # check that xout,yout are within region defined by xin,yin.
    if checkbounds:
        if xout.min() < xin.min() or \
           xout.max() > xin.max() or \
           yout.min() < yin.min() or \
           yout.max() > yin.max():
            raise ValueError('yout or xout outside range of yin or xin')
    # compute grid coordinates of output grid.
    delx = xin[1:] - xin[0:-1]
    dely = yin[1:] - yin[0:-1]
    if max(delx) - min(delx) < 1.e-4 and max(dely) - min(dely) < 1.e-4:
        # regular input grid.
        xcoords = (len(xin) - 1) * (xout - xin[0]) / (xin[-1] - xin[0])
        ycoords = (len(yin) - 1) * (yout - yin[0]) / (yin[-1] - yin[0])
    else:
        # irregular (but still rectilinear) input grid.
        xoutflat = xout.flatten()
        youtflat = yout.flatten()
        ix = (np.searchsorted(xin, xoutflat) - 1).tolist()
        iy = (np.searchsorted(yin, youtflat) - 1).tolist()
        xoutflat = xoutflat.tolist()
        xin = xin.tolist()
        youtflat = youtflat.tolist()
        yin = yin.tolist()
        xcoords = []
        ycoords = []
        for n, i in enumerate(ix):
            if i < 0:
                xcoords.append(-1)        # outside of range on xin (lower end)
            elif i >= len(xin) - 1:
                xcoords.append(len(xin))  # outside range on upper end.
            else:
                xcoords.append(
                    float(i) + (xoutflat[n] - xin[i]) / (xin[i + 1] - xin[i]))
        for m, j in enumerate(iy):
            if j < 0:
                ycoords.append(-1)        # outside of range of yin (on lower end)
            elif j >= len(yin) - 1:
                ycoords.append(len(yin))  # outside range on upper end
            else:
                ycoords.append(
                    float(j) + (youtflat[m] - yin[j]) / (yin[j + 1] - yin[j]))
        xcoords = np.reshape(xcoords, xout.shape)
        ycoords = np.reshape(ycoords, yout.shape)
    # data outside range xin,yin will be clipped to values on boundary.
    if masked:
        xmask = np.logical_or(np.less(xcoords, 0),
                              np.greater(xcoords, len(xin) - 1))
        ymask = np.logical_or(np.less(ycoords, 0),
                              np.greater(ycoords, len(yin) - 1))
        xymask = np.logical_or(xmask, ymask)
    xcoords = np.clip(xcoords, 0, len(xin) - 1)
    ycoords = np.clip(ycoords, 0, len(yin) - 1)
    # interpolate to output grid using bilinear interpolation.
    if order == 1:
        xi = xcoords.astype(np.int32)
        yi = ycoords.astype(np.int32)
        xip1 = xi + 1
        yip1 = yi + 1
        xip1 = np.clip(xip1, 0, len(xin) - 1)
        yip1 = np.clip(yip1, 0, len(yin) - 1)
        delx = xcoords - xi.astype(np.float32)
        dely = ycoords - yi.astype(np.float32)
        dataout = (1. - delx) * (1. - dely) * datain[yi, xi] + \
                  delx * dely * datain[yip1, xip1] + \
                  (1. - delx) * dely * datain[yip1, xi] + \
                  delx * (1. - dely) * datain[yi, xip1]
    elif order == 0:
        xcoordsi = np.around(xcoords).astype(np.int32)
        ycoordsi = np.around(ycoords).astype(np.int32)
        dataout = datain[ycoordsi, xcoordsi]
    elif order == 3:
        try:
            from scipy.ndimage import map_coordinates
        except ImportError:
            raise ValueError('scipy.ndimage must be installed if order=3')
        coords = [ycoords, xcoords]
        dataout = map_coordinates(datain, coords, order=3, mode='nearest')
    else:
        raise ValueError('order keyword must be 0, 1 or 3')
    if masked and isinstance(masked, bool):
        dataout = ma.masked_array(dataout)
        newmask = ma.mask_or(ma.getmask(dataout), xymask)
        dataout = ma.masked_array(dataout, mask=newmask)
    elif masked and is_scalar(masked):
        dataout = np.where(xymask, masked, dataout)
    return dataout
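The docstring's note about masked input with order=1 translates into a short two-pass recipe. A usage sketch, assuming datain is a masked array and the grids are defined as above:

# Pass 1: bilinear; an output point is masked if any of its four
# surrounding input points is masked in datain.
dataout1 = interp(datain, xin, yin, xout, yout, masked=True, order=1)
# Pass 2: nearest neighbour, which never blends masked values.
dataout2 = interp(datain, xin, yin, xout, yout, masked=True, order=0)
# Keep the bilinear result where valid, fall back to nearest neighbour
# elsewhere, as the docstring recommends.
dataout = np.where(ma.getmaskarray(dataout1), dataout2, dataout1)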
def getTimeseries(productcode, subproductcode, version, mapsetcode, wkt,
                  start_date, end_date):
    # Extract a timeseries from a list of files and return it as a JSON
    # object. It applies to a single dataset (prod/sprod/version/mapset)
    # and between two dates.
    ogr.UseExceptions()
    theGeomWkt = ' '.join(wkt.strip().split())
    geom = Geometry(wkt=str(theGeomWkt), srs=4326)

    # Get product info
    product_info = querydb.get_product_out_info(productcode=productcode,
                                                subproductcode=subproductcode,
                                                version=version)
    if len(product_info) > 0:
        scale_factor = 0
        scale_offset = 0
        nodata = 0
        date_format = ''
        for row in product_info:
            scale_factor = row.scale_factor
            scale_offset = row.scale_offset
            nodata = row.nodata
            unit = row.unit
            date_format = row.date_format

    [list_files, dates_list] = getFilesList(productcode, subproductcode,
                                            version, mapsetcode, date_format,
                                            start_date, end_date)

    # Build a dictionary mapping dates to filenames
    dates_to_files_dict = dict(zip(dates_list, list_files))

    # Generate a unique list of files
    unique_list = set(list_files)
    uniqueFilesValues = []

    for infile in unique_list:
        if os.path.isfile(infile):
            try:
                mx = []
                single_result = {'filename': '',
                                 'meanvalue_noscaling': nodata,
                                 'meanvalue': nodata}
                with Raster(infile) as img:
                    # Assign nodata from prod_info
                    img._nodata = nodata
                    with img.clip(geom) as clipped:
                        # Save clipped image (for debug only)
                        # clipped.save(dataset.fullpath+'clipped_'+productfilename)
                        mx = clipped.array()
                        nodata_array_masked = ma.masked_equal(mx, nodata)
                        merged_mask = ma.mask_or(
                            ma.getmask(mx),
                            ma.getmask(nodata_array_masked))
                        mxnodata = ma.masked_array(ma.getdata(mx), merged_mask)

                if mxnodata.count() == 0:
                    meanResult = 0.0
                else:
                    meanResult = mxnodata.mean()

                single_result['filename'] = infile
                single_result['meanvalue_noscaling'] = meanResult
                # Scale to physical value
                finalvalue = (meanResult * scale_factor + scale_offset)
                single_result['meanvalue'] = finalvalue

                uniqueFilesValues.append(single_result)
            except Exception as e:
                logger.debug('ERROR: clipping - %s' % (e))
                # sys.exit (1)
        else:
            logger.debug('ERROR: raster file does not exist - %s' % infile)
            # sys.exit (1)

    # Define a dictionary to associate filenames with values
    files_to_values_dict = dict((x['filename'], x['meanvalue'])
                                for x in uniqueFilesValues)

    # Prepare the result: a list of 'dates'/'values' records
    resultDatesValues = []
    for mydate in dates_list:
        # my_result = {'date': datetime.date.today(), 'filename':'', 'meanvalue':nodata}
        my_result = {'date': datetime.date.today(), 'meanvalue': nodata}
        # Assign the date
        my_result['date'] = mydate
        # Assign the filename
        my_filename = dates_to_files_dict[mydate]
        # my_result['filename'] = my_filename
        # Map from the array of values
        my_result['meanvalue'] = files_to_values_dict[my_filename]
        # Append to the array of results
        resultDatesValues.append(my_result)

    return resultDatesValues
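The clipping loop above reduces each raster to one statistic, combining any pre-existing mask with a nodata sentinel before taking the mean. The same pattern in isolation, as a hypothetical standalone helper (not part of this module):

import numpy as np
import numpy.ma as ma

def masked_mean(mx, nodata):
    # Mask the nodata sentinel, then OR that mask with whatever mask mx
    # already carries (ma.getmask returns nomask for plain ndarrays).
    nodata_masked = ma.masked_equal(mx, nodata)
    merged_mask = ma.mask_or(ma.getmask(mx), ma.getmask(nodata_masked))
    mxnodata = ma.masked_array(ma.getdata(mx), merged_mask)
    # Guard against fully masked input, as getTimeseries does.
    return 0.0 if mxnodata.count() == 0 else float(mxnodata.mean())

print(masked_mean(np.array([1.0, -9999.0, 3.0]), -9999.0))  # 2.0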
def dgt(obsdata_file, powerlaw, userT, userWidth, snr_line, snr_lim,
        plotting, domcmc, nsims):

    interp = False    # interpolate loglike on model grid (for mcmc sampler);
                      # not used yet, because it needs some fixing

    # check user inputs (T and width)
    valid_T = [0, 10, 15, 20, 25, 30, 35, 40, 45, 50]
    valid_W = [0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    if userT in valid_T and userWidth in valid_W:
        userinputOK = True
    else:
        userinputOK = False
        print("!!! User input (temperature or width) invalid. Exiting.")
        print("!!!")
        exit()

    # Valid (i.e. modeled) input molecular lines are:
    valid_lines = ['CO10', 'CO21', 'CO32',
                   'HCN10', 'HCN21', 'HCN32',
                   'HCOP10', 'HCOP21', 'HCOP32',
                   'HNC10', 'HNC21', 'HNC32',
                   '13CO10', '13CO21', '13CO32',
                   'C18O10', 'C18O21', 'C18O32',
                   'C17O10', 'C17O21', 'C17O32',
                   'CS10', 'CS21', 'CS32']

    if snr_line not in valid_lines:
        print("!!! Line for SNR limit is invalid. Must be one of:")
        print(valid_lines)
        print("!!!")
        exit()

    ###########################
    ### get observations ######
    ###########################
    obs = read_obs(obsdata_file)

    ###########################
    ##### validate input ######
    ###########################
    # check that coordinates exist in the input file
    have_radec = False
    have_ra_special = False
    if 'RA' in obs.keys() and 'DEC' in obs.keys():
        have_radec = True
    elif '#RA' in obs.keys() and 'DEC' in obs.keys():
        have_radec = True
        have_ra_special = True
    else:
        have_radec = False

    if not have_radec:
        print("!!!")
        print("!!! No coordinates found in input ascii file. "
              "Check column header for 'RA' and 'DEC'. Exiting.")
        print("!!!")
        exit()

    # count number of lines in input data
    ct_l = 0
    obstrans = []    # this list holds the user input line keys
    for key in obs.keys():
        if key in valid_lines:
            ct_l += 1
            obstrans.append(key)

    # Only continue if the number of molecular lines exceeds the number of
    # free parameters:
    if userT > 0 and userWidth > 0:
        dgf = ct_l - 1    # free parameter: n
    elif userT > 0 or userWidth > 0:
        dgf = ct_l - 2    # degrees of freedom = nrlines-2 if e.g. the
                          # temperature is fixed; free parameters: n, width
    else:
        dgf = ct_l - 3    # free parameters: n, T, width

    if not dgf > 0:
        print("!!!")
        print("!!! Number of observed lines too low. Degrees of Freedom <1. "
              "Try a fixed temperature or check column header. "
              "Valid lines are: ")
        print(valid_lines)
        print("!!!")
        exit()

    if have_ra_special:
        ra = np.array(obs['#RA'])
    else:
        ra = np.array(obs['RA'])
    de = np.array(obs['DEC'])

    #########################################################################
    # Check input observations for lowest-J CO line (used for normalization)
    #########################################################################
    have_co10 = False
    have_co21 = False
    have_co32 = False
    # loop through observed lines/transitions
    for t in obstrans:
        if t == 'CO10':
            have_co10 = True
        if t == 'CO21':
            have_co21 = True
        if t == 'CO32':
            have_co32 = True

    if have_co10:
        normtrans = 'CO10'
        uc_normtrans = 'UC_CO10'
    elif have_co21:
        normtrans = 'CO21'
        uc_normtrans = 'UC_CO21'
    elif have_co32:
        normtrans = 'CO32'
        uc_normtrans = 'UC_CO32'
    else:
        print("No CO line found in input data file. Check column headers "
              "for 'CO10', 'CO21' or 'CO32'. Exiting.")
        exit()

    ###########################
    ##### get the models ######
    ###########################
    mdl = {}
    mdl = read_grid_ndist(obstrans, userT, userWidth, powerlaw)

    #########################################################################
    # Calculate line ratios and save in a new dictionary.
    # Use line ratios (normalized to the lowest CO transition in the array)
    # to determine chi2; note that the abundances are fixed by design of
    # the model grid files.
    #########################################################################
    lr = {}
    # loop through observed lines/transitions
    for t in obstrans:
        if t != normtrans:
            # calc line ratios
            lr[t] = obs[t] / obs[normtrans]
            mdl[t] = mdl[t] / mdl[normtrans]
            uc = 'UC_' + t
            lr[uc] = abs(obs[uc] / obs[t]) + abs(
                obs[uc_normtrans] / obs[normtrans])

    #############################################################
    # loop through pixels, i.e. rows in the ascii input file
    #############################################################
    result = []
    for p in range(len(ra)):

        #################################
        ####### calculate chi2 ##########
        #################################
        diff = {}
        for t in obstrans:
            if t != normtrans:
                uc = 'UC_' + t
                if obs[t][p] > obs[uc][p] and obs[t][p] > 0.0:
                    diff[t] = np.array(((lr[t][p] - mdl[t]) / lr[uc][p])**2)
                else:
                    diff[t] = np.nan * np.zeros_like(mdl[t])

        # vertical stack of diff arrays
        vstack = np.vstack(list(diff.values()))
        # sum up diff of all line ratios --> chi2
        chi2 = vstack.sum(axis=0)
        # if the model is correct, we expect chi2 ~ nu +/- sqrt(2*nu)

        # make an SNR cut using the line and limit given by the user
        uc = 'UC_' + snr_line
        SNR = round(obs[snr_line][p] / obs[uc][p], 2)

        width = ma.array(mdl['width'])
        densefrac = ma.array(mdl['densefrac'])

        # filter out outliers
        chi2lowlim, chi2uplim = np.quantile(chi2, [0.0, 0.95])

        # create masks
        # invalid (nan) values of chi2
        chi2 = ma.masked_invalid(chi2)
        mchi2invalid = ma.getmask(chi2)
        # based on chi2
        chi2 = ma.array(chi2)
        chi2 = ma.masked_outside(chi2, chi2lowlim, chi2uplim)
        mchi2 = ma.getmask(chi2)
        # based on densefrac
        densefraclowlim = 0.
        densefracuplim = 99999.
        densefrac = ma.masked_outside(densefrac, densefraclowlim,
                                      densefracuplim)
        mwidth = ma.getmask(densefrac)

        # combine masks
        m1 = ma.mask_or(mchi2, mwidth)
        m = ma.mask_or(m1, mchi2invalid)

        width = ma.array(width, mask=m)
        densefrac = ma.array(densefrac, mask=m)
        chi2 = ma.array(chi2, mask=m)

        # n, T
        grid_n = mdl['n']
        n = ma.array(grid_n, mask=m)
        grid_T = mdl['T']
        T = ma.array(grid_T, mask=m)

        ###########################################################
        ########## find best-fit set of parameters ################
        ################### from chi2 credible interval ###########
        ###########################################################
        if dgf > 0:
            cutoff = 0.05    # area to the right of the critical value;
                             # here 5% --> 95% confidence --> +/- 2 sigma
            # cutoff = 0.32  # 32% --> 68% confidence --> +/- 1 sigma
            deltachi2 = scipychi2.ppf(1 - cutoff, dgf)
        else:
            print("DGF is zero or negative.")

        # The minimum: find the best-fit set of parameters
        chi2min = np.ma.min(chi2)
        bestfitindex = ma.where(chi2 == chi2min)[0]
        bestchi2 = scalar(chi2[bestfitindex].data)
        bestn = scalar(n[bestfitindex].data)
        bestwidth = scalar(width[bestfitindex].data)
        bestT = scalar(T[bestfitindex].data)
        bestdensefrac = scalar(densefrac[bestfitindex].data)
        bestchi2 = round(bestchi2, 2)
        bestreducedchi2 = round(bestchi2 / dgf, 2)

        #################################################
        ########## Show chi2 result on screen ###########
        #################################################
        if not domcmc:
            if SNR > snr_lim and bestn > 0:
                print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
                print("#### Bestfit Parameters for pixel nr. " + str(p + 1) +
                      " (" + str(round(ra[p], 5)) + "," +
                      str(round(de[p], 5)) + ") ####")
                print("chi2\t\t" + str(bestchi2))
                print("red. chi2\t\t" + str(bestreducedchi2))
                print("n\t\t" + str(bestn))
                print("T\t\t" + str(bestT))
                print("Width\t\t" + str(bestwidth))
                print()

                # save results in array for later file export
                result.append([ra[p], de[p], ct_l, dgf, bestchi2, bestn,
                               bestT, bestwidth, obstrans])
                do_this_plot = True
            else:
                print("!-!-!-!-!-!")
                print("Pixel no. " + str(p + 1) +
                      " --> SNR too low or density < 0.")
                print()
                result.append([ra[p], de[p], ct_l, dgf, -99999.9, -99999.9,
                               -99999.9, -99999.9, obstrans])
                do_this_plot = False

        ###################################################################
        ################################# MCMC ############################
        ###################################################################
        if domcmc:
            if SNR > snr_lim and bestn > 0:
                #### Create directory for output png files ###
                if not os.path.exists('./results/'):
                    os.makedirs('./results/')

                starttime = datetime.now()
                ndim, nwalkers = 3, 50

                # model grid in results file
                grid_theta = np.array([n, T, width], dtype=np.float64)
                grid_loglike = -0.5 * 10**chi2    # note that the variable
                                                  # "chi2" is in fact
                                                  # log10(chi2) here

                # Set up the backend.
                # Don't forget to clear it in case the file already exists.
                status_filename = "./results/" + obsdata_file[:-4] + \
                    "_mcmc_" + str(p + 1) + ".h5"
                backend = emcee.backends.HDFBackend(status_filename)
                backend.reset(nwalkers, ndim)

                #### main ####
                mymcmc(grid_theta, grid_loglike, ndim, nwalkers, backend,
                       interp, nsims)
                ##############

                duration = datetime.now() - starttime
                print("Duration for Pixel " + str(p + 1) + ": " +
                      str(duration.seconds) + "sec")

                ########## MAKE CORNER PLOT #########
                outpngfile = "./results/" + obsdata_file[:-4] + "_mcmc_" + \
                    str(p + 1) + ".png"
                bestn_mcmc_val, bestn_mcmc_upper, bestn_mcmc_lower, \
                    bestT_mcmc_val, bestT_mcmc_upper, bestT_mcmc_lower, \
                    bestW_mcmc_val, bestW_mcmc_upper, bestW_mcmc_lower = \
                    mcmc_corner_plot(status_filename, outpngfile)

                print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
                print("#### Bestfit Parameters for pixel nr. " + str(p + 1) +
                      " (" + str(round(ra[p], 5)) + "," +
                      str(round(de[p], 5)) + ") ####")
                print("n\t\t" + str(bestn_mcmc_val) + " " +
                      str(bestn_mcmc_upper) + " " + str(bestn_mcmc_lower))
                print("T\t\t" + str(bestT_mcmc_val) + " " +
                      str(bestT_mcmc_upper) + " " + str(bestT_mcmc_lower))
                print("Width\t\t" + str(bestW_mcmc_val) + " " +
                      str(bestW_mcmc_upper) + " " + str(bestW_mcmc_lower))
                print()

                # save results in array for later file export
                result.append([ra[p], de[p], ct_l, dgf,
                               float(bestn_mcmc_val), float(bestn_mcmc_upper),
                               float(bestn_mcmc_lower),
                               float(bestT_mcmc_val), float(bestT_mcmc_upper),
                               float(bestT_mcmc_lower),
                               float(bestW_mcmc_val), float(bestW_mcmc_upper),
                               float(bestW_mcmc_lower), obstrans])
                do_this_plot = True
            else:
                do_this_plot = False

        ############################################
        ################ Make Figures ##############
        ############################################
        # Plotting
        if SNR > snr_lim and plotting and bestn > 0 and do_this_plot:
            #### Create directory for output png files ###
            if not os.path.exists('./results/'):
                os.makedirs('./results/')

            # zoom-in variables
            idx = np.where(chi2 < bestchi2 + deltachi2)
            zoom_n = n[idx].compressed()
            zoom_chi2 = chi2[idx].compressed()
            zoom_width = width[idx].compressed()

            ########################## PLOT 1 #############################
            # combine 4 plots in a single file
            fig, ax = plt.subplots(2, 2, sharex='col', sharey='row',
                                   figsize=(11.5, 8))

            # chi2 vs. n plot
            ax[0, 0].scatter(chi2, np.log10(n), c=width, cmap='Accent',
                             marker=',', s=4, vmin=width.min(),
                             vmax=width.max())
            ax[0, 0].set_ylabel(r'$log\ n$')

            # chi2 vs. n plot, zoom-in
            pl1 = ax[0, 1].scatter(zoom_chi2, np.log10(zoom_n), c=zoom_width,
                                   cmap='Accent', marker=',', s=9,
                                   vmin=width.min(), vmax=width.max())
            fig.colorbar(pl1, ax=ax[0, 1], label=r'$\mathsf{width}$')

            # chi2 vs. T plot
            ax[1, 0].scatter(chi2, np.log10(T), c=width, cmap='Accent',
                             marker=',', s=4, vmin=width.min(),
                             vmax=width.max())
            ax[1, 0].set_xlabel(r'$\chi^2$')
            ax[1, 0].set_ylabel(r'$log\ T$')

            # chi2 vs. T plot, zoom-in
            zoom_T = T[chi2 < bestchi2 + deltachi2].compressed()
            pl2 = ax[1, 1].scatter(zoom_chi2, np.log10(zoom_T), c=zoom_width,
                                   cmap='Accent', marker=',', s=9,
                                   vmin=width.min(), vmax=width.max())
            ax[1, 1].set_xlabel(r'$\chi^2$')
            fig.colorbar(pl2, ax=ax[1, 1], label=r'$\mathsf{width}$')

            # plot
            fig.subplots_adjust(left=0.06, bottom=0.06, right=1, top=0.96,
                                wspace=0.04, hspace=0.04)
            fig = gcf()
            fig.suptitle('Pixel: (' + str(p) + ') SNR(' + snr_line + '): ' +
                         str(SNR), fontsize=14, y=0.99)
            chi2_filename = obsdata_file[:-4] + "_" + str(p + 1) + '_chi2.png'
            fig.savefig('./results/' + chi2_filename)
            # plt.show()
            plt.close()

            ########################## PLOT 2 #############################
            # all parameters free: (n, T) vs. chi2
            if userT == 0 and userWidth == 0:
                x = np.log10(zoom_n)
                y = np.log10(zoom_T)
                z = np.log10(zoom_chi2)
                this_slice = zoom_width
                this_bestval = bestwidth
                xlabel = r'$log\ n\ [cm^{-3}]$'
                ylabel = r'$log\ T\ [K]$'
                zlabel = r'$\mathsf{log\ \chi^2}$'
                title = 'Pixel: ' + str(p + 1) + \
                    ' | SNR(' + snr_line + ')=' + str(SNR)
                pngoutfile = 'results/' + obsdata_file[:-4] + "_" + \
                    str(p + 1) + '_nT.png'
                makeplot(x, y, z, this_slice, this_bestval, xlabel, ylabel,
                         zlabel, title, pngoutfile)

                ########################## PLOT 3 #########################
                # all parameters free: (n, width) vs. chi2
                x = np.log10(zoom_n)
                y = zoom_width
                z = np.log10(zoom_chi2)
                this_slice = zoom_T
                this_bestval = bestT
                xlabel = r'$log\ n\ [cm^{-3}]$'
                ylabel = r'$width\ [dex]$'
                zlabel = r'$\mathsf{log\ \chi^2}$'
                title = 'Pixel: ' + str(p + 1) + \
                    ' | SNR(' + snr_line + ')=' + str(SNR)
                pngoutfile = 'results/' + obsdata_file[:-4] + "_" + \
                    str(p + 1) + '_nW.png'
                makeplot(x, y, z, this_slice, this_bestval, xlabel, ylabel,
                         zlabel, title, pngoutfile)

            # width fixed: (n, T) vs. chi2
            elif userT == 0 and userWidth > 0:
                x = np.log10(zoom_n)
                y = np.log10(zoom_T)
                z = np.log10(zoom_chi2)
                this_slice = zoom_width
                this_bestval = bestwidth
                xlabel = r'$log\ n\ [cm^{-3}]$'
                ylabel = r'$log\ T\ [K]$'
                zlabel = r'$\mathsf{log\ \chi^2}$'
                title = 'Pixel: ' + str(p + 1) + \
                    ' | SNR(' + snr_line + ')=' + str(SNR)
                pngoutfile = 'results/' + obsdata_file[:-4] + "_" + \
                    str(p + 1) + '_nT_fixedW.png'
                makeplot(x, y, z, this_slice, this_bestval, xlabel, ylabel,
                         zlabel, title, pngoutfile)

            # T fixed: (n, width) vs. chi2
            elif userT > 0 and userWidth == 0:
                x = np.log10(zoom_n)
                y = zoom_width
                z = np.log10(zoom_chi2)
                this_slice = zoom_T
                this_bestval = bestT
                xlabel = r'$log\ n\ [cm^{-3}]$'
                ylabel = r'$width\ [dex]$'
                zlabel = r'$\mathsf{log\ \chi^2}$'
                title = 'Pixel: ' + str(p + 1) + \
                    ' | SNR(' + snr_line + ')=' + str(SNR)
                pngoutfile = 'results/' + obsdata_file[:-4] + "_" + \
                    str(p + 1) + '_nW_fixedT.png'
                makeplot(x, y, z, this_slice, this_bestval, xlabel, ylabel,
                         zlabel, title, pngoutfile)

        del diff, chi2, n, T, width, densefrac, mchi2, mchi2invalid, \
            mwidth, m1, m, grid_n, grid_T

    ################################################
    # write result to a new output table
    if not domcmc:
        outtable = obsdata_file[:-4] + "_nT.txt"
    else:
        outtable = obsdata_file[:-4] + "_nT_mcmc.txt"
    resultfile = "./results/" + outtable
    write_result(result, resultfile, domcmc)
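The per-pixel masking in dgt chains ma.mask_or twice to fold three rejection criteria into a single mask. Stripped of the surrounding model grid, the pattern looks like this (toy arrays and illustrative limits, not the real grid):

import numpy as np
import numpy.ma as ma

chi2_raw = np.array([0.5, np.nan, 2.0, 500.0])
densefrac = np.array([0.1, 0.2, -1.0, 0.3])
grid_n = np.array([1e2, 1e3, 1e4, 1e5])

# Mask invalid chi2 values, then chi2 outliers, then out-of-range densefrac.
chi2 = ma.masked_invalid(chi2_raw)
mchi2invalid = ma.getmask(chi2)
chi2 = ma.masked_outside(chi2, 0.0, 100.0)
mchi2 = ma.getmask(chi2)
mdense = ma.getmask(ma.masked_outside(densefrac, 0.0, 99999.0))

# Fold the three masks into one and apply it to every grid column.
m = ma.mask_or(ma.mask_or(mchi2, mdense), mchi2invalid)
n = ma.array(grid_n, mask=m)
print(n)  # only the first entry survives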
def _mask_or(a, b):
    "Combine two masks with logical OR, shrinking an all-False result to nomask."
    return ma.mask_or(a, b, shrink=True)
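A quick demonstration of what shrink=True buys: an all-False combined mask collapses to ma.nomask instead of carrying a full boolean array around.

import numpy as np
import numpy.ma as ma

a = np.array([False, False])
b = np.array([False, False])
print(_mask_or(a, b) is ma.nomask)           # True: all-False shrinks to nomask
print(_mask_or(a, np.array([True, False])))  # [ True False]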
def expand_value_coverage(input_raster, expand_raster, output_raster,
                          union=False, compress='DEFLATE', verbose=False,
                          logger=None):
    """ Expand a raster based on the occurrence of informative cells in
    another.

    Argument "union" can be used to define whether only the mask of the
    expand raster should be used, or the union of the input and expand
    raster masks.

    :param input_raster: String path to input raster.
    :param expand_raster: String path to mask raster.
    :param output_raster: String path to output raster.
    :param union: Boolean, should the masks of input_raster and
                  expand_raster be unioned.
    :param compress: String compression level used for the output raster.
    :param verbose: Boolean indicating how much information is printed out.
    :param logger: logger object to be used.
    :return Boolean success.
    """
    # 1. Setup --------------------------------------------------------------
    all_start = timer()
    if not logger:
        logging.basicConfig()
        llogger = logging.getLogger('maskvalue_coverage')
        llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
    else:
        llogger = logger

    # 2. Read and process raster ----------------------------------------------
    # First, get the mask and dtype from the expand raster
    expand_raster = rasterio.open(expand_raster)
    expand_raster_src = expand_raster.read(1, masked=True)
    expand_mask = expand_raster_src.mask

    # Read raster bands directly into Numpy arrays.
    with rasterio.open(input_raster) as raster:
        llogger.info("Reading and processing raster {}".format(input_raster))
        input_nodata = raster.nodata
        # Read in the data
        src = raster.read(1, masked=True)
        src_dtype = src.dtype
        src_mask = src.mask

        # Perform a union on the masks if needed
        if union:
            llogger.info("[NOTE] Using union of masks")
            expand_mask = ma.mask_or(expand_mask, src_mask)

        llogger.debug("Number of informative cells in the data: "
                      "{}".format(np.sum(~src_mask)))
        llogger.debug("Number of informative cells in the expand mask: "
                      "{}".format(np.sum(~expand_mask)))

        # Change the mask and the underlying values
        src.mask = expand_mask
        src.data[src.mask] = input_nodata
        # There might be some NoData values lurking around; replace them
        # with zero.
        src.data[src == input_nodata] = 0.0

        profile = raster.profile
        profile.update(dtype=src_dtype, count=1, compress=compress,
                       nodata=input_nodata)

        with rasterio.open(output_raster, 'w', **profile) as dst:
            llogger.info("Writing output raster {}".format(output_raster))
            dst.write(src.astype(src_dtype), 1)

    all_end = timer()
    all_elapsed = round(all_end - all_start, 2)
    llogger.info(" [TIME] Masking took {} sec".format(all_elapsed))
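The union branch above is plain ma.mask_or on the two band masks. The same step with in-memory arrays standing in for the rasterio bands (a hedged sketch; the -9999.0 sentinel is illustrative):

import numpy as np
import numpy.ma as ma

src = ma.masked_equal(np.array([[1.0, -9999.0], [2.0, 3.0]]), -9999.0)
expand = ma.masked_equal(np.array([[1.0, 4.0], [-9999.0, 3.0]]), -9999.0)

# A cell is masked in the union if it is masked in either band;
# getmaskarray guards against a shrunken nomask.
union_mask = ma.mask_or(ma.getmaskarray(expand), ma.getmaskarray(src))
out = ma.array(ma.getdata(src), mask=union_mask)
print(out)  # cells (0,1) and (1,0) are masked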