def _save_img(self, fnames, refs, example_file):
    g            = gdal.Open(example_file)
    projection   = g.GetProjection()
    geotransform = g.GetGeoTransform()
    bands_refs   = zip(fnames, refs)
    f = lambda band_ref: self._save_band(band_ref, projection=projection, geotransform=geotransform)
    parmap(f, bands_refs)
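# `parmap` is used throughout these routines but is not defined in this section.
# The sketch below shows what such a helper might look like: a plain parallel map
# that runs at most `nprocs` child processes at a time. It is an assumption for
# illustration only (it relies on a fork-based start method so the lambdas passed
# around in this module need not be pickled), not the module's actual implementation.
from multiprocessing import Process, Pipe

def parmap(f, X, nprocs=4):
    X = list(X)
    def worker(conn, x):
        conn.send(f(x))
        conn.close()
    results = []
    for start in range(0, len(X), nprocs):
        chunk = X[start:start + nprocs]
        pipes = [Pipe() for _ in chunk]
        procs = [Process(target=worker, args=(child, x))
                 for x, (parent, child) in zip(chunk, pipes)]
        for p in procs:
            p.start()
        results += [parent.recv() for parent, _ in pipes]
        for p in procs:
            p.join()
    return results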
def _get_boa(self,):
    pix_mem = 180.
    av_ram  = psutil.virtual_memory().available
    needed  = np.array([i.RasterXSize * i.RasterYSize * pix_mem for i in self._toa_bands])
    u_need  = np.unique(needed)
    procs   = av_ram / u_need
    if av_ram > sum(needed):
        #ret = parmap(self._do_band, range(len(self.toa_bands)))
        self._chunks = 1
        ret = parmap(self._do_chunk, range(len(self.toa_bands)))
    else:
        ret   = []
        index = []
        for i, proc in enumerate(procs):
            bands_to_do = np.arange(len(self.toa_bands))[needed == u_need[i]]
            if int(proc) >= 1:
                self._chunks = 1
                re = parmap(self._do_chunk, bands_to_do, min(int(proc), len(bands_to_do)))
            else:
                self._chunks = int(np.ceil(1. / proc))
                re = list(map(self._do_band, bands_to_do))
            ret   += re
            index += (np.where(needed == u_need[i])[0]).tolist()
        ret = list(zip(*sorted(zip(index, ret))))[1]
    self.boa_rgb = ret[self.ri], ret[self.gi], ret[self.bi]
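# Illustrative only: how the memory heuristic in _get_boa decides between doing
# whole bands at once and splitting each band into chunks. The numbers below
# (8 GB of free RAM, six 10980 x 10980 bands, 180 bytes per pixel) are assumed
# purely for the example.
import numpy as np

pix_mem   = 180.
available = 8e9
needed    = np.array([10980 * 10980 * pix_mem] * 6)
procs     = available / np.unique(needed)              # how many bands fit in RAM at once
chunks    = np.where(procs >= 1, 1, np.ceil(1. / procs)).astype(int)
print(procs, chunks)                                   # ~0.37 bands at a time -> 3 chunks per band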
def _doing_one_file(self, modis_file, timestamp):
    self.modis_logger.info('Doing %s.' % modis_file.b1.split('/')[-1].split('_EV_')[0])
    band_files  = [getattr(modis_file, 'b%d' % band) for band in range(1, 8)]
    angle_files = [getattr(modis_file, ang) for ang in ['vza', 'sza', 'vaa', 'saa']]
    modis_toa   = []
    modis_angle = []
    f = lambda fname: gdal.Open(fname).ReadAsArray()
    self.modis_logger.info('Reading in MODIS TOA.')
    modis_toa = parmap(f, band_files)
    self.modis_logger.info('Reading in angles.')
    modis_angle = parmap(f, angle_files)
    scale  = np.array(modis_file.scale)
    offset = np.array(modis_file.offset)
    self.modis_toa   = np.array(modis_toa) * np.array(scale)[:, None, None] + offset[:, None, None]
    self.modis_angle = np.array(modis_angle) / 100.
    self.example_file = band_files[0]
    self.sen_time     = timestamp
    self.solving_modis_aerosol()
def _save_img(self, refs, bands):
    g            = gdal.Open(self.s2.s2_file_dir + '/%s.jp2' % bands[0])
    projection   = g.GetProjection()
    geotransform = g.GetGeoTransform()
    bands_refs   = zip(bands, refs)
    f = lambda band_ref: self._save_band(band_ref, projection=projection, geotransform=geotransform)
    parmap(f, bands_refs)
def __init__(self, toa_dir, tile, year, month, day, bands=None,
             angle_exe='/home/ucfafyi/DATA/S2_MODIS/l_data/l8_angles/l8_angles'):
    self.toa_dir = toa_dir
    self.tile    = tile
    self.year    = year
    self.month   = month
    self.day     = day
    if bands is None:
        self.bands = np.arange(1, 8)
    else:
        self.bands = np.array(bands)
    self.angle_exe = angle_exe
    composite = glob(self.toa_dir + '/LC08_L1TP_%03d%03d_%04d%02d%02d_*_01_??_b1.tif' \
                     % (self.tile[0], self.tile[1], self.year, self.month, self.day))[0].split('/')[-1].split('_')[:-1]
    self.header    = '_'.join(composite)
    self.toa_file  = [self.toa_dir + '/%s_b%d.tif' % (self.header, i) for i in self.bands]
    self.mete_file = self.toa_dir + '/%s_MTL.txt' % self.header
    self.qa_file   = self.toa_dir + '/%s_bqa.tif' % self.header
    try:
        self.saa_sza = [glob(self.toa_dir + '/%s_solar_B%02d.img'  % (self.header, i))[0] for i in self.bands]
        self.vaa_vza = [glob(self.toa_dir + '/%s_sensor_B%02d.img' % (self.header, i))[0] for i in self.bands]
    except:
        ang_file = self.toa_dir + '/%s_ANG.txt' % self.header
        cwd = os.getcwd()
        os.chdir(self.toa_dir)
        f = lambda band: subprocess.call([self.angle_exe, ang_file, \
                                          'BOTH', '1', '-f', '-32768', '-b', str(band)])
        parmap(f, self.bands)
        os.chdir(cwd)
        self.saa_sza = [self.toa_dir + '/%s_solar_B%02d.img'  % (self.header, i) for i in self.bands]
        self.vaa_vza = [self.toa_dir + '/%s_sensor_B%02d.img' % (self.header, i) for i in self.bands]
    try:
        scale, offset = self._get_scale()
    except:
        raise IOError('Failed to read in scaling factors.')
def _get_angles(self, ):
    saa, sza = np.array(parmap(gdal_reader, self.saa_sza)).astype(float).transpose(1, 0, 2, 3) / 100.
    vaa, vza = np.array(parmap(gdal_reader, self.vaa_vza)).astype(float).transpose(1, 0, 2, 3) / 100.
    saa = np.ma.array(saa, mask=((saa > 180) | (saa < -180)))
    sza = np.ma.array(sza, mask=((sza > 90)  | (sza < 0)))
    vaa = np.ma.array(vaa, mask=((vaa > 180) | (vaa < -180)))
    vza = np.ma.array(vza, mask=((vza > 90)  | (vza < 0)))
    saa.mask = sza.mask = vaa.mask = vza.mask = (saa.mask | sza.mask | vaa.mask | vza.mask)
    return saa, sza, vaa, vza
def fire_shift_optimize(self, ):
    #self.S2_PSF_optimization()
    self._preprocess()
    if self.lh_mask.sum() == 0:
        self.costs = np.array([100000000000., ])
        return 0, 0
    min_val = [-50, -50]
    max_val = [50, 50]
    ps, distributions = create_training_set(['xs', 'ys'], min_val, max_val, n_train=50)
    self.shift_solved = parmap(self.shift_optimize, ps)
    self.paras, self.costs = np.array([i[0] for i in self.shift_solved]), \
                             np.array([i[1] for i in self.shift_solved])
    if (1 - self.costs.min()) >= 0.6:
        xs, ys = self.paras[self.costs == np.nanmin(self.costs)][0].astype(int)
    else:
        xs, ys = 0, 0
    #print 'Best shift is ', xs, ys, 'with the correlation of', 1-self.costs.min()
    return xs, ys
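# A minimal sketch of the kind of cost the shift_optimize calls above are assumed
# to return: the proposed (xs, ys) shift plus 1 minus the Pearson correlation
# between the shifted high-resolution samples and the low-resolution reference,
# so that a perfect match costs 0. Names and bounds handling are illustrative only.
import numpy as np

def shift_cost(shift, high_img, low_img, hx, hy):
    xs, ys = np.round(shift).astype(int)
    vals   = high_img[hx + xs, hy + ys]        # bounds checking omitted in this sketch
    r      = np.corrcoef(vals, low_img)[0, 1]
    return (xs, ys), 1. - r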
def fire_correction(self, toa, sza, vza, saa, vaa, aod, tcwv, tco3, elevation, band_indexs):
    self._toa         = toa
    self._sza         = sza
    self._vza         = vza
    self._saa         = saa
    self._vaa         = vaa
    self._aod         = aod
    self._tcwv        = tcwv
    self._tco3        = tco3
    self._elevation   = elevation
    self._band_indexs = band_indexs
    rows    = np.repeat(np.arange(self._num_blocks), self._num_blocks)
    columns = np.tile(np.arange(self._num_blocks), self._num_blocks)
    blocks  = zip(rows, columns)
    #self._s2_block_correction_emus_xa_xb_xc([1, 1])
    ret = parmap(self._s2_block_correction_emus_xa_xb_xc, blocks)
    #ret = parmap(self._s2_block_correction_6s, blocks)
    #ret = parmap(self._s2_block_correction_emus, blocks)
    self.boa = np.array([i[2] for i in ret]).reshape(self._num_blocks, self._num_blocks, toa.shape[0], \
               self._block_size, self._block_size).transpose(2,0,3,1,4).reshape(toa.shape[0], \
               self._num_blocks*self._block_size, self._num_blocks*self._block_size)
    del self._toa
    del self._sza
    del self._vza
    del self._saa
    del self._vaa
    del self._aod
    del self._tcwv
    del self._tco3
    del self._elevation
def _get_ddv_aot(self, toa, l8, tcwv, tco3, ele_data):
    ndvi_mask = (((toa[3] - 0.5 * toa[5]) / (toa[3] + 0.5 * toa[5])) > 0.5) & \
                (toa[5] > 0.01) & (toa[5] < 0.25) & (~self.dcloud)
    if ndvi_mask.sum() < 100:
        self.logger.info('Not enough DDV pixels found in this scene for AOT retrieval; only the CAMS prediction is used.')
    else:
        Hx, Hy = np.where(ndvi_mask)
        if ndvi_mask.sum() > 1000:
            random_choice = np.random.choice(len(Hx), 1000, replace=False)
            random_choice.sort()
            Hx, Hy = Hx[random_choice], Hy[random_choice]
            ndvi_mask[:] = False
            ndvi_mask[Hx, Hy] = True
        Hx, Hy   = np.where(ndvi_mask)
        blue_vza = np.cos(np.deg2rad(self.vza[0, Hx, Hy]))
        blue_sza = np.cos(np.deg2rad(self.sza[0, Hx, Hy]))
        red_vza  = np.cos(np.deg2rad(self.vza[2, Hx, Hy]))
        red_sza  = np.cos(np.deg2rad(self.sza[2, Hx, Hy]))
        blue_raa = np.cos(np.deg2rad(self.vaa[0, Hx, Hy] - self.saa[0, Hx, Hy]))
        red_raa  = np.cos(np.deg2rad(self.vaa[2, Hx, Hy] - self.saa[2, Hx, Hy]))
        red, blue = toa[2, Hx, Hy], toa[0, Hx, Hy]
        swif      = toa[5, Hx, Hy]
        red_emus  = np.array(self.emus)[:, 3]
        blue_emus = np.array(self.emus)[:, 1]
        zero_aod  = np.zeros_like(red)
        red_inputs  = np.array([red_sza,  red_vza,  red_raa,  zero_aod, tcwv[Hx, Hy], tco3[Hx, Hy], ele_data[Hx, Hy]])
        blue_inputs = np.array([blue_sza, blue_vza, blue_raa, zero_aod, tcwv[Hx, Hy], tco3[Hx, Hy], ele_data[Hx, Hy]])
        p = np.r_[np.arange(0, 1., 0.02), np.arange(1., 1.5, 0.05), np.arange(1.5, 2., 0.1)]
        f = lambda aot: self._ddv_cost(aot, blue, red, swif, blue_inputs, red_inputs, blue_emus, red_emus)
        costs   = parmap(f, p)
        min_ind = np.argmin(costs)
        self.logger.info('DDV-solved AOD is %.02f; it will be used as the mean value of the CAMS prediction.' % p[min_ind])
        self.aot[:] = p[min_ind]
def fire_gaus_optimize(self, ):
    xs, ys = self.fire_shift_optimize()
    if self.costs.min() < 0.1:
        min_val = [4, 4, -15, xs - 2, ys - 2]
        max_val = [40, 40, 15, xs + 2, ys + 2]
        self.bounds = [4, 40], [4, 40], [-15, 15], [xs - 2, xs + 2], [ys - 2, ys + 2]
        ps, distributions = create_training_set(self.parameters, min_val, max_val, n_train=50)
        print('Start solving...')
        self.gaus_solved = parmap(self.gaus_optimize, ps, nprocs=5)
        result = np.array([np.hstack((i[0], i[1])) for i in self.gaus_solved])
        print('solved psf', dict(zip(self.parameters + ['cost', ], result[np.argmin(result[:, -1])])))
        return result[np.argmin(result[:, -1]), :]
    else:
        print('Cost is too large, please check!')
        return []
def _fill_nan(self, ):
    def fill_nan(array):
        x_shp, y_shp = array.shape
        mask  = ~np.isnan(array)
        valid = np.array(np.where(mask)).T
        value = array[mask]
        mesh  = np.repeat(range(x_shp), y_shp).reshape(x_shp, y_shp), \
                np.tile  (range(y_shp), x_shp).reshape(x_shp, y_shp)
        array = griddata(valid, value, mesh, method='nearest')
        return array
    self._vza = np.array(parmap(fill_nan, list(self._vza)))
    self._vaa = np.array(parmap(fill_nan, list(self._vaa)))
    self._saa, self._sza, self._ele, self._aot, self._tcwv, self._tco3 = \
        parmap(fill_nan, [self._saa, self._sza, self._ele, self._aot, self._tcwv, self._tco3])
    self._aot = self._aot * 1.3 - 0.08
    self._aot = np.maximum(self._aot, 0)
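# Quick, self-contained check of the nearest-neighbour gap filling used by
# fill_nan above (illustrative array only): NaNs are replaced by the value of
# their nearest valid neighbour.
import numpy as np
from scipy.interpolate import griddata

arr    = np.array([[1., np.nan], [np.nan, 4.]])
mask   = ~np.isnan(arr)
points = np.array(np.where(mask)).T
filled = griddata(points, arr[mask], tuple(np.indices(arr.shape)), method='nearest')
print(filled)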
def _fill_nan(self,):
    def fill_nan(array):
        x_shp, y_shp = array.shape
        mask  = ~np.isnan(array)
        valid = np.array(np.where(mask)).T
        value = array[mask]
        mesh  = np.repeat(range(x_shp), y_shp).reshape(x_shp, y_shp), \
                np.tile  (range(y_shp), x_shp).reshape(x_shp, y_shp)
        array = griddata(valid, value, mesh, method='nearest')
        return array
    self._vza = np.array(parmap(fill_nan, list(self._vza)))
    self._vaa = np.array(parmap(fill_nan, list(self._vaa)))
    self._saa, self._sza, self._ele, self._aot, self._tcwv, self._tco3, self._aot_unc, self._tcwv_unc, self._tco3_unc = \
        parmap(fill_nan, [self._saa, self._sza, self._ele, self._aot, self._tcwv, self._tco3,
                          self._aot_unc, self._tcwv_unc, self._tco3_unc])
    self._aot_unc  = array_to_raster(self._aot_unc,  self.example_file)
    self._tcwv_unc = array_to_raster(self._tcwv_unc, self.example_file)
    self._tco3_unc = array_to_raster(self._tco3_unc, self.example_file)
def _load_xa_xb_xc_emus(self, ):
    xap_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xap.pkl' % (self.s2_sensor))[0]
    xbp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xbp.pkl' % (self.s2_sensor))[0]
    xcp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xcp.pkl' % (self.s2_sensor))[0]
    f = lambda em: pkl.load(open(em, 'rb'))
    self.emus = parmap(f, [xap_emu, xbp_emu, xcp_emu])
def _load_xa_xb_xc_emus(self,):
    xap_emu = glob(self.emus_dir + '/isotropic_%s_emulators_correction_xap_%s.pkl' % (self.sensor, self.satellite))[0]
    xbp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_correction_xbp_%s.pkl' % (self.sensor, self.satellite))[0]
    xcp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_correction_xcp_%s.pkl' % (self.sensor, self.satellite))[0]
    if sys.version_info >= (3, 0):
        f = lambda em: pkl.load(open(em, 'rb'), encoding='latin1')
    else:
        f = lambda em: pkl.load(open(em, 'rb'))
    self.xap_emus, self.xbp_emus, self.xcp_emus = parmap(f, [xap_emu, xbp_emu, xcp_emu])
def fire_shift_optimize(self,):
    #self.S2_PSF_optimization()
    min_val = [-50, -50]
    max_val = [50, 50]
    ps, distributions = create_training_set(['xs', 'ys'], min_val, max_val, n_train=50)
    self.shift_solved = parmap(self.shift_optimize, ps, nprocs=10)
    self.paras, self.costs = np.array([i[0] for i in self.shift_solved]), \
                             np.array([i[1] for i in self.shift_solved])
    xs, ys = self.paras[self.costs == self.costs.min()][0].astype(int)
    print('Best shift is', xs, ys, 'with the correlation of', 1 - self.costs.min())
    return xs, ys
def atmospheric_correction(self, ):
    self.logger.propagate = False
    self.sensor = 'OLI'
    self.logger.info('Loading emulators.')
    self._load_xa_xb_xc_emus()
    l8 = read_l8(self.l8_toa_dir, self.l8_tile, self.year, self.month, self.day, bands=self.bands)
    self.l8_header    = l8.header
    self.example_file = self.l8_toa_dir + '/%s_b%d.tif' % (l8.header, 1)
    self.logger.info('Reading in the reflectance.')
    self.toa = l8._get_toa()
    self.logger.info('Reading in the angles.')
    self.saa, self.sza, self.vaa, self.vza = l8._get_angles()
    self.saa[self.saa.mask] = self.sza[self.sza.mask] = \
        self.vaa[self.vaa.mask] = self.vza[self.vza.mask] = np.nan
    self.aot, self.tcwv, self.tco3, self.ele = self._get_control_variables()
    self.shape = self.toa.shape[1:3]
    self._block_size = 3000
    self._num_blocks_x, self._num_blocks_y = int(np.ceil(1. * self.shape[0] / self._block_size)), \
                                             int(np.ceil(1. * self.shape[1] / self._block_size))
    self._mean_size = 30
    rows    = np.repeat(np.arange(self._num_blocks_x), self._num_blocks_y)
    columns = np.tile(np.arange(self._num_blocks_y), self._num_blocks_x)
    blocks  = zip(rows, columns)
    self.logger.info('Doing correction.')
    ret = parmap(self._block_correction_emus_xa_xb_xc, blocks)
    self.boa = np.array([i[2] for i in ret]).reshape(self._num_blocks_x, self._num_blocks_y, self.toa.shape[0], \
               self._block_size, self._block_size).transpose(2,0,3,1,4).reshape(self.toa.shape[0], \
               self._num_blocks_x*self._block_size, self._num_blocks_y*self._block_size)[:, :self.shape[0], :self.shape[1]]
    self.boa[:, gdal.Open(self.l8_toa_dir + '/%s_bqa.tif' % l8.header).ReadAsArray() == 1] = np.nan
    self.toa[:, gdal.Open(self.l8_toa_dir + '/%s_bqa.tif' % l8.header).ReadAsArray() == 1] = np.nan
    self.boa_rgb = np.clip(self.boa[[3, 2, 1]].transpose(1, 2, 0)      * 255 / 0.255, 0, 255).astype(np.uint8)
    self.toa_rgb = np.clip(self.toa[[3, 2, 1]].data.transpose(1, 2, 0) * 255 / 0.255, 0, 255).astype(np.uint8)
    self.logger.info('Saving corrected results.')
    self._save_rgb(self.toa_rgb, 'TOA_RGB', self.example_file)
    self._save_rgb(self.boa_rgb, 'BOA_RGB', self.example_file)
    self._save_img(self.boa, self.bands)
def _optimization(self, ):
    #p0  = np.zeros((self.num_blocks, self.num_blocks)).ravel()
    #bot = np.zeros((self.num_blocks, self.num_blocks)).ravel()
    #up  = np.zeros((self.num_blocks, self.num_blocks)).ravel()
    #up[:] = 2
    #bounds = np.array([bot, up]).T
    #p0[:] = 0.3
    p = np.r_[np.arange(0, 1., 0.02), np.arange(1., 1.5, 0.05), np.arange(1.5, 2., 0.1)]
    costs = parmap(self._cost, p)
    min_ind = np.argmin(costs)
    return p[min_ind], costs[min_ind]
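# The AOT candidate grid used by _optimization, written out for reference:
# fine 0.02 steps up to 1.0, then 0.05 steps to 1.5, then 0.1 steps to 2.0,
# giving 65 candidates between 0 and 1.9.
import numpy as np
p = np.r_[np.arange(0, 1., 0.02), np.arange(1., 1.5, 0.05), np.arange(1.5, 2., 0.1)]
print(len(p), p.min(), p.max())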
def _obs_cost_test(self, p, is_full=True):
    p = np.array(p).reshape(3, -1)
    X = self.control_variables.reshape(self.boa.shape[0], 7, -1)
    X[:, 3:6, :] = np.array(p)
    xap_H,  xbp_H,  xcp_H  = [], [], []
    xap_dH, xbp_dH, xcp_dH = [], [], []
    emus = list(self.xap_emus) + list(self.xbp_emus) + list(self.xcp_emus)
    Xs   = list(X) + list(X) + list(X)
    inps = zip(emus, Xs)
    ret  = np.array(parmap(self._helper, inps))
    xap_H,  xbp_H,  xcp_H  = ret[:, :, 0 ].reshape(3, self.boa.shape[0], len(self.resample_hx))
    xap_dH, xbp_dH, xcp_dH = ret[:, :, 1:].reshape(3, self.boa.shape[0], len(self.resample_hx), 3)
    y       = xap_H * self.toa - xbp_H
    sur_ref = y / (1 + xcp_H * y)
    diff    = sur_ref - self.boa
    full_J  = np.nansum(0.5 * self.band_weights[..., None] * (diff)**2 / self.boa_unc**2, axis=0)
    J = np.zeros(self.full_res)
    J[self.Hx, self.Hy] = full_J
    J = np.nansum(J.reshape(self.num_blocks_x, self.block_size, \
                            self.num_blocks_y, self.block_size).sum(axis=(3,1)) * self.mask)
    #dH = -1 * (-self.toa[...,None] * xap_dH + xcp_dH * (xbp_H[...,None] - xap_H[...,None] * self.toa[...,None])**2 + \
    #     xbp_dH) / (self.toa[...,None] * xap_H[...,None] * xcp_H[...,None] - xbp_H[...,None] * xcp_H[...,None] + 1)**2
    dH = -1 * (-self.toa[...,None] * xap_dH - \
               2 * self.toa[...,None] * xap_H[...,None] * xbp_H[...,None] * xcp_dH + \
               self.toa[...,None]**2 * xap_H[...,None]**2 * xcp_dH + \
               xbp_dH + \
               xbp_H[...,None]**2 * xcp_dH) / \
              (self.toa[...,None] * xap_H[...,None] * xcp_H[...,None] - \
               xbp_H[...,None] * xcp_H[...,None] + 1)**2
    full_dJ = [self.band_weights[..., None] * dH[:, :, i] * diff / (self.boa_unc**2) for i in range(3)]
    if is_full:
        dJ = np.nansum(np.array(full_dJ), axis=(1, ))
        J_ = np.zeros((3, ) + self.full_res)
        J_[:, self.Hx, self.Hy] = dJ
        J_ = np.nansum(J_.reshape(3, self.num_blocks_x, self.block_size, \
                                  self.num_blocks_y, self.block_size), axis=(4,2))
        J_[:, ~self.mask] = 0
        J_ = J_.reshape(3, -1)
    else:
        J_ = np.nansum(np.array(full_dJ), axis=(1, 2))
    return J, J_
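# The per-pixel inversion used in _obs_cost_test, isolated for clarity: given the
# three emulated coefficients (xa, xb, xc) and a TOA reflectance, the surface
# reflectance follows y = xa * toa - xb, sur = y / (1 + xc * y). A small helper
# (name is illustrative) makes the relationship explicit.
def toa_to_sur(toa, xa, xb, xc):
    y = xa * toa - xb
    return y / (1. + xc * y)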
def _doing_one_file(self, modis_file, timestamp):
    self.logger.info('Doing %s.' % modis_file.b1.split('/')[-1].split('_EV_')[0])
    band_files  = [getattr(modis_file, 'b%d' % band) for band in range(1, 8)]
    angle_files = [getattr(modis_file, ang) for ang in ['sza', 'vza', 'saa', 'vaa']]
    modis_toa   = []
    modis_angle = []
    f = lambda fname: gdal.Open(fname).ReadAsArray()
    self.logger.info('Reading in MODIS TOA.')
    modis_toa = parmap(f, band_files)
    self.logger.info('Reading in angles.')
    modis_angle = parmap(f, angle_files)
    scale  = np.array(modis_file.scale)
    offset = np.array(modis_file.offset)
    self.modis_toa   = np.array(modis_toa) * np.array(scale)[:, None, None] + offset[:, None, None]
    self.modis_angle = np.array(modis_angle) / 100.
    self.example_file = band_files[0]
    self.sen_time     = timestamp
    self.logger.info('Getting control variables.')
    self.aod, self.tcwv, self.tco3, self.ele = self.get_control_variables()
    self._block_size = 480
    self._num_blocks = 2400 // self._block_size
    self._mean_size  = 6
    self.band_indexs = [0, 1, 2, 3, 4, 5, 6]
    self.logger.info('Fire correction, split into %d blocks.' % self._num_blocks**2)
    self.fire_correction(self.modis_toa, self.modis_angle[0], self.modis_angle[1], self.modis_angle[2], \
                         self.modis_angle[3], self.aod, self.tcwv, self.tco3, self.ele, self.band_indexs)
def _get_toa(self, ):
    try:
        scale, offset = self._get_scale()
    except:
        raise IOError('Failed to read in scaling factors.')
    bands_scale  = scale[self.bands - 1]
    bands_offset = offset[self.bands - 1]
    toa = np.array(parmap(gdal_reader, self.toa_file)).astype(float) * \
          bands_scale[..., None, None] + bands_offset[..., None, None]
    qa_mask = self._get_qa()
    sza = self._get_angles()[1]
    toa = toa / np.cos(np.deg2rad(sza))
    toa_mask = toa < 0
    mask = qa_mask | toa_mask | sza.mask
    toa = np.ma.array(toa, mask=mask)
    return toa
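# Illustrative summary of the TOA conversion in _get_toa: digital numbers are
# scaled and offset with the MTL coefficients, then normalised by the cosine of
# the solar zenith angle. The helper name is hypothetical.
import numpy as np

def dn_to_toa(dn, scale, offset, sza_deg):
    return (dn * scale + offset) / np.cos(np.deg2rad(sza_deg))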
def _read_MCD43(self, fnames):
    def warp_data(fname, aoi, xRes, yRes):
        g = gdal.Warp('', fname, format='MEM', dstNodata=0, cutlineDSName=aoi, xRes=xRes,
                      yRes=yRes, cropToCutline=True, resampleAlg=gdal.GRIORA_NearestNeighbour)
        return g.ReadAsArray()
    par = partial(warp_data, aoi=self.aoi, xRes=self.aero_res * 0.5, yRes=self.aero_res * 0.5)
    n_files = int(len(fnames) / 2)
    ret = parmap(par, fnames)
    das = np.array(ret[:n_files])
    qas = np.array(ret[n_files:])
    ws  = 0.618034**qas
    ws[qas == 255] = 0
    das[das == 32767] = 0
    return das, ws
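# Illustrative: the QA-to-weight mapping used in _read_MCD43. The best MCD43 QA
# value (0) keeps full weight, each worse level is down-weighted by the golden
# ratio factor 0.618..., and the 255 fill value is masked out entirely.
import numpy as np
qas = np.array([0, 1, 2, 255])
ws  = 0.618034 ** qas
ws[qas == 255] = 0
print(ws)          # [1.0, 0.618, 0.382, 0.0] (approximately)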
def _load_xa_xb_xc_emus(self, ):
    if self.blue_emus is None:
        xap_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xap.pkl' % (self.sensor))[0]
        xbp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xbp.pkl' % (self.sensor))[0]
        xcp_emu = glob(self.emus_dir + '/isotropic_%s_emulators_*_xcp.pkl' % (self.sensor))[0]
        f = lambda em: pkl.load(open(em, 'rb'))
        self.xap_emus, self.xbp_emus, self.xcp_emus = parmap(f, [xap_emu, xbp_emu, xcp_emu])
        self.blue_xap_emu, self.blue_xbp_emu, self.blue_xcp_emu = self.xap_emus[self.blue_in], \
            self.xbp_emus[self.blue_in], self.xcp_emus[self.blue_in]
        self.red_xap_emu, self.red_xbp_emu, self.red_xcp_emu = self.xap_emus[self.red_in], \
            self.xbp_emus[self.red_in], self.xcp_emus[self.red_in]
    else:
        self.blue_xap_emu, self.blue_xbp_emu, self.blue_xcp_emu = self.blue_emus
        self.red_xap_emu, self.red_xbp_emu, self.red_xcp_emu = self.red_emus
def fire_correction(self, toa, sza, vza, saa, vaa, aod, tcwv, tco3, elevation, band_indexs):
    self._toa         = toa
    self._sza         = sza
    self._vza         = vza
    self._saa         = saa
    self._vaa         = vaa
    self._aod         = aod
    self._tcwv        = tcwv
    self._tco3        = tco3
    self._elevation   = elevation
    self._band_indexs = band_indexs
    rows    = np.repeat(np.arange(self._num_blocks), self._num_blocks)
    columns = np.tile(np.arange(self._num_blocks), self._num_blocks)
    blocks  = zip(rows, columns)
    ret = parmap(self._block_correction_emus_xa_xb_xc, blocks)
    self.sur_ref = np.array([i[2] for i in ret]).reshape(self._num_blocks, self._num_blocks, toa.shape[0], \
                   self._block_size, self._block_size).transpose(2,0,3,1,4).reshape(toa.shape[0], \
                   self._num_blocks*self._block_size, self._num_blocks*self._block_size)
    self._save_img(self.sur_ref, [1, 2, 3, 4, 5, 6, 7])
    self.boa_rgb = np.clip(self.sur_ref[[0, 3, 2], ...].transpose(1, 2, 0) * 255 / 0.25, 0, 255.).astype(np.uint8)
    self.toa_rgb = np.clip(self._toa[[0, 3, 2], ...].transpose(1, 2, 0)    * 255 / 0.25, 0, 255.).astype(np.uint8)
    self._save_rgb(self.boa_rgb, 'BOA_RGB')
    self._save_rgb(self.toa_rgb, 'TOA_RGB')
    del self._toa
    del self._sza
    del self._vza
    del self._saa
    del self._vaa
    del self._aod
    del self._tcwv
    del self._tco3
    del self._elevation
def _get_convolved_toa(self, ):
    imgs = [band_g.ReadAsArray() for band_g in self._toa_bands]
    self.bad_pixs = self.bad_pix[self.hx, self.hy]
    xgaus = np.exp(-2. * (np.pi**2) * (self.psf_xstd**2) * ((0.5 * np.arange(self.full_res[0]) / self.full_res[0])**2))
    ygaus = np.exp(-2. * (np.pi**2) * (self.psf_ystd**2) * ((0.5 * np.arange(self.full_res[1]) / self.full_res[1])**2))
    gaus_2d = np.outer(xgaus, ygaus)
    def convolve(img, gaus_2d, hx, hy):
        dat = idct(idct(dct(dct(img, axis=0, norm='ortho'), axis=1, \
                            norm='ortho') * gaus_2d, axis=1, norm='ortho'), axis=0, norm='ortho')[hx, hy]
        return dat
    par = partial(convolve, gaus_2d=gaus_2d, hx=self.hx, hy=self.hy)
    if np.array(self.ref_scale).ndim == 2:
        self.ref_scale = self.ref_scale[self.hx, self.hy]
    if np.array(self.ref_off).ndim == 2:
        self.ref_off = self.ref_off[self.hx, self.hy]
    self.toa = np.array(parmap(par, imgs)) * self.ref_scale + self.ref_off
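# Minimal sketch of the DCT-domain Gaussian smoothing used in _get_convolved_toa,
# on a small random image (sizes and standard deviations are illustrative):
# forward DCT along both axes, multiply by the separable Gaussian transfer
# function, inverse DCT back.
import numpy as np
from scipy.fftpack import dct, idct

img  = np.random.rand(100, 120)
xstd, ystd = 10., 15.
nx, ny = img.shape
xg = np.exp(-2. * (np.pi**2) * (xstd**2) * ((0.5 * np.arange(nx) / nx)**2))
yg = np.exp(-2. * (np.pi**2) * (ystd**2) * ((0.5 * np.arange(ny) / ny)**2))
gaus_2d  = np.outer(xg, yg)
smoothed = idct(idct(dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
                     * gaus_2d, axis=1, norm='ortho'), axis=0, norm='ortho')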
def solving_s2_aerosol(self, ): self.s2_logger = logging.getLogger('Sentinel 2 Atmospheric Correction') self.s2_logger.setLevel(logging.INFO) if not self.s2_logger.handlers: ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) self.s2_logger.addHandler(ch) self.s2_logger.propagate = False self.s2_sensor = 'MSI' self.s2_logger.info('Doing Sentinel 2 tile: %s on %d-%02d-%02d.' % (self.s2_tile, self.year, self.month, self.day)) self._s2_aerosol() self.s2_solved = [] if self.aero_res < 500: self.s2_logger.warning( 'The best resolution of aerosol should be larger \ than 500 meters (inlcude), so it is set to 500 meters.' ) self.aero_res = 500 self.block_size = int(self.aero_res / 10) num_blocks = int(np.ceil(10980 / self.block_size)) self.s2_logger.info('Start solving...') #for i in range(num_blocks): # for j in range(num_blocks): # self.s2_logger.info('Doing block %03d-%03d.'%(i+1,j+1)) # self._s2_block_solver([i,j]) blocks = zip(np.repeat(range(num_blocks), num_blocks), np.tile(range(num_blocks), num_blocks)) self.s2_solved = parmap(self._s2_block_solver, blocks) inds = np.array([[i[0], i[1]] for i in self.s2_solved]) rets = np.array([i[2][0] for i in self.s2_solved]) aod_map = np.zeros((num_blocks, num_blocks)) aod_map[:] = np.nan tcwv_map = aod_map.copy() tco3_map = aod_map.copy() aod_map[inds[:, 0], inds[:, 1]] = rets[:, 0] tcwv_map[inds[:, 0], inds[:, 1]] = rets[:, 1] tco3_map[inds[:, 0], inds[:, 1]] = rets[:, 2] aod_map, tcwv_map, tco3_map = np.where(~np.isnan(aod_map), aod_map, np.nanmean(aod_map)), \ np.where(~np.isnan(tcwv_map), tcwv_map, np.nanmean(tcwv_map)), \ np.where(~np.isnan(tco3_map), tco3_map, np.nanmean(tco3_map)) para_names = 'aot', 'tcwv', 'tco3' g = gdal.Open(self.s2_file_dir + '/B04.jp2') xmin, ymax = g.GetGeoTransform()[0], g.GetGeoTransform()[3] projection = g.GetProjection() results = [] self.s2_logger.info( 'Finished retrieval and saving them into local files.') for i, para_map in enumerate([aod_map, tcwv_map, tco3_map]): s = smoothn(para_map.copy(), isrobust=True, verbose=False)[1] smed = smoothn(para_map.copy(), isrobust=True, verbose=False, s=s)[0] xres, yres = self.block_size * 10, self.block_size * 10 geotransform = (xmin, xres, 0, ymax, 0, -yres) nx, ny = smed.shape dst_ds = gdal.GetDriverByName('GTiff').Create(self.s2_file_dir + \ '/%s.tif'%para_names[i], ny, nx, 1, gdal.GDT_Float32) dst_ds.SetGeoTransform(geotransform) dst_ds.SetProjection(projection) dst_ds.GetRasterBand(1).WriteArray(smed) dst_ds.FlushCache() dst_ds = None results.append(smed) self.aot_map, self.tcwv_map, self.tco3_map = results
def _s2_aerosol(self, ): self.s2_logger.propagate = False self.s2_logger.info('Start to retrieve atmospheric parameters.') self.s2 = read_s2(self.s2_toa_dir, self.s2_tile, self.year, self.month, self.day, self.s2_u_bands) self.s2_logger.info('Reading in TOA reflectance.') selected_img = self.s2.get_s2_toa() self.s2_file_dir = self.s2.s2_file_dir self.s2.get_s2_cloud() self.s2_logger.info( 'Find corresponding pixels between S2 and MODIS tiles') tiles = Find_corresponding_pixels(self.s2.s2_file_dir + '/B04.jp2', destination_res=500) if len(tiles.keys()) > 1: self.s2_logger.info('This sentinel 2 tile covers %d MODIS tile.' % len(tiles.keys())) self.mcd43_files = [] szas, vzas, saas, vaas, raas = [], [], [], [], [] boas, boa_qas, brdf_stds, Hxs, Hys = [], [], [], [], [] for key in tiles.keys(): #h,v = int(key[1:3]), int(key[-2:]) self.s2_logger.info('Getting BOA from MODIS tile: %s.' % key) mcd43_file = glob(self.mcd43_tmp % (self.mcd43_dir, self.year, self.doy, key))[0] self.mcd43_files.append(mcd43_file) self.H_inds, self.L_inds = tiles[key] Lx, Ly = self.L_inds Hx, Hy = self.H_inds Hxs.append(Hx) Hys.append(Hy) self.s2_logger.info( 'Getting the angles and simulated surface reflectance.') self.s2.get_s2_angles(self.reconstruct_s2_angle) self.s2_angles = np.zeros((4, 6, len(Hx))) for j, band in enumerate(self.s2_u_bands[:-2]): self.s2_angles[[0,2],j,:] = self.s2.angles['vza'][band][Hx, Hy], \ self.s2.angles['vaa'][band][Hx, Hy] self.s2_angles[[1,3],j,:] = self.s2.angles['sza'][Hx, Hy], \ self.s2.angles['saa'][Hx, Hy] #use mean value to fill bad values for i in range(4): mask = ~np.isfinite(self.s2_angles[i]) if mask.sum() > 0: self.s2_angles[i][mask] = np.interp(np.flatnonzero(mask), \ np.flatnonzero(~mask), self.s2_angles[i][~mask]) # simple interpolation vza, sza = self.s2_angles[:2] vaa, saa = self.s2_angles[2:] raa = vaa - saa szas.append(sza) vzas.append(vza) raas.append(raa) vaas.append(vaa) saas.append(saa) # get the simulated surface reflectance s2_boa, s2_boa_qa, brdf_std = get_brdf_six(mcd43_file, angles=[vza, sza, raa],\ bands=(3,4,1,2,6,7), Linds= [Lx, Ly]) boas.append(s2_boa) boa_qas.append(s2_boa_qa) brdf_stds.append(brdf_std) self.s2_boa = np.hstack(boas) self.s2_boa_qa = np.hstack(boa_qas) self.brdf_stds = np.hstack(brdf_stds) self.Hx = np.hstack(Hxs) self.Hy = np.hstack(Hys) vza = np.hstack(vzas) sza = np.hstack(szas) vaa = np.hstack(vaas) saa = np.hstack(saas) raa = np.hstack(raas) self.s2_angles = np.array([vza, sza, vaa, saa]) #self.s2_boa, self.s2_boa_qa = self.s2_boa.flatten(), self.s2_boa_qa.flatten() self.s2_logger.info('Applying spectral transform.') self.s2_boa = self.s2_boa*np.array(self.s2_spectral_transform)[0,:-1][...,None] + \ np.array(self.s2_spectral_transform)[1,:-1][...,None] self.s2_logger.info('Getting elevation.') ele_data = reproject_data(self.global_dem, self.s2.s2_file_dir + '/B04.jp2', outputType=gdal.GDT_Float32).data mask = ~np.isfinite(ele_data) ele_data = np.ma.array(ele_data, mask=mask) / 1000. self.elevation = ele_data[self.Hx, self.Hy] self.s2_logger.info('Getting pripors from ECMWF forcasts.') sen_time_str = json.load( open(self.s2.s2_file_dir + '/tileInfo.json', 'r'))['timestamp'] self.sen_time = datetime.datetime.strptime(sen_time_str, u'%Y-%m-%dT%H:%M:%S.%fZ') example_file = self.s2.s2_file_dir + '/B04.jp2' aod, tcwv, tco3 = np.array(self._read_cams(example_file))[:, self.Hx, self.Hy] self.s2_aod550 = aod #* (1-0.14) # validation of +14% biase self.s2_tco3 = tco3 * 46.698 #* (1 - 0.05) tcwv = tcwv / 10. 
self.s2_tco3_unc = np.ones(self.s2_tco3.shape) * 0.2 self.s2_aod550_unc = np.ones(self.s2_aod550.shape) * 0.5 self.s2_logger.info( 'Trying to get the tcwv from the emulation of sen2cor look up table.' ) #try: self._get_tcwv(selected_img, vza, sza, raa, ele_data) #except: # self.s2_logger.warning('Getting tcwv from the emulation of sen2cor look up table failed, ECMWF data used.') # self.s2_tcwv = tcwv # self.s2_tcwv_unc = np.ones(self.s2_tcwv.shape) * 0.2 self.s2_logger.info('Trying to get the aod from ddv method.') try: solved = self._get_ddv_aot(self.s2.angles, example_file, self.s2_tcwv, ele_data, selected_img) if solved[0] < 0: self.s2_logger.warning( 'DDV failed and only cams data used for the prior.') else: self.s2_logger.info( 'DDV solved aod is %.02f, and it will used as the mean value of cams prediction.' % solved[0]) self.s2_aod550 += (solved[0] - self.s2_aod550.mean()) except: self.s2_logger.warning('Getting aod from ddv failed.') self.s2_logger.info('Applying PSF model.') if self.s2_psf is None: self.s2_logger.info('No PSF parameters specified, start solving.') high_img = np.repeat( np.repeat(selected_img['B11'], 2, axis=0), 2, axis=1) * 0.0001 high_indexs = self.Hx, self.Hy low_img = self.s2_boa[4] qa, cloud = self.s2_boa_qa[4], self.s2.cloud psf = psf_optimize(high_img, high_indexs, low_img, qa, cloud, 2) xs, ys = psf.fire_shift_optimize() xstd, ystd = 29.75, 39 ang = 0 self.s2_logger.info('Solved PSF parameters are: %.02f, %.02f, %d, %d, %d, and the correlation is: %f.' \ %(xstd, ystd, 0, xs, ys, 1-psf.costs.min())) else: xstd, ystd, ang, xs, ys = self.s2_psf # apply psf shifts without going out of the image extend shifted_mask = np.logical_and.reduce( ((self.Hx + int(xs) >= 0), (self.Hx + int(xs) < self.s2_full_res[0]), (self.Hy + int(ys) >= 0), (self.Hy + int(ys) < self.s2_full_res[0]))) self.Hx, self.Hy = self.Hx[shifted_mask] + int( xs), self.Hy[shifted_mask] + int(ys) #self.Lx, self.Ly = self.Lx[shifted_mask], self.Ly[shifted_mask] self.s2_boa = self.s2_boa[:, shifted_mask] self.s2_boa_qa = self.s2_boa_qa[:, shifted_mask] self.s2_angles = self.s2_angles[:, :, shifted_mask] self.elevation = self.elevation[shifted_mask] self.s2_aod550 = self.s2_aod550[shifted_mask] self.s2_tcwv = self.s2_tcwv[shifted_mask] self.s2_tco3 = self.s2_tco3[shifted_mask] self.s2_aod550_unc = self.s2_aod550_unc[shifted_mask] self.s2_tcwv_unc = self.s2_tcwv_unc[shifted_mask] self.s2_tco3_unc = self.s2_tco3_unc[shifted_mask] self.brdf_stds = self.brdf_stds[:, shifted_mask] self.s2_logger.info('Getting the convolved TOA reflectance.') self.valid_pixs = sum( shifted_mask) # count how many pixels is still within the s2 tile ker_size = 2 * int(round(max(1.96 * xstd, 1.96 * ystd))) self.bad_pixs = np.zeros(self.valid_pixs).astype(bool) imgs = [] for i, band in enumerate(self.s2_u_bands[:-2]): if selected_img[band].shape != self.s2_full_res: selected_img[band] = self.repeat_extend(selected_img[band], shape=self.s2_full_res) else: pass selected_img[band][0, :] = -9999 selected_img[band][-1, :] = -9999 selected_img[band][:, 0] = -9999 selected_img[band][:, -1] = -9999 imgs.append(selected_img[band]) # filter out the bad pixels self.bad_pixs |= cloud_dilation(self.s2.cloud |\ (selected_img[band] <= 0) | \ (selected_img[band] >= 10000),\ iteration= ker_size/2)[self.Hx, self.Hy] del selected_img del self.s2.selected_img del high_img del self.s2.angles del self.s2.sza del self.s2.saa del self.s2 ker = self.gaussian(xstd, ystd, ang) f = lambda img: signal.fftconvolve(img, ker, mode='same')[self.Hx, self .Hy] * 
0.0001 half = parmap(f, imgs[:3]) self.s2_toa = np.array(half + parmap(f, imgs[3:])) #self.s2_toa = np.array(parmap(f,imgs)) del imgs # get the valid value masks qua_mask = np.all(self.s2_boa_qa <= self.qa_thresh, axis=0) boa_mask = np.all(~self.s2_boa.mask,axis = 0 ) &\ np.all(self.s2_boa > 0, axis = 0) &\ np.all(self.s2_boa < 1, axis = 0) toa_mask = (~self.bad_pixs) &\ np.all(self.s2_toa > 0, axis = 0) &\ np.all(self.s2_toa < 1, axis = 0) self.s2_mask = boa_mask & toa_mask & qua_mask & (~self.elevation.mask) self.s2_AEE, self.s2_bounds = self._load_emus(self.s2_sensor)
aod_unc[:] = 0.5 tcwv_unc[:] = 0.2 tco3_unc[:] = 0.2 toa = np.random.rand(6, 50000) y = toa * 2.639794 - 0.038705 boa = y / (1 + 0.068196 * y) boa_unc = np.ones(50000) * 0.05 Hx = np.random.choice(10980, 50000) Hy = np.random.choice(10980, 50000) full_res = (10980, 10980) aero_res = 3050 emus_dir = '/home/ucfafyi/DATA/Multiply/emus/' sensor = 'msi' xap_emu = glob(emus_dir + '/isotropic_%s_emulators_*_xap.pkl' % (sensor))[0] xbp_emu = glob(emus_dir + '/isotropic_%s_emulators_*_xbp.pkl' % (sensor))[0] xcp_emu = glob(emus_dir + '/isotropic_%s_emulators_*_xcp.pkl' % (sensor))[0] f = lambda em: pkl.load(open(em, 'rb')) emus = parmap(f, [xap_emu, xbp_emu, xcp_emu]) band_indexs = [1, 2, 3, 7, 11, 12] band_wavelength = [469, 555, 645, 869, 1640, 2130] mask = np.zeros((10980, 10980)).astype(bool) mask[1, 1] = True aero = solving_atmo_paras(boa, toa, sza, vza, saa, vaa, aod, tcwv, tco3, ele, aod_unc, tcwv_unc, tco3_unc, boa_unc, Hx, Hy, mask, full_res, aero_res, emus, band_indexs, band_wavelength) solved = aero._optimization()
def solving_modis_aerosol(self, ): self.modis_logger = logging.getLogger('MODIS Atmospheric Correction') self.modis_logger.setLevel(logging.INFO) if not self.modis_logger.handlers: ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) self.modis_logger.addHandler(ch) self.modis_logger.propagate = False self.modis_sensor = 'TERRA' self._modis_aerosol() self.modis_solved = [] if self.aero_res < 500: self.modis_logger.warning( 'The best resolution of aerosol should be larger \ than 500 meters (inlcude), so it is set to 500 meters.' ) self.aero_res = 500 self.block_size = int(self.aero_res / 500) num_blocks = int(np.ceil(2400 / self.block_size)) self.modis_logger.info('Start solving......') blocks = zip(np.repeat(range(num_blocks), num_blocks), np.tile(range(num_blocks), num_blocks)) self.modis_solved = parmap(self._m_block_solver, blocks) #for i in range(num_blocks): # for j in range(num_blocks): # self.modis_logger.info('Doing block %03d-%03d.'%(i+1,j+1)) # self._m_block_solver([i,j]) inds = np.array([[i[0], i[1]] for i in self.modis_solved]) rets = np.array([i[2][0] for i in self.modis_solved]) aod_map = np.zeros((num_blocks, num_blocks)) aod_map[:] = np.nan tcwv_map = aod_map.copy() tco3_map = aod_map.copy() aod_map[inds[:, 0], inds[:, 1]] = rets[:, 0] tcwv_map[inds[:, 0], inds[:, 1]] = rets[:, 1] tco3_map[inds[:, 0], inds[:, 1]] = rets[:, 2] para_names = 'aod550', 'tcwv', 'tco3' g = gdal.Open(self.example_file) xmin, ymax = g.GetGeoTransform()[0], g.GetGeoTransform()[3] projection = g.GetProjection() results = [] self.modis_logger.info( 'Finished retrieval and saving them into local files.') for i, para_map in enumerate([aod_map, tcwv_map, tco3_map]): s = smoothn(para_map.copy(), isrobust=True, verbose=False)[1] smed = smoothn(para_map.copy(), isrobust=True, verbose=False, s=s)[0] xres, yres = self.block_size * 500, self.block_size * 500 geotransform = (xmin, xres, 0, ymax, 0, -yres) nx, ny = smed.shape dst_ds = gdal.GetDriverByName('GTiff').Create(self.mod_l1b_dir + '/atmo_paras/' + \ self.example_file.split('/')[-1].split('_EV_')[0] \ + '_EV_%s.tif'%para_names[i], ny, nx, 1, gdal.GDT_Float32) dst_ds.SetGeoTransform(geotransform) dst_ds.SetProjection(projection) dst_ds.GetRasterBand(1).WriteArray(smed) dst_ds.FlushCache() dst_ds = None results.append(smed) self.aod550_map, self.tcwv_map, self.tco3_map = results
def get_s2_angles(self, reconstruct=True, slic=None): tree = ET.parse(self.s2_file_dir + '/metadata.xml') root = tree.getroot() #Sun_Angles_Grid saa = [] sza = [] msz = [] msa = [] #Viewing_Incidence_Angles_Grids vza = {} vaa = {} mvz = {} mva = {} for child in root: for j in child: for k in j.findall('Sun_Angles_Grid'): for l in k.findall('Zenith'): for m in l.findall('Values_List'): for x in m.findall('VALUES'): sza.append(x.text.split()) for n in k.findall('Azimuth'): for o in n.findall('Values_List'): for p in o.findall('VALUES'): saa.append(p.text.split()) for ms in j.findall('Mean_Sun_Angle'): self.msz = float(ms.find('ZENITH_ANGLE').text) self.msa = float(ms.find('AZIMUTH_ANGLE').text) for k in j.findall('Viewing_Incidence_Angles_Grids'): for l in k.findall('Zenith'): for m in l.findall('Values_List'): vza_sub = [] for x in m.findall('VALUES'): vza_sub.append(x.text.split()) bi, di, angles = k.attrib['bandId'], \ k.attrib['detectorId'], np.array(vza_sub).astype(float) vza[(int(bi), int(di))] = angles for n in k.findall('Azimuth'): for o in n.findall('Values_List'): vaa_sub = [] for p in o.findall('VALUES'): vaa_sub.append(p.text.split()) bi, di, angles = k.attrib['bandId'],\ k.attrib['detectorId'], np.array(vaa_sub).astype(float) vaa[(int(bi), int(di))] = angles for mvia in j.findall('Mean_Viewing_Incidence_Angle_List'): for i in mvia.findall('Mean_Viewing_Incidence_Angle'): mvz[int(i.attrib['bandId'])] = float( i.find('ZENITH_ANGLE').text) mva[int(i.attrib['bandId'])] = float( i.find('AZIMUTH_ANGLE').text) sza = np.array(sza).astype(float) saa = np.array(saa).astype(float) saa[saa > 180] = saa[saa > 180] - 360 mask = np.isnan(sza) sza = griddata(np.array(np.where(~mask)).T, sza[~mask], \ (np.repeat(range(23), 23).reshape(23,23), \ np.tile (range(23), 23).reshape(23,23)), method='nearest') mask = np.isnan(saa) saa = griddata(np.array(np.where(~mask)).T, saa[~mask], \ (np.repeat(range(23), 23).reshape(23,23), \ np.tile (range(23), 23).reshape(23,23)), method='nearest') self.saa, self.sza = np.repeat(np.repeat(np.array(saa), 500, axis = 0), 500, axis = 1)[:10980, :10980], \ np.repeat(np.repeat(np.array(sza), 500, axis = 0), 500, axis = 1)[:10980, :10980] dete_id = np.unique([i[1] for i in vaa.keys()]) band_id = range(13) bands_vaa = [] bands_vza = [] for i in band_id: band_vaa = np.zeros((23, 23)) band_vza = np.zeros((23, 23)) band_vaa[:] = np.nan band_vza[:] = np.nan for j in dete_id: try: good = ~np.isnan(vaa[(i, j)]) band_vaa[good] = vaa[(i, j)][good] good = ~np.isnan(vza[(i, j)]) band_vza[good] = vza[(i, j)][good] except: pass bands_vaa.append(band_vaa) bands_vza.append(band_vza) bands_vaa, bands_vza = np.array(bands_vaa), np.array(bands_vza) vaa = {} vza = {} mva_ = {} mvz_ = {} for i, band in enumerate(self.s2_bands): vaa[band] = bands_vaa[i] vza[band] = bands_vza[i] try: mva_[band] = mva[i] mvz_[band] = mvz[i] except: mva_[band] = np.nan mvz_[band] = np.nan if self.bands is None: bands = self.s2_bands else: bands = self.bands self.vza = {} self.vaa = {} self.mvz = {} self.mva = {} for band in bands: mask = np.isnan(vza[band]) g_vza = griddata(np.array(np.where(~mask)).T, vza[band][~mask], \ (np.repeat(range(23), 23).reshape(23,23), \ np.tile (range(23), 23).reshape(23,23)), method='nearest') mask = np.isnan(vaa[band]) g_vaa = griddata(np.array(np.where(~mask)).T, vaa[band][~mask], \ (np.repeat(range(23), 23).reshape(23,23), \ np.tile (range(23), 23).reshape(23,23)), method='nearest') self.vza[band] = np.repeat(np.repeat(g_vza, 500, axis=0), 500, axis=1)[:10980, :10980] 
g_vaa[g_vaa > 180] = g_vaa[g_vaa > 180] - 360 self.vaa[band] = np.repeat(np.repeat(g_vaa, 500, axis=0), 500, axis=1)[:10980, :10980] self.mvz[band] = mvz_[band] self.mva[band] = mva_[band] self.angles = {'sza':self.sza, 'saa':self.saa, 'msz':self.msz, 'msa':self.msa,\ 'vza':self.vza, 'vaa': self.vaa, 'mvz':self.mvz, 'mva':self.mva} if reconstruct: try: if len(glob(self.s2_file_dir + '/angles/VAA_VZA_*.img')) == 13: pass else: #print 'Reconstructing Sentinel 2 angles...' subprocess.call(['python', './python/s2a_angle_bands_mod.py', \ self.s2_file_dir+'/metadata.xml', '10']) if self.bands is None: bands = self.s2_bands else: bands = self.bands self.vaa = {} self.vza = {} fname = [ self.s2_file_dir + '/angles/VAA_VZA_%s.img' % band for band in bands ] if len(glob(self.s2_file_dir + '/angles/VAA_VZA_*.img')) == 13: f = lambda fn: reproject_data( fn, self.s2_file_dir + '/B04.jp2', outputType=gdal.GDT_Float32).data ret = parmap(f, fname) for i, angs in enumerate(ret): #angs[0][angs[0]<0] = (36000 + angs[0][angs[0]<0]) angs = angs.astype(float) / 100. if slic is None: self.vaa[bands[i]] = angs[0] self.vza[bands[i]] = angs[1] else: x_ind, y_ind = np.array(slic) self.vaa[bands[i]] = angs[0][x_ind, y_ind] self.vza[bands[i]] = angs[1][x_ind, y_ind] self.angles = {'sza':self.sza, 'saa':self.saa, 'msz':self.msz, 'msa':self.msa,\ 'vza':self.vza, 'vaa': self.vaa, 'mvz':self.mvz, 'mva':self.mva} else: print 'Reconstruct failed and original angles are used.' except: print 'Reconstruct failed and original angles are used.'
def _l8_aerosol(self,): self.logger.propagate = False self.logger.info('Start to retrieve atmospheric parameters.') l8 = read_l8(self.l8_toa_dir, self.l8_tile, self.year, self.month, self.day, bands = self.bands) self.l8_header = l8.header self.logger.info('Loading emulators.') self._load_xa_xb_xc_emus() self.logger.info('Find corresponding pixels between L8 and MODIS tiles') self.example_file = self.l8_toa_dir + '/%s_b%d.tif'%(l8.header, 1) if len(glob(self.l8_toa_dir + '/MCD43_%s.npz'%(l8.header))) == 0: boa, unc, hx, hy, lx, ly, flist = MCD43_SurRef(self.mcd43_dir, self.example_file, \ self.year, self.doy, [l8.saa_sza, l8.vaa_vza], sun_view_ang_scale=[0.01, 0.01], bands = [3,4,1,2,6,7], tolz=0.003) np.savez(self.l8_toa_dir + 'MCD43_%s.npz'%l8.header, boa=boa, unc=unc, hx=hx, hy=hy, lx=lx, ly=ly, flist=flist) else: f = np.load(self.l8_toa_dir + 'MCD43_%s.npz'%l8.header) boa, unc, hx, hy, lx, ly, flist = f['boa'], f['unc'], f['hx'], f['hy'], f['lx'], f['ly'], f['flist'] self.Hx, self.Hy = hx, hy self.logger.info('Applying spectral transform.') self.boa_qa = np.ma.array(unc) self.boa = np.ma.array(boa)*np.array(self.spectral_transform)[0][...,None] + \ np.array(self.spectral_transform)[1][...,None] self.logger.info('Reading in TOA reflectance.') self.sen_time = l8.sen_time self.cloud = l8._get_qa() self.full_res = self.cloud.shape self.ecloud = binary_erosion(self.cloud, structure=np.ones((3,3)).astype(bool), iterations=10).astype(bool) border_mask = np.zeros(self.full_res).astype(bool) border_mask[[0, -1], :] = True border_mask[:, [0, -1]] = True xstd, ystd = 12., 20. ker_size = 2*int(round(max(1.96*xstd, 1.96*ystd))) self.dcloud = binary_dilation(self.ecloud | border_mask, structure=np.ones((3,3)).astype(bool), iterations=ker_size/2+10).astype(bool) self.logger.info('Getting elevation.') ele_data = reproject_data(self.global_dem, self.example_file, outputType = gdal.GDT_Float32).data/1000. mask = ~np.isfinite(ele_data) self.ele = np.ma.array(ele_data, mask = mask) self.ele[mask] = np.nan self.logger.info('Getting pripors from ECMWF forcasts.') self.aot, self.tcwv, self.tco3 = np.array(self._read_cams(self.example_file)) self.logger.info('Mean values of priors are: %.02f, %.02f, %.02f'%\ (np.nanmean(self.aot), np.nanmean(self.tcwv), np.nanmean(self.tco3))) self.toa = l8._get_toa() self.saa, self.sza, self.vaa, self.vza = l8._get_angles() self.saa[self.saa.mask] = self.sza[self.sza.mask] = \ self.vaa[self.vaa.mask] = self.vza[self.vza.mask] = np.nan self.logger.info('Getting DDV aot prior') self._get_ddv_aot(self.toa, l8, self.tcwv, self.tco3, ele_data) self.logger.info('Sorting data.') self.block_size = int(np.ceil(1. * self.aero_res / 30.)) self.num_blocks = int(np.ceil(max(self.full_res) / (1. 
* self.block_size))) self.efull_res = self.block_size * self.num_blocks shape1 = (self.num_blocks, self.block_size, self.num_blocks, self.block_size) shape2 = (self.vza.shape[0], self.num_blocks, self.block_size, self.num_blocks, self.block_size) self.ele = np.nanmean(self._extend_vals(self.ele ).reshape(shape1), axis=(3,1)) self.aot = np.nanmean(self._extend_vals(self.aot ).reshape(shape1), axis=(3,1)) self.tcwv = np.nanmean(self._extend_vals(self.tcwv).reshape(shape1), axis=(3,1)) self.tco3 = np.nanmean(self._extend_vals(self.tco3).reshape(shape1), axis=(3,1)) self.saa = np.nanmean(self._extend_vals(self.saa ).reshape(shape2), axis=(4,2)) self.sza = np.nanmean(self._extend_vals(self.sza ).reshape(shape2), axis=(4,2)) self.vaa = np.nanmean(self._extend_vals(self.vaa ).reshape(shape2), axis=(4,2)) self.vza = np.nanmean(self._extend_vals(self.vza ).reshape(shape2), axis=(4,2)) self.aot_unc = np.ones(self.aot.shape) * 0.8 self.tcwv_unc = np.ones(self.tcwv.shape) * 0.2 self.tco3_unc = np.ones(self.tco3.shape) * 0.2 #mod08_aot, myd08_aot = self._mcd08_aot() #self.logger.info('Mean values for priors are: %.02f, %.02f, %.02f and mod08 and myd08 aot are: %.02f, %.02f'%\ # (np.nanmean(self.aot), np.nanmean(self.tcwv), np.nanmean(self.tco3), mod08_aot, myd08_aot)) #if np.isnan(mod08_aot): self.aot[:] = np.nanmean(self.aot) #else: # temp = np.zeros_like(self.aot) # temp[:] = mod08_aot # self.aot = temp self.logger.info('Applying PSF model.') if self.l8_psf is None: xstd, ystd, ang, xs, ys = self._get_psf() else: xstd, ystd, ang, xs, ys = self.l8_psf shifted_mask = np.logical_and.reduce(((self.Hx+int(xs)>=0), (self.Hx+int(xs)<self.full_res[0]), (self.Hy+int(ys)>=0), (self.Hy+int(ys)<self.full_res[1]))) self.Hx, self.Hy = self.Hx[shifted_mask]+int(xs), self.Hy[shifted_mask]+int(ys) self.boa = self.boa [:, shifted_mask] self.boa_qa = self.boa_qa[:, shifted_mask] self.logger.info('Getting the convolved TOA reflectance.') self.bad_pixs = self.dcloud[self.Hx, self.Hy] ker = self.gaussian(xstd, ystd, ang) f = lambda img: signal.fftconvolve(img, ker, mode='same')[self.Hx, self.Hy] self.toa = np.array(parmap(f, list(self.toa))) qua_mask = np.all(self.boa_qa <= self.qa_thresh, axis = 0) boa_mask = np.all(~self.boa.mask,axis = 0 ) &\ np.all(self.boa >= 0.001, axis = 0) &\ np.all(self.boa < 1, axis = 0) toa_mask = (~self.bad_pixs) &\ np.all(self.toa >= 0.0001, axis = 0) &\ np.all(self.toa < 1., axis = 0) self.l8_mask = boa_mask & toa_mask & qua_mask self.Hx = self.Hx [self.l8_mask] self.Hy = self.Hy [self.l8_mask] self.toa = self.toa [:, self.l8_mask] self.boa = self.boa [:, self.l8_mask] self.boa_unc = self.boa_qa[:, self.l8_mask] self.logger.info('Solving...') tempm = np.zeros((self.efull_res, self.efull_res)) tempm[self.Hx, self.Hy] = 1 tempm = tempm.reshape(self.num_blocks, self.block_size, \ self.num_blocks, self.block_size).astype(int).sum(axis=(3,1)) self.mask = np.nansum(self._extend_vals((~self.dcloud).astype(int)).reshape(shape1), axis=(3,1)) self.mask = ((self.mask/((1.*self.block_size)**2)) > 0.) & ((tempm/((self.aero_res/500.)**2)) > 0.) 
& \ (np.any(~np.isnan([self.aot, self.tcwv, self.tco3, self.sza[0]]), axis = 0)) self.mask = binary_erosion(self.mask, structure=np.ones((5, 5)).astype(bool)) self.tcwv[~self.mask] = np.nanmean(self.tcwv) self.tco3[~self.mask] = np.nanmean(self.tco3) self.aero = solving_atmo_paras(self.boa, self.toa, self.sza, self.vza, self.saa, self.vaa, self.aot, self.tcwv, self.tco3, self.ele, self.aot_unc, self.tcwv_unc, self.tco3_unc, self.boa_unc, self.Hx, self.Hy, self.mask, (self.efull_res, self.efull_res), self.aero_res, self.emus, self.band_indexs, self.boa_bands, gamma = 2., alpha = -1.2, pix_res = 30) solved = self.aero._optimization() return solved