def drown_2d_field_gridfill(array, mask=None, spval=None, periodic=True,
                            itermax=100, relax=.6):
    """Fill land values in array, with optional mask, periodicity, ..."""
    if len(array.shape) != 2:
        raise ValueError('array should be 2d')
    if mask is not None:
        if mask.shape != array.shape:
            raise ValueError('mask should have the same shape as array')
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError('mask values should be 0/1')
    else:
        mask = logic_mask_from_missing_value(array, spval=spval)
    grids = np.ma.masked_array(data=array, mask=mask)
    xdim = 1
    ydim = 0
    eps = 1e-4
    drowned, conv = gridfill.fill(grids, xdim, ydim, eps, relax=relax,
                                  itermax=itermax, initzonal=True,
                                  cyclic=periodic, verbose=False)
    return drowned
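# Usage sketch (illustrative, not from the original source): fill a toy
# field whose "land" cells are flagged with a special value. Assumes numpy
# and gridfill are importable and drown_2d_field_gridfill is defined above.
import numpy as np
import gridfill

spval = 1e20
field = np.linspace(0., 23., 24).reshape(4, 6)
field[1:3, 2:4] = spval                        # a block of "land" cells

mask = (field == spval).astype(int)            # 1 = missing, 0 = valid
filled = drown_2d_field_gridfill(field, mask=mask, periodic=False)
print(filled[1:3, 2:4])                        # land cells now interpolated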
def test_multi_grid(self):
    filled, c = fill(self.grid, 2, 1, self.eps, relax=self.relax,
                     itermax=self.itermax, initzonal=self.initzonal,
                     cyclic=self.cyclic, verbose=False)
    self.assert_array_almost_equal(filled, self.soln)
def test_not_masked(self):
    filled, c = fill(self.grid.filled(fill_value=np.nan), 1, 0, self.eps,
                     relax=self.relax, itermax=self.itermax,
                     initzonal=self.initzonal, cyclic=self.cyclic,
                     verbose=False)
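# For context, a self-contained sketch of the fill() call pattern these
# tests exercise; the grid and tolerance values here are synthetic stand-ins.
import numpy as np
from gridfill import fill

grid = np.ma.masked_invalid(
    np.outer(np.sin(np.linspace(0., np.pi, 20)),
             np.cos(np.linspace(0., 2. * np.pi, 40))))
grid[8:12, 15:25] = np.ma.masked               # punch a hole to fill

# xdim=1, ydim=0 for a (lat, lon) grid; eps is the convergence tolerance.
filled, converged = fill(grid, 1, 0, 1e-3, relax=0.6, itermax=2000,
                         initzonal=True, cyclic=False, verbose=False)
print(converged, filled[10, 20])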
def damp(trend_io, lon, lat):
    # Vertical damping from -35.5 to -49.5 lat
    # Find lat/lon indices
    ind_start_lat = int((np.abs(lat - (-35.5))).argmin())  # start damping here
    ind_end_lat = int((np.abs(lat - (-49.5))).argmin())    # last lat with trend data
    ind_start_lon = int((np.abs(lon - (20.5))).argmin())   # IO lon1
    ind_end_lon = int((np.abs(lon - (146.5))).argmin())    # IO lon2
    # Select lat/lon box to modify
    to_modify = trend_io[ind_start_lat:ind_end_lat + 1,
                         ind_start_lon:ind_end_lon + 1]
    dmp = np.zeros(to_modify.shape)
    c = dmp.shape[0]  # number of decrements
    for i in range(dmp.shape[1]):
        dmp[:, i] = np.linspace(1., 0., c)
    modified = np.multiply(to_modify, dmp)
    # Replace modified box in original dataset
    trend_io[ind_start_lat:ind_end_lat + 1,
             ind_start_lon:ind_end_lon + 1] = modified

    # Horizontal damping from 15.5 to 20.5 longitude
    # Find lat/lon indices
    ind_start_lat = int((np.abs(lat - (-35.5))).argmin())  # start damping here
    ind_end_lat = int((np.abs(lat - (-49.5))).argmin())    # last lat with trend data
    ind_start_lon = int((np.abs(lon - (15.5))).argmin())   # IO lon1
    ind_end_lon = int((np.abs(lon - (20.5))).argmin())     # IO lon2
    # Select lat/lon box to modify
    to_modify = trend_io[ind_start_lat:ind_end_lat + 1,
                         ind_start_lon:ind_end_lon + 1]
    dmp = np.zeros(to_modify.shape)
    c = dmp.shape[1]  # number of decrements
    for i in range(dmp.shape[0]):
        dmp[i, :] = np.flip(np.linspace(to_modify[i, -1], 0., c), axis=0)
    # Replace modified box in original dataset
    trend_io[ind_start_lat:ind_end_lat + 1,
             ind_start_lon:ind_end_lon + 1] = dmp

    # Horizontal damping from 146.5 to 151.5 longitude
    # Find lat/lon indices
    ind_start_lat = int((np.abs(lat - (-35.5))).argmin())  # start damping here
    ind_end_lat = int((np.abs(lat - (-49.5))).argmin())    # last lat with trend data
    ind_start_lon = int((np.abs(lon - (146.5))).argmin())  # IO lon1
    ind_end_lon = int((np.abs(lon - (151.5))).argmin())    # IO lon2
    # Select lat/lon box to modify
    to_modify = trend_io[ind_start_lat:ind_end_lat + 1,
                         ind_start_lon:ind_end_lon + 1]
    # Interpolate masked values
    kw = dict(eps=1e-4, relax=0.6, itermax=1e4, initzonal=False,
              cyclic=False, verbose=True)
    to_modify_int, converged = fill(np.ma.masked_invalid(to_modify), 1, 0, **kw)
    dmp = np.zeros(to_modify.shape)
    c = dmp.shape[1]
    for i in range(dmp.shape[0]):
        dmp[i, :] = np.linspace(to_modify_int[i, 0], 0., c)
    # Replace modified box in original dataset
    trend_io[ind_start_lat:ind_end_lat + 1,
             ind_start_lon:ind_end_lon + 1] = dmp

    # Damping near the ITF
    # Find lat/lon indices
    ind_start_lat = int((np.abs(lat - (20.5))).argmin())
    ind_end_lat = int((np.abs(lat - (-20.5))).argmin())
    ind_start_lon = int((np.abs(lon - (91.5))).argmin())
    ind_end_lon = int((np.abs(lon - (138.5))).argmin())
    # Select lat/lon box to modify
    to_modify = trend_io[ind_start_lat:ind_end_lat + 1,
                         ind_start_lon:ind_end_lon + 1]
    # Find the coastline
    endpoints = np.zeros(to_modify.shape[0], dtype=int)
    for i in range(to_modify.shape[0]):
        endpoints[i] = np.max(np.where(to_modify[i] > 0))
    # Find all end points to damp from
    lists = [[] for i in range(len(endpoints))]
    for i in range(len(endpoints)):
        if i == 0:
            lists[i] = [2]
        elif endpoints[i] > endpoints[i - 1]:
            lists[i] = np.arange(endpoints[i - 1], endpoints[i] + 1, 1)
        else:
            lists[i] = [endpoints[i]]
    # Damp along diagonals
    for i in range(len(lists)):
        for j in range(len(lists[i])):
            b = i
            a = b - 7 if i > 7 else 0
            c = lists[i][j]
            d = c + (b - a)
            box = np.nan_to_num(to_modify[a:b + 1, c:d + 1])
            box_flip = box[::-1]
            dmp = np.linspace(box_flip[0, 0], 0., len(box_flip[0]))
            np.fill_diagonal(box_flip, dmp)
            box_back = box_flip[::-1]
            to_modify[a:b + 1, c:d + 1] = box_back
    # Mask zeros
    to_modify = ma.masked_where(to_modify == 0., to_modify)
    # Interpolate values
    kw = dict(eps=1e-4, relax=0.6, itermax=1e4, initzonal=False,
              cyclic=False, verbose=True)
    to_modify_int, converged = fill(np.ma.masked_invalid(to_modify), 1, 0, **kw)
    # Replace modified box in original dataset
    trend_io[ind_start_lat:ind_end_lat + 1,
             ind_start_lon:ind_end_lon + 1] = to_modify_int
    return trend_io
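# Hedged driver sketch for damp(): a synthetic 1-degree grid with
# cell-centre coordinates (so values like -35.5 and 146.5 exist exactly)
# and a uniform positive trend field standing in for the real dataset.
import numpy as np

lat = np.arange(-89.5, 90.5, 1.0)
lon = np.arange(0.5, 360.5, 1.0)
trend_io = np.full((lat.size, lon.size), 0.5)  # toy trend field

trend_io_dmp = damp(trend_io, lon, lat)        # tapers the box edges to zero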
def compute(self, pos, coarse_disp, fovea_disp, fovea_ij, disp2imu, imu2disp):
    assert all(coarse_disp.shape == self.coarse_shape)
    assert all(fovea_disp.shape == self.fovea_shape)

    # --- translate fovea_ij from full_shape into fine_shape
    fovea_ij = np.round(np.asarray(fovea_ij) * self.fine_full_ratio)
    assert all(fovea_ij >= 0)
    assert all(fovea_ij + self.fovea_shape < self.fine_shape)

    # --- store incoming data
    self.data.appendleft((
        np.array(pos),
        np.array(coarse_disp),
        np.array(fovea_disp),
        np.array(fovea_ij),
    ))

    # --- project old disparities into the new frame
    cur_pos = pos
    c_disps_raw = []
    c_disps = []
    cf_disps = []
    cf_foveanesses = []
    for k, [pos, coarse, fovea, fovea_ij] in enumerate(self.data):
        # --- coarse points
        if k == 0:  # current frame
            c_disp = np.array(coarse)
            c_disps_raw.append(np.array(c_disp))
        else:
            c_xyd = get_shifted_points(
                coarse, pos, cur_pos, self.cX, self.cY, disp2imu, imu2disp)
            c_xyd[:, :2] = np.round(c_xyd[:, :2])
            c_disp = -np.ones(self.coarse_shape)
            points2disp_max(c_xyd, c_disp)
            c_disps_raw.append(c_disp)

            # fill missing values
            c_disp_masked = np.ma.masked_array(c_disp, c_disp < 0)
            c_disp, _ = gridfill.fill(c_disp_masked, 1, 0, eps=0.1,
                                      itermax=1000)

        c_disps.append(c_disp)

        # scale up coarse
        cf_disp = cv2.resize(c_disp, tuple(self.fine_shape[::-1]))
        cf_foveaness = np.zeros_like(cf_disp)

        # --- fovea points
        if fovea.shape[0] > 0 and fovea.shape[1] > 0:
            fm, fn = fovea.shape
            fi, fj = fovea_ij

            # account for zero-valued edge around fovea, and edge effects
            fmm, fmn = self.fovea_margin
            fm = fm - 2 * fmm
            fn = fn - 2 * fmn
            fi = fi + fmm
            fj = fj + fmn
            fovea = fovea[fmm:-fmm, fmn:-fmn]

            if k == 0:  # current frame
                cf_disp[fi:fi + fm, fj:fj + fn] = fovea
                cf_foveaness[fi:fi + fm, fj:fj + fn] = 1
            else:
                fX = self.fX[fi:fi + fm, fj:fj + fn]
                fY = self.fY[fi:fi + fm, fj:fj + fn]
                f_xyd = get_shifted_points(
                    fovea, pos, cur_pos, fX, fY, disp2imu, imu2disp,
                    final_shape=self.fine_shape)
                f_xyd[:, :2] = np.round(f_xyd[:, :2])
                f_disp = -np.ones(self.fine_shape)
                points2disp_max(f_xyd, f_disp)
                cf_disp[f_disp >= 0] = f_disp[f_disp >= 0]
                cf_foveaness[f_disp >= 0] = 1

        cf_disps.append(cf_disp)
        cf_foveanesses.append(cf_foveaness)

    if 0:  # debug plots of raw/filled disparities and foveaness
        plt.figure(101)
        plt.clf()
        for k, [c_disp_raw, cf_disp, cf_foveaness] in enumerate(
                zip(c_disps, cf_disps, cf_foveanesses)):
            r = len(self.data)
            c = 3
            plt.subplot(r, c, c * k + 1)
            plt.imshow(c_disp_raw, vmin=-1, vmax=n_disp)
            plt.subplot(r, c, c * k + 2)
            plt.imshow(cf_disp, vmin=-1, vmax=n_disp)
            plt.subplot(r, c, c * k + 3)
            plt.imshow(cf_foveaness, vmin=0, vmax=1)

    # --- compute best disparity estimate
    self.disp[:] = 0
    n = len(cf_disps)
    c_stds = np.array(self.coarse_stds)[:n]
    f_stds = np.array(self.fovea_stds)[:n]

    # quadratic cost function (estimate is weighted mean of means)
    disps = np.array(cf_disps)
    foveanesses = np.array(cf_foveanesses)
    c_stds.shape = (-1, 1, 1)
    f_stds.shape = (-1, 1, 1)
    stds = c_stds + foveanesses * (f_stds - c_stds)
    w = 1. / stds
    w /= w.sum(axis=0, keepdims=True)
    self.disp[:] = (w * disps).sum(0)
    error = self.disp - disps
    # self.cost[:] = (error**2).sum(0) + (stds**2).sum(0)
    self.cost[:] = self.disp * ((error**2).sum(0) + (stds**2).sum(0))
    # self.cost[:] = (w * (error**2 + stds**2)).sum(0)

    # make cost edges zero
    fm, fn = (10, 10)
    mask = np.ones(self.cost.shape, dtype=bool)
    mask[fm:-fm, fn:-fn] = 0
    self.cost[mask] = 0

    return self.disp
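# The estimate above is an inverse-std weighted mean across frames; a tiny
# standalone illustration of that weighting with made-up numbers.
import numpy as np

disps = np.array([[[10., 12.], [11., 13.]],    # two 2x2 disparity maps
                  [[14., 12.], [15., 9.]]])
stds = np.array([[[1., 1.], [2., 2.]],         # per-pixel uncertainties
                 [[4., 4.], [2., 2.]]])

w = 1. / stds
w /= w.sum(axis=0, keepdims=True)              # normalize over frames
estimate = (w * disps).sum(axis=0)             # weighted mean of means
print(estimate)                                # low-std pixels dominate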
#trend_io_3m = xr.full_like(trend_io, 3 * np.mean(trend_io))
#trend_io_3m = mask_oceans(args.imask, trend_io_3m, lon, lat)

# Damp the edges
trend_io_dmp = damp(trend_io, lon, lat)
#trend_io_dmp = trend_io_dmp.values

# Calculate the global climatology
clim = clim_anom_xr(sst_t, time_t, start=None)[0]
clim = clim.values

# Clip temperatures below -1.77 (approx. seawater freezing point)
clim[clim < -1.77] = -1.77

# Interpolate SST over masked points
clim_int = np.zeros(clim.shape)
for i in range(clim.shape[0]):
    clim_int[i], converged = fill(ma.masked_invalid(clim[i]), 1, 0, **kw)

# Repeat the climatology nyears times (e.g. 68 years for 1950-2017)
nyears = (end_time - start_time) + 1
clim_rpt = np.repeat(clim_int[np.newaxis, :, :, :], nyears, axis=0) \
    .reshape(clim_int.shape[0] * nyears, clim_int.shape[1], clim_int.shape[2])

# Repeat the IO trend once per month (nyears * 12 times)
trend_io_rpt = np.repeat(trend_io_int[np.newaxis, :, :], nyears * 12, axis=0)

# Create an array to multiply rates with
multiplier = np.arange(1, (nyears * 12) + 1, 1)[:, np.newaxis, np.newaxis]

# Multiply rate with multiplier to get transient trends
trend_io_trans = np.multiply(trend_io_rpt, multiplier)
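# Toy check of the repeat-and-multiply pattern above (shapes and values
# are illustrative only): month k of the transient field carries k * rate.
import numpy as np

rate = np.array([[0.1, 0.2], [0.3, 0.4]])      # monthly trend per cell
nyears = 2
rate_rpt = np.repeat(rate[np.newaxis, :, :], nyears * 12, axis=0)
multiplier = np.arange(1, nyears * 12 + 1)[:, np.newaxis, np.newaxis]
transient = rate_rpt * multiplier
print(transient.shape, transient[0, 0, 0], transient[-1, 0, 0])
# (24, 2, 2) 0.1 2.4 -- values grow linearly with the month index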
def interpolate_to_pressure(data, presion, niveles, extrapolate=False,
                            decode_times=True):
    # interpolate data to the requested pressure levels
    try:
        data = xray.open_dataset(data, decode_times=decode_times)
        data = data[list(data.data_vars.keys())[0]]
    except Exception:
        print("No data files found, falling back to the variable passed in")
    try:
        presion = xray.open_dataset(presion, decode_times=decode_times)
        presion = presion[list(presion.data_vars.keys())[0]]
    except Exception:
        print("No pressure files found, falling back to the variable passed in")
    n = 0
    for valor in niveles:
        pres_eval_u = presion.where(presion <= valor)
        pres_eval_l = presion.where(presion > valor)
        temp_u = data.where(pres_eval_u == pres_eval_u.max('lev')).min('lev')
        temp_l = data.where(pres_eval_l == pres_eval_l.min('lev')).min('lev')
        pres_u = pres_eval_u.max('lev')
        pres_l = pres_eval_l.min('lev')
        interpolados = (((valor - pres_u) / (pres_l - pres_u))
                        * (temp_l - temp_u)) + temp_u
        if n == 0:
            datos_interp = interpolados.copy()
            n += 1
        else:
            datos_interp = xray.concat([datos_interp, interpolados],
                                       dim='nivel')
    shape_cont = len(datos_interp.shape)
    datos_interp['nivel'] = niveles
    if extrapolate:
        # extrapolate variables over masked regions
        try:
            from gridfill import fill
        except ImportError:
            raise ImportError(
                'The gridfill module is required; repository -> '
                'https://github.com/ajdawson/gridfill')
        datos_mascara = np.ma.masked_invalid(datos_interp.values)
        kw = dict(eps=1e-4, relax=0.6, itermax=1e4, initzonal=False,
                  cyclic=False, verbose=True)
        filled, converged = fill(datos_mascara, shape_cont - 1,
                                 shape_cont - 2, **kw)
        datos_interp.values = filled
    else:
        print(datos_interp)
    return convertido(datos_interp, data.isel(time=0))
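# The per-level formula in interpolate_to_pressure() is plain linear
# interpolation in pressure; a scalar sketch with invented numbers.
valor = 850.0                    # target pressure (hPa)
pres_u, temp_u = 800.0, 280.0    # nearest level with p <= valor
pres_l, temp_l = 900.0, 287.0    # nearest level with p > valor

interp = ((valor - pres_u) / (pres_l - pres_u)) * (temp_l - temp_u) + temp_u
print(interp)                    # 283.5, halfway between the two levels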
            flag = True
        except:
            new_idx = new_idx - 1

aux_in = data.variables['esi'][dic_esi[newmonth], :, :]
aux_in = ma.masked_where(aux_in <= -6., aux_in)
aux_in = ma.masked_where(aux_in >= 6., aux_in)

# Fill gaps
kw = dict(eps=1e-4, relax=0.6, itermax=1e4, initzonal=False,
          cyclic=False, verbose=True)
aux_in, converged = fill(aux_in, 1, 0, **kw)
aux_in = np.expand_dims(aux_in, axis=0)
aux_in = np.ma.array(aux_in, mask=mask_sa)
aux_in = aux_in[:, min_lat_index:max_lat_index, min_lon_index:max_lon_index]

Dmasked, mask = caso_shape(aux_in[:], lats, lons,
                           '/nordeste_do_brasil/nordeste_do_brasil.txt')

# Plotting
caso_shape_so_plot('/estados/alagoas/alagoas.txt')
caso_shape_so_plot('/estados/bahia/bahia.txt')
caso_shape_so_plot('/estados/ceara/ceara.txt')
caso_shape_so_plot('/estados/maranhao/maranhao.txt')
caso_shape_so_plot('/estados/paraiba/paraiba.txt')
caso_shape_so_plot('/estados/pernambuco/pernambuco.txt')
caso_shape_so_plot('/estados/piaui/piaui.txt')
new_v = rg_tv(new_vt)

# set options for poisson_grid_fill
guess = True      # use zonal means as the initial guess
is_cyclic = True  # cyclic [global] grid
eps = 1.e-2       # tolerance, variable dependent
opt = 0           # not used
relc = 0.6        # relaxation coefficient
nscan = 1500      # iteration cap; usually far fewer are needed

# fill u, v, t, etc.
masked = np.ma.masked_where(lmask_interp_3d == 0, new_t[0])
filled, converged = gridfill.fill(masked, 2, 1, eps, relax=relc,
                                  itermax=nscan, initzonal=guess,
                                  cyclic=is_cyclic)
filled = np.where(lmask_out_3d == 0.0, 0.0, filled)
new_t = filled.reshape([1, nz, ny, nx])

masked = np.ma.masked_where(lmask_interp_3d == 0, new_h[0])
filled, converged = gridfill.fill(masked, 2, 1, eps, relax=relc,
                                  itermax=nscan, initzonal=guess,
                                  cyclic=is_cyclic)
import cv2
import numpy as np
from gridfill import fill

camera = ConnectCam()
views = ["Test"]
Create_Views(views)

kw = dict(eps=1e-4, relax=0.6, itermax=1e4, initzonal=False,
          cyclic=False, verbose=False)

while True:
    key = cv2.waitKey(1) & 0xFF
    img = cv2.cvtColor(camera.read(), cv2.COLOR_BGR2GRAY)
    idx = random_mask(img, frac=0.05)          # mask out a random 5% of pixels
    img = mask_data(img, idx)
    img, _ = fill(img, 1, 0, **kw)             # inpaint the masked pixels
    print(len(img), len(img[0]))
    img = np.asarray(img, dtype=np.uint8)
    print(*img[0])
    if key == ord('q'):                        # waitKey returns an int code
        Log_Image("Test", img)
        # Update_Views(views, [img])
        break

camera.release()
cv2.destroyAllWindows()
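# Camera-free sketch of the same inpainting loop body: knock out a random
# 5% of pixels and let fill() reconstruct them. random_mask/mask_data are
# replaced inline here; the synthetic frame is an assumption.
import numpy as np
from gridfill import fill

rng = np.random.default_rng(0)
img = np.tile(np.linspace(0., 255., 64), (48, 1))   # smooth grayscale frame

holes = rng.random(img.shape) < 0.05                # 5% random dropout
masked = np.ma.masked_array(img, mask=holes)

kw = dict(eps=1e-4, relax=0.6, itermax=int(1e4), initzonal=False,
          cyclic=False, verbose=False)
restored, _ = fill(masked, 1, 0, **kw)
print(np.abs(restored - img).max())                 # reconstruction error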