def transform(sta, dlon=None, dlat=None):
    # Convert data that lies on a regular grid but is stored in station (table) form
    # back into gridded data.
    slon = np.min(sta['lon'])
    elon = np.max(sta['lon'])
    slat = np.min(sta['lat'])
    elat = np.max(sta['lat'])
    nsta = len(sta.index)
    if dlon is None:
        # infer the longitude spacing from the first pair of stations whose lon differs
        for i in range(nsta - 1):
            dlon = sta['lon'].iloc[i] - sta['lon'].iloc[i + 1]
            if dlon != 0:
                dlon = math.fabs(dlon)
                break
    if dlat is None:
        # infer the latitude spacing in the same way
        for i in range(nsta - 1):
            dlat = sta['lat'].iloc[i] - sta['lat'].iloc[i + 1]
            if dlat != 0:
                dlat = math.fabs(dlat)
                break
    ig = ((sta['lon'] - slon) // dlon).astype(dtype='int16')
    jg = ((sta['lat'] - slat) // dlat).astype(dtype='int16')
    grid0 = bd.grid([slon, elon, dlon], [slat, elat, dlat])
    dat = np.zeros((grid0.nlat, grid0.nlon))
    data_name = bd.get_data_names(sta)[0]
    dat[jg, ig] = sta[data_name].values
    grd = bd.grid_data(grid0, dat)
    return grd
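# Illustrative sketch (not part of the library API): how the ig/jg index mapping used in
# transform turns a station table that already lies on a regular grid back into a 2-D array.
# Uses only numpy/pandas with synthetic data; the 'lon'/'lat'/'data0' column names are
# assumptions matching the station format used in this module.
def _example_station_table_to_array():
    import numpy as np
    import pandas as pd
    lons = np.arange(100.0, 101.5, 0.5)          # 3 grid columns
    lats = np.arange(30.0, 31.5, 0.5)            # 3 grid rows
    glon, glat = np.meshgrid(lons, lats)
    sta = pd.DataFrame({'lon': glon.ravel(), 'lat': glat.ravel(),
                        'data0': np.arange(9, dtype=float)})
    dlon = dlat = 0.5
    ig = ((sta['lon'] - lons[0]) // dlon).astype('int16')
    jg = ((sta['lat'] - lats[0]) // dlat).astype('int16')
    dat = np.zeros((len(lats), len(lons)))
    dat[jg, ig] = sta['data0'].values
    return dat                                   # 3 x 3 array of the station values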
def sta_to_grid_idw(sta, grid0, background=None, effectR=1000, nearNum=16, other_info='left'):
    # Inverse-distance-weighted (IDW) interpolation from stations to a grid.
    data_name = bd.get_data_names(sta)
    if other_info == 'left':
        grid = bd.grid(grid0.glon, grid0.glat, [sta['time'].iloc[0]],
                       [sta['dtime'].iloc[0]], [sta['level'].iloc[0]], data_name)
    else:
        grid = grid0
    # convert station and grid-point coordinates to Cartesian points on the Earth sphere
    xyz_sta = lon_lat_to_cartesian(sta['lon'].values, sta['lat'].values, R=bd.const.ER)
    lon = np.arange(grid.nlon) * grid.dlon + grid.slon
    lat = np.arange(grid.nlat) * grid.dlat + grid.slat
    grid_lon, grid_lat = np.meshgrid(lon, lat)
    xyz_grid = lon_lat_to_cartesian(grid_lon.flatten(), grid_lat.flatten(), R=bd.const.ER)
    # find the nearNum nearest stations for every grid point
    tree = cKDTree(xyz_sta)
    d, inds = tree.query(xyz_grid, k=nearNum)
    d += 1e-6  # avoid division by zero when a grid point coincides with a station
    w = 1.0 / d ** 2
    input_dat = sta[data_name[0]].values
    dat = np.sum(w * input_dat[inds], axis=1) / np.sum(w, axis=1)
    bg = bd.grid_data(grid)
    if background is not None:
        bg = fun.gxy_gxy.linearInterpolation(background, grid)
    bg_dat = bg.values.flatten()
    # beyond the effective radius, fall back to the background field
    dat = np.where(d[:, 0] > effectR, bg_dat, dat)
    grd = bd.grid_data(grid, dat)
    return grd
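# Illustrative sketch (synthetic planar coordinates instead of the lon_lat_to_cartesian
# projection used above): the core of the IDW step is a k-nearest-neighbour query followed
# by 1/d**2 weighting of the station values.
def _example_idw_core():
    import numpy as np
    from scipy.spatial import cKDTree
    np.random.seed(0)
    pts = np.random.rand(50, 2)                   # 50 "stations" in the unit square
    vals = np.sin(pts[:, 0] * 3) + pts[:, 1]      # synthetic observations
    gx, gy = np.meshgrid(np.linspace(0, 1, 20), np.linspace(0, 1, 20))
    targets = np.column_stack([gx.ravel(), gy.ravel()])
    d, inds = cKDTree(pts).query(targets, k=8)    # 8 nearest stations per grid point
    d += 1e-6                                     # guard against zero distance
    w = 1.0 / d ** 2
    interp = np.sum(w * vals[inds], axis=1) / np.sum(w, axis=1)
    return interp.reshape(gx.shape)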
def interpolation_linear(grd, sta, other_info='left'):
    # Bilinear interpolation of a gridded field to station locations.
    grid = bd.get_grid_of_data(grd)
    sta1 = fun.get_from_sta.sta_in_grid_xy(sta, grid)  # keep only stations inside the grid
    dat = np.squeeze(grd.values)
    ig = ((sta1['lon'] - grid.slon) // grid.dlon).astype(dtype='int16')
    jg = ((sta1['lat'] - grid.slat) // grid.dlat).astype(dtype='int16')
    dx = (sta1['lon'] - grid.slon) / grid.dlon - ig
    dy = (sta1['lat'] - grid.slat) / grid.dlat - jg
    # bilinear weights of the four surrounding grid points
    c00 = (1 - dx) * (1 - dy)
    c01 = dx * (1 - dy)
    c10 = (1 - dx) * dy
    c11 = dx * dy
    ig1 = np.minimum(ig + 1, grid.nlon - 1)
    jg1 = np.minimum(jg + 1, grid.nlat - 1)
    dat_sta = (c00 * dat[jg, ig] + c01 * dat[jg, ig1]
               + c10 * dat[jg1, ig] + c11 * dat[jg1, ig1])
    data_name = bd.get_data_names(sta)[0]
    sta1.loc[:, data_name] = dat_sta[:]
    if other_info == 'left':
        sta1['time'] = grid.stime
        sta1['dtime'] = grid.sdtimedelta
        sta1['level'] = grid.levels[0]
        bd.set_data_name(sta1, grid.members[0])
    return sta1
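# Illustrative sketch: the four bilinear weights c00/c01/c10/c11 used above sum to one and
# give the value at a fractional position inside one grid cell. The 2 x 2 cell values are
# hypothetical.
def _example_bilinear_weights():
    import numpy as np
    dat = np.array([[1.0, 2.0],      # values at (lat0, lon0) and (lat0, lon1)
                    [3.0, 4.0]])     # values at (lat1, lon0) and (lat1, lon1)
    dx, dy = 0.25, 0.75              # fractional position inside the cell
    c00, c01 = (1 - dx) * (1 - dy), dx * (1 - dy)
    c10, c11 = (1 - dx) * dy, dx * dy
    value = c00 * dat[0, 0] + c01 * dat[0, 1] + c10 * dat[1, 0] + c11 * dat[1, 1]
    return value                     # 2.75 for this cell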
def ts(sta, threshold):
    # Threat score (TS) of every forecast column against the observation column.
    data_names = bd.get_data_names(sta)
    ob = sta[data_names[0]].values   # the first data column is the observation
    fo_num = len(data_names) - 1
    ts_list = []
    for i in range(fo_num):
        fo = sta[data_names[i + 1]].values
        ts1 = yes_or_no.threshold_one.ts(ob, fo, threshold)
        ts_list.append(ts1)
    if len(ts_list) == 1:
        ts_list = ts_list[0]
    return ts_list
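# Illustrative sketch of the threat score that yes_or_no.threshold_one.ts is expected to
# return (its internals are not shown in this module): with the yes/no event defined here
# as value >= threshold, TS = hits / (hits + false alarms + misses).
def _example_threat_score(ob, fo, threshold):
    import numpy as np
    ob_yes = np.asarray(ob) >= threshold
    fo_yes = np.asarray(fo) >= threshold
    hits = np.sum(ob_yes & fo_yes)
    false_alarms = np.sum(~ob_yes & fo_yes)
    misses = np.sum(ob_yes & ~fo_yes)
    total = hits + false_alarms + misses
    return hits / total if total > 0 else np.nan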
def interpolation_nearest(grd, sta, other_info='left'):
    # Nearest-neighbour interpolation of a gridded field to station locations.
    grid = bd.get_grid_of_data(grd)
    sta1 = fun.get_from_sta.sta_in_grid_xy(sta, grid)
    dat = np.squeeze(grd.values)
    # round (rather than floor) the fractional index to pick the nearest grid point
    ig = np.round((sta1['lon'] - grid.slon) / grid.dlon).astype(dtype='int16')
    jg = np.round((sta1['lat'] - grid.slat) / grid.dlat).astype(dtype='int16')
    dat_sta = dat[jg, ig]
    data_name = bd.get_data_names(sta)[0]
    sta1.loc[:, data_name] = dat_sta[:]
    if other_info == 'left':
        sta1['time'] = grid.stime
        sta1['dtime'] = grid.sdtimedelta
        sta1['level'] = grid.levels[0]
        bd.set_data_name(sta1, grid.members[0])
    return sta1
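# Illustrative sketch: rounding the fractional index picks the nearest grid point, whereas
# floor division would always pick the grid point to the lower-left of the station.
def _example_nearest_index():
    import numpy as np
    slon, dlon = 100.0, 0.5
    lon = np.array([100.10, 100.30, 100.74])
    floor_idx = ((lon - slon) // dlon).astype('int16')            # [0, 0, 1]
    nearest_idx = np.round((lon - slon) / dlon).astype('int16')   # [0, 1, 1]
    return floor_idx, nearest_idx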
def write_to_micaps3(sta0, filename="a.txt", type=1, effectiveNum=4):
    # Write a station DataFrame to a micaps type-3 (diamond 3) text file.
    try:
        sta = copy.deepcopy(sta0)
        dir = os.path.split(os.path.abspath(filename))[0]
        if os.path.isdir(dir):
            br = open(filename, 'w')
            end = len(filename)
            start = max(0, end - 16)
            nsta = len(sta.index)
            time = sta['time'].iloc[0]
            if isinstance(time, (np.datetime64, datetime.datetime)):
                time_str = method.time_tools.time_to_str(time)
                time_str = (time_str[0:4] + " " + time_str[4:6] + " "
                            + time_str[6:8] + " " + time_str[8:10] + " ")
            else:
                time_str = "2099 01 01 0 0 "
            # fall back to the requested type when type is negative or the level is missing
            level = sta['level'].iloc[0]
            if type < 0 or pd.isnull(level):
                level = type
            level = int(level)
            str1 = ("diamond 3 " + filename[start:end] + "\n" + time_str
                    + str(level) + " 0 0 0 0\n1 " + str(nsta) + "\n")
            br.write(str1)
            br.close()
            data_name = bd.get_data_names(sta)[0]
            df = sta[['id', 'lon', 'lat', 'alt', data_name]]
            effectiveNum_str = "%." + '%d' % effectiveNum + "f"
            df.to_csv(filename, mode='a', header=None, sep="\t",
                      float_format=effectiveNum_str, index=None)
    except (Exception, BaseException):
        exstr = traceback.format_exc()
        print(exstr)
    return None
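# Illustrative sketch of the text layout produced by write_to_micaps3 for a hypothetical
# two-station table written to "a.txt" (station ids and values are made up; the data rows
# are tab-separated id, lon, lat, alt, value, with floats formatted by float_format):
#
#   diamond 3 a.txt
#   2019 05 01 08 1 0 0 0 0
#   1 2
#   54511   116.4700   39.8100   31.3000   1.2000
#   58362   121.4600   31.4000    5.5000   0.8000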
def cubicInterpolation(grd, sta, other_info='left'):
    # Bicubic interpolation of a gridded field to station locations, accumulated
    # from the 4 x 4 grid points surrounding each station.
    grid = bd.get_grid_of_data(grd)
    sta1 = fun.get_from_sta_data.sta_in_grid_xy(sta, grid)
    dat = np.squeeze(grd.values)
    ig = ((sta1['lon'] - grid.slon) // grid.dlon).astype(dtype='int16')
    jg = ((sta1['lat'] - grid.slat) // grid.dlat).astype(dtype='int16')
    dx = (sta1['lon'] - grid.slon) / grid.dlon - ig
    dy = (sta1['lat'] - grid.slat) / grid.dlat - jg
    data_name = bd.get_data_names(sta1)[0]
    sta1.loc[:, data_name] = 0.0  # accumulate the 4 x 4 stencil from zero
    for p in range(-1, 3, 1):
        iip = np.minimum(np.maximum(ig + p, 0), grid.nlon - 1)
        fdx = cubic_f(p, dx)
        for q in range(-1, 3, 1):
            jjq = np.minimum(np.maximum(jg + q, 0), grid.nlat - 1)
            fdy = cubic_f(q, dy)
            fdxy = fdx * fdy
            sta1[data_name] += fdxy * dat[jjq, iip]
    if other_info == 'left':
        sta1['time'] = grid.stime
        sta1['dtime'] = grid.sdtimedelta
        sta1['level'] = grid.levels[0]
        bd.set_data_name(sta1, grid.members[0])
    return sta1
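# Illustrative sketch (an assumption, since cubic_f is defined elsewhere in the library):
# a common choice for the four 1-D weights accumulated above is the Catmull-Rom
# cubic-convolution kernel, evaluated at offsets p = -1, 0, 1, 2 for a fractional
# position t inside the cell. These weights sum to 1.
def _example_catmull_rom_weight(p, t):
    import numpy as np
    t = np.asarray(t, dtype=float)
    if p == -1:
        return (-t ** 3 + 2 * t ** 2 - t) / 2
    if p == 0:
        return (3 * t ** 3 - 5 * t ** 2 + 2) / 2
    if p == 1:
        return (-3 * t ** 3 + 4 * t ** 2 + t) / 2
    return (t ** 3 - t ** 2) / 2                  # p == 2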
def sta_to_grid_oa2(sta0, background, sm=1, effect_R=1000, rate_of_model=0):
    # Objective analysis: starting from the background field, minimize a smoothness
    # penalty plus the squared misfit between the grid and the station observations.
    sta = fun.sxy_sxy.drop_nan(sta0)
    data_name = bd.get_data_names(sta)[0]
    grid = bd.get_grid_of_data(background)
    sta = fun.get_from_sta.sta_in_grid_xy(sta, grid)
    grd = background.copy()
    # cell indices and bilinear weights of every station inside the grid
    lon_s = sta['lon'].values
    lat_s = sta['lat'].values
    ig = ((lon_s - grid.slon) // grid.dlon).astype(dtype='int16')
    jg = ((lat_s - grid.slat) // grid.dlat).astype(dtype='int16')
    dx = (lon_s - grid.slon) / grid.dlon - ig
    dy = (lat_s - grid.slat) / grid.dlat - jg
    c00 = (1 - dx) * (1 - dy)
    c01 = dx * (1 - dy)
    c10 = (1 - dx) * dy
    c11 = dx * dy
    ig1 = np.minimum(ig + 1, grid.nlon - 1)
    jg1 = np.minimum(jg + 1, grid.nlat - 1)
    obs = sta[data_name].values
    lat = np.arange(grid.nlat) * grid.dlat + grid.slat
    # latitude-dependent weight applied to the meridional smoothness term
    sr = 1 / np.power(np.cos(lat * math.pi / 180), 4)

    def targe(x):
        # cost = sm * smoothness penalty + sum of squared station errors
        grdv = x.reshape(grid.nlat, grid.nlon)
        ddx = grdv[:, :-2] + grdv[:, 2:] - 2 * grdv[:, 1:-1]
        cost1 = np.sum(ddx * ddx)
        ddy = grdv[:-2, :] + grdv[2:, :] - 2 * grdv[1:-1, :]
        cost1 += np.dot(np.sum(ddy * ddy, axis=1), sr[1:-1])
        sta_g = (c00 * grdv[jg, ig] + c01 * grdv[jg, ig1]
                 + c10 * grdv[jg1, ig] + c11 * grdv[jg1, ig1])
        error = obs - sta_g
        cost2 = np.sum(error * error)
        return sm * cost1 + cost2

    def grads(x):
        # analytic gradient of targe with respect to every grid value
        grdv = x.reshape(grid.nlat, grid.nlon)
        g1 = np.zeros(grdv.shape)
        ddx = 2 * (grdv[:, :-2] + grdv[:, 2:] - 2 * grdv[:, 1:-1])
        g1[:, :-2] = ddx
        g1[:, 2:] += ddx
        g1[:, 1:-1] -= 2 * ddx
        sr_expend = np.tile(sr[1:-1], [grid.nlon, 1]).T
        ddy = 2 * (grdv[:-2, :] + grdv[2:, :] - 2 * grdv[1:-1, :]) * sr_expend
        g1[:-2, :] += ddy
        g1[2:, :] += ddy
        g1[1:-1, :] -= 2 * ddy
        g2 = np.zeros(grdv.shape)
        sta_g = (c00 * grdv[jg, ig] + c01 * grdv[jg, ig1]
                 + c10 * grdv[jg1, ig] + c11 * grdv[jg1, ig1])
        d = 2 * (sta_g - obs)
        # np.add.at accumulates correctly when several stations share one grid cell
        np.add.at(g2, (jg, ig), d * c00)
        np.add.at(g2, (jg, ig1), d * c01)
        np.add.at(g2, (jg1, ig), d * c10)
        np.add.at(g2, (jg1, ig1), d * c11)
        g = sm * g1 + g2
        return g.reshape(-1)

    x = grd.values.reshape(-1)
    x_oa = frprmn2(x, targe, grads)
    grd.values = x_oa.reshape(1, 1, 1, 1, grid.nlat, grid.nlon)
    return grd
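# Illustrative sketch (a usage assumption, not part of the library): the targe/grads pair
# built above is an ordinary cost/gradient pair, so for a small test grid the analytic
# gradient can be sanity-checked against finite differences, and the minimization can be
# reproduced with scipy.optimize.minimize instead of the library's frprmn2.
def _example_check_cost_and_gradient(targe, grads, x0):
    import numpy as np
    from scipy.optimize import check_grad, minimize
    err = check_grad(targe, grads, x0)            # should be small relative to |grad|
    res = minimize(targe, np.asarray(x0, dtype=float), jac=grads,
                   method='CG', options={'maxiter': 200})
    return err, res.x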