コード例 #1
0
def open_and_apply_bias(obs_ds, hist_p, sen245_p, sen585_p):
    """Open historical and scenario precipitation files, convert them to
    monthly totals and run both bias-correction variants.

    Parameters
    ----------
    obs_ds : observed precipitation DataArray (monthly).
    hist_p, sen245_p, sen585_p : paths to the historical, SSP2-4.5 and
        SSP5-8.5 NetCDF files.
    """
    hist_da = xr.open_dataarray(hist_p)
    s245_da = xr.open_dataarray(sen245_p)
    # The SSP5-8.5 file extends beyond the study window; clip to 2015-2100.
    s585_da = ut.select_year(xr.open_dataarray(sen585_p), 2015, 2100)

    print('\t', 'convert km*m^-2*s^-1 to mm/day ...', '\r', flush=True, end='')

    def _monthly(da):
        # Unit conversion followed by month-start totals.
        return ut.km_m2_s__to__mm_day(da).resample(time='MS').sum()

    model_m = _monthly(hist_da)
    sen245_m = _monthly(s245_da)
    sen585_m = _monthly(s585_da)
    # Re-attach precipitation metadata lost during the arithmetic.
    for da in (model_m, sen245_m, sen585_m):
        da.attrs = pr_attr

    print('\t', 'merging observe, model, 245, 585 ...', '\r', flush=True, end='')
    hist_axis = pd.date_range('1998-01-01', '2014-12-01', freq='MS')
    sce_axis = pd.date_range('2015-01-01', '2100-12-01', freq='MS')
    # Force explicit month-start time axes so all variables align exactly.
    ds = xr.merge([
        obs_ds.rename('obs_data'),
        model_m.rename('model_data').assign_coords(time=hist_axis),
        sen245_m.rename('sce245').assign_coords(time=sce_axis),
        sen585_m.rename('sce585').assign_coords(time=sce_axis),
    ])
    # Spatial chunks allow the corrections to run block-wise via dask.
    ds_chunk = ds.chunk({'lat': 16, 'lon': 16})
    apply_bias(ds_chunk, basic_quantile, out_basic, hist_p=hist_p, _245_p=sen245_p, _585_p=sen585_p)
    apply_bias(ds_chunk, modified_quantile, out_mod, hist_p=hist_p, _245_p=sen245_p, _585_p=sen585_p)
コード例 #2
0
ファイル: jra55.py プロジェクト: SavannaZombie/OperateNC
def make_sum2(ds_day, ds_name, save_var):
    """Write daily, monthly and annual-mean NetCDF products for one dataset.

    ``ds_day`` is trimmed to 1998-2014, averaged to month ends, then reduced
    to a single time mean; every stage is saved under
    ``clean_dir / save_var / ...``.
    """
    cut = ut.select_year(ds_day, 1998, 2014)
    monthly_da = cut.resample(time='M').mean(keep_attrs=True)
    annual_mean = monthly_da.mean(dim='time', keep_attrs=True)

    base = clean_dir / save_var
    # NOTE(review): the full daily file is labelled 1998_2018 while the cut
    # products are 1998_2014 -- presumably the raw span; confirm upstream.
    ds_day.to_netcdf(base / f'{ds_name}_{save_var}_daily_1998_2018.nc')
    cut.to_netcdf(base / daily / f'{ds_name}_{save_var}_daily_1998_2014.nc')
    monthly_da.to_netcdf(base / monthly / f'{ds_name}_{save_var}_monthly_1998_2014.nc')
    annual_mean.to_netcdf(base / mean / f'{ds_name}_{save_var}_annual_1998_2014.nc')
コード例 #3
0
def open_and_format(paths: list):
    """Load each precipitation file as monthly sums, mask it to observed
    land cells and export 17-year CSV slices (2015-2031, ..., 2083-2099).

    Parameters
    ----------
    paths : list of pathlib.Path
        NetCDF files whose name carries the model id at underscore-field 4.
    """
    total = len(paths)  # was `l`, which is easily misread as `1`
    for idx, path in enumerate(paths):
        print(idx + 1, total, path.name)
        da = xr.load_dataarray(path).resample(time='MS').sum()
        # Keep only cells where the observation grid has data.
        da_m = da.where(obs_land.notnull())
        time_var = path.parent.parent.name
        folder_name = path.name.split('_')[4]
        # Fix: this loop previously reused `i`, shadowing the enumerate
        # counter above; use a distinct name for the window start year.
        for start in range(2015, 2100, 17):
            window = ut.select_year(da_m, start, start + 16)
            out_path = ut.save_file(out / time_var / folder_name /
                                    f'{folder_name}_{time_var}_{start}-{start+16}.csv')
            print('\tsaving', out_path, '\r', flush=True, end='')
            format_cdbc(window).to_csv(out_path, index=False, header=False)
        print()
コード例 #4
0
        SortedCorr = sorted(DateCorr_Dict.items())
        CorrectedData.append([lat[j], lon[j]] + [v for k, v in SortedCorr])

    csv_ls = [
        (','.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
        for c in range(len(CorrectedData[0]))
    ]
    csv_io = StringIO('\n'.join(csv_ls))
    return pd.read_csv(csv_io, header=None)


# %%
# Observed precipitation CSV (SA-OBS, South-East Asia domain).
obs = r'H:\CMIP6 - SEA\csv\pr\new_sa_obs.csv'
# Scenario coordinate template clipped to 2015-2099 so every scenario
# shares the same time axis.
coor_nc = ut.select_year(
    xr.open_dataset(r'H:\CMIP6 - Test\ssp_coords_2015-01-01_2100-12-01.nc'),
    2015, 2099)
# Input folders for the historical run and the two scenarios
# (ut.lsdir presumably lists the directory contents -- confirm in utils).
hist = ut.lsdir(r'H:\CMIP6 - SEA\csv\pr\historical')
ssp245 = ut.lsdir(r'H:\CMIP6 - SEA\csv\pr\ssp245')
ssp585 = ut.lsdir(r'H:\CMIP6 - SEA\csv\pr\ssp585')
# Output roots for bias-corrected NetCDF and CSV products.
out_nc = Path(r'H:\CMIP6 - Biased\pr_gamma\nc')
out_csv = Path(r'H:\CMIP6 - Biased\pr_gamma\csv')


# %%
@ray.remote
def run(ssp_p):
    for i, h in enumerate(hist):
        time_var = ssp_p[i].parent.name
        name = ssp_p[i].name
        print(i, time_var, name)
コード例 #5
0
from mpl_toolkits.axes_grid1 import make_axes_locatable

# Load the two observation products: APHRODITE temperature and SA-OBS.
aph = xr.open_dataset(
    r'H:\Observation\Raw Data (SEA)\[SEA] Aphrodite 1901 & 1808\APHRO_MA_TAVE_025deg_V1808_1961-2015_Temp.nc'
)
sa = xr.open_dataset(
    r'H:\Observation\Raw Data (SEA)\[SEA] SA-OBS\tg_0.25deg_reg_v2.0_saobs.nc')

aph_da = aph.tave
# SA-OBS names its coordinates longitude/latitude; normalise to lon/lat.
sa_da = sa.tg.rename({'longitude': 'lon', 'latitude': 'lat'})
crop_sa_da = pre.crop_sea(sa_da)
crop_aph_da = pre.crop_sea(aph_da)

# Two comparison windows, each ordered [SA-OBS, APHRODITE].
time1 = [ut.select_year(da, 1998, 2014) for da in (crop_sa_da, crop_aph_da)]
time2 = [ut.select_year(da, 1981, 2007) for da in (crop_sa_da, crop_aph_da)]
#%%
# All-time means over each window, re-cropped to the SEA domain.
mean_time1 = [pre.crop_sea(da.mean(dim='time')) for da in time1]
mean_time2 = [pre.crop_sea(da.mean(dim='time')) for da in time2]
#%%
# Select which window feeds the plots below.
time = mean_time2
#%%
fig, axes = plt.subplots(
    ncols=3,
    nrows=1,
コード例 #6
0
    m_5_10_mean = m_5_10.groupby('time.month').mean()
    m_11_4_mean = m_11_4.groupby('time.month').mean()

    m_5_10_sum = m_5_10_mean.sum(dim='month', skipna=False)
    m_11_4_sum = m_11_4_mean.sum(dim='month', skipna=False)

    return m_5_10_sum, m_11_4_sum


# %%
# Multi-file datasets for the bias-corrected historical run and scenarios.
mf_hist = ut.get_mfds(pr_bias_hist_path)
mf_245 = ut.get_mfds(pr_bias_245_path)
mf_585 = ut.get_mfds(pr_bias_585_path)

# Seasonal means for observations, the historical model run, and the
# near/mid/far future windows of both scenarios (SSP2-4.5, SSP5-8.5).
obs, mod = mean_pr_mjj_ndj(cru), mean_pr_mjj_ndj(mf_hist)
near, mid, far = (
    (mean_pr_mjj_ndj(ut.select_year(mf_245, y0, y1)),
     mean_pr_mjj_ndj(ut.select_year(mf_585, y0, y1)))
    for y0, y1 in ((2015, 2039), (2040, 2069), (2070, 2099))
)

# %%
# mme_hist = mf_hist.mean(dim='id', skipna=True)
# mme_245 = mf_245.mean(dim='id', skipna=True)
# mme_585 = mf_585.mean(dim='id', skipna=True)

# %%
# Domain-mean value of the first seasonal component for every dataset.
mjj = [
    float(da[0].mean().values)
    for da in [obs, mod] + list(near + mid + far)
]
コード例 #7
0
# SSP5-8.5 converted to mm/day and summed to month-start totals.
sen585_m = mm_day(sen585).resample(time='MS').sum()

# Restore precipitation metadata on every monthly series.
for _da in (model_m, sen245_m, sen585_m):
    _da.attrs = pr_attr
# %%
# %%
# Merge observations, the historical model run and both scenarios onto
# explicit month-start time axes so all variables align exactly.
hist_axis = pd.date_range('1998-01-01', '2014-12-01', freq='MS')
sce_axis = pd.date_range('2015-01-01', '2100-12-01', freq='MS')
ds = xr.merge([
    obs.rename('obs_data'),
    model_m.rename('model_data').assign_coords(time=hist_axis),
    sen245_m.rename('sce245').assign_coords(time=sce_axis),
    # SSP5-8.5 runs past 2100; clip before aligning its time axis.
    ut.select_year(sen585_m, 2015, 2100).rename('sce585').assign_coords(time=sce_axis),
])
# Spatial chunking for dask-parallel bias correction.
ds_chunk = ds.chunk({'lat': 16, 'lon': 16})


# %%
def basic_quantile2(obs_data, mod_data, sce_data):
    """Additive quantile-delta correction of a scenario series.

    The correction is the element-wise difference between the observed and
    modelled deciles (10th..90th percentile, NaNs ignored), added to
    ``sce_data`` -- so ``sce_data`` must broadcast against the 9 decile
    deltas (assumed by the caller; TODO confirm intended shapes).

    Fix: the original body also built ``ECDF(mod_data)`` and evaluated it on
    ``sce_data`` without ever using the result; that dead computation has
    been removed. The returned value is unchanged.
    """
    deciles = np.arange(10, 100, 10)
    cor = np.subtract(*[np.nanpercentile(x, deciles) for x in (obs_data, mod_data)])
    return sce_data + cor


def modified_quantile(obs_data, mod_data, sce_data):
    cdf = ECDF(mod_data)
    p = cdf(sce_data) * 100
コード例 #8
0
        da_m = da.where(obs_land.notnull())
        time_var = path.parent.parent.name
        folder_name = path.name.split('_')[4]
        for i in range(2015, 2100, 17):
            r = ut.select_year(da_m, i, i + 16)
            out_path = ut.save_file(out / time_var / folder_name /
                                    f'{folder_name}_{time_var}_{i}-{i+16}.csv')
            print('\tsaving', out_path, '\r', flush=True, end='')
            format_cdbc(r).to_csv(out_path, index=False, header=False)
        print()


#%%

# One-off export: the 2032-2047 slice of the first SSP2-4.5 file.
da = xr.load_dataarray(sce245_path[0]).resample(time='MS').sum()
sliced = ut.select_year(da, 2032, 2047).where(obs_land.notnull())
format_cdbc(sliced).to_csv(
    ut.test_path('ssp245_2032-2047.csv'), index=False, header=False)
#%%
# print('Hist')
# print('------------------')
# open_and_format(hist_path)
for label, paths in (('ssp245', sce245_path), ('ssp585', sce585_path)):
    print(label)
    print('------------------')
    open_and_format(paths)
#%%w
ls = [
    Path(r'H:\CMIP6 - Biased\pr_gamma\csv\ssp245'),
    Path(r'H:\CMIP6 - Biased\pr_gamma\csv\ssp585')
コード例 #9
0
#%%
# Model identifier: first underscore-separated token of the source file name.
name = path.name.split('_')[0]
# Attributes intended for the cleaned temperature variable.
attr1 = {
    'units': 'degree centigrade',
    'long_name': 'Minimum temperature at 2 metres'
}
print(name)
#%%
ds_tmin = ds['tmean']
print(ds_tmin)
#%%
# Daily slice of the study window.
ds_d = ut.select_year(ds_tmin, 1998, 2014)
#%%
# Month-end means over the same window.
ds_m = (ut.select_year(ds_tmin, 1998, 2014)
        .resample(time='M')
        .mean(keep_attrs=True))
#%%
# Climatological mean of the monthly series, saved as the annual product.
ds_mean = ds_m.mean(dim='time', keep_attrs=True)
ds_mean.to_netcdf(mean / f'{name}_tmean_annual_{1998}_{2014}.nc')
コード例 #10
0
# SA-OBS daily mean temperature, raw.
ds = xr.open_dataset(
    r'H:\Observation\Raw Data (SEA)\[SEA] SA-OBS\tg_0.25deg_reg_v2.0_saobs.nc')
#%%
# Normalise coordinate and variable names in one pass.
ds = ds.rename({'longitude': 'lon', 'latitude': 'lat', 'tg': 'tmean'})
da = ds.tmean

#%%
# Output layout under the cleaned-data root.
clean_dir = Path(r'H:\Observation\Cleaned Data')
daily = 'daily 1998-2014'
monthly = 'monthly 1998-2014'
mean = 'mean annual 1998-2014'
save_var = 'tmean'
ds_name = 'sa-obs'

# Study window, cropped to the SEA domain.
ds_day_cut = pre.crop_sea(ut.select_year(da, 1998, 2014))
# %%
# Month-end means, then the all-time mean of those months.
ds_monthly = ds_day_cut.resample(time='M').mean(skipna=True)
ds_mean = ds_monthly.mean(dim='time', skipna=True)

#%%
# Persist the daily, monthly and annual-mean products.
for out_da, folder, tag in ((ds_day_cut, daily, 'daily'),
                            (ds_monthly, monthly, 'monthly'),
                            (ds_mean, mean, 'annual')):
    out_da.to_netcdf(clean_dir / save_var / folder /
                     f'{ds_name}_{save_var}_{tag}_1998_2014.nc')
コード例 #11
0
ファイル: diff_graph.py プロジェクト: SavannaZombie/OperateNC
            id=i, time=pd.date_range('1998-01-01', '2014-12-01', freq='MS')))
hist = xr.concat(mf, dim='id')

# SSP2-4.5: monthly sums stacked along a new 'id' dimension on a common
# 2015-2100 month-start time axis.
sce_axis = pd.date_range('2015-01-01', '2100-12-01', freq='MS')
mf = [
    xr.load_dataarray(p).resample(time='MS').sum()
    .assign_coords(id=i, time=sce_axis)
    for i, p in enumerate(ssp245_pr)
]
ssp245 = xr.concat(mf, dim='id')

# SSP5-8.5 files run past 2100; clip before aligning the time axis.
mf = [
    ut.select_year(xr.load_dataarray(p).resample(time='MS').sum(), 2015, 2100)
    .assign_coords(id=i, time=sce_axis)
    for i, p in enumerate(ssp585_pr)
]
ssp585 = xr.concat(mf, dim='id')
#%%
# Replace the integer id coordinate with the model name taken from
# underscore-field 4 of the historical file names.
name = [p.name.split('_')[4] for p in hist_pr]
hist = hist.assign_coords(id=name)
#%%
ssp245 = ssp245.assign_coords(id=name)
ssp585 = ssp585.assign_coords(id=name)
#%%
hist.to_netcdf(r'H:\CMIP6 - SEA\mme\pr_hist_monthly_1998_2014.nc')
ssp245.to_netcdf(r'H:\CMIP6 - SEA\mme\pr_ssp245_monthly_2015_2100.nc')
ssp585.to_netcdf(r'H:\CMIP6 - SEA\mme\pr_ssp585_monthly_2015_2100.nc')
#%%
# Annual (year-start) totals; skipna=False keeps all-NaN cells as NaN.
hist_y = hist.resample(time='AS').sum(skipna=False)