Example 1
import numpy as np
import dimarray as da

# Note: 'vcm_name' is a module-level variable defined elsewhere in the source script.
def main(window):
    window = int(window)
    years = range(2006, 2015)
    fullvcm = []
    fullnprof = []
    for year in years:
        mask = 'out.{:02d}/{:04d}/*.nc4'.format(window, year)
        print(mask)
        try:
            vcm = da.read_nc(mask, vcm_name, axis='file')
        except ValueError:
            print('No monthlies for {:04d}, skipping'.format(year))
            continue
        # vcm.reset_axis(filename_to_datetime, 'file')
        nprof = da.read_nc(mask, 'nprof', axis='file')
        # nprof.reset_axis(filename_to_datetime, 'file')
        fullvcm.append(vcm)
        fullnprof.append(nprof)
    vcm = da.concatenate(fullvcm, axis='file')
    nprof = da.concatenate(fullnprof, axis='file')
    print(vcm, nprof)

    np.savez('series_%d.npz' % window,
             vcm=vcm,
             nprof=nprof,
             altmin=vcm.labels[1],
             time=vcm.labels[0],
             lon=vcm.labels[2])
Example 2
def test_concatenate_2d():

    a = DimArray([[ 1,  2,  3],
                  [11, 22, 33]], axes=[[0,1],[2,1,0]])

    b = DimArray([[44, 55, 66],
                  [ 4,  5,  6]], axes=[[1,0],[2,1,0]])

    c0_got = concatenate((a, b), axis=0)
    c0_got_ds = concatenate_ds(_make_datasets(a, b), axis=0)

    c0 = DimArray([[ 1,  2,  3],
                   [11, 22, 33],
                   [44, 55, 66],
                   [ 4,  5,  6]], axes=[[0,1,1,0],[2,1,0]])

    assert_equal_dimarrays(c0_got, c0)
    assert_equal_dimarrays(c0_got_ds['a'], c0)
                  
    # axis "x0" is not aligned !
    with pytest.raises(ValueError):
        c1_got = concatenate((a, b), axis=1)
        print(c1_got)

    c1_got = concatenate((a, b), axis=1, align=True, sort=True)
    c1_got_ds = concatenate_ds(_make_datasets(a, b), axis=1, align=True, sort=True)

    c1 = DimArray([[ 1,  2,  3,  4,  5,  6],
                   [11, 22, 33, 44, 55, 66]], axes=[[0,1],[2,1,0,2,1,0]])

    assert_equal_dimarrays(c1_got, c1)
    assert_equal_dimarrays(c1_got_ds['a'], c1)
Example 3
def test_concatenate_1d():
    a = DimArray([1,2,3], dims=['x0'])
    b = DimArray([11,22,33], dims=['x0'])
    c = DimArray([1,  2,  3, 11, 22, 33], axes=[[0, 1, 2, 0, 1, 2]], dims=['x0'])
    # a = DimArray([1,2,3], axes=[['a','b','c']])
    # b = DimArray([4,5,6], axes=[['d','e','f']])
    # c = DimArray([1, 2, 3, 4, 5, 6], axes=[['a','b','c','d','e','f']])

    c_got = concatenate((a, b))
    c_got_ds = concatenate_ds(_make_datasets(a, b))

    assert_equal_dimarrays(c_got, c)
    assert_equal_dimarrays(c_got_ds['a'], c)
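
The two test functions above pin down the behaviour the scripts below rely on: along the concatenation axis the labels of the inputs are appended as they are (duplicates included), while mismatched labels on the other axes raise a ValueError unless align=True is passed. A minimal sketch of the 1-D case, assuming dimarray is importable as da:

import dimarray as da

a = da.DimArray([1, 2, 3], dims=['x0'])
b = da.DimArray([11, 22, 33], dims=['x0'])

# The default integer labels 0..2 of both inputs are appended as-is,
# so the result carries the duplicated axis values [0, 1, 2, 0, 1, 2].
c = da.concatenate((a, b))
print(c.x0)      # expected: [0 1 2 0 1 2]
print(c.values)  # expected: [ 1  2  3 11 22 33]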
Example 4
def main(window):
    window = int(window)
    years = range(2006,2015)
    fullvcm = []
    fullnprof = []
    for year in years:
        mask = 'out.{:02d}/{:04d}/*.nc4'.format(window, year)
        print(mask)
        try:
            vcm = da.read_nc(mask, vcm_name, axis='file')
        except ValueError:
            print('No monthlies for {:04d}, skipping'.format(year))
            continue
        # vcm.reset_axis(filename_to_datetime, 'file')
        nprof = da.read_nc(mask, 'nprof', axis='file')
        # nprof.reset_axis(filename_to_datetime, 'file')
        fullvcm.append(vcm)
        fullnprof.append(nprof)
    vcm = da.concatenate(fullvcm, axis='file')
    nprof = da.concatenate(fullnprof, axis='file')
    print(vcm, nprof)

    np.savez('series_%d.npz' % window, vcm=vcm, nprof=nprof, altmin=vcm.labels[1], time=vcm.labels[0], lon=vcm.labels[2])
Example 5
import glob

import numpy as np
import dimarray as da

import __settings

# Note: 'model', 'in_path' and 'scenario' are set earlier in the original script.

model_dict = __settings.model_dict
masks = da.read_nc('masks/srex_mask_' + model_dict[model]['grid'] + '.nc')

all_files = sorted(
    glob.glob(in_path + scenario + '/monEKE*_' + scenario + '*.nc'))

big_merge = {}
big_merge['eke'] = da.read_nc(all_files[0])['eke'][:, 0:, :]
big_merge['run_id'] = da.read_nc(all_files[0])['eke'][:, 0:, :].copy()
big_merge['run_id'].values = 0

for i_run, file_name in enumerate(all_files[1:]):
    print(file_name)
    big_merge['eke'] = da.concatenate(
        (big_merge['eke'], da.read_nc(file_name)['eke'][:, 0:, :]))
    tmp = da.read_nc(file_name)['eke'][:, 0:, :].copy()
    tmp.values = i_run + 1
    big_merge['run_id'] = da.concatenate((big_merge['run_id'], tmp))
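
Since da.concatenate also accepts a list of DimArrays (as in Example 1), the pairwise merging above can be written as a single call per variable. A sketch under the same assumptions (the all_files list and the [:, 0:, :] slicing used above):

# Sketch: an equivalent alternative to the big_merge construction above,
# building the pieces first and concatenating each list in one call.
ekes = [da.read_nc(f)['eke'][:, 0:, :] for f in all_files]
run_ids = []
for i_run, eke in enumerate(ekes):
    rid = eke.copy()
    rid.values = i_run  # tag every value with the index of its source file
    run_ids.append(rid)

big_merge = {}
big_merge['eke'] = da.concatenate(ekes)
big_merge['run_id'] = da.concatenate(run_ids)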

for region in [
        'EAS', 'TIB', 'CAS', 'WAS', 'MED', 'CEU', 'ENA', 'CNA', 'WNA', 'NAS',
        'NEU', 'CGI', 'ALA'
]:
    mask = masks[region][0:, :]
    lats = np.where(np.nanmax(mask, axis=1) != 0)[0]
    lons = np.where(np.nanmax(mask, axis=0) != 0)[0]

    da.Dataset({
        key: val.ix[:, lats, lons]
        for key, val in big_merge.items()
Example 6
                                      '/stats_' + corWith_name + '*' +
                                      scenario + '*_' + state + '.nc')
                tmp_4 = {}
                for file_name in all_files:
                    region = file_name.split('_')[-2]
                    if region != 'NHml':
                        tmp = da.stack(da.read_nc(file_name),
                                       axis='statistic',
                                       align=True)
                        tmp_4[region] = tmp.mean(axis=(-2, -1))
                        tmp_4[region].values = np.nanmean(tmp, axis=(-2, -1))

                tmp_3_ = da.stack(tmp_3, align=True, axis='region')
                tmp_4_ = da.stack(tmp_4, align=True, axis='region')

                tmp_2[corWith_name] = da.concatenate((tmp_3_, tmp_4_),
                                                     align=True,
                                                     axis='statistic')

            tmp_1[state] = da.stack(tmp_2, axis='corWith', align=True)

        tmp_0[model] = da.stack(tmp_1, axis='state', align=True)

    result[scenario] = da.stack(tmp_0, axis='model', align=True)

da.Dataset({
    'summary_cor': da.stack(result, axis='scenario', align=True)
}).write_nc('data/cor_reg_summary.nc')

#
Example 7
    'grid_60.0_244.0', 'grid_60.0_246.0', 'grid_60.0_250.0', 'grid_60.0_252.0',
    'grid_64.0_238.0', 'grid_58.0_248.0', 'grid_58.0_250.0', 'grid_56.0_256.0',
    'grid_56.0_258.0', 'grid_58.0_258.0', 'grid_54.0_260.0', 'grid_54.0_262.0',
    'grid_52.0_262.0', 'grid_50.0_262.0', 'grid_52.0_264.0', 'grid_50.0_264.0',
    'grid_48.0_268.0', 'grid_48.0_270.0', 'grid_48.0_272.0', 'grid_48.0_274.0',
    'grid_48.0_276.0', 'grid_50.0_272.0', 'grid_46.0_268.0', 'grid_46.0_270.0',
    'grid_46.0_272.0', 'grid_46.0_274.0', 'grid_46.0_276.0', 'grid_46.0_278.0',
    'grid_46.0_280.0', 'grid_44.0_272.0', 'grid_44.0_274.0', 'grid_44.0_276.0',
    'grid_44.0_278.0', 'grid_44.0_280.0', 'grid_44.0_282.0', 'grid_44.0_284.0',
    'grid_42.0_272.0', 'grid_42.0_274.0', 'grid_42.0_276.0', 'grid_42.0_278.0',
    'grid_42.0_280.0', 'grid_42.0_282.0'
]

slr = da.read_nc('data/slr.nc')['slr']
slr_copied = da.read_nc('data/slr_copied.nc')['slr']
slr = da.concatenate((slr, slr_copied), axis='ID')

stations = da.read_nc('data/stations.nc')['stations']
stations_copied = da.read_nc('data/stations_copied.nc')['stations']
stations = da.concatenate((stations, stations_copied), axis='name')

station_lons, station_lats, station_names = [], [], []
grid_xmin, grid_xmax, grid_ymin, grid_ymax, grid_names = [], [], [], [], []
for name in list(set(stations.name)):
    if stations[name, 'tide']:
        station_names.append(name)
        station_lons.append(float(stations[name, 'lon']))
        station_lats.append(float(stations[name, 'lat']))

    if stations[name, 'tide'] == False:
        if name not in inland_grids:
Example 8
import os

import matplotlib.pylab as plt
import dimarray as da
from statsmodels.sandbox.stats import multicomp

os.chdir(
    '/Users/peterpfleiderer/Documents/Projects/Scripts/allgemeine_scripte')
import plot_map as plot_map
reload(plot_map)  # Python 2 built-in; use importlib.reload() in Python 3
from plot_map import col_conv
os.chdir('/Users/peterpfleiderer/Documents/Projects/HAPPI_persistence')

sum_dict = {}

tmp_1 = da.read_nc('data/MIROC5_SummaryMeanQu.nc')['SummaryMeanQu']
tmp_2 = da.read_nc('data/MIROC5_SummaryKS.nc')['SummaryKS']
sum_dict['MIROC5'] = da.concatenate((tmp_1, tmp_2), axis='type')

tmp_1 = da.read_nc('data/NORESM1_SummaryMeanQu.nc')['SummaryMeanQu']
tmp_2 = da.read_nc('data/NORESM1_SummaryKS.nc')['SummaryKS']
sum_dict['NORESM1'] = da.concatenate((tmp_1, tmp_2), axis='type')

tmp_1 = da.read_nc('data/ECHAM6-3-LR_SummaryMeanQu.nc')['SummaryMeanQu']
tmp_2 = da.read_nc('data/ECHAM6-3-LR_SummaryKS.nc')['SummaryKS']
sum_dict['ECHAM6-3-LR'] = da.concatenate((tmp_1, tmp_2), axis='type')

tmp_1 = da.read_nc('data/CAM4-2degree_SummaryMeanQu.nc')['SummaryMeanQu']
tmp_2 = da.read_nc('data/CAM4-2degree_SummaryKS.nc')['SummaryKS']
sum_dict['CAM4-2degree'] = da.concatenate((tmp_1, tmp_2), axis='type')
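
The four per-model blocks above repeat the same read-and-concatenate step; the same result could be produced with a loop. A minimal sketch, assuming the data/<model>_Summary*.nc naming used above:

# Sketch: equivalent to the four per-model blocks above, one model at a time.
for model in ['MIROC5', 'NORESM1', 'ECHAM6-3-LR', 'CAM4-2degree']:
    mean_qu = da.read_nc('data/' + model + '_SummaryMeanQu.nc')['SummaryMeanQu']
    ks = da.read_nc('data/' + model + '_SummaryKS.nc')['SummaryKS']
    sum_dict[model] = da.concatenate((mean_qu, ks), axis='type')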

tmp_1 = da.read_nc(
    '../HadGHCND_persistence/data/HadGHCND_SummaryMeanQu.nc')['SummaryMeanQu']
Example 9
				datevar.append(num2date(nc_rcp85.variables['time'][:],units = nc_rcp85.variables['time'].units,calendar = nc_rcp85.variables['time'].calendar))
				year=np.array([int(str(date).split("-")[0])	for date in datevar[0][:]])
				var_in=nc_rcp85.variables[var_name][:,:,:]
				if var_in.mean()>150:
					var_in-=273.15
				input_rcp85=da.DimArray(var_in[:,:,:].squeeze(), axes=[year, lat, lon],dims=['year', 'lat', 'lon'] )

				datevar = []
				datevar.append(num2date(nc_hist.variables['time'][:],units = nc_hist.variables['time'].units,calendar = nc_hist.variables['time'].calendar))
				year=np.array([int(str(date).split("-")[0])	for date in datevar[0][:]])
				var_in=nc_hist.variables[var_name][:,:,:]
				if var_in.mean()>150:
					var_in-=273.15
				input_hist=da.DimArray(var_in[:,:,:].squeeze(), axes=[year, lat, lon],dims=['year', 'lat', 'lon'] )

				input_data=da.concatenate((input_hist, input_rcp85), axis='year')

				cmip5_dict[model][var]=pdf.PDF_Processing(var)
				cmip5_dict[model][var].mask_for_ref_period_data_coverage(input_data,ref_period,check_ref_period_only=False,target_periods=target_periods)

				# Derive time slices
				cmip5_dict[model][var].derive_time_slices(ref_period,target_periods,period_names)
				cmip5_dict[model][var].derive_distributions()

				for change in levels:
					if len(cmip5_dict[model][var]._distributions['global'][str(change)]-cmip5_dict[model][var]._distributions['global']['ref'])>0:
						cmip5_dict[model][var].derive_pdf_difference('ref',str(change),pdf_method=pdf_method,bin_range=varin_dict[var]['cut_interval'],relative_diff=False)
					else:
						print(cmip5_dict[model][var]._distributions['global'][str(change)]-cmip5_dict[model][var]._distributions['global']['ref'])
						break