def _run_file_model(suffix, gdir, ye):
    """Read a FileModel and run it until year `ye`."""
    rp = gdir.get_filepath('model_run', filesuffix=suffix)
    fmod = FileModel(rp)
    fmod.run_until(ye)
    return copy.deepcopy(fmod)
def plot_length(gdir, candidates_df, experiment):
    """Plot the volume evolution of all candidate states until the present."""
    plot_dir = '/home/juliaeis/Dokumente/OGGM/work_dir/find_initial_state/past_state_information/plots'
    fig, ax = plt.subplots(figsize=(20, 10))
    indices = range(len(candidates_df))

    # run all candidates forward to the present in parallel
    # (results are written to disk and read back below)
    pool = Pool()
    present_models = pool.map(
        partial(run_and_store_to_present, gdir=gdir, ys=1850, ye=2000,
                flowlines=candidates_df.model), indices)
    pool.close()
    pool.join()

    # label the first line of each class only once
    labeled_possible = False
    labeled_not_possible = False
    for i in indices:
        if candidates_df.iloc[i].objective <= 100:
            color = 'green'
            label = '' if labeled_possible else 'possible'
            labeled_possible = True
        else:
            color = 'red'
            label = '' if labeled_not_possible else 'not possible'
            labeled_not_possible = True
        path = gdir.get_filepath('model_run', filesuffix='_past_' + str(i))
        fmod = FileModel(path)
        fmod.volume_m3_ts().plot(ax=ax, color=color, label=label)

    # reference run: the synthetic experiment, run forward with past climate
    tasks.run_from_climate_data(gdir, ys=1850, ye=2000,
                                init_model_fls=deepcopy(
                                    experiment['y_t0'].fls),
                                output_filesuffix='_past_experiment')
    path = gdir.get_filepath('model_run', filesuffix='_past_experiment')
    fmod = FileModel(path)
    fmod.volume_m3_ts().plot(ax=ax, style='k:', linewidth=3, label='')

    plt.tick_params(axis='both', which='major', labelsize=25)
    plt.xlabel('time', fontsize=30)
    plt.ylabel(r'volume $(m^3)$', fontsize=30)
    plt.title(gdir.rgi_id, fontsize=30)
    plt.legend(fontsize=20)
    plt.savefig(os.path.join(plot_dir, 'length_' + str(gdir.rgi_id) + '.png'),
                dpi=200)
    plt.show()
def read_result_parallel(gdir):
    """Read the calibrated experiment of one glacier and, if necessary,
    extend it to 2016 with the past climate file."""
    ex = [f for f in os.listdir(gdir.dir) if f.startswith('model_run_ad')]
    if len(ex) == 1:
        path = os.path.join(gdir.dir, ex[0])
        ex_mod = FileModel(path)
        ye = ex_mod.volume_km3_ts().index[-1]
        # the mass balance bias is encoded in the file name
        bias = float(ex[0].split('_')[-1].split('.nc')[0])
        ts = ex_mod.volume_km3_ts()
        if ye < 2016:
            try:
                ex_mod.run_until(ye)
                tasks.run_from_climate_data(gdir, ys=ye, ye=2016, bias=bias,
                                            output_filesuffix='_to_2016',
                                            init_model_fls=copy.deepcopy(
                                                ex_mod.fls))
                res_mod = FileModel(
                    gdir.get_filepath('model_run', filesuffix='_to_2016'))
                # drop the first year, which duplicates the last year of ts
                ts2 = res_mod.volume_km3_ts().iloc[1:]
                ts = pd.concat([ts, ts2])
            except Exception:
                pass
        ts['rgi_id'] = gdir.rgi_id
        return ts
    else:
        return pd.Series({'rgi_id': gdir.rgi_id})
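# Usage sketch (assumption, mirroring the Pool idiom used above): collect the
# volume time series of many glaciers in parallel. `gdirs` is assumed to be
# an iterable of oggm GlacierDirectories.
def _collect_results_sketch(gdirs):
    pool = Pool()
    series_list = pool.map(read_result_parallel, gdirs)
    pool.close()
    pool.join()
    # one row per glacier; failed glaciers only carry their rgi_id
    return pd.DataFrame(series_list).set_index('rgi_id')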
def _run_experiment(gdir, temp_bias, bias, ys, ye):
    """Create the synthetic experiment for one glacier.

    model_run_experiment.nc is saved in the working directory.
    """
    # check if this experiment already exists
    try:
        rp = gdir.get_filepath('model_run',
                               filesuffix='_advanced_experiment_' +
                                          str(temp_bias) + '_' + str(bias))
        model = FileModel(rp)
    # otherwise create the experiment
    except Exception:
        try:
            fls = gdir.read_pickle('model_flowlines')
            model = tasks.run_random_climate(
                gdir, nyears=600, y0=ys, bias=bias, seed=1,
                temperature_bias=temp_bias, init_model_fls=fls,
                output_filesuffix='_random_experiment_' + str(temp_bias) +
                                  '_' + str(bias))

            # construct the observed glacier: run the previous glacier
            # forward from 1917 to the RGI date with the past climate file
            fls = copy.deepcopy(model.fls)
            tasks.run_from_climate_data(
                gdir, ys=ys, ye=ye, init_model_fls=fls, bias=bias,
                output_filesuffix='_advanced_experiment_' + str(temp_bias) +
                                  '_' + str(bias))

            # reopen as a FileModel for returning
            rp = gdir.get_filepath('model_run',
                                   filesuffix='_advanced_experiment_' +
                                              str(temp_bias) + '_' +
                                              str(bias))
            model = FileModel(rp)
        except Exception:
            # OGGM failed: return the last error message from the log
            with open(os.path.join(gdir.dir, 'log.txt')) as log:
                error = list(log)[-1].split(';')[-1]
            return error
    return model
def _single_calibration_run(gdir, mb_offset, ys, ye):
    """Create the synthetic experiment for one glacier.

    model_geometry_experiment.nc is saved in the working directory.
    """
    # check if this model_geometry already exists
    try:
        rp = gdir.get_filepath('model_geometry',
                               filesuffix='_calibration_past_' +
                                          str(mb_offset))
        model = FileModel(rp)
    # otherwise create a calibration run with mb_offset
    except Exception:
        try:
            fls = gdir.read_pickle('model_flowlines')
            # run a 600 year random climate run with mb_offset
            tasks.run_random_climate(gdir, nyears=600, y0=ys, bias=mb_offset,
                                     seed=1, init_model_fls=fls,
                                     output_filesuffix='_calibration_random_'
                                                       + str(mb_offset))

            # construct s_OGGM: run the previous glacier forward from
            # ys to ye with the past climate file
            tasks.run_from_climate_data(
                gdir, ys=ys, ye=ye,
                init_model_filesuffix='_calibration_random_' +
                                      str(mb_offset),
                bias=mb_offset, init_model_yr=600,
                output_filesuffix='_calibration_past_' + str(mb_offset))

            # reopen as a FileModel for returning
            rp = gdir.get_filepath('model_geometry',
                                   filesuffix='_calibration_past_' +
                                              str(mb_offset))
            model = FileModel(rp)
        except Exception:
            # OGGM failed: return the last error message from the log
            with open(os.path.join(gdir.dir, 'log.txt')) as log:
                error = list(log)[-1].split(';')[-1]
            return error
    return model
def find_residual(gdir, temp_bias_list, ys, a=-2000, b=2000):
    """Find, by bisection, the mass balance bias that reproduces the RGI
    area at the RGI date for each temperature bias."""
    best_df = pd.DataFrame()
    fls = gdir.read_pickle('model_flowlines')
    mod = FluxBasedModel(flowlines=fls)

    for temp_bias in temp_bias_list:
        try:
            ye = gdir.rgi_date
            max_it = 15
            i = 0
            bounds = [a, b]
            df = pd.DataFrame()
            while i < max_it:
                bias = round((bounds[0] + bounds[1]) / 2, 1)
                ex_mod2 = _run_experiment(gdir, temp_bias, bias, ys, ye)
                diff = mod.area_km2 - ex_mod2.area_km2_ts()[ye]
                df = pd.concat([df, pd.DataFrame([{'bias': bias,
                                                   'area_diff': diff}])],
                               ignore_index=True)
                if (abs(diff) < 1e-4) or bounds[1] - bounds[0] <= 1:
                    break
                elif ex_mod2.area_km2_ts()[ye] > mod.area_km2:
                    bounds[0] = bias
                else:
                    bounds[1] = bias
                i += 1

            # best bias found
            bias = df.loc[df.area_diff.abs().idxmin()].bias
            rp = gdir.get_filepath('model_run',
                                   filesuffix='_advanced_experiment_' +
                                              str(temp_bias) + '_' +
                                              str(bias))
            model = FileModel(rp)
            diff = gdir.rgi_area_km2 - model.area_km2_ts()[gdir.rgi_date]
            series = pd.Series({'rgi_id': gdir.rgi_id, 'bias': bias,
                                'iterations': i, 'area_diff': diff,
                                'model': model, 'temp_bias': temp_bias})
        except Exception:
            series = pd.Series({'rgi_id': gdir.rgi_id,
                                'temp_bias': temp_bias})
        best_df = pd.concat([best_df, series.to_frame().T],
                            ignore_index=True)
    return best_df
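# Usage sketch (hypothetical driver, not from the original code): calibrate
# the mass balance residual for a handful of temperature biases. ys=1917
# matches the HISTALP starting year mentioned in `_run_experiment` above.
def _demo_find_residual(gdir):
    temp_bias_list = [-1.0, -0.5, 0.0, 0.5, 1.0]
    best_df = find_residual(gdir, temp_bias_list, ys=1917)
    # one row per temperature bias, with the bisected `bias` and the
    # remaining `area_diff` at the RGI date
    return best_df.sort_values('temp_bias')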
def _run_random_task(tupel, gdir, y0, bias):
    """Run a random climate model to create many possible glacier states."""
    seed, temp_bias = tupel
    fls = gdir.read_pickle('model_flowlines')
    suffix = str(y0) + '_random_' + str(seed) + '_' + str(temp_bias)
    # path = gdir.get_filepath('model_run', filesuffix=suffix)
    path = os.path.join(gdir.dir, str(y0), 'model_run' + suffix + '.nc')

    # does the file already exist?
    if not os.path.exists(path):
        try:
            tasks.run_random_climate(gdir, nyears=600, y0=y0, bias=bias,
                                     seed=seed, temperature_bias=temp_bias,
                                     init_model_fls=copy.deepcopy(fls),
                                     output_filesuffix=suffix)
            return path
        # OGGM failed, probably "glacier exceeds boundaries"
        except Exception:
            return None
    else:
        # does the file contain a valid model?
        try:
            FileModel(path)
            return path
        except Exception:
            return None
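# `_run_random_parallel` is called by `generation` below but not shown in
# this file. A minimal sketch under these assumptions: it maps
# `_run_random_task` over (seed, temp_bias) tuples with a Pool and returns a
# DataFrame with the 'suffix' and 'temp_bias' columns that `generation`
# expects; failed runs (None paths) are dropped.
def _run_random_parallel_sketch(gdir, y0, task_list, bias):
    pool = Pool()
    paths = pool.map(partial(_run_random_task, gdir=gdir, y0=y0, bias=bias),
                     task_list)
    pool.close()
    pool.join()
    rows = [{'suffix': str(y0) + '_random_' + str(seed) + '_' + str(tb),
             'temp_bias': tb}
            for (seed, tb), p in zip(task_list, paths) if p is not None]
    return pd.DataFrame(rows)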
def _run_to_present(tupel, gdir, ys, ye, bias):
    """Run a glacier candidate forward to the present."""
    suffix = tupel[0]
    # path = gdir.get_filepath('model_run', filesuffix=suffix)
    path = os.path.join(gdir.dir, str(ys), 'model_run' + suffix + '.nc')

    # does the file already exist?
    if not os.path.exists(path):
        try:
            tasks.run_from_climate_data(gdir, ys=ys, ye=ye, bias=bias,
                                        output_filesuffix=suffix,
                                        init_model_fls=copy.deepcopy(
                                            tupel[1].fls))
            return suffix
        # OGGM failed, probably "glacier exceeds boundaries"
        except Exception:
            return None
    else:
        # does the file contain a valid model?
        try:
            FileModel(path)
            return suffix
        except Exception:
            return None
def run_random_task(tupel, gdir, y0):
    """Run a random climate model to create many possible glacier states."""
    seed, temp_bias = tupel
    fls = gdir.read_pickle('model_flowlines')
    suffix = str(y0) + '_random_' + str(seed) + '_' + str(temp_bias)
    path = gdir.get_filepath('model_run', filesuffix=suffix)

    # does the file already exist?
    if not os.path.exists(path):
        try:
            tasks.run_random_climate(gdir, nyears=400, y0=y0, bias=0,
                                     seed=seed, temperature_bias=temp_bias,
                                     init_model_fls=copy.deepcopy(fls),
                                     output_filesuffix=suffix)
            return path
        # OGGM failed, probably "glacier exceeds boundaries"
        except Exception:
            return None
    else:
        # does the file contain a valid model?
        try:
            FileModel(path)
            return path
        except Exception:
            return None
def _run_to_present(array, gdir, ys, ye, mb_offset):
    """Run a glacier candidate forward to the present."""
    init_yr = array[0]
    init_filesuffix = array[1]
    s = init_filesuffix.split('_random')[-1]
    output_filesuffix = str(ys) + '_past' + s + '_' + str(int(init_yr))
    path = os.path.join(gdir.dir, str(ys),
                        'model_geometry' + output_filesuffix + '.nc')

    # does the file already exist?
    if not os.path.exists(path):
        try:
            tasks.run_from_climate_data(gdir, ys=ys, ye=ye, bias=mb_offset,
                                        init_model_filesuffix=init_filesuffix,
                                        init_model_yr=init_yr,
                                        output_filesuffix=output_filesuffix)
            return output_filesuffix
        # OGGM failed, probably "glacier exceeds boundaries"
        except Exception:
            return None
    else:
        # does the file contain a valid model?
        try:
            FileModel(path)
            return output_filesuffix
        except Exception:
            return None
def find_candidates(gdir, experiment, df, ys, ye, n):
    """Determine candidate states from the random runs and run them to the
    year of observation."""
    indices = []
    for q in np.linspace(0, 1, n):
        # index of the smallest state at or above each volume quantile
        index = df[df['ts_section'] >=
                   df['ts_section'].quantile(q)]['ts_section'].idxmin()
        indices = np.append(indices, int(index))
    candidates = df.loc[indices]
    candidates = candidates.sort_values(['suffix', 'time'])
    candidates['random_model_t0'] = None
    for suffix in candidates['suffix'].unique():
        rp = gdir.get_filepath('model_run', filesuffix=suffix)
        fmod = FileModel(rp)
        for i, t in candidates[candidates['suffix'] ==
                               suffix]['time'].items():
            fmod.run_until(t)
            candidates.at[i, 'random_model_t0'] = copy.deepcopy(fmod)
    candidates = candidates.drop_duplicates()

    fls_list = []
    for i in candidates.index:
        s = candidates.loc[int(i), 'suffix'].split('_random')[-1]
        suffix = str(ys) + '_past' + s + '_' + \
            str(int(candidates.loc[int(i), 'time']))
        fls = candidates.loc[int(i), 'random_model_t0']
        fls_list.append([suffix, fls])

    # run candidates until present
    pool = Pool()
    path_list = pool.map(partial(run_to_present, gdir=gdir, ys=ys,
                                 ye=2000), fls_list)
    pool.close()
    pool.join()

    candidates = candidates.assign(past_suffix=path_list)
    candidates['model_t0'] = candidates['past_suffix'].apply(
        read_file_model, args=([gdir]))
    candidates['model_t'] = candidates['past_suffix'].apply(
        run_file_model, args=([ye]))
    candidates['objective'] = candidates['model_t'].apply(
        objective_value, args=([experiment['y_t']]))
    return candidates
def generation(gdir, y0, mb_offset):
    """Create a pandas.DataFrame with ALL created states.

    A subset of them will be tested later.

    :param gdir: oggm.GlacierDirectory
    :param y0: int, year of the searched glacier states
    :param mb_offset: mass balance offset used for all runs
    :return: pd.DataFrame with one row per random run
    """
    # try the range (-3, 2) first --> 100 runs
    bias_list = [b.round(3) for b in np.arange(-3, 2, 0.05)]
    task_list = [(i**2, b) for i, b in enumerate(bias_list)]
    random_run_list = _run_random_parallel(gdir, y0, task_list, mb_offset)

    # if even the coldest bias (-3) still produced a valid run (i.e. the
    # glacier did not exceed the domain boundary), test colder biases
    # down to -5
    if random_run_list['temp_bias'].min() == -3:
        n = len(random_run_list)
        bias_list = [b.round(3) for b in np.arange(-5, -3, 0.05)]
        task_list = [((i + n + 1)**2, b) for i, b in enumerate(bias_list)]
        random_run_list = pd.concat(
            [random_run_list,
             _run_random_parallel(gdir, y0, task_list, mb_offset)],
            ignore_index=True)

    # check for a vanishing glacier: if the warmest run never reaches zero
    # volume, extend the warm end of the bias range
    max_bias = random_run_list['temp_bias'].idxmax()
    max_suffix = random_run_list.loc[max_bias, 'suffix']
    p = gdir.get_filepath('model_geometry', filesuffix=max_suffix)
    if not os.path.exists(p):
        p = os.path.join(gdir.dir, str(y0),
                         'model_geometry' + max_suffix + '.nc')
    fmod = FileModel(p)
    if fmod.volume_m3_ts().min() != 0:
        n = len(random_run_list)
        task_list = [((i + n + 1)**2, b.round(3))
                     for i, b in enumerate(np.arange(2.05, 3, 0.05))]
        random_run_list = pd.concat(
            [random_run_list,
             _run_random_parallel(gdir, y0, task_list, mb_offset)],
            ignore_index=True)

    random_run_list = random_run_list.sort_values(by='temp_bias')
    return random_run_list
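# Note on the seed scheme above: the quadratic seeds (i**2, then
# (i + n + 1)**2 for follow-up batches) spread the seeds out and offset each
# new batch by the number of runs already done. A hypothetical one-glacier
# driver could look like this (y0=1917 follows the starting year used
# elsewhere in this code base; mb_offset would come from the calibration):
def _demo_generation(gdir, mb_offset=0.0):
    random_run_list = generation(gdir, y0=1917, mb_offset=mb_offset)
    return random_run_list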
def plot_advanced_experiment(gdir):
    """Plot the area evolution of all stored runs of one glacier."""
    fig = plt.figure(figsize=(15, 14))
    grid = plt.GridSpec(1, 2, hspace=0.2, wspace=0.2)
    ax1 = plt.subplot(grid[0])
    ax2 = plt.subplot(grid[1], sharey=ax1)

    ax2.plot(gdir.rgi_date, gdir.rgi_area_km2, 'o',
             label='RGI area ' + str(gdir.rgi_date))
    mod = FluxBasedModel(flowlines=gdir.read_pickle('model_flowlines'))
    ax2.plot(gdir.rgi_date, mod.area_km2, 'o',
             label='flowline model area ' + str(gdir.rgi_date))

    for f in os.listdir(gdir.dir):
        if f.startswith('model_run_'):
            rp = os.path.join(gdir.dir, f)
            model = FileModel(rp)
            model.area_km2_ts().plot(ax=ax2, label=f)
    # ax2.set_xlim((1915, 2005))
    # ax2.legend()
    plt.show()
def find_temp_bias_range(gdir, y0):
    """Determine the range of temperature biases that produces valid random
    glacier states, and collect the volume series after equilibrium."""
    fls = gdir.read_pickle('model_flowlines')
    t_eq = 0

    # try the range (-2, 2) first
    bias_list = [b.round(3) for b in np.arange(-2, 2, 0.05)]
    task_list = [(i**2, b) for i, b in enumerate(bias_list)]
    random_run_list = run_random_parallel(gdir, y0, task_list)

    # a smaller temperature bias may still be possible to test
    if random_run_list['temp_bias'].min() == -2:
        n = len(random_run_list)
        task_list = [((i + n + 1)**2, b.round(3))
                     for i, b in enumerate(np.arange(-3, -2.05, 0.05))]
        random_run_list = pd.concat(
            [random_run_list, run_random_parallel(gdir, y0, task_list)],
            ignore_index=True)

    # check for a vanishing glacier
    max_bias = random_run_list['temp_bias'].idxmax()
    p = gdir.get_filepath('model_run',
                          filesuffix=random_run_list.loc[max_bias, 'suffix'])
    fmod = FileModel(p)
    if fmod.volume_m3_ts().min() != 0:
        n = len(random_run_list)
        task_list = [((i + n + 1)**2, b.round(3))
                     for i, b in enumerate(np.arange(2.05, 3, 0.05))]
        random_run_list = pd.concat(
            [random_run_list, run_random_parallel(gdir, y0, task_list)],
            ignore_index=True)

    # find t_eq
    for suffix in random_run_list['suffix'].head(10).values:
        rp = gdir.get_filepath('model_run', filesuffix=suffix)
        fmod = FileModel(rp)
        try:
            t = _find_t_eq(fmod.volume_m3_ts())
            if t > t_eq:
                t_eq = t
        except Exception:
            pass

    all_df = pd.DataFrame()
    for suffix in random_run_list['suffix']:
        rp = gdir.get_filepath('model_run', filesuffix=suffix)
        fmod = FileModel(rp)
        v = pd.DataFrame(fmod.volume_m3_ts()).reset_index()
        v = v[v['time'] >= t_eq]
        v = v.assign(suffix=lambda x: suffix)
        all_df = pd.concat([all_df, v], ignore_index=True)
    return all_df
def get_absolute_length(y0, y1, rgi, df, storage):
    rgipath = os.path.join(storage, rgi, '{:02d}'.format(0),
                           rgi[:8], rgi[:11], rgi)
    mfile = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(0))
    tmpmod = FileModel(mfile)

    # absolute length of the reference run, and the relative length change
    # from the record at the same year
    absL = tmpmod.length_m
    deltaL = df.loc[int(tmpmod.yr.values), 0]

    abs_y0 = absL + (y0 - deltaL)
    abs_y1 = absL + (y1 - deltaL)

    return abs_y0, abs_y1
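# Worked example with hypothetical numbers: `get_absolute_length` anchors a
# relative length-change record to the absolute model length. If the
# reference run's length at the record's reference year is 5000 m (absL) and
# the record's length change at that year is -300 m (deltaL), then record
# values y0 = -500 m and y1 = -200 m map to:
#   abs_y0 = 5000 + (-500 - (-300)) = 4800 m
#   abs_y1 = 5000 + (-200 - (-300)) = 5100 m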
def plot_candidates(gdir, df, experiment, ys):
    plot_dir = '/home/juliaeis/Dokumente/OGGM/work_dir/find_initial_state/past_state_information/plots'
    fig, ax = plt.subplots()

    # plot the random runs
    for suffix in df['suffix'].unique():
        rp = gdir.get_filepath('model_run', filesuffix=suffix)
        fmod = FileModel(rp)
        fmod.volume_m3_ts().plot(ax=ax, color='grey', label='', zorder=1)

    # plot the last one again for the legend entry
    df['temp_bias'] = df['suffix'].apply(lambda x: float(x.split('_')[-1]))
    label = r'temperature bias $\in [$' + str(df['temp_bias'].min()) + \
            ',' + str(df['temp_bias'].max()) + '$]$'
    fmod.volume_m3_ts().plot(ax=ax, color='grey', label=label, zorder=1)

    t_eq = df['time'].sort_values().iloc[0]
    ax.axvline(x=t_eq, color='k', zorder=1)
    df.plot.scatter(x='time', y='ts_section', ax=ax, c='objective',
                    colormap='RdYlGn_r',
                    norm=mpl.colors.LogNorm(vmin=0.1, vmax=1e5),
                    s=40, zorder=2)
    plt.ylabel(r'Volume $(m^3)$')
    plt.title(gdir.rgi_id)
    plt.legend()
    plt.savefig(os.path.join(plot_dir, 'random' + str(ys) + '_' +
                             str(gdir.rgi_id) + '.png'), dpi=200)
    plt.close()
def identification(gdir, run_list, ys, ye, n):
    """Determine glacier candidates and run them to the date of observation.

    :param gdir: oggm.GlacierDirectory
    :param run_list: pd.DataFrame with the suffixes of the random climate
        runs
    :param ys: starting year
    :param ye: year of observation
    :param n: number of candidates
    :return: pd.DataFrame with the time and suffix of each candidate
    """
    i = 0
    t_stag = 0
    # find t_stag from the first (up to) 10 runs
    for suffix in run_list['suffix'].values:
        if i < 10:
            try:
                rp = gdir.get_filepath('model_geometry', filesuffix=suffix)
                if not os.path.exists(rp):
                    rp = os.path.join(gdir.dir, str(ys),
                                      'model_geometry' + suffix + '.nc')
                fmod = FileModel(rp)
                t = _find_extrema(fmod.volume_m3_ts())
                if t > t_stag:
                    t_stag = t
                i = i + 1
            except Exception:
                pass

    # make sure that t_stag is not too close to the end of the runs
    if t_stag > 550:
        t_stag = 550

    df = pd.DataFrame()
    for suffix in run_list['suffix']:
        try:
            rp = gdir.get_filepath('model_geometry', filesuffix=suffix)
            if not os.path.exists(rp):
                rp = os.path.join(gdir.dir, str(ys),
                                  'model_geometry' + suffix + '.nc')
            fmod = FileModel(rp)
            v = pd.DataFrame(
                fmod.volume_m3_ts()).rename_axis('time').reset_index()
            v = v[v['time'] >= t_stag]
            v = v.assign(suffix=lambda x: suffix)
            df = pd.concat([df, v], ignore_index=True)
        except Exception:
            pass

    indices = []
    # find the nearest glacier state for each of the n (equidistant)
    # volume classes
    for val in np.linspace(df.volume_m3.min(), df.volume_m3.max(), n):
        index = df.iloc[(df['volume_m3'] - val).abs().argsort()][:1].index[0]
        if index not in indices:
            indices = np.append(indices, index)
    candidates = df.loc[indices]
    candidates = candidates.sort_values(['suffix', 'time'])
    candidates = candidates.drop_duplicates()
    return candidates[['time', 'suffix']]
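# Pipeline sketch (assumption): how `generation` and `identification` chain
# together for one glacier and one starting year. n=200 is a hypothetical
# choice for the number of candidates; ye is the observation year.
def _demo_initialization(gdir, mb_offset=0.0, n=200):
    ys = 1917
    random_run_list = generation(gdir, y0=ys, mb_offset=mb_offset)
    candidates = identification(gdir, random_run_list, ys=ys,
                                ye=gdir.rgi_date, n=n)
    # each (time, suffix) pair would then be run forward from ys to the
    # observation year, e.g. with `_run_to_present` above
    return candidates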
def _run_experiment(gdir, temp_bias, bias, ys, ye):
    """Create the synthetic experiment for one glacier.

    model_run_experiment.nc is saved in the working directory.
    """
    model = None
    # check if this experiment already exists
    try:
        rp = gdir.get_filepath('model_run',
                               filesuffix='_advanced_experiment_' +
                                          str(temp_bias) + '_' + str(bias))
        model = FileModel(rp)
    # otherwise create the experiment
    except Exception:
        fls = gdir.read_pickle('model_flowlines')
        try:
            model = tasks.run_random_climate(gdir, nyears=400, y0=ys,
                                             bias=bias, seed=1,
                                             temperature_bias=temp_bias,
                                             init_model_fls=fls)

            # construct the observed glacier: run the previous glacier
            # forward from 1917 to 2000 with the past climate file
            fls = deepcopy(model.fls)
            model = tasks.run_from_climate_data(
                gdir, ys=ys, ye=ye, init_model_fls=fls, bias=bias,
                output_filesuffix='_advanced_experiment_' + str(temp_bias) +
                                  '_' + str(bias))
        except Exception:
            # OGGM failed; keep model = None
            pass
    return model
def run_to_present(tupel, gdir, ys, ye):
    """Run a glacier candidate forward to the present."""
    suffix = tupel[0]
    path = gdir.get_filepath('model_run', filesuffix=suffix)

    # does the file already exist?
    if not os.path.exists(path):
        try:
            tasks.run_from_climate_data(gdir, ys=ys, ye=ye,
                                        output_filesuffix=suffix,
                                        init_model_fls=copy.deepcopy(
                                            tupel[1].fls))
            return suffix
        # OGGM failed, probably "glacier exceeds boundaries"
        except Exception:
            return None
    else:
        # does the file contain a valid model?
        try:
            FileModel(path)
            return suffix
        except Exception:
            return None
def plot_volume_dif_time(gdir, model_dict, experiment):
    """Plot the volume difference over time for several starting years."""
    fig, axs = plt.subplots(len(model_dict), 1)

    try:
        rp = gdir.get_filepath('model_run', filesuffix='experiment')
        ex_mod = FileModel(rp)
    except Exception:
        # create the volume plot from the experiment
        model = experiment['y_t0']
        tasks.run_from_climate_data(gdir, ys=1850, ye=2000,
                                    init_model_fls=model.fls,
                                    output_filesuffix='experiment')
        rp = gdir.get_filepath('model_run', filesuffix='experiment')
        ex_mod = FileModel(rp)

    if gdir.name != '':
        plt.suptitle(gdir.rgi_id + ':' + gdir.name, fontsize=20)
    else:
        plt.suptitle(gdir.rgi_id, fontsize=20)

    import matplotlib as mpl
    import matplotlib.cm as cm
    norm = mpl.colors.LogNorm(vmin=0.1, vmax=1e5)
    cmap = cm.get_cmap('RdYlGn_r')

    for i, ax in enumerate(fig.axes):
        yr = list(model_dict.keys())[i]
        df = model_dict.get(yr)
        df = df.sort_values('objective', ascending=False)
        for j, model in df['model_t0'].items():
            color = cmap(norm(df.loc[j, 'objective']))
            model.volume_m3_ts().plot(ax=ax, color=color, linewidth=2)
        ex_mod.volume_m3_ts().plot(ax=ax, color='k', linestyle=':',
                                   linewidth=3)
def plot_candidates(gdir, df, yr, plot_dir):
    plot_dir = os.path.join(plot_dir, '06_candidates')
    utils.mkdir(plot_dir)
    fig, ax = plt.subplots(figsize=(10, 5))

    for file in os.listdir(os.path.join(gdir.dir, str(yr))):
        if file.startswith('model_run' + str(yr) + '_random'):
            suffix = file.split('model_run')[1].split('.nc')[0]
            rp = os.path.join(gdir.dir, str(yr),
                              'model_run' + suffix + '.nc')
            try:
                fmod = FileModel(rp)
                fmod.volume_km3_ts().plot(ax=ax, color='grey', label='',
                                          zorder=1)
            except Exception:
                pass

    # plot the last one again for labeling
    df.time = df.time.astype(int)
    t_eq = df['time'].sort_values().iloc[0]
    df['Fitness value'] = df.fitness
    plt.title(gdir.rgi_id)
    fmod.volume_km3_ts().plot(ax=ax, color='grey', label=None, zorder=1)
    ax.axvline(x=int(t_eq), color='k', zorder=1)
    cmap = matplotlib.cm.get_cmap('viridis')
    df.plot.scatter(x='time', y='volume', ax=ax, c='Fitness value',
                    colormap='viridis',
                    norm=mpl.colors.LogNorm(vmin=0.01, vmax=1e3, clip=True),
                    s=250, edgecolors='k', zorder=2)

    # plot the points with fitness == 0 again, without the log norm
    if len(df[df.fitness == 0]) > 0:
        df[df.fitness == 0].plot.scatter(x='time', y='volume', ax=ax,
                                         c=cmap(0), s=250, edgecolors='k',
                                         zorder=2)
    plt.xlabel('Time (years)')
    plt.ylabel(r'Volume $(km^3)$')
    plt.show()
def plot_modeloutput_map(gdirs, ax=None, smap=None, model=None, vmax=None,
                         linewidth=3, filesuffix='', modelyr=None):
    """Plots the result of the model output."""

    gdir = gdirs[0]
    with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
        topo = nc.variables['topo'][:]

    # Dirty optim
    try:
        smap.set_topography(topo)
    except ValueError:
        pass

    toplot_th = np.array([])
    toplot_lines = []
    toplot_crs = []

    if model is None:
        models = []
        for gdir in gdirs:
            model = FileModel(gdir.get_filepath('model_run',
                                                filesuffix=filesuffix))
            model.run_until(modelyr)
            models.append(model)
    else:
        models = utils.tolist(model)

    for gdir, model in zip(gdirs, models):
        geom = gdir.read_pickle('geometries')
        poly_pix = geom['polygon_pix']

        crs = gdir.grid.center_grid
        smap.set_geometry(poly_pix, crs=crs, fc='none', zorder=2,
                          linewidth=.2)

        poly_pix = utils.tolist(poly_pix)
        for _poly in poly_pix:
            for l in _poly.interiors:
                smap.set_geometry(l, crs=crs, color='black', linewidth=0.5)

        # plot Centerlines
        cls = model.fls
        for l in cls:
            smap.set_geometry(l.line, crs=crs, color='gray',
                              linewidth=1.2, zorder=50)
            toplot_th = np.append(toplot_th, l.thick)
            widths = l.widths.copy()
            widths = np.where(l.thick > 0, widths, 0.)
            for wi, cur, (n1, n2) in zip(widths, l.line.coords, l.normals):
                line = shpg.LineString([shpg.Point(cur + wi / 2. * n1),
                                        shpg.Point(cur + wi / 2. * n2)])
                toplot_lines.append(line)
                toplot_crs.append(crs)

    dl = salem.DataLevels(cmap=OGGM_CMAPS['section_thickness'],
                          data=toplot_th, vmin=0, vmax=vmax)
    colors = dl.to_rgb()
    for l, c, crs in zip(toplot_lines, colors, toplot_crs):
        smap.set_geometry(l, crs=crs, color=c,
                          linewidth=linewidth, zorder=50)

    smap.plot(ax)
    return dict(cbar_label='Section thickness [m]',
                cbar_primitive=dl,
                title_comment=' -- year: {:d}'.format(np.int64(model.yr)))
def plot_modeloutput_map(gdirs, ax=None, smap=None, model=None, vmax=None,
                         linewidth=3, filesuffix='', modelyr=None):
    """Plots the result of the model output."""

    gdir = gdirs[0]
    with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
        topo = nc.variables['topo'][:]

    # Dirty optim
    try:
        smap.set_topography(topo)
    except ValueError:
        pass

    toplot_th = np.array([])
    toplot_lines = []
    toplot_crs = []

    if model is None:
        models = []
        for gdir in gdirs:
            model = FileModel(gdir.get_filepath('model_run',
                                                filesuffix=filesuffix))
            model.run_until(modelyr)
            models.append(model)
    else:
        models = utils.tolist(model)

    for gdir, model in zip(gdirs, models):
        geom = gdir.read_pickle('geometries')
        poly_pix = geom['polygon_pix']

        crs = gdir.grid.center_grid
        smap.set_geometry(poly_pix, crs=crs, fc='none', zorder=2,
                          linewidth=.2)

        poly_pix = utils.tolist(poly_pix)
        for _poly in poly_pix:
            for l in _poly.interiors:
                smap.set_geometry(l, crs=crs, color='black', linewidth=0.5)

        # plot Centerlines
        cls = model.fls
        for l in cls:
            smap.set_geometry(l.line, crs=crs, color='gray',
                              linewidth=1.2, zorder=50)
            toplot_th = np.append(toplot_th, l.thick)
            widths = l.widths.copy()
            widths = np.where(l.thick > 0, widths, 0.)
            for wi, cur, (n1, n2) in zip(widths, l.line.coords, l.normals):
                line = shpg.LineString([shpg.Point(cur + wi / 2. * n1),
                                        shpg.Point(cur + wi / 2. * n2)])
                toplot_lines.append(line)
                toplot_crs.append(crs)

    dl = salem.DataLevels(cmap=SECTION_THICKNESS_CMAP, nlevels=256,
                          data=toplot_th, vmin=0, vmax=vmax)
    colors = dl.to_rgb()
    for l, c, crs in zip(toplot_lines, colors, toplot_crs):
        smap.set_geometry(l, crs=crs, color=c,
                          linewidth=linewidth, zorder=50)

    smap.plot(ax)
    return dict(cbar_label='Section thickness [m]',
                cbar_primitive=dl,
                title_comment=' -- year: {:d}'.format(np.int64(model.yr)))
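# Usage sketch (assumption): plot the modelled section thickness of a single
# glacier at a given year. The salem.Map construction is a guess at a
# reasonable setup; any salem map covering the glacier grid should work.
def _demo_plot_map(gdir, ax, modelyr=2000, filesuffix=''):
    smap = salem.Map(gdir.grid, countries=False)
    return plot_modeloutput_map([gdir], ax=ax, smap=smap,
                                filesuffix=filesuffix, modelyr=modelyr)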
    cbar_pad=0.15,
)

f.delaxes(axs[0])
f.delaxes(axs[1])
f.delaxes(axs[1].cax)

tx, ty = 0.019, .975
letkm = dict(color='black', ha='left', va='top', fontsize=14,
             bbox=dict(facecolor='white', edgecolor='black'))
llkw = {'interval': 0}

fp = gdir.get_filepath('model_run', filesuffix='_2000_def')
model = FileModel(fp)
model.run_until(800)

ax = axs[3]
graphics.plot_modeloutput_map(gdir, model=model, ax=ax, title='',
                              lonlat_contours_kwargs=llkw,
                              cbar_ax=ax.cax, linewidth=1.5,
                              add_scalebar=False, vmax=300)
ax.text(tx, ty, 'c: [1985-2015]', transform=ax.transAxes, **letkm)

fp = gdir.get_filepath('model_run', filesuffix='_1920_def')
model = FileModel(fp)
def run_file_model(suffix, ye):
    # note: `gdir` is taken from the enclosing scope
    rp = gdir.get_filepath('model_run', filesuffix=suffix)
    fmod = FileModel(rp)
    fmod.run_until(ye)
    return copy.deepcopy(fmod)
def read_file_model(suffix, gdir):
    rp = gdir.get_filepath('model_run', filesuffix=suffix)
    fmod = FileModel(rp)
    return copy.deepcopy(fmod)
def plot_candidates(gdir, df, yr, step, plot_dir):
    plot_dir = os.path.join(plot_dir, '06_candidates')
    utils.mkdir(plot_dir, reset=False)
    fig, ax = plt.subplots(figsize=(10, 10))

    for file in os.listdir(os.path.join(gdir.dir, str(yr))):
        if file.startswith('model_geometry' + str(yr) + '_random'):
            suffix = file.split('model_geometry')[1].split('.nc')[0]
            rp = os.path.join(gdir.dir, str(yr),
                              'model_geometry' + suffix + '.nc')
            try:
                fmod = FileModel(rp)
                fmod.volume_km3_ts().plot(ax=ax, color='grey', label='',
                                          zorder=1)
            except Exception:
                pass

    # plot the last one again for labeling
    label = r'temperature bias $\in [$' + str(df['temp_bias'].min()) + \
            ',' + str(df['temp_bias'].max()) + '$]$'
    df.time = df.time.astype(int)
    t_eq = df['time'].sort_values().iloc[0]
    df['Fitness value'] = df.fitness
    plt.title(gdir.rgi_id)

    if step == 'step1':
        fmod.volume_km3_ts().plot(ax=ax, color='grey', label=label, zorder=1)
        plt.legend(loc=0, fontsize=28)
        plt.xlabel('Time (years)')
        plt.ylabel(r'Volume $(km^3)$')
        plt.savefig(os.path.join(plot_dir, 'candidates1_' + str(yr) + '_' +
                                 str(gdir.rgi_id) + '.png'), dpi=300)
    elif step == 'step2':
        ax.axvline(x=int(t_eq), color='k', zorder=1, label=r'$t_{stag}$')
        fmod.volume_km3_ts().plot(ax=ax, color='grey', label='', zorder=1)
        # black points
        df.plot.scatter(x='time', y='volume', ax=ax, color='k',
                        label='candidates', s=250, zorder=2)
        plt.legend(loc=0, fontsize=27.5)
        plt.xlabel('Time (years)')
        plt.ylabel(r'Volume $(km^3)$')
        plt.xlim((int(t_eq) - 10, 605))
        plt.savefig(os.path.join(plot_dir, 'candidates2_' + str(yr) + '_' +
                                 str(gdir.rgi_id) + '.png'), dpi=300)
    elif step == 'step3':
        fmod.volume_km3_ts().plot(ax=ax, color='grey', label=None, zorder=1)
        ax.axvline(x=int(t_eq), color='k', zorder=1)
        cmap = matplotlib.cm.get_cmap('viridis')
        norm = mpl.colors.LogNorm(vmin=0.01 / 125, vmax=10)
        df.plot.scatter(x='time', y='volume', ax=ax, c='Fitness value',
                        colormap='viridis',
                        norm=mpl.colors.LogNorm(vmin=0.01 / 125, vmax=10,
                                                clip=True),
                        s=250, edgecolors='k', zorder=2, colorbar=False)
        # plot the points with fitness == 0 again, without the log norm
        if len(df[df.fitness == 0]) > 0:
            df[df.fitness == 0].plot.scatter(x='time', y='volume', ax=ax,
                                             c=cmap(0), s=250,
                                             edgecolors='k', zorder=2,
                                             colorbar=False)
        plt.xlim(int(t_eq) - 10, 605)
        plt.xlabel('Time (years)')
        plt.ylabel(r'Volume $(km^3)$')

        # add a colorbar
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        # cax, kw = mpl.colorbar.make_axes([ax1, ax2, ax3])
        cbar = fig.colorbar(sm, extend='both')
        cbar.ax.tick_params(labelsize=30)
        cbar.set_label('Fitness value', fontsize=30)
        plt.savefig(os.path.join(plot_dir, 'candidates3_' + str(yr) + '_' +
                                 str(gdir.rgi_id) + '.png'), dpi=300)
    plt.show()
    # plt.close()

    plt.figure(figsize=(15, 14))
    plt.hist(df.volume.values, bins=20)
    plt.xlabel(r'Volume $(km^3)$')
    plt.ylabel(r'Frequency')
    plt.title(gdir.rgi_id)
    plt.savefig(os.path.join(plot_dir, 'hist_candidates' + str(yr) + '_' +
                             str(gdir.rgi_id) + '.png'), dpi=300)
    plt.close()
def initialise_DataLogger(gdir, inversion_input_filesuffix='_combine',
                          init_model_filesuffix=None, init_model_fls=None,
                          climate_filename='climate_historical',
                          climate_filesuffix='',
                          output_filesuffix='_combine',
                          output_filepath=None):
    """Extract information from the gdir and save it in a DataLogger.

    TODO
    """
    if init_model_filesuffix is not None:
        fp = gdir.get_filepath('model_geometry',
                               filesuffix=init_model_filesuffix)
        fmod = FileModel(fp)
        init_model_fls = fmod.fls

    if init_model_fls is None:
        fls_init = gdir.read_pickle('model_flowlines')
    elif isinstance(init_model_fls, str):
        fls_init = gdir.read_pickle('model_flowlines',
                                    filesuffix=init_model_fls)
    else:
        fls_init = copy.deepcopy(init_model_fls)

    if len(fls_init) > 1:
        raise NotImplementedError('COMBINE only works with single '
                                  'flowlines!')

    # check that the inversion_input file exists
    if not os.path.isfile(gdir.get_filepath(
            'inversion_input', filesuffix=inversion_input_filesuffix)):
        raise AttributeError('inversion_input' + inversion_input_filesuffix +
                             ' file not found!')
    inversion_input = gdir.read_pickle(
        filename='inversion_input',
        filesuffix=inversion_input_filesuffix)

    # fill 'observations' with initial flowline values if needed
    rgi_yr = gdir.rgi_date

    def check_if_year_is_given(obs_check):
        if inversion_input['observations'][obs_check] == {}:
            inversion_input['observations'][obs_check] = {str(rgi_yr): []}
        return [key for key in
                inversion_input['observations'][obs_check].keys()][0]

    for obs in inversion_input['observations'].keys():
        if obs == 'fl_surface_h:m':
            yr_obs = check_if_year_is_given(obs)
            inversion_input['observations'][obs][yr_obs] = \
                fls_init[0].surface_h
        if obs == 'fl_widths:m':
            yr_obs = check_if_year_is_given(obs)
            inversion_input['observations'][obs][yr_obs] = \
                fls_init[0].widths_m
        if obs == 'fl_total_area:m2':
            yr_obs = check_if_year_is_given(obs)
            inversion_input['observations'][obs][yr_obs] = \
                fls_init[0].area_m2
        if obs == 'fl_total_area:km2':
            yr_obs = check_if_year_is_given(obs)
            inversion_input['observations'][obs][yr_obs] = \
                fls_init[0].area_km2

    # save observations for scaling if wanted
    inversion_input['observations_for_scaling'] = None
    for reg_term in inversion_input['regularisation_terms']:
        if reg_term in ['fl_surface_h_scale_1', 'fl_surface_h_scale_2',
                        'bed_h_grad_scale']:
            if ('fl_surface_h:m' not in
                    inversion_input['observations'].keys() and
                    reg_term in ['fl_surface_h_scale_1',
                                 'fl_surface_h_scale_2']):
                raise NotImplementedError('fl_surface_h must be in the '
                                          'observations if scaling should '
                                          'be used!')
            if all(k in inversion_input['regularisation_terms'] for k in
                   ['fl_surface_h_scale_1', 'fl_surface_h_scale_2']):
                raise NotImplementedError('only one of the two scalings '
                                          'for fl_surface_h can be used '
                                          'at a time!')
            inversion_input['observations_for_scaling'] = {
                'fl_widths:m': fls_init[0].widths_m}

    if 'area_bed_h' in inversion_input['control_vars']:
        inversion_input['observations_for_scaling'] = {
            'fl_widths:m': fls_init[0].widths_m}

    if all(k in inversion_input['control_vars'] for k in
           ['bed_h', 'area_bed_h']):
        raise NotImplementedError("It is not possible to define 'bed_h' and "
                                  "'area_bed_h' as control variables "
                                  "simultaneously!")

    data_logger = DataLogger(gdir, fls_init, inversion_input,
                             climate_filename, climate_filesuffix,
                             output_filesuffix, output_filepath)

    return data_logger
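# Usage sketch (assumption): build a DataLogger for a COMBINE inversion with
# the defaults above, falling back to the gdir's standard 'model_flowlines'.
def _demo_data_logger(gdir):
    data_logger = initialise_DataLogger(
        gdir,
        inversion_input_filesuffix='_combine',
        init_model_filesuffix=None,   # use gdir's 'model_flowlines'
        output_filesuffix='_combine')
    return data_logger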
def spinup_plus_histalp(gdir, meta=None, mb_bias=None, runsuffix=''):
    # take care of merged glaciers
    rgi_id = gdir.rgi_id.split('_')[0]

    # select meta
    meta = meta.loc[rgi_id].copy()
    # we want to simulate as much as possible -> HISTALP until 2014
    obs_ye = 2014

    # --------- SPIN IT UP ---------------
    tbias = systematic_spinup(gdir, meta, mb_bias=mb_bias)

    if tbias == -999:
        rval = {'rgi_id': gdir.rgi_id, 'name': meta['name'],
                'histalp': np.nan, 'spinup': np.nan, 'tbias': np.nan,
                'tmean': np.nan, 'pmean': np.nan}
        return rval

    # --------- GET SPINUP STATE ---------------
    tmp_mod = FileModel(gdir.get_filepath('model_run', filesuffix='_spinup'))
    tmp_mod.run_until(tmp_mod.last_yr)

    # --------- HIST IT DOWN ---------------
    try:
        run_from_climate_data(gdir, ys=meta['first'], ye=obs_ye,
                              init_model_fls=tmp_mod.fls,
                              climate_filename='climate_monthly',
                              output_filesuffix='_histalp' + runsuffix,
                              bias=mb_bias)
    except RuntimeError as err:
        if 'Glacier exceeds domain boundaries' in err.args[0]:
            log.info('(%s) histalp run exceeded domain bounds' %
                     gdir.rgi_id)
            return
        else:
            # re-raise anything else
            raise

    ds1 = xr.open_dataset(
        gdir.get_filepath('model_diagnostics',
                          filesuffix='_histalp' + runsuffix))
    ds2 = xr.open_dataset(
        gdir.get_filepath('model_diagnostics', filesuffix='_spinup'))

    # store mean temperature and precipitation
    yindex = np.arange(meta['first'], obs_ye + 1)

    try:
        cm = xr.open_dataset(gdir.get_filepath('climate_monthly'))
    except FileNotFoundError:
        cm = xr.open_dataset(
            gdir.get_filepath('climate_monthly', filesuffix='_' + rgi_id))

    tmean = cm.temp.groupby('time.year').mean().loc[yindex].to_pandas()
    pmean = cm.prcp.groupby('time.year').mean().loc[yindex].to_pandas()

    rval = {'rgi_id': gdir.rgi_id, 'name': meta['name'],
            'histalp': ds1.length_m.to_dataframe()['length_m'],
            'spinup': ds2.length_m.to_dataframe()['length_m'],
            'tbias': tbias, 'tmean': tmean, 'pmean': pmean}

    # relative length change
    rval['rel_dl'] = relative_length_change(meta, rval['spinup'],
                                            rval['histalp'])

    # if merged, store the tributary flowline change as well
    if '_merged' in gdir.rgi_id:
        trib = rval['histalp'].copy() * np.nan

        # choose the correct flowline index; use model_fls as they carry
        # rgi_ids
        fls = gdir.read_pickle('model_flowlines')
        flix = np.where([fl.rgi_id != rgi_id for fl in fls])[0][-1]

        fmod = FileModel(
            gdir.get_filepath('model_run',
                              filesuffix='_histalp' + runsuffix))
        assert fmod.fls[flix].nx == fls[flix].nx, ('filemodel and gdir '
                                                   'flowlines do not match')
        for yr in rval['histalp'].index:
            fmod.run_until(yr)
            trib.loc[yr] = fmod.fls[flix].length_m
        trib -= trib.iloc[0]
        rval['trib_dl'] = trib

    return rval
def run_ensemble(allgdirs, rgi_id, ensemble, tbiasdict, allmeta, storedir,
                 runsuffix='', spinup_y0=1999):
    # default Glen A
    default_glena = 2.4e-24

    # loop over all parameter combinations
    for nr, run in enumerate(ensemble):
        pdict = ast.literal_eval('{' + run + '}')
        cfg.PARAMS['glen_a'] = pdict['glena_factor'] * default_glena
        cfg.PARAMS['inversion_glen_a'] = (pdict['glena_factor'] *
                                          default_glena)
        mbbias = pdict['mbbias']
        cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']

        log.info('Current parameter combination: %s' % str(run))
        log.info('This is combination %d out of %d.' % (nr + 1,
                                                        len(ensemble)))

        # we need the reference glaciers here for calibration; they should
        # already be initialized, so just recreate them from the directory
        ref_gdirs = [GlacierDirectory(refid) for refid in
                     preprocessing.ADDITIONAL_REFERENCE_GLACIERS]

        # do the mass balance calibration
        compute_ref_t_stars(ref_gdirs + allgdirs)
        task_list = [tasks.local_t_star,
                     tasks.mu_star_calibration,
                     tasks.prepare_for_inversion,
                     tasks.mass_conservation_inversion,
                     tasks.filter_inversion_output,
                     tasks.init_present_time_glacier]
        for task in task_list:
            execute_entity_task(task, allgdirs)

        # check for glaciers to merge:
        gdirs_merged = []
        gdirs2sim = allgdirs.copy()
        for gid in allmeta.index:
            merg = merge_pair_dict(gid)
            if merg is not None:
                # main and tributary glacier
                gd2merge = [gd for gd in allgdirs
                            if gd.rgi_id in [gid] + merg[0]]

                # actual merge task
                log.warning('DeprecationWarning: If downloadlink is updated '
                            'to gdirs_v1.2, remove filename kwarg')
                gdir_merged = merge_glacier_tasks(
                    gd2merge, gid, buffer=merg[1],
                    filename='climate_monthly')

                # remove the entity glaciers from the simulation list
                gdirs2sim = [gd for gd in gdirs2sim
                             if gd.rgi_id not in [gid] + merg[0]]

                gdirs_merged.append(gdir_merged)

        # add the merged glaciers to the remaining entity glaciers
        gdirs2sim += gdirs_merged

        # now select only the one glacier
        gdir = [gd for gd in gdirs2sim if gd.rgi_id == rgi_id][0]
        rgi_id0 = rgi_id.split('_')[0]
        meta = allmeta.loc[rgi_id0].copy()

        # do the actual simulations

        # spinup
        fls = gdir.read_pickle('model_flowlines')
        tbias = tbiasdict[run]
        mb = MultipleFlowlineMassBalance(gdir, fls=fls,
                                         mb_model_class=ConstantMassBalance,
                                         filename='climate_monthly',
                                         y0=spinup_y0, bias=mbbias)
        minimize_dl(tbias, mb, fls, None, None, gdir, False,
                    runsuffix='_{:02d}'.format(nr))

        # histalp
        # --------- GET SPINUP STATE ---------------
        tmp_mod = FileModel(
            gdir.get_filepath('model_run',
                              filesuffix='_spinup_{:02d}'.format(nr)))
        tmp_mod.run_until(tmp_mod.last_yr)

        # --------- HIST IT DOWN ---------------
        histrunsuffix = '_histalp{}_{:02d}'.format(runsuffix, nr)

        # now the actual simulation
        run_from_climate_data(gdir, ys=meta['first'], ye=2014,
                              init_model_fls=tmp_mod.fls,
                              output_filesuffix=histrunsuffix,
                              climate_filename='climate_monthly',
                              bias=mbbias)

        # save the calibration parameters to the climate info file
        out = gdir.get_climate_info()
        out['ensemble_calibration'] = pdict
        gdir.write_json(out, 'climate_info')

        # copy stuff to storage
        basedir = os.path.join(storedir, rgi_id)
        ensdir = os.path.join(basedir, '{:02d}'.format(nr))
        mkdir(ensdir, reset=True)
        deep_path = os.path.join(ensdir, rgi_id[:8], rgi_id[:11], rgi_id)

        # copy the whole GDir
        copy_to_basedir(gdir, base_dir=ensdir, setup='run')

        # copy the run results
        fn1 = 'model_diagnostics_spinup_{:02d}.nc'.format(nr)
        shutil.copyfile(
            gdir.get_filepath('model_diagnostics',
                              filesuffix='_spinup_{:02d}'.format(nr)),
            os.path.join(deep_path, fn1))

        fn2 = 'model_diagnostics{}.nc'.format(histrunsuffix)
        shutil.copyfile(
            gdir.get_filepath('model_diagnostics',
                              filesuffix=histrunsuffix),
            os.path.join(deep_path, fn2))

        fn3 = 'model_run_spinup_{:02d}.nc'.format(nr)
        shutil.copyfile(
            gdir.get_filepath('model_run',
                              filesuffix='_spinup_{:02d}'.format(nr)),
            os.path.join(deep_path, fn3))

        fn4 = 'model_run{}.nc'.format(histrunsuffix)
        shutil.copyfile(
            gdir.get_filepath('model_run', filesuffix=histrunsuffix),
            os.path.join(deep_path, fn4))

        log.warning('DeprecationWarning: If downloadlink is updated to '
                    'gdirs_v1.2 remove this copyfile:')
        # copy the (old) climate_monthly files, which are still needed
        for fn in os.listdir(gdir.dir):
            if 'climate_monthly' in fn:
                shutil.copyfile(os.path.join(gdir.dir, fn),
                                os.path.join(deep_path, fn))
def plot_surface_col(gdir, df, experiment, ys):
    # df = df[df['objective'] <= 100]
    x = np.arange(experiment['y_t'].fls[-1].nx) * \
        experiment['y_t'].fls[-1].dx * experiment['y_t'].fls[-1].map_dx
    fig = plt.figure(figsize=(20, 15))
    grid = plt.GridSpec(2, 2, hspace=0.2, wspace=0.2)
    ax1 = plt.subplot(grid[0, 0])
    ax2 = plt.subplot(grid[0, 1])
    ax3 = plt.subplot(grid[1, :])

    p2 = ax2.get_position()
    if gdir.name != '':
        plt.suptitle(gdir.rgi_id + ':' + gdir.name, x=p2.x1 / 2,
                     fontsize=20)
    else:
        plt.suptitle(gdir.rgi_id, x=p2.x1 / 2, fontsize=20)

    import matplotlib as mpl
    import matplotlib.cm as cm
    norm = mpl.colors.LogNorm(vmin=0.1, vmax=1e5)
    cmap = cm.get_cmap('RdYlGn_r')

    df = df.sort_values('objective', ascending=False)
    for i, model in df['model_t0'].items():
        color = cmap(norm(df.loc[i, 'objective']))
        ax1.plot(x, model.fls[-1].surface_h, color=color, linewidth=2)
        model.volume_m3_ts().plot(ax=ax3, color=color, linewidth=2)

    for i, model in df['model_t'].items():
        color = cmap(norm(df.loc[i, 'objective']))
        ax2.plot(x, model.fls[-1].surface_h, color=color, linewidth=2)

    ax2.plot(x, experiment['y_t'].fls[-1].surface_h, 'k:', linewidth=3)
    ax2.plot(x, model.fls[-1].bed_h, 'k', linewidth=3)

    # create the volume plot from the experiment
    model = experiment['y_t0']
    tasks.run_from_climate_data(gdir, ys=1850, ye=2000,
                                init_model_fls=model.fls,
                                output_filesuffix='experiment')
    rp = gdir.get_filepath('model_run', filesuffix='experiment')
    ex_mod = FileModel(rp)
    ex_mod.volume_m3_ts().plot(ax=ax3, color='k', linestyle=':',
                               linewidth=3)
    ex_mod.run_until(ys)

    ax1.plot(x, ex_mod.fls[-1].surface_h, 'k:', linewidth=3)
    ax1.plot(x, ex_mod.fls[-1].bed_h, 'k', linewidth=3)

    ax1.annotate(r'$t = ' + str(ys) + '$', xy=(0.8, 0.9),
                 xycoords='axes fraction', fontsize=15)
    ax2.annotate(r'$t = 2000$', xy=(0.8, 0.9),
                 xycoords='axes fraction', fontsize=15)

    ax1.set_ylabel('Altitude (m)', fontsize=15)
    ax1.set_xlabel('Distance along the main flowline (m)', fontsize=15)
    ax2.set_ylabel('Altitude (m)', fontsize=15)
    ax2.set_xlabel('Distance along the main flowline (m)', fontsize=15)
    ax3.set_ylabel(r'Volume ($m^3$)', fontsize=15)
    ax3.set_xlabel('Time (years)', fontsize=15)

    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    cax, kw = mpl.colorbar.make_axes([ax1, ax2, ax3])
    cbar = fig.colorbar(sm, cax=cax, **kw)
    cbar.ax.tick_params(labelsize=15)
    cbar.set_label('objective', fontsize=15)

    ax1.tick_params(axis='both', which='major', labelsize=15)
    ax2.tick_params(axis='both', which='major', labelsize=15)
    ax3.tick_params(axis='both', which='major', labelsize=15)
    ax3.yaxis.offsetText.set_fontsize(15)

    plot_dir = '/home/juliaeis/Dokumente/OGGM/work_dir/find_initial_state/past_state_information/plots'
    plt.savefig(os.path.join(plot_dir, 'surface_' + str(ys) + '_' +
                             gdir.rgi_id + '.pdf'), dpi=200)
    plt.savefig(os.path.join(plot_dir, 'surface_' + str(ys) + '_' +
                             gdir.rgi_id + '.png'), dpi=200)