Example #1
def oldglacier_to_table(outpath):

    df = pd.DataFrame([], index=GLCDICT.keys())

    poldict = {'Switzerland': 'CH',
               'Austria': 'AT',
               'Italy': 'IT',
               'France': 'FR'}

    rgidf = utils.get_rgi_glacier_entities(df.index)
    meta, _ = get_length_observations(df.index)

    for rgi, _ in df.iterrows():
        name = GLCDICT[rgi][2].split('(')[0]
        df.loc[rgi, 'name'] = name
        df.loc[rgi, 'state'] = poldict[GLCDICT[rgi][3]]
        df.loc[rgi, 'lon/lat'] = '{:.2f}/{:.2f}'.\
            format(rgidf.loc[rgidf.RGIId == rgi, 'CenLon'].iloc[0],
                   rgidf.loc[rgidf.RGIId == rgi, 'CenLat'].iloc[0])

        df.loc[rgi, 'merge'] = 'no'
        area = rgidf.loc[rgidf.RGIId == rgi, 'Area'].iloc[0]

        if MERGEDICT.get(rgi):
            df.loc[rgi, 'merge'] = 'yes'
            tribs = MERGEDICT[rgi][0]
            tribdf = utils.get_rgi_glacier_entities(tribs)
            for trib in tribs:
                area += tribdf.loc[tribdf.RGIId == trib, 'Area'].iloc[0]

        df.loc[rgi, 'area [km2]'] = '{:.1f}'.format(area)
        df.loc[rgi, '1.obs'] = meta.loc[rgi, 'first']
        df.loc[rgi, '#obs'] = meta.loc[rgi, 'measurements']

    df.loc[:, '1.obs'] = df.loc[:, '1.obs'].astype(int)
    df.loc[:, '#obs'] = df.loc[:, '#obs'].astype(int)

    df = df.sort_values('name')

    # ---------------
    # generate table
    tbl = df.to_latex(na_rep='--', index=False, longtable=True,
                      column_format=2 * 'l' + 'r' + 'l' + 3 * 'r')
    # add title
    titl = ('\n\\caption{A list of all glaciers used for this study. '
            '\\emph{merge} indicates if'
            ' a glacier has additional tributary glaciers merged to it. '
            '\\emph{area} then does include these tributaries. '
            '\\emph{1.obs} refers to the first observation after 1850 and'
            ' the number of observations \\emph{\\#obs} is counted until '
            '2020.}\\\\\n'
            '\\label{tbl:glaciers}\\\\\n')
    tbl = tbl.replace('\n', titl, 1)
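    # The snippet above ends without writing `tbl`, although the function
    # takes an `outpath` argument. A hedged guess at the missing final step,
    # mirroring the write pattern of Examples #10 and #11 below (the file
    # name is hypothetical):
    with open(os.path.join(outpath, 'table_glaciers.tex'), 'w') as tf:
        tf.write(tbl)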
Example #2
def hef_gdir(test_dir):
    cfg.initialize(logging_level='CRITICAL')
    cfg.PARAMS['use_multiple_flowlines'] = False
    cfg.PARAMS['use_rgi_area'] = False
    cfg.PATHS['working_dir'] = test_dir

    rgi_ids = ['RGI60-11.00897']
    gl = utils.get_rgi_glacier_entities(rgi_ids)
    gdirs = workflow.init_glacier_directories(gl, from_prepro_level=1)

    global_tasks.gis_prepro_tasks(gdirs)

    cfg.PARAMS['baseline_climate'] = 'CRU'
    global_tasks.climate_tasks(
        gdirs,
        base_url='https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params'
        '/oggm_v1.4/RGIV62/CRU/centerlines/qc3/pcp2.5')

    execute_entity_task(tasks.prepare_for_inversion,
                        gdirs,
                        invert_all_trapezoid=True)
    execute_entity_task(tasks.mass_conservation_inversion, gdirs)

    execute_entity_task(tasks.init_present_time_glacier, gdirs)

    return gdirs[0]


def test_geodetic_glaciologicalMB_HEF():

    cfg.initialize()
    cfg.PARAMS['use_multiprocessing'] = False

    working_dir = '/home/lilianschuster/Schreibtisch/PhD/oggm_files/oneFlowline'
    # this needs to be changed if working on another computer
    if not os.path.exists(working_dir):
        working_dir = utils.gettempdir(dirname='OGGM_mb_type_intercomparison', reset=True)
        
    cfg.PATHS['working_dir'] = working_dir
    # use Huss flowlines
    base_url = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
                'L1-L2_files/elev_bands')
    
    # get HEF glacier
    df = utils.get_rgi_glacier_entities(['RGI60-11.00897'])
    gdirs = workflow.init_glacier_directories(df, from_prepro_level=2,
                                              prepro_border=10,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version='62')
    gd = gdirs[0]
    
    h, w = gd.get_inversion_flowline_hw()
    fls = gd.read_pickle('inversion_flowlines')
    
    cfg.PARAMS['baseline_climate'] = 'ERA5dr'
    oggm.shop.ecmwf.process_ecmwf_data(gd, dataset='ERA5dr')
    ######################
    
    # test that the area is similar to the OGGM gdir HEF area
    np.testing.assert_allclose(pd_geodetic_alps.loc['RGI60-11.00897'].area,
                               gd.rgi_area_km2, rtol=0.01)

    glaciological_mb_data_HEF_mean = (
        gd.get_ref_mb_data()['ANNUAL_BALANCE'].loc[2000:].mean())

    # test that the 2000-2020 mass balance from the glaciological method
    # is similar to the geodetic estimate
    np.testing.assert_allclose(pd_geodetic_alps.loc['RGI60-11.00897'].dmdtda * 1000,
                               glaciological_mb_data_HEF_mean, rtol=0.05)
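All of these snippets center on `utils.get_rgi_glacier_entities`, which returns the requested RGI outlines as a GeoDataFrame. A minimal, self-contained sketch of the call and of the columns the examples rely on:

from oggm import cfg, utils

cfg.initialize()
# single-glacier query; returns a GeoDataFrame with one row per RGI ID
rgidf = utils.get_rgi_glacier_entities(['RGI60-11.00897'])
# columns accessed throughout these examples
print(rgidf[['RGIId', 'Name', 'CenLon', 'CenLat', 'Area']].iloc[0])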
Example #4
def seek_start_area(rgi_id,
                    name,
                    show=False,
                    path='',
                    ref=np.NaN,
                    adjust_term_elev=False,
                    legend=True,
                    instant_geometry_change=False):
    """ Set up an VAS model from scratch and run/test the start area seeking
    tasks. The result is a plot showing the modeled glacier area evolution for
    different start values. The plots can be displayed and/or stored to file.

    Parameters
    ----------
    rgi_id: string
        RGI ID denoting the glacier on which to perform the tasks
    name: string
        Name of the glacier, since it is not always given (or correct) in RGI
    show: bool, optional, default=False
        Flag deciding whether or not to show the created plots.
    path: string, optional, default=''
        Path under which the modeled area plot should be stored.
    ref: float, optional, default=np.NaN
        Historic (1851) reference area with which a reference model run is
        performed.
    adjust_term_elev: bool, optional, default=False
        Whether to adjust the terminus elevation when creating the start
        glaciers (passed on to the start area tasks).
    legend: bool, optional, default=True
        Whether to add a legend to the area plot.
    instant_geometry_change: bool, optional, default=False
        Passed on to the model run routines.

    """
    # Initialization and load default parameter file
    vascaling.initialize()

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_id.split('-')[-1]).split('.')[0]
    rgi_version = (rgi_id.split('-')[0])[-2:-1]

    # specify working directory and output directory
    working_dir = os.path.abspath('../working_directories/start_area/')
    # output_dir = os.path.abspath('./vas_run_output')
    output_dir = os.path.abspath('../data/vas_run_output')
    # create working directory
    utils.mkdir(working_dir, reset=False)
    utils.mkdir(output_dir)
    # set path to working directory
    cfg.PATHS['working_dir'] = working_dir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 20
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    cfg.PARAMS['run_mb_calibration'] = False

    # keep the mass balance bias (residual) for the model run
    cfg.PARAMS['use_bias_for_run'] = True

    # get/download the rgi entity including the outline shapefile
    rgi_df = utils.get_rgi_glacier_entities([rgi_id])
    # set name, if not delivered with RGI
    if rgi_df.loc[int(rgi_id[-5:]) - 1, 'Name'] is None:
        rgi_df.loc[int(rgi_id[-5:]) - 1, 'Name'] = name

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdir = workflow.init_glacier_directories(rgi_df)[0]

    # # DEM and GIS tasks
    # # get the path to the DEM file (will download if necessary)
    # dem = utils.get_topo_file(gdir.cenlon, gdir.cenlat)
    # print('DEM source: {}, path to DEM file: {}'.format(dem[1], dem[0][0]))
    # # set path in config file
    # cfg.PATHS['dem_file'] = dem[0][0]
    # cfg.PARAMS['border'] = 10
    # cfg.PARAMS['use_intersects'] = False

    # run GIS tasks
    gis.define_glacier_region(gdir)
    gis.glacier_masks(gdir)

    # process climate data
    climate.process_climate_data(gdir)

    #  compute local t* and the corresponding mu*
    vascaling.local_t_star(gdir)

    # create mass balance model
    mb_mod = vascaling.VAScalingMassBalance(gdir)

    # look at specific mass balance over climate data period
    min_hgt, max_hgt = vascaling.get_min_max_elevation(gdir)
    y0 = 1851
    y1 = 2014

    # run scalar minimization
    minimize_res = vascaling.find_start_area(
        gdir,
        adjust_term_elev=adjust_term_elev,
        instant_geometry_change=instant_geometry_change)
    # print(minimize_res)

    # stop script if minimization was not successful
    # (note: the `and False` deliberately disables this exit)
    if minimize_res.status and False:
        sys.exit(minimize_res.status)

    # instance glacier with today's values
    model_ref = vascaling.VAScalingModel(year_0=gdir.rgi_date,
                                         area_m2_0=gdir.rgi_area_m2,
                                         min_hgt=min_hgt,
                                         max_hgt=max_hgt,
                                         mb_model=mb_mod)

    # instance guessed starting areas
    num = 9
    area_guess = np.linspace(1e6,
                             np.floor(gdir.rgi_area_m2 * 2),
                             num,
                             endpoint=True)
    # create empty containers
    area_list = list()
    volume_list = list()
    spec_mb_list = list()

    # iterate over all starting areas
    for area_ in area_guess:
        # instance iteration model
        model_guess = vascaling.VAScalingModel(year_0=gdir.rgi_date,
                                               area_m2_0=gdir.rgi_area_m2,
                                               min_hgt=min_hgt,
                                               max_hgt=max_hgt,
                                               mb_model=mb_mod)
        # set new starting values
        model_guess.create_start_glacier(area_,
                                         y0,
                                         adjust_term_elev=adjust_term_elev)
        # run model and save years and area
        best_guess_ds = model_guess.run_until_and_store(
            year_end=model_ref.year,
            instant_geometry_change=instant_geometry_change)
        # create series and store in container
        area_list.append(best_guess_ds.area_m2.to_dataframe()['area_m2'])
        volume_list.append(best_guess_ds.volume_m3.to_dataframe()['volume_m3'])
        spec_mb_list.append(best_guess_ds.spec_mb.to_dataframe()['spec_mb'])

    # create DataFrame
    area_df = pd.DataFrame(
        area_list, index=['{:.2f}'.format(a / 1e6) for a in area_guess])
    area_df.index.name = 'Start Area [km$^2$]'

    volume_df = pd.DataFrame(
        volume_list, index=['{:.2f}'.format(a / 1e6) for a in area_guess])
    volume_df.index.name = 'Start Area [km$^2$]'

    # set up model with resulted starting area
    model = vascaling.VAScalingModel(year_0=model_ref.year_0,
                                     area_m2_0=model_ref.area_m2_0,
                                     min_hgt=model_ref.min_hgt_0,
                                     max_hgt=model_ref.max_hgt,
                                     mb_model=model_ref.mb_model)
    model.create_start_glacier(minimize_res.x,
                               y0,
                               adjust_term_elev=adjust_term_elev)

    # run model with best guess initial area
    best_guess_ds = model.run_until_and_store(
        year_end=model_ref.year,
        instant_geometry_change=instant_geometry_change)
    # run model with historic reference area
    # (explicit NaN check: `if ref:` would be True for the np.NaN default)
    if not np.isnan(ref):
        model.reset()
        model.create_start_glacier(ref * 1e6,
                                   y0,
                                   adjust_term_elev=adjust_term_elev)
        ref_ds = model.run_until_and_store(
            year_end=model_ref.year,
            instant_geometry_change=instant_geometry_change)

    # create figure and add axes
    fig = plt.figure(figsize=[5, 5])
    ax = fig.add_axes([0.125, 0.075, 0.85, 0.9])

    # plot model output
    ax = (area_df / 1e6).T.plot(legend=False, colormap='Spectral', ax=ax)

    # plot best guess
    ax.plot(
        best_guess_ds.time,
        best_guess_ds.area_m2 / 1e6,
        color='k',
        ls='--',
        lw=1.2,
        label=
        f'{best_guess_ds.area_m2.isel(time=0).values/1e6:.2f} km$^2$ (best result)'
    )
    # plot reference
    if not np.isnan(ref):
        ax.plot(
            ref_ds.time,
            ref_ds.area_m2 / 1e6,
            color='k',
            ls='-.',
            lw=1.2,
            label=
            f'{ref_ds.area_m2.isel(time=0).values/1e6:.2f} km$^2$ (1850 ref.)')

    # plot 2003 reference line
    ax.axhline(
        model_ref.area_m2_0 / 1e6,
        c='k',
        ls=':',
        label=f'{model_ref.area_m2_0/1e6:.2f} km$^2$ ({gdir.rgi_date} obs.)')

    # add legend
    if legend:
        handles, labels = ax.get_legend_handles_labels()
        labels[:-3] = [r'{} km$^2$'.format(l) for l in labels[:-3]]
        leg = ax.legend(handles, labels, loc='upper right', ncol=2)
        # leg.set_title('Start area $A_0$', prop={'size': 12})

    # replot best guess estimate and reference (in case it lies below another
    # guess)
    ax.plot(best_guess_ds.time,
            best_guess_ds.area_m2 / 1e6,
            color='k',
            ls='--',
            lw=1.2)
    if not np.isnan(ref):
        ax.plot(ref_ds.time, ref_ds.area_m2 / 1e6, color='k', ls='-.', lw=1.2)

    # labels, title
    ax.set_xlim([best_guess_ds.time.values[0], best_guess_ds.time.values[-1]])
    ax.set_xlabel('')
    ax.set_ylabel('Glacier area [km$^2$]')

    # save figure to file
    if path:
        fig.savefig(path)

    # show plot
    if show:
        plt.show()
    plt.clf()

    # plot and store volume evolution
    (volume_df / 1e9).T.plot(legend=False, colormap='viridis')
    if path:
        plt.gcf().savefig(path[:-4] + '_volume.pdf')
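A hypothetical invocation of `seek_start_area`; the glacier is the Hintereisferner used elsewhere in this collection, while the reference area is a made-up value for illustration:

# illustrative call; ref (historic area in km2) is a made-up value
seek_start_area('RGI60-11.00897', 'Hintereisferner',
                show=False, path='start_area_hef.pdf', ref=10.0)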
Example #5
# test directory
idir = g2ti.geometry_dir
cfg.PATHS['working_dir'] = '/home/mowglie/disk/G2TI/oggm_cross_run'
utils.mkdir(cfg.PATHS['working_dir'], reset=True)

df = pd.read_csv(g2ti.index_file, index_col=0)
df = df.loc[[rid for rid in df.index if ('-19.' not in rid) and ('-05.' not in rid)]]

for exp, fa in zip([1, 2, 3], [1.7, 1.3, 1.0]):

    sel_ids = df.loc[df['p{}'.format(exp)] == 1].index

    # For testing
    sel_ids = sel_ids[[14, 15, 16]]

    rgidf = utils.get_rgi_glacier_entities(sel_ids, version=version)

    # Add intersects
    db = utils.get_rgi_intersects_region_file(version='61', rgi_ids=sel_ids)
    cfg.set_intersects_db(db)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.info('Starting OGGM run')
    log.info('Number of glaciers: {}'.format(len(rgidf)))

    # Go - initialize working directories
    gdirs = workflow.init_glacier_regions(rgidf)

    # Preprocessing tasks
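    # The snippet breaks off here; a hedged sketch of how it might continue,
    # based on the preprocessing task lists in Examples #6 and #9:
    workflow.execute_entity_task(tasks.glacier_masks, gdirs)
    workflow.execute_entity_task(tasks.compute_centerlines, gdirs)
    workflow.execute_entity_task(tasks.initialize_flowlines, gdirs)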
Example #6
import os

import oggm
from oggm import cfg, tasks, utils
from oggm.core.flowline import FileModel
from gmd_analysis_scripts import PLOT_DIR
from oggm.utils import get_rgi_glacier_entities, get_rgi_intersects_entities, mkdir

fig_path = os.path.join(PLOT_DIR, 'hef_scenarios.pdf')

cfg.initialize()

cfg.PARAMS['border'] = 60

base_dir = os.path.join(os.path.expanduser('~/tmp'), 'OGGM_GMD', 'scenarios')
cfg.PATHS['working_dir'] = base_dir
mkdir(base_dir, reset=True)

entity = get_rgi_glacier_entities(['RGI60-11.00897']).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=base_dir)
cfg.set_intersects_db(get_rgi_intersects_entities(['RGI60-11.00897']))

tasks.define_glacier_region(gdir, entity=entity)
tasks.glacier_masks(gdir)
tasks.compute_centerlines(gdir)
tasks.initialize_flowlines(gdir)
tasks.compute_downstream_line(gdir)
tasks.compute_downstream_bedshape(gdir)
tasks.catchment_area(gdir)
tasks.catchment_intersections(gdir)
tasks.catchment_width_geom(gdir)
tasks.catchment_width_correction(gdir)
tasks.process_cru_data(gdir)
tasks.local_t_star(gdir)
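# A hedged continuation: the steps that typically follow local_t_star in
# this collection (cf. the task list in Example #9):
tasks.mu_star_calibration(gdir)
tasks.prepare_for_inversion(gdir)
tasks.mass_conservation_inversion(gdir)
tasks.init_present_time_glacier(gdir)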
Example #7
def equilibrium_run_vas(rgi_ids,
                        use_random_mb=True,
                        path=True,
                        temp_biases=(0, +0.5, -0.5),
                        use_bias_for_run=False,
                        suffixes=('_bias_zero', '_bias_p', '_bias_n'),
                        tstar=None,
                        vas_c_length_m=None,
                        vas_c_area_m2=None,
                        **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model:
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 3000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. Given a path, it is
    also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=True
        Choose between random massbalance model and constant massbalance model.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=('_bias_zero', '_bias_p', '_bias_n')
        Descriptive suffixes corresponding to the given temperature biases.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    vas_c_length_m: float, optional, default=None
        Scaling constant for volume/length scaling
    vas_c_area_m2: float, optional, default=None
        Scaling constant for volume/area scaling
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'test_cluster'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # shutil.rmtree(wdir)
    # os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # change scaling constants for length and area
    if vas_c_length_m:
        cfg.PARAMS['vas_c_length_m'] = vas_c_length_m
    if vas_c_area_m2:
        cfg.PARAMS['vas_c_area_m2'] = vas_c_area_m2
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(vascaling.local_t_star,
                                 gdirs,
                                 tstar=tstar,
                                 bias=0)

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each (per default).
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(vascaling.run_random_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)
    else:
        # run ConstantMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each (per default).
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(vascaling.run_constant_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)

    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all temperature biases/suffixes
    for suffix in suffixes:
        # compile the output for each run
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       input_filesuffix=suffix,
                                       path=False)
        # add to container
        ds.append(ds_)

    # concat the single output datasets into one,
    # with temperature bias as coordinate
    ds = xr.concat(ds, pd.Index(temp_biases, name='temp_bias'))
    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # compute mean and sum over all glaciers
    ds_mean = ds.mean(dim='rgi_id')
    ds_mean.coords['rgi_id'] = 'mean'
    ds_sum = ds.sum(dim='rgi_id')
    ds_sum.coords['rgi_id'] = 'sum'
    # add to dataset
    ds = xr.concat([ds, ds_mean, ds_sum], dim='rgi_id')

    # normalize glacier geometries (length/area/volume) with start value
    ds_normal = normalize_ds_with_start(ds)
    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = int(False)
    ds_normal.coords['normalized'] = int(True)

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store datasets
    if path:
        if path is True:
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_vas.nc'.format(mb))

        ds.to_netcdf(path)

    return ds
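A hedged usage sketch for `equilibrium_run_vas`; the RGI ID is the Hintereisferner ID used throughout this collection, and the shortened run length is illustrative:

# illustrative call: one glacier, constant mass balance, shortened run
ds = equilibrium_run_vas(['RGI60-11.00897'],
                         use_random_mb=False,
                         nyears=1000)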
Example #8
# load default parameter file
cfg.initialize()
# set path to working directory
cfg.PATHS['working_dir'] = testdir
# load and set path to intersects
path = utils.get_rgi_intersects_region_file('11', version='6')
cfg.set_intersects_db(path)

# change some default parameters
cfg.PARAMS['border'] = 50
cfg.PARAMS['baseline_climate'] = 'CRU'
cfg.PARAMS['use_multiprocessing'] = True

# get RGI entity for Hintereisferner
rgi_id = 'RGI60-11.00897'
entity = utils.get_rgi_glacier_entities([rgi_id]).iloc[0]

# initialize the GlacierDirectory
gdir = oggm.GlacierDirectory(entity, base_dir=testdir)

# define the local grid and glacier mask
gis.define_glacier_region(gdir, entity=entity)
gis.glacier_masks(gdir)

# process the given climate file
climate.process_cru_data(gdir)

# run center line preprocessing tasks
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.catchment_area(gdir)
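# A hedged continuation with the remaining centerline tasks, in the same
# order as Examples #6 and #9:
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)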
Example #9
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
                      output_folder='', working_dir='', is_test=False,
                      demo=False, test_rgidf=None, test_intersects_file=None,
                      test_topofile=None, test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Time
    start = time.time()

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    climate_periods = [1920, 1960, 2000]

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    rgi_dir_name = 'RGI{}'.format(rgi_version)
    border_dir_name = 'b_{:03d}'.format(border)
    base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DEMO_GLACIERS.index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                        version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(4)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # L1 - initialize working directories
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L2 - Tasks
    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    workflow.execute_entity_task(tasks.process_cru_data, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L3 - Tasks
    task_list = [
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
                                     path=opath)

    # L3 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L4 - Tasks
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Copy mini data to new dir
    base_dir = os.path.join(base_dir, 'L4')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
                                              base_dir=base_dir)

    # L4 OK - compress all in output directory
    workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
    utils.base_dir_to_tar(base_dir)

    # Log
    m, s = divmod(time.time() - start, 60)
    h, m = divmod(m, 60)
    log.workflow('OGGM prepro_levels is done! Time needed: '
                 '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
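A hypothetical call to `run_prepro_levels`; region 11 (Central Europe) matches the glaciers used elsewhere in this collection, while the border value and paths are placeholders:

# illustrative call; paths are placeholders
run_prepro_levels(rgi_version='61', rgi_reg='11', border=80,
                  output_folder='/tmp/prepro_out',
                  working_dir='/tmp/prepro_wd',
                  is_test=True)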
Example #10
def glacier_to_table2(histalp_storage, comit_storage, tbiasdictpath,
                      outpath):
    pd.options.display.max_colwidth = 100
    pd.options.display.float_format = '{:.2f}'.format
    wd = utils.get_temp_dir()
    tbiasdict = pickle.load(open(tbiasdictpath, 'rb'))

    # Initialize OGGM
    cfg.initialize()
    cfg.PATHS['working_dir'] = wd
    utils.mkdir(wd, reset=True)
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # and set standard histalp values
    cfg.PARAMS['temp_melt'] = -1.75

    rgis = list(GLCDICT.keys())

    allmeta, allobs = get_length_observations(rgis)
    gdirs = preprocessing.configure(wd, rgis, resetwd=True)

    # get glacier
    ref_gdirs = [GlacierDirectory(refid) for
                 refid in preprocessing.ADDITIONAL_REFERENCE_GLACIERS]

    # get glaciers up and running
    compute_ref_t_stars(ref_gdirs + gdirs)
    task_list = [tasks.local_t_star,
                 tasks.mu_star_calibration,
                 tasks.prepare_for_inversion,
                 tasks.mass_conservation_inversion,
                 tasks.filter_inversion_output,
                 tasks.init_present_time_glacier
                 ]

    for task in task_list:
        execute_entity_task(task, gdirs)

    dfout = pd.DataFrame([])
    dfplot = pd.DataFrame([])
    for gdir in gdirs:

        rgi = gdir.rgi_id

        try:
            meta = allmeta.loc[rgi]
        except KeyError:
            continue

        if preprocessing.merge_pair_dict(rgi) is not None:
            rgi += '_merged'

        l2003 = gdir.read_pickle('model_flowlines')[-1].length_m
        dfout.loc[rgi, 'ly0'] = (l2003 - meta['dL2003'])/1000

        fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
        df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85,
                                   meta)
        ensmean = df85.mean(axis=1)
        prelength = ensmean.dropna().iloc[-30:].mean()
        dfout.loc[rgi, 'dl 1885'] = prelength

        fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
        df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99,
                                   meta)
        ensmean = df99.mean(axis=1)
        postlength = ensmean.dropna().iloc[-30:].mean()
        dfout.loc[rgi, 'dl 1999'] = postlength

        fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
        df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70,
                                   meta)
        ensmean = df70.mean(axis=1)
        y70length = ensmean.dropna().iloc[-30:].mean()
        dfout.loc[rgi, 'dl 1970'] = y70length

        dfplot.loc[rgi, 'dl 1885-1970'] = (prelength - y70length)/1000

        rndic = pickle.load(
            open(os.path.join(histalp_storage, 'runs_{}.p'.format(rgi)), 'rb'))

        f = gdir.get_filepath('climate_historical')
        with utils.ncDataset(f) as nc:
            time = nc.variables['time'][:]
            clim = pd.DataFrame([], index=pd.Timestamp(
                '1801-01-01') + pd.TimedeltaIndex(time, 'day'))
            clim['prcp'] = nc.variables['prcp'][:]
            clim['temp'] = nc.variables['temp'][:]

        tempjja = clim.loc[clim.index.month.isin([6, 7, 8]), 'temp']
        tempjja = tempjja.groupby(tempjja.index.year).mean()

        tempsom = clim.loc[clim.index.month.isin([5, 6, 7, 8, 9]), 'temp']
        tempsom = tempsom.groupby(tempsom.index.year).mean()

        prcpjja = clim.loc[clim.index.month.isin([6, 7, 8]), 'prcp']
        prcpjja = prcpjja.groupby(prcpjja.index.year).sum()

        prcpsom = clim.loc[clim.index.month.isin([5, 6, 7, 8, 9]), 'prcp']
        prcpsom = prcpsom.groupby(prcpsom.index.year).sum()

        prcpdjf = clim.loc[clim.index.month.isin([1, 2, 12]), 'prcp']
        prcpdjf.index = prcpdjf.index + pd.offsets.MonthBegin(1)
        prcpdjf = prcpdjf.groupby(prcpdjf.index.year).sum()

        tempdjf = clim.loc[clim.index.month.isin([1, 2, 12]), 'temp']
        tempdjf.index = tempdjf.index + pd.offsets.MonthBegin(1)
        tempdjf = tempdjf.groupby(tempdjf.index.year).mean()

        prcpwin = clim.loc[
            clim.index.month.isin([1, 2, 12, 11, 10, 3, 4]), 'prcp']
        prcpwin.index = prcpwin.index + pd.offsets.MonthBegin(3)
        prcpwin = prcpwin.groupby(prcpwin.index.year).sum()

        tempwin = clim.loc[
            clim.index.month.isin([1, 2, 12, 11, 10, 3, 4]), 'temp']
        tempwin.index = tempwin.index + pd.offsets.MonthBegin(3)
        tempwin = tempwin.groupby(tempwin.index.year).mean()

        # reference: 1984-2014
        tjjaref = tempjja.loc[1984:2014].mean()
        tsomref = tempsom.loc[1984:2014].mean()
        twinref = tempwin.loc[1984:2014].mean()
        tdjfref = tempdjf.loc[1984:2014].mean()

        pdjfref = prcpdjf.loc[1984:2014].mean()
        pwinref = prcpwin.loc[1984:2014].mean()
        psomref = prcpsom.loc[1984:2014].mean()
        pjjaref = prcpjja.loc[1984:2014].mean()

        # preindust
        tjjapre = tempjja.loc[1870:1900].mean() - tjjaref
        tsompre = tempsom.loc[1870:1900].mean() - tsomref
        twinpre = tempwin.loc[1870:1900].mean() - twinref
        tdjfpre = tempdjf.loc[1870:1900].mean() - tdjfref

        pdjfpre = prcpdjf.loc[1870:1900].mean() - pdjfref
        pwinpre = prcpwin.loc[1870:1900].mean() - pwinref
        psompre = prcpsom.loc[1870:1900].mean() - psomref
        pjjapre = prcpjja.loc[1870:1900].mean() - pjjaref

        # 1960-1980
        tjja60 = tempjja.loc[1960:1980].mean() - tjjaref
        tsom60 = tempsom.loc[1960:1980].mean() - tsomref
        twin60 = tempwin.loc[1960:1980].mean() - twinref
        tdjf60 = tempdjf.loc[1960:1980].mean() - tdjfref

        pdjf60 = prcpdjf.loc[1960:1980].mean() - pdjfref
        pwin60 = prcpwin.loc[1960:1980].mean() - pwinref
        psom60 = prcpsom.loc[1960:1980].mean() - psomref
        pjja60 = prcpjja.loc[1960:1980].mean() - pjjaref

        # dfout.loc[rgi, 'T JJA 1984-2014'] = tref

        dfout.loc[rgi, 'tjja pre'] = tjjapre
        dfout.loc[rgi, 'tjja 70'] = tjja60
        dfout.loc[rgi, 'pdjf pre'] = pdjfpre
        dfout.loc[rgi, 'pdjf 70'] = pdjf60

        dfplot.loc[rgi, 'dt jja'] = tjjapre - tjja60
        dfplot.loc[rgi, 'dt som'] = tsompre - tsom60
        dfplot.loc[rgi, 'dt win'] = twinpre - twin60
        dfplot.loc[rgi, 'dt djf'] = tdjfpre - tdjf60

        dfplot.loc[rgi, 'dp djf'] = pdjfpre - pdjf60
        dfplot.loc[rgi, 'dp win'] = pwinpre - pwin60
        dfplot.loc[rgi, 'dp som'] = psompre - psom60
        dfplot.loc[rgi, 'dp jja'] = pjjapre - pjja60

        dfout.loc[rgi, 'dt spinup'] = tbiasdict[rgi].loc[
            rndic['ensemble']].mean()

        # dfout.loc[rgi, 'dT spin min'] = tbiasdict[rgi].loc[rndic['ensemble']].min()
        # dfout.loc[rgi, 'dT spin max'] = tbiasdict[rgi].loc[rndic['ensemble']].max(

        mu = pd.DataFrame()
        ela85 = pd.DataFrame()
        ela70 = pd.DataFrame()
        ela99 = pd.DataFrame()
        for i in np.arange(99):
            rgipath = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
                                   rgi[:8], rgi[:11], rgi)

            if 'merged' in rgi:
                fname = 'local_mustar_{}.json'.format(rgi.split('_')[0])
            else:
                fname = 'local_mustar.json'

            fp = os.path.join(rgipath, fname)
            try:
                with open(fp, 'r') as f:
                    out = json.load(f)

                # ela
                com85 = xr.open_dataset(
                    os.path.join(
                        comit_storage, rgi,
                        'model_diagnostics_commitment1885_{:02d}.nc'.format(
                            i)))
                com70 = xr.open_dataset(
                    os.path.join(
                        comit_storage, rgi,
                        'model_diagnostics_commitment1970_{:02d}.nc'.format(
                            i)))
                com99 = xr.open_dataset(
                    os.path.join(
                        comit_storage, rgi,
                        'model_diagnostics_commitment1999_{:02d}.nc'.format(
                            i)))

            except FileNotFoundError:
                break

            mu.loc[i, 'mu'] = out['mu_star_flowline_avg']
            ela85.loc[:, i] = com85.to_dataframe()['ela_m']
            ela70.loc[:, i] = com70.to_dataframe()['ela_m']
            ela99.loc[:, i] = com99.to_dataframe()['ela_m']

        dfout.loc[rgi, 'mu mean'] = mu['mu'].mean()
        dfout.loc[rgi, 'ELA 1885'] = int(ela85.mean(axis=1).iloc[-30:].mean())
        dfout.loc[rgi, 'ELA 1970'] = int(ela70.mean(axis=1).iloc[-30:].mean())
        dfout.loc[rgi, 'ELA 1999'] = int(ela99.mean(axis=1).iloc[-30:].mean())

    rgidf = utils.get_rgi_glacier_entities(rgis)
    for rgi, _ in dfout.iterrows():
        rgiid = rgi.split('_')[0]
        name = GLCDICT[rgiid][2].split('(')[0]
        dfout.loc[rgi, 'name'] = name
        dfout.loc[rgi, 'lon'] = rgidf.loc[rgidf.RGIId == rgiid,
                                          'CenLon'].iloc[0]
        dfplot.loc[rgi, 'lon'] = rgidf.loc[rgidf.RGIId == rgiid,
                                           'CenLon'].iloc[0]

    # plot climate vs lengthchange
    from relic.graphics import climate_vs_lengthchange
    climate_vs_lengthchange(dfplot, outpath)

    df = dfout.sort_values('lon')
    df.index = np.arange(1, 31)
    # ---------------
    # formaters

    def _str(x):
        return str(x)

    def _float2(x):
        return '{:.2f}'.format(x)

    def _f2int(x):
        return '{:.0f}'.format(x)

    def _(x):
        return x

    # generate table
    titl = ('$\\Delta l_{1885 (1970, 1999)}$ are the ensemble mean length '
            'changes with respect to the length $l_{y0}$ of the first '
            'observed year (see Tbl.~\\ref{tbl:glaciers}) under a '
            'randomized climate around the years 1870-1900, 1960-1980 and '
            '1984-2014 respectively. '
            '$\\Delta T^{JJA}_{ref-1885 (1970)}$ are the temperature '
            'differences between these periods and a reference period defined '
            'as 1984-2014 as derived from the HISTALP climate data. '
            '$\\Delta T_{spinup}$ is the ensemble mean temperature bias which '
            'was applied '
            'on top of the constant mean climate of the reference period to '
            'grow the glaciers to their post-LIA size. '
            'And $\\overline{ELA_{1885 (1970, 1999)}}$ are the ensemble mean '
            'equilibrium line altitudes of the randomized climate '
            'simulations around the respective years.'
            )
    tbl = df.to_latex(na_rep='--', index=True, longtable=False,
                      column_format='r' + 'p{18mm}' + 12*'r',
                      columns=['name', 'ly0',
                               'dl 1885', 'dl 1970', 'dl 1999',
                               'tjja pre', 'tjja 70',
                               'pdjf pre', 'pdjf 70',
                               'dt spinup',
                               'ELA 1885', 'ELA 1970', 'ELA 1999'],
                      formatters=[_str, _float2,
                                  _f2int, _f2int, _f2int,
                                  _float2, _float2,
                                  _f2int, _f2int,
                                  _float2,
                                  _f2int, _f2int,
                                  _f2int] + [_ for i in range(2)],
                      caption=titl,
                      label='tbl:climate')

    # fontsize
    tbl = tbl.replace('\n', '\n\\scriptsize\n', 1)

    tbl = tbl.replace('Unterer Grindelwaldgletscher', 'U. Grindelwaldgl.')
    tbl = tbl.replace('Oberer Grindelwaldgletscher', 'O. Grindelwaldgl.')
    tbl = tbl.replace('Großer Aletschgletscher with Mittelaletschgletscher',
                      'Aletschgl.')
    tbl = tbl.replace('Vadret da Morteratsch', 'Morteratsch')
    tbl = tbl.replace('Vadret da Tschierva with Vadret da Roseg', 'Tschierva')
    tbl = tbl.replace('Glacier du Mont Mine with Glacier de Ferpecle',
                      'Mont Mine')
    tbl = tbl.replace('Mer de Glace with Glacier de Leschaux', 'Mer de Glace')
    tbl = tbl.replace('Ghiacciaio dei Forni', 'Forni')
    tbl = tbl.replace('Vadrec del Forno', 'Forno')
    tbl = tbl.replace('Glacier de ', '')
    tbl = tbl.replace('Glacier des', '')
    tbl = tbl.replace('(with tributary)', '')
    tbl = tbl.replace('(with tributaries)', '')

    tbl = tbl.replace('Glacier', 'Gl.')
    tbl = tbl.replace('gletscher', 'gl.')
    tbl = tbl.replace('dT (spinup-jjaref)', 'spinjja')
    tbl = tbl.replace('dT (spinup-somref)', 'spinsom')

    tbl = tbl.replace('{table}', '{sidewaystable}')

    tbl = tbl.replace('dl 1885', '$\\Delta l_{1885}$')
    tbl = tbl.replace('dl 1970', '$\\Delta l_{1970}$')
    tbl = tbl.replace('dl 1999', '$\\Delta l_{1999}$')
    tbl = tbl.replace('tjja pre', '$\\Delta T^{JJA}_{ref-1885}$')
    tbl = tbl.replace('tjja 70', '$\\Delta T^{JJA}_{T_{ref-1970}}$')
    tbl = tbl.replace('pdjf pre', '$\\Delta P^{DJF}_{ref-1885}$')
    tbl = tbl.replace('pdjf 70', '$\\Delta P^{DJF}_{T_{ref-1970}}$')
    tbl = tbl.replace('dt spinup', '$\\Delta T_{spinup}$')
    tbl = tbl.replace('ELA 1885', '$\\overline{ELA_{1885}}$')
    tbl = tbl.replace('ELA 1970', '$\\overline{ELA_{1970}}$')
    tbl = tbl.replace('ELA 1999', '$\\overline{ELA_{1999}}$')

    # add line with units
    tbl = tbl.replace('\\midrule',
                      ('& & [km$^2$] &[m] & [m] & [m] & $[^\\circ C]$ & '
                       '$[^\\circ C]$ & $[^\\circ C]$ & '
                       '[mm] & [mm] & '
                       '[m] & [m] & [m] '
                       '\\\\\n\\midrule'))

    with open(os.path.join(outpath, 'table2.tex'), 'w') as tf:
        tf.write(tbl)
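The seasonal climate statistics above (JJA, DJF, ... means and sums) are plain pandas groupby operations; a self-contained sketch of the same pattern:

import numpy as np
import pandas as pd

# dummy monthly temperature series over two years
idx = pd.date_range('2000-01-01', periods=24, freq='MS')
temp = pd.Series(np.random.randn(24), index=idx)

# summer (Jun-Aug) mean per year, as done for `tempjja` above
jja = temp[temp.index.month.isin([6, 7, 8])]
print(jja.groupby(jja.index.year).mean())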
Example #11
def glacier_to_table1(outpath):
    pd.options.display.max_colwidth = 100
    pd.options.display.float_format = '{:.2f}'.format
    df = pd.DataFrame([], index=GLCDICT.keys())

    poldict = {'Switzerland': 'CH',
               'Austria': 'AT',
               'Italy': 'IT',
               'France': 'FR'}

    rgidf = utils.get_rgi_glacier_entities(df.index)
    meta, _ = get_length_observations(df.index)

    for rgi, _ in df.iterrows():
        df.loc[rgi, 'name'] = GLCDICT[rgi][2]
        df.loc[rgi, 'country'] = poldict[GLCDICT[rgi][3]]
        df.loc[rgi, 'lon/lat'] = '{:.2f}/{:.2f}'.\
            format(rgidf.loc[rgidf.RGIId == rgi, 'CenLon'].iloc[0],
                   rgidf.loc[rgidf.RGIId == rgi, 'CenLat'].iloc[0])
        df.loc[rgi, 'lon'] = rgidf.loc[rgidf.RGIId == rgi, 'CenLon'].iloc[0]

        area = rgidf.loc[rgidf.RGIId == rgi, 'Area'].iloc[0]

        df.loc[rgi, 'RGI id'] = rgi
        if MERGEDICT.get(rgi):
            df.loc[rgi, 'RGI id'] = '{}, {}'.\
                format(rgi,
                       str(MERGEDICT[rgi][0]).strip('[]').replace("'", ''))

            tribs = MERGEDICT[rgi][0]
            tribdf = utils.get_rgi_glacier_entities(tribs)
            for trib in tribs:
                area += tribdf.loc[tribdf.RGIId == trib, 'Area'].iloc[0]

        df.loc[rgi, 'areakm2'] = '{:.1f}'.format(area)
        df.loc[rgi, '1.obs'] = '{:.0f}'.format(meta.loc[rgi, 'first'])
        df.loc[rgi, '#obs'] = '{:.0f}'.format(meta.loc[rgi, 'measurements'])

    df = df.sort_values('lon')

    df.index = np.arange(1, 31)

    # ---------------
    titl = ('All glaciers used for this study. Index corresponds '
            'to location in Figure~\\ref{fig:map} and is also indicated in '
            'the title of individual plots. '
            'Multiple RGI ids indicate that the glacier is merged with one or '
            'more tributary glaciers. '
            'For merged glaciers \\emph{area} then also includes the '
            'tributaries. '
            '\\emph{1.obs} refers to the first observation after 1850 and'
            ' the number of observations \\emph{\\#obs} is counted until '
            '2020.'
            )
    # generate table
    tbl = df.to_latex(na_rep='--', index=True, longtable=False,
                      columns=['name', 'country', 'RGI id', 'lon/lat',
                               'areakm2', '1.obs', '#obs'],
                      column_format='r' + 'p{35mm}' + 'l' + 'p{45mm}' + 4*'r',
                      caption=titl,
                      label='tbl:glaciers')

    # set fontsize to footnotesize
    tbl = tbl.replace('\n', '\n\\footnotesize\n', 1)

    # and some manual line breaks
    tbl = tbl.replace('with Glacier', '\\newline with Glacier')
    tbl = tbl.replace('with Mittel', '\\newline with Mittel')
    tbl = tbl.replace('with Vadret', '\\newline with Vadret')
    tbl = tbl.replace('Obersulzbachkees', 'Obersulzbachkees\\newline')
    tbl = tbl.replace('RGI60-11.00168', '\\newline RGI60-11.00168')
    tbl = tbl.replace('RGI60-11.00213', '\\newline RGI60-11.00213')
    tbl = tbl.replace('country', '')
    tbl = tbl.replace('name', 'name and country')
    tbl = tbl.replace('areakm2', 'area [km$^2$]')
    with open(os.path.join(outpath, 'table1.tex'), 'w') as tf:
        tf.write(tbl)
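Usage is a single call with the output directory for the .tex file (the path is a placeholder):

# writes table1.tex into the given directory
glacier_to_table1('/path/to/tex_output')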
Example #12
def compute_scaling_params(rgi_ids, path=None):
    """ The routine computes scaling parameters by fitting a linear regression
    to the volume/area and volume/length scatter in log-log space, using the
    inversion volume, the RGI area and the longest centerline as "observations"
    Thereby, the following two cases apply:
    - compute only scaling constants, since scaling exponents have a physical
        basis and should not be changed
    - compute only scaling constants and scaling exponents

    Returns parameters in a 2-level dictionary. The upper level differentiates
    between the two cases, the lower level indicates the parameters.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.

    Returns
    -------
    Dictionary containing the computed parameters.

    """
    log.info('Starting scaling parameter computation')

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:-1]

    # load default parameter file
    vascaling.initialize()

    # get environmental variables for working and output directories
    WORKING_DIR = os.environ["WORKDIR"]
    OUTPUT_DIR = os.environ["OUTDIR"]
    # create working directory
    utils.mkdir(WORKING_DIR)
    utils.mkdir(OUTPUT_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # set minimum ice thickness to include in glacier length computation
    # this reduces weird spikes in length records
    cfg.PARAMS['min_ice_thick_for_length'] = 0.1

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)
    # keep the mass balance bias (residual) for the run
    cfg.PARAMS['use_bias_for_run'] = True

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(climate.local_t_star, gdirs)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # create empty dictionary
    params = dict()

    # compute scaling constants for given (fixed) slope
    params['const_only'] = vascaling.get_scaling_constant(gdirs)

    # compute scaling constants and scaling exponent via linear regression
    params['const_expo'] = vascaling.get_scaling_constant_exponent(gdirs)

    # store to file
    if path:
        if not isinstance(path, str):
            # set default path and filename
            path = os.path.join(OUTPUT_DIR, 'scaling_params.json')
        json.dump(params, open(path, 'w'))

    return params
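A hedged call sketch for `compute_scaling_params`; the function reads the `WORKDIR` and `OUTDIR` environment variables, so they must be set beforehand (paths are placeholders):

import os
os.environ.setdefault('WORKDIR', '/tmp/vas_wd')
os.environ.setdefault('OUTDIR', '/tmp/vas_out')
# path=True falls back to OUTDIR/scaling_params.json
params = compute_scaling_params(['RGI60-11.00897'], path=True)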
Example #13
def sensitivity_run_vas(rgi_ids,
                        use_random_mb=False,
                        use_mean=False,
                        path=True,
                        temp_bias=0,
                        tstar=None,
                        use_default_tstar=True,
                        use_bias_for_run=True,
                        scaling_params=[(4.5507, 0.191, 2.2, 1.375)],
                        time_scale_factors=[1],
                        suffixes=['_default'],
                        **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model (cf. `equilibrium_run_vas`) but for only one
    given temperature bias. However, it is possible to supply a list of
    sensitivity parameters (the scaling constants, and time scale factor) to
    alter the model behavior.
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 1000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. Given a path, it is
    also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=False
        Choose between random massbalance model and constant massbalance model.
    use_mean: bool, optional, default=False
        Choose between the mean or summation over all glaciers
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_bias: float, optional, default=0
        Temperature bias (degC) for the mass balance model.
    scaling_params: multi-dimensional array-like, optional,
        default=[(4.5507, 0.191, 2.2, 1.375)]
        List containing the scaling constants and scaling exponents for length
        and area scaling as tuples, e.g., (c_l, c_a, q, gamma)
    time_scale_factors: array-like, optional, default=[1]
        Factors with which the glacier response time scale is multiplied.
    suffixes: array-like, optional, default=['_default']
        Descriptive suffixes corresponding to the given combinations of
        scaling parameters and time scale factors.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    use_default_tstar: bool, optional, default=True
        Flag deciding whether or not to compute mu* from the default reference
        table. Overridden by a given tstar.
    use_bias_for_run: bool, optional, default=True
        Flag deciding whether or not to use the mass balance residual.
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(scaling_params) * len(time_scale_factors) != len(suffixes):
        raise RuntimeError("Each given combination of scaling parameters and "
                           "time scale factor must have its corresponding "
                           "suffix.")

    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:-1]
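    # e.g., 'RGI60-11.00897' yields rgi_region = '11' and rgi_version = '6'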

    # load default parameter file
    vascaling.initialize()

    # get environmental variables for working and output directories
    WORKING_DIR = os.environ["WORKDIR"]
    OUTPUT_DIR = os.environ["OUTDIR"]
    # create working directory
    utils.mkdir(WORKING_DIR)
    utils.mkdir(OUTPUT_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 2.5
    cfg.PARAMS['temp_melt'] = -0.5
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run
    # set minimum ice thickness to include in glacier length computation
    # this reduces weird spikes in length records
    cfg.PARAMS['min_ice_thick_for_length'] = 0.1

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    if tstar or use_default_tstar:
        # compute mustar from given tstar or reference table
        workflow.execute_entity_task(vascaling.local_t_star,
                                     gdirs,
                                     tstar=tstar,
                                     bias=0)
    else:
        # compute mustar from the reference table for the flowline model
        # RGI v6 and HISTALP baseline climate
        ref_df = pd.read_csv(
            utils.get_demo_file('oggm_ref_tstars_rgi6_histalp.csv'))
        workflow.execute_entity_task(vascaling.local_t_star,
                                     gdirs,
                                     ref_df=ref_df)
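    # (a given tstar forces the calibration around that year; otherwise
    # use_default_tstar decides between the model's default t* and the
    # flowline reference table read above)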

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 1000 years if not specified otherwise
    kwargs.setdefault('nyears', 1000)

    # limit parameters to 3 decimal points
    scaling_params = recursive_round(scaling_params, 3)
    time_scale_factors = recursive_round(time_scale_factors, 3)
    # ensure that the scaling parameters are handled as intact tuples
    scaling_params_list = np.zeros(len(scaling_params), dtype=object)
    scaling_params_list[:] = scaling_params
    # combine scaling constants, scaling exponents and time scale factor
    # into one iterable array
    sensitivity_params = np.array(
        np.meshgrid(scaling_params_list, time_scale_factors)).T
    sensitivity_params = (sensitivity_params.reshape(
        -1, sensitivity_params.shape[-1]))
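    # e.g., two parameter tuples and two factors give an array of shape
    # (4, 2), each row holding (scaling-parameter tuple, time scale factor)
    # in the parameter-major order assumed by the suffixes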

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            cfg.PARAMS['vas_q_length'] = params[0][2]
            cfg.PARAMS['vas_gamma_area'] = params[0][3]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_random_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)
    else:
        # run ConstantMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            cfg.PARAMS['vas_q_length'] = params[0][2]
            cfg.PARAMS['vas_gamma_area'] = params[0][3]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_constant_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)
    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all scaling constants
    for i, params in enumerate(scaling_params):
        # create empty container
        ds_ = list()
        # iterate over all time scale factors
        for j, factor in enumerate(time_scale_factors):
            # compile the output for each run
            pos = j + len(time_scale_factors) * i
            ds__ = utils.compile_run_output(np.atleast_1d(gdirs),
                                            filesuffix=suffixes[pos],
                                            path=False)
            # add time scale factor as coordinate
            ds__.coords['time_scale_factor'] = factor
            # add to container
            ds_.append(ds__)

        # concatenate using time scale factor as concat dimension
        ds_ = xr.concat(ds_, dim='time_scale_factor')
        # add scaling constants as coordinate
        params_list = np.zeros(len([params]), dtype=object)
        params_list[:] = [params]
        ds_ = ds_.expand_dims(dim={'scaling_params': params_list})
        # add to container
        ds.append(ds_)

    # concatenate using scaling constants as concat dimension
    ds = xr.concat(ds, dim='scaling_params')

    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # normalize glacier geometries (length/area/volume) with start value
    if use_mean:
        # compute average over all glaciers
        ds_normal = normalize_ds_with_start(ds).mean(dim='rgi_id')
        ds = ds.mean(dim='rgi_id')
    else:
        # compute sum over all glaciers
        ds_normal = normalize_ds_with_start(ds.sum(dim='rgi_id'))
        ds = ds.sum(dim='rgi_id')

    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = False
    ds_normal.coords['normalized'] = True

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')
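    # (either variant can later be selected via ds.sel(normalized=True)
    # or ds.sel(normalized=False))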

    # store datasets
    if path:
        if not isinstance(path, str):
            # set default path and filename
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(OUTPUT_DIR, f'run_output_{mb}_vas.nc')
        # write to file
        log.info(f'Writing run output to {path}')
        ds.to_netcdf(path)

    # return ds, ds_normal
    return ds
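
# A minimal call sketch for the routine above (assuming the WORKDIR and
# OUTDIR environment variables are set and the glacier lies within the
# HISTALP domain):
# ds = sensitivity_run_vas(['RGI60-11.00897'],
#                          scaling_params=[(4.5507, 0.191, 2.2, 1.375)],
#                          time_scale_factors=[0.5, 1.0],
#                          suffixes=['_tsf05', '_tsf10'])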
Example no. 14
def mb_calibration(rgi_version, baseline):
    """ Run the mass balance calibration for the VAS model. RGI version and
    baseline climate must be given.

    :param rgi_version: str, RGI version (e.g., '61')
    :param baseline: str, baseline climate 'HISTALP' or 'CRU'
    """

    # initialize OGGM and set up the run parameters
    vascaling.initialize(logging_level='WORKFLOW')

    # local paths (where to write the OGGM run output)
    # dirname = 'VAS_ref_mb_{}_RGIV{}'.format(baseline, rgi_version)
    # wdir = utils.gettempdir(dirname, home=True, reset=True)
    # utils.mkdir(wdir, reset=True)
    wdir = os.environ['WORKDIR']
    cfg.PATHS['working_dir'] = wdir

    # we are running the calibration ourselves
    cfg.PARAMS['run_mb_calibration'] = True
    # which baseline climate data to use
    cfg.PARAMS['baseline_climate'] = baseline
    # no need for intersects since this has an effect on the inversion only
    cfg.PARAMS['use_intersects'] = False
    # use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True
    # set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    if baseline == 'HISTALP':
        # other params: see https://oggm.org/2018/08/10/histalp-parameters/
        # cfg.PARAMS['prcp_scaling_factor'] = 1.75
        # cfg.PARAMS['temp_melt'] = -1.75
        cfg.PARAMS['prcp_scaling_factor'] = 2.5
        cfg.PARAMS['temp_melt'] = -0.5
    elif baseline == 'CRU':
        # using the parameters from Marzeion et al. (2012)
        cfg.PARAMS['prcp_scaling_factor'] = 2.5
        cfg.PARAMS['temp_melt'] = 1
        cfg.PARAMS['temp_all_solid'] = 3

    # the next step is to get all the reference glaciers,
    # i.e. glaciers with mass balance measurements.

    # get the reference glacier ids (they are different for each RGI version)
    rgi_dir = utils.get_rgi_dir(version=rgi_version)
    df, _ = utils.get_wgms_files()
    rids = df['RGI{}0_ID'.format(rgi_version[0])]

    # we can't do Antarctica
    rids = [rid for rid in rids if not ('-19.' in rid)]

    # For HISTALP only RGI reg 11 (Alps); note that the 'or True' below
    # currently enforces this restriction for all baselines
    if baseline == 'HISTALP' or True:
        rids = [rid for rid in rids if '-11' in rid]

    debug = False
    if debug:
        print("==================================\n"
              + "DEBUG MODE: only RGI60-11.00897\n"
              + "==================================")
        rids = [rid for rid in rids if '-11.00897' in rid]
        cfg.PARAMS['use_multiprocessing'] = False

    # make a new dataframe with those (this takes a while)
    print('Reading the RGI shapefiles...')
    rgidf = utils.get_rgi_glacier_entities(rids, version=rgi_version)
    print('For RGIV{} we have {} candidate reference '
          'glaciers.'.format(rgi_version, len(rgidf)))

    # initialize the glacier regions
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)

    # we need to know which period we have data for
    print('Process the climate data...')
    if baseline == 'CRU':
        execute_entity_task(tasks.process_cru_data, gdirs, print_log=False)
    elif baseline == 'HISTALP':
        # Some glaciers are not in Alps
        gdirs = [gdir for gdir in gdirs if gdir.rgi_subregion == '11-01']
        # cfg.PARAMS['continue_on_error'] = True
        execute_entity_task(tasks.process_histalp_data, gdirs, print_log=False,
                            y0=1850)
        # cfg.PARAMS['continue_on_error'] = False
    else:
        execute_entity_task(tasks.process_custom_climate_data,
                            gdirs, print_log=False)

    # get reference glaciers with mass balance measurements
    gdirs = utils.get_ref_mb_glaciers(gdirs)

    # keep only these glaciers
    rgidf = rgidf.loc[rgidf.RGIId.isin([g.rgi_id for g in gdirs])]

    # save to file
    rgidf.to_file(os.path.join(wdir, 'mb_ref_glaciers.shp'))
    print('For RGIV{} and {} we have {} reference glaciers'.format(rgi_version,
                                                                   baseline,
                                                                   len(rgidf)))

    # sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    # newly initialize glacier directories
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)

    # run climate tasks
    vascaling.compute_ref_t_stars(gdirs)
    execute_entity_task(vascaling.local_t_star, gdirs)

    # we store the associated params
    mb_calib = gdirs[0].read_pickle('climate_info')['mb_calib_params']
    with open(os.path.join(wdir, 'mb_calib_params.json'), 'w') as fp:
        json.dump(mb_calib, fp)
Example no. 15
t_star, bias = res['t_star'], res['bias']

# --------------------
#  SCALING MODEL
# --------------------

# compute local t* and the corresponding mu*
vascaling.local_t_star(gdir, tstar=t_star, bias=bias)

# instance the mass balance models
ben_mbmod = vascaling.VAScalingMassBalance(gdir)

# get reference area
a0 = gdir.rgi_area_m2
# get reference year
rgi_df = get_rgi_glacier_entities([gdir.rgi_id])
y0 = int(rgi_df.BgnDate.values[0][:4])
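# e.g., a BgnDate of '20030801' yields the reference year y0 = 2003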
# get min and max glacier surface elevation
h0, h1 = vascaling.get_min_max_elevation(gdir)

# set up the glacier model with the values of 2003
hef_ref = vascaling.VAScalingModel(year_0=y0,
                                   area_m2_0=a0,
                                   min_hgt=h0,
                                   max_hgt=h1,
                                   mb_model=ben_mbmod)


def to_minimize(area_m2_start, model_ref):
    """ Initialize VAS glacier model as copy of the reference model (model_ref)
    and adjust the model to the given starting area (area_m2_start) and
Example no. 16
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      is_test=False,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Time
    start = time.time()

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    climate_periods = [1920, 1960, 2000]

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    rgi_dir_name = 'RGI{}'.format(rgi_version)
    border_dir_name = 'b_{:03d}'.format(border)
    base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(4)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # L1 - initialize working directories
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L2 - Tasks
    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    workflow.execute_entity_task(tasks.process_cru_data, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L3 - Tasks
    task_list = [
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=climate_periods,
                                     path=opath)

    # L3 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L4 - Tasks
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Copy mini data to new dir
    base_dir = os.path.join(base_dir, 'L4')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=base_dir)

    # L4 OK - compress all in output directory
    workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
    utils.base_dir_to_tar(base_dir)

    # Log
    m, s = divmod(time.time() - start, 60)
    h, m = divmod(m, 60)
    log.workflow('OGGM prepro_levels is done! Time needed: '
                 '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
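
# A minimal invocation sketch (hypothetical paths; a small test run over
# four randomly sampled glaciers of RGI region 11):
# run_prepro_levels(rgi_version='61', rgi_reg='11', border=160,
#                   output_folder='/path/to/output',
#                   working_dir='/path/to/workdir', is_test=True)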
Example no. 17
def sensitivity_run_vas_old(rgi_ids, use_random_mb=False, use_mean=True,
                            path=True, temp_bias=0, tstar=None,
                            sensitivity_params=[[(4.5507, 0.191), 1]], suffixes=[''],
                            **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model (cf. `equilibrium_run_vas`) but for only one
    given temperature bias. However, it is possible to supply a list of
    sensitivity parameters (the scaling constants, and time scale factor) to
    alter the model behavior.
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 3000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. Given a path, it is
    also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=False
        Choose between random massbalance model and constant massbalance model.
    use_mean: bool, optional, default=True
        Choose between the mean or summation over all glaciers
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_bias: float, optional, default=0
        Temperature bias (degC) for the mass balance model.
    sensitivity_params: multi-dimensional array-like, optional,
        default=[[(4.5507, 0.191), 1]]
        List of parameter sets to vary, each given in the following order:
        a float tuple with the length and area scaling constants, followed
        by a float time scale factor
    suffixes: array-like, optional, default=['']
        Descriptive suffixes corresponding to the given sensitivity params
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(sensitivity_params) != len(suffixes):
        raise RuntimeError("Each given parameter set must have its "
                           "corresponding suffix")

    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'sensitivity_vas_wdir'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 80
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = False

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_regions(rgidf)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_histalp_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(vascaling.local_t_star, gdirs,
                                 tstar=tstar, bias=0)

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_random_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    else:
        # run ConstantMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_constant_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all suffixes and corresponding parameter sets
    for suffix, params in zip(suffixes, sensitivity_params):
        # compile the output for each run
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       filesuffix=suffix, path=False)
        # add sensitivity parameters as coordinates
        ds_.coords['length_scaling_const'] = params[0][0]
        ds_.coords['area_scaling_const'] = params[0][1]
        ds_.coords['time_scale_factor'] = params[1]
        # add to container
        ds.append(ds_)

    # concat the single output datasets into one, using 'sensitivity_params'
    # as name for the new concatenated dimension
    ds = xr.combine_nested(ds, concat_dim='sensitivity_params')
    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # normalize glacier geometries (length/area/volume) with start value
    if use_mean:
        # compute average over all glaciers
        ds_normal = normalize_ds_with_start(ds).mean(dim='rgi_id')
        ds = ds.mean(dim='rgi_id')
    else:
        # compute sum over all glaciers
        ds_normal = normalize_ds_with_start(ds.sum(dim='rgi_id'))
        ds = ds.sum(dim='rgi_id')

    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = False
    ds_normal.coords['normalized'] = True

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store datasets
    if path:
        if path is True:
            # set default path and filename
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_vas.nc'.format(mb))
        ds.to_netcdf(path)

    # return ds, ds_normal
    return ds
Example no. 18
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      test_ids=None,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      disable_mp=False,
                      params_file=None,
                      elev_bands=False,
                      match_geodetic_mb=False,
                      centerlines_only=False,
                      add_consensus=False,
                      max_level=5,
                      logging_level='WORKFLOW',
                      disable_dl_verify=False):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    working_dir : str
        path to the OGGM working directory
    params_file : str
        path to the OGGM parameter file (to override defaults)
    is_test : bool
        to test on a couple of glaciers only!
    test_ids : list
        if is_test: list of ids to process
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    disable_mp : bool
        disable multiprocessing
    elev_bands : bool
        compute all flowlines based on the Huss&Hock 2015 method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    centerlines_only : bool
        compute all flowlines based on the OGGM centerline(s) method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    match_geodetic_mb : bool
        match the regional mass-balance estimates at the regional level
        (currently Hugonnet et al., 2020).
    add_consensus : bool
        adds (reprojects) the consensus estimates thickness to the glacier
        directories. With elev_bands=True, the data will also be binned.
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    disable_dl_verify : bool
        disable the hash verification of OGGM downloads
    """

    # TODO: temporarily silence Fiona and other deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Input check
    if max_level not in [1, 2, 3, 4, 5]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4, 5]')

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Config Override Params
    params = {}

    # Local paths
    utils.mkdir(working_dir)
    params['working_dir'] = working_dir

    # Initialize OGGM and set up the run parameters
    cfg.initialize(file=params_file,
                   params=params,
                   logging_level=logging_level,
                   future=True)

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # Check for the integrity of the files OGGM downloads at run time
    # For large files (e.g. using a single GeoTiff DEM like ALASKA)
    # calculating the hash takes a long time, so deactivating this can
    # make sense
    cfg.PARAMS['dl_verify'] = not disable_dl_verify

    # Log the parameters
    msg = '# OGGM Run parameters:'
    for k, v in cfg.PARAMS.items():
        if type(v) in [pd.DataFrame, dict]:
            continue
        msg += '\n    {}: {}'.format(k, v)
    log.workflow(msg)

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    output_base_dir = os.path.join(output_folder, 'RGI{}'.format(rgi_version),
                                   'b_{:03d}'.format(border))

    # Add a package version file
    utils.mkdir(output_base_dir)
    opath = os.path.join(output_base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)

        # Some RGI input quality checks - this is based on visual checks
        # of large glaciers in the RGI
        ids_to_ice_cap = [
            'RGI60-05.10315',  # huge Greenland ice cap
            'RGI60-03.01466',  # strange thing next to Devon
            'RGI60-09.00918',  # Academy of sciences Ice cap
            'RGI60-09.00969',
            'RGI60-09.00958',
            'RGI60-09.00957',
        ]
        rgidf.loc[rgidf.RGIId.isin(ids_to_ice_cap), 'Form'] = '1'

        # In Antarctica (region 19) almost all large ice bodies are
        # actually ice caps
        if rgi_reg == '19':
            rgidf.loc[rgidf.Area > 100, 'Form'] = '1'

        # For greenland we omit connectivity level 2
        if rgi_reg == '05':
            rgidf = rgidf.loc[rgidf['Connect'] != 2]
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        if test_ids is not None:
            rgidf = rgidf.loc[rgidf.RGIId.isin(test_ids)]
        else:
            rgidf = rgidf.sample(4)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # L0 - go
    gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L0', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L0 OK - compress all in output directory
    log.workflow('L0 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L0')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 0:
        _time_log()
        return

    # L1 - Add dem files
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Which DEM source?
    if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
        log.workflow('Running prepro on ALL sources')
        for i, s in enumerate(utils.DEM_SOURCES):
            rs = i == 0
            log.workflow('Running prepro on sources: {}'.format(s))
            gdirs = workflow.init_glacier_directories(rgidf,
                                                      reset=rs,
                                                      force=rs)
            workflow.execute_entity_task(tasks.define_glacier_region,
                                         gdirs,
                                         source=s)
            workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)

        # make a GeoTiff mask of the glacier, choose any source
        workflow.execute_entity_task(gis.rasterio_glacier_mask,
                                     gdirs,
                                     source='ALL')

        # Compress all in output directory
        level_base_dir = os.path.join(output_base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)

        _time_log()
        return

    # Force a given source
    source = dem_source.upper() if dem_source else None

    # L1 - go
    workflow.execute_entity_task(tasks.define_glacier_region,
                                 gdirs,
                                 source=source)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    log.workflow('L1 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 1:
        _time_log()
        return

    # L2 - Tasks
    # Check which glaciers will be processed as what
    if elev_bands:
        gdirs_band = gdirs
        gdirs_cent = []
    elif centerlines_only:
        gdirs_band = []
        gdirs_cent = gdirs
    else:
        # Default is to mix
        # Curated list of large (> 50 km2) glaciers that don't run
        # (CFL error) mostly because the centerlines are crap
        # This is a really temporary fix until we have some better
        # solution here
        ids_to_bands = [
            'RGI60-01.13696', 'RGI60-03.01710', 'RGI60-01.13635',
            'RGI60-01.14443', 'RGI60-03.01678', 'RGI60-03.03274',
            'RGI60-01.17566', 'RGI60-03.02849', 'RGI60-01.16201',
            'RGI60-01.14683', 'RGI60-07.01506', 'RGI60-07.01559',
            'RGI60-03.02687', 'RGI60-17.00172', 'RGI60-01.23649',
            'RGI60-09.00077', 'RGI60-03.00994', 'RGI60-01.26738',
            'RGI60-03.00283', 'RGI60-01.16121', 'RGI60-01.27108',
            'RGI60-09.00132', 'RGI60-13.43483', 'RGI60-09.00069',
            'RGI60-14.04404', 'RGI60-17.01218', 'RGI60-17.15877',
            'RGI60-13.30888', 'RGI60-17.13796', 'RGI60-17.15825',
            'RGI60-01.09783'
        ]
        if rgi_reg == '19':
            gdirs_band = gdirs
            gdirs_cent = []
        else:
            gdirs_band = []
            gdirs_cent = []
            for gdir in gdirs:
                if gdir.is_icecap or gdir.rgi_id in ids_to_bands:
                    gdirs_band.append(gdir)
                else:
                    gdirs_cent.append(gdir)

    log.workflow('Start flowline processing with: '
                 'N centerline type: {}, '
                 'N elev bands type: {}.'
                 ''.format(len(gdirs_cent), len(gdirs_band)))

    # HH2015 method
    workflow.execute_entity_task(tasks.simple_glacier_masks, gdirs_band)

    # Centerlines OGGM
    workflow.execute_entity_task(tasks.glacier_masks, gdirs_cent)

    if add_consensus:
        from oggm.shop.bedtopo import add_consensus_thickness
        workflow.execute_entity_task(add_consensus_thickness, gdirs_band)
        workflow.execute_entity_task(add_consensus_thickness, gdirs_cent)

        # Elev bands with var data
        vn = 'consensus_ice_thickness'
        workflow.execute_entity_task(tasks.elevation_band_flowline,
                                     gdirs_band,
                                     bin_variables=vn)
        workflow.execute_entity_task(tasks.fixed_dx_elevation_band_flowline,
                                     gdirs_band,
                                     bin_variables=vn)
    else:
        # HH2015 method without it
        task_list = [
            tasks.elevation_band_flowline,
            tasks.fixed_dx_elevation_band_flowline,
        ]
        for task in task_list:
            workflow.execute_entity_task(task, gdirs_band)

    # HH2015 method
    task_list = [
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs_band)

    # Centerlines OGGM
    task_list = [
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs_cent)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    log.workflow('L2 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 2:
        _time_log()
        return

    # L3 - Tasks
    task_list = [
        tasks.process_climate_data,
        tasks.historical_climate_qc,
        tasks.local_t_star,
        tasks.mu_star_calibration,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Inversion: we match the consensus
    workflow.calibrate_inversion_from_consensus(gdirs,
                                                apply_fs_on_mismatch=True,
                                                error_on_mismatch=False)

    # Do we want to match geodetic estimates?
    # This affects only the bias so we can actually do this *after*
    # the inversion, but we really want to take calving into account here
    if match_geodetic_mb:
        workflow.match_regional_geodetic_mb(gdirs, rgi_reg)

    # We get ready for modelling
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir,
                         'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)

    # L3 OK - compress all in output directory
    log.workflow('L3 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 3:
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir_L3 = sum_dir
    sum_dir = os.path.join(output_base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)

    # Copy L3 files for consistency
    for bn in [
            'glacier_statistics', 'climate_statistics',
            'fixed_geometry_mass_balance'
    ]:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Copy mini data to new dir
    mini_base_dir = os.path.join(working_dir, 'mini_perglacier')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=mini_base_dir)

    # L4 OK - compress all in output directory
    log.workflow('L4 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L4')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 mini_gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 4:
        _time_log()
        return

    # L5 - spinup run in mini gdirs
    gdirs = mini_gdirs

    # Get end date. The first gdir might have blown up, try some others
    i = 0
    while True:
        if i >= len(gdirs):
            raise RuntimeError('Found no valid glaciers!')
        try:
            y0 = gdirs[i].get_climate_info()['baseline_hydro_yr_0']
            # One adds 1 because the run ends at the end of the year
            ye = gdirs[i].get_climate_info()['baseline_hydro_yr_1'] + 1
            break
        except BaseException:
            i += 1

    # OK - run
    workflow.execute_entity_task(tasks.run_from_climate_data,
                                 gdirs,
                                 min_ys=y0,
                                 ye=ye,
                                 output_filesuffix='_historical')

    # Now compile the output
    sum_dir = os.path.join(output_base_dir, 'L5', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir,
                         'historical_run_output_{}.nc'.format(rgi_reg))
    utils.compile_run_output(gdirs, path=opath, input_filesuffix='_historical')

    # Glacier statistics we recompute here for error analysis
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Other stats for consistency
    for bn in ['climate_statistics', 'fixed_geometry_mass_balance']:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Add the extended files
    pf = os.path.join(sum_dir, 'historical_run_output_{}.nc'.format(rgi_reg))
    mf = os.path.join(sum_dir,
                      'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    # This is crucial - extending calving only with L3 data!!!
    sf = os.path.join(sum_dir_L3, 'glacier_statistics_{}.csv'.format(rgi_reg))
    opath = os.path.join(
        sum_dir, 'historical_run_output_extended_{}.nc'.format(rgi_reg))
    utils.extend_past_climate_run(past_run_file=pf,
                                  fixed_geometry_mb_file=mf,
                                  glacier_statistics_file=sf,
                                  path=opath)

    # L5 OK - compress all in output directory
    log.workflow('L5 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L5')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)

    _time_log()
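
# A minimal invocation sketch for this variant (hypothetical paths;
# elevation-band flowlines with a regional geodetic mass-balance match):
# run_prepro_levels(rgi_reg='11', border=160, elev_bands=True,
#                   match_geodetic_mb=True, max_level=5,
#                   output_folder='/path/to/output',
#                   working_dir='/path/to/workdir')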
Example no. 19
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = 100

# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = False

# Pre-download other files which will be needed later
utils.get_cru_cl_file()
utils.get_cru_file(var='tmp')
utils.get_cru_file(var='pre')

# Get the RGI glaciers for the run. We use a set of three glaciers here but
# this could be an entire RGI region, or any glacier list you'd like to model
rgi_list = ['RGI60-01.10299', 'RGI60-11.00897', 'RGI60-18.02342']
rgidf = utils.get_rgi_glacier_entities(rgi_list)

# We use intersects
db = utils.get_rgi_intersects_region_file(version='61', rgi_ids=rgi_list)
cfg.set_intersects_db(db)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting OGGM run')
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf)

# Preprocessing and climate tasks
Example no. 20
def mb_calibration(rgi_version, baseline):
    """ Run the mass balance calibration for the VAS model. RGI version and
    baseline climate must be given.

    :param rgi_version: str, RGI version (e.g., '62')
    :param baseline: str, baseline climate 'HISTALP' or 'CRU'
    """

    # initialize OGGM and set up the run parameters
    vascaling.initialize(logging_level='WORKFLOW')

    # LOCAL paths (where to write the OGGM run output)
    # dirname = 'VAS_ref_mb_{}_RGIV{}'.format(baseline, rgi_version)
    # wdir = utils.gettempdir(dirname, home=True, reset=True)
    # utils.mkdir(wdir, reset=True)
    # cfg.PATHS['working_dir'] = wdir

    # CLUSTER paths
    wdir = os.environ.get('WORKDIR', '')
    cfg.PATHS['working_dir'] = wdir

    # we are running the calibration ourselves
    cfg.PARAMS['run_mb_calibration'] = True
    # which baseline climate data to use
    cfg.PARAMS['baseline_climate'] = baseline
    # no need for intersects since this has an effect on the inversion only
    cfg.PARAMS['use_intersects'] = False
    # use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True
    # set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True
    # a border of 10 would suffice for OGGM-VAS, but OGGM needs 80 to run
    cfg.PARAMS['border'] = 80

    if baseline == 'HISTALP':
        # other params: see https://oggm.org/2018/08/10/histalp-parameters/
        # cfg.PARAMS['prcp_scaling_factor'] = 1.75
        # cfg.PARAMS['temp_melt'] = -1.75
        cfg.PARAMS['prcp_scaling_factor'] = 2.5
        cfg.PARAMS['temp_melt'] = -0.5
    elif baseline == 'CRU':
        # using the parameters from Marzeion et al. (2012)
        # cfg.PARAMS['prcp_scaling_factor'] = 2.5
        # cfg.PARAMS['temp_melt'] = 1
        # cfg.PARAMS['temp_all_solid'] = 3
        # using the parameters from Malles and Marzeion 2020
        cfg.PARAMS['prcp_scaling_factor'] = 3
        cfg.PARAMS['temp_melt'] = 0
        cfg.PARAMS['temp_all_solid'] = 4
        # cfg.PARAMS['prcp_gradient'] = 4

    # the next step is to get all the reference glaciers,
    # i.e. glaciers with mass balance measurements.

    # get the reference glacier ids (they are different for each RGI version)
    df, _ = utils.get_wgms_files()
    rids = df['RGI{}0_ID'.format(rgi_version[0])]

    # we can't do Antarctica
    rids = [rid for rid in rids if not ('-19.' in rid)]

    # For HISTALP only RGI reg 11.01 (ALPS)
    if baseline == 'HISTALP':
        rids = [rid for rid in rids if '-11' in rid]

    # make a new dataframe with those (this takes a while)
    print('Reading the RGI shapefiles...')
    rgidf = utils.get_rgi_glacier_entities(rids, version=rgi_version)
    print('For RGIV{} we have {} candidate reference '
          'glaciers.'.format(rgi_version, len(rgidf)))

    # initialize the glacier regions
    base_url = 'https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/' \
               'L3-L5_files/RGIV62_fleb_qc3_CRU_pcp2.5'
    # Go - get the pre-processed glacier directories
    gdirs = workflow.init_glacier_directories(rids,
                                              from_prepro_level=3,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version=rgi_version)

    # Some glaciers in RGI Region 11 are not inside the HISTALP domain
    if baseline == 'HISTALP':
        gdirs = [gdir for gdir in gdirs if gdir.rgi_subregion == '11-01']

    # get reference glaciers with mass balance measurements
    gdirs = utils.get_ref_mb_glaciers(gdirs)

    # keep only these glaciers
    rgidf = rgidf.loc[rgidf.RGIId.isin([g.rgi_id for g in gdirs])]

    # save to file
    rgidf.to_file(os.path.join(wdir, 'mb_ref_glaciers.shp'))
    print('For RGIV{} and {} we have {} reference glaciers'.format(
        rgi_version, baseline, len(rgidf)))

    # sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    # newly initialize glacier directories
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)

    # run climate tasks
    vascaling.compute_ref_t_stars(gdirs)
    execute_entity_task(vascaling.local_t_star, gdirs)

    # we store the associated params
    mb_calib = gdirs[0].read_pickle('climate_info')['mb_calib_params']
    with open(os.path.join(wdir, 'mb_calib_params.json'), 'w') as fp:
        json.dump(mb_calib, fp)
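
# A minimal call sketch (assuming the WORKDIR environment variable is set):
# mb_calibration(rgi_version='62', baseline='CRU')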
Example no. 21
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      test_ids=None,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      disable_mp=False,
                      params_file=None,
                      elev_bands=False,
                      match_regional_geodetic_mb=False,
                      match_geodetic_mb_per_glacier=False,
                      evolution_model='fl_sia',
                      centerlines_only=False,
                      override_params=None,
                      add_consensus=False,
                      start_level=None,
                      start_base_url=None,
                      max_level=5,
                      ref_tstars_base_url='',
                      logging_level='WORKFLOW',
                      disable_dl_verify=False,
                      dynamic_spinup=False,
                      continue_on_error=True):
    """Generate the preprocessed OGGM glacier directories for this OGGM version

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    working_dir : str
        path to the OGGM working directory
    ref_tstars_base_url : str
        url where to find the pre-calibrated reference tstar list.
        Required as of v1.4.
    params_file : str
        path to the OGGM parameter file (to override defaults)
    is_test : bool
        to test on a couple of glaciers only!
    test_ids : list
        if is_test: list of ids to process
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    continue_on_error : bool
        continue the workflow even when a task ends with an error
        (set for operational runs)
    disable_mp : bool
        disable multiprocessing
    elev_bands : bool
        compute all flowlines with the Huss & Hock (2015) elevation-band
        method instead of the OGGM default (currently: centerlines).
    centerlines_only : bool
        compute all flowlines with the OGGM centerline(s) method (this is
        currently also the default behavior).
    match_regional_geodetic_mb : str
        match the regional mass-balance estimates at the regional level
        ('hugonnet': Hugonnet et al., 2020 or 'zemp': Zemp et al., 2019).
    match_geodetic_mb_per_glacier : str
        match the mass-balance estimates at the glacier level
        (currently only 'hugonnet': Hugonnet et al., 2020).
    evolution_model : str
        which geometry evolution model to use: `fl_sia` (default),
        or `massredis` (mass redistribution curve).
    add_consensus : bool
        adds (reprojects) the consensus estimates thickness to the glacier
        directories. With elev_bands=True, the data will also be binned.
    start_level : int
        the pre-processed level to start from (default is to start from
        scratch). If set, you'll need to indicate start_base_url as well.
    start_base_url : str
        the pre-processed base-url to fetch the data from.
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    override_params : dict
        a dict of parameters to override.
    disable_dl_verify : bool
        disable the hash verification of OGGM downloads
    dynamic_spinup : str
        include a dynamic spinup matching 'area' OR 'volume' at the RGI date
    """

    # Input check
    if max_level not in [1, 2, 3, 4, 5]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4, 5]')

    if start_level is not None:
        if start_level not in [0, 1, 2]:
            raise InvalidParamsError('start_level should be one of [0, 1, 2]')
        if start_level > 0 and start_base_url is None:
            raise InvalidParamsError('With start_level, please also indicate '
                                     'start_base_url')
    else:
        start_level = 0

    if match_regional_geodetic_mb and match_geodetic_mb_per_glacier:
        raise InvalidParamsError(
            'match_regional_geodetic_mb incompatible with '
            'match_geodetic_mb_per_glacier!')

    if match_geodetic_mb_per_glacier and match_geodetic_mb_per_glacier != 'hugonnet':
        raise InvalidParamsError('Currently only `hugonnet` is available for '
                                 'match_geodetic_mb_per_glacier.')

    if evolution_model not in ['fl_sia', 'massredis']:
        raise InvalidParamsError('evolution_model should be one of '
                                 "['fl_sia', 'massredis'].")

    if dynamic_spinup and dynamic_spinup not in ['area', 'volume']:
        raise InvalidParamsError(f"Dynamic spinup option '{dynamic_spinup}' "
                                 "not supported")

    if dynamic_spinup and evolution_model == 'massredis':
        raise InvalidParamsError("Dynamic spinup is not working/tested"
                                 "with massredis!")

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Local paths
    if override_params is None:
        override_params = {}

    utils.mkdir(working_dir)
    override_params['working_dir'] = working_dir

    # Initialize OGGM and set up the run parameters
    cfg.initialize(file=params_file,
                   params=override_params,
                   logging_level=logging_level,
                   future=True)

    if match_geodetic_mb_per_glacier and (cfg.PARAMS['hydro_month_nh'] != 1 or
                                          cfg.PARAMS['hydro_month_sh'] != 1):
        raise InvalidParamsError('We recommend setting hydro_month_nh and sh '
                                 'to 1 for the geodetic MB calibration per '
                                 'glacier.')

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = continue_on_error

    # Check for the integrity of the files OGGM downloads at run time
    # For large files (e.g. a single-tif DEM like ALASKA) calculating the
    # hash takes a long time, so deactivating this can make sense
    cfg.PARAMS['dl_verify'] = not disable_dl_verify

    # Other things that make sense
    cfg.PARAMS['store_model_geometry'] = True

    # Log the parameters
    msg = '# OGGM Run parameters:'
    for k, v in cfg.PARAMS.items():
        if type(v) in [pd.DataFrame, dict]:
            continue
        msg += '\n    {}: {}'.format(k, v)
    log.workflow(msg)

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    output_base_dir = os.path.join(output_folder, 'RGI{}'.format(rgi_version),
                                   'b_{:03d}'.format(border))

    # Add a package version file
    utils.mkdir(output_base_dir)
    opath = os.path.join(output_base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)

        # Some RGI input quality checks - this is based on visual checks
        # of large glaciers in the RGI
        ids_to_ice_cap = [
            'RGI60-05.10315',  # huge Greenland ice cap
            'RGI60-03.01466',  # strange thing next to Devon
            'RGI60-09.00918',  # Academy of Sciences Ice Cap
            'RGI60-09.00969',
            'RGI60-09.00958',
            'RGI60-09.00957',
        ]
        rgidf.loc[rgidf.RGIId.isin(ids_to_ice_cap), 'Form'] = '1'

        # In AA almost all large ice bodies are actually ice caps
        if rgi_reg == '19':
            rgidf.loc[rgidf.Area > 100, 'Form'] = '1'

        # For greenland we omit connectivity level 2
        if rgi_reg == '05':
            rgidf = rgidf.loc[rgidf['Connect'] != 2]
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        if test_ids is not None:
            rgidf = rgidf.loc[rgidf.RGIId.isin(test_ids)]
        else:
            rgidf = rgidf.sample(4)

        if max_level > 2:
            # Also use ref tstars
            utils.apply_test_ref_tstars()

    if max_level > 2 and ref_tstars_base_url:
        workflow.download_ref_tstars(base_url=ref_tstars_base_url)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # L0 - go
    if start_level == 0:
        gdirs = workflow.init_glacier_directories(rgidf,
                                                  reset=True,
                                                  force=True)

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L0', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # L0 OK - compress all in output directory
        log.workflow('L0 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L0')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 0:
            _time_log()
            return
    else:
        gdirs = workflow.init_glacier_directories(
            rgidf,
            reset=True,
            force=True,
            from_prepro_level=start_level,
            prepro_border=border,
            prepro_rgi_version=rgi_version,
            prepro_base_url=start_base_url)

    # L1 - Add dem files
    if start_level == 0:
        if test_topofile:
            cfg.PATHS['dem_file'] = test_topofile

        # Which DEM source?
        if dem_source.upper() == 'ALL':
            # This is the complex one, just do the job and leave
            log.workflow('Running prepro on ALL sources')
            for i, s in enumerate(utils.DEM_SOURCES):
                rs = i == 0
                log.workflow('Running prepro on sources: {}'.format(s))
                gdirs = workflow.init_glacier_directories(rgidf,
                                                          reset=rs,
                                                          force=rs)
                workflow.execute_entity_task(tasks.define_glacier_region,
                                             gdirs,
                                             source=s)
                workflow.execute_entity_task(_rename_dem_folder,
                                             gdirs,
                                             source=s)

            # make a GeoTiff mask of the glacier, choose any source
            workflow.execute_entity_task(gis.rasterio_glacier_mask,
                                         gdirs,
                                         source='ALL')

            # Compress all in output directory
            level_base_dir = os.path.join(output_base_dir, 'L1')
            workflow.execute_entity_task(utils.gdir_to_tar,
                                         gdirs,
                                         delete=False,
                                         base_dir=level_base_dir)
            utils.base_dir_to_tar(level_base_dir)

            _time_log()
            return

        # Force a given source
        source = dem_source.upper() if dem_source else None

        # L1 - go
        workflow.execute_entity_task(tasks.define_glacier_region,
                                     gdirs,
                                     source=source)

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L1', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # L1 OK - compress all in output directory
        log.workflow('L1 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 1:
            _time_log()
            return

    # L2 - Tasks
    if start_level <= 1:
        # Check which glaciers will be processed as what
        if elev_bands:
            gdirs_band = gdirs
            gdirs_cent = []
        elif centerlines_only:
            gdirs_band = []
            gdirs_cent = gdirs
        else:
            # The default is centerlines only, but it used to be a mix
            # (e.g. elevation bands for ice caps). We keep this logic here
            # in case we want to mix again.
            gdirs_band = []
            gdirs_cent = gdirs

        log.workflow('Start flowline processing with: '
                     'N centerline type: {}, '
                     'N elev bands type: {}.'
                     ''.format(len(gdirs_cent), len(gdirs_band)))

        # HH2015 method
        workflow.execute_entity_task(tasks.simple_glacier_masks, gdirs_band)

        # Centerlines OGGM
        workflow.execute_entity_task(tasks.glacier_masks, gdirs_cent)

        if add_consensus:
            from oggm.shop.bedtopo import add_consensus_thickness
            workflow.execute_entity_task(add_consensus_thickness, gdirs_band)
            workflow.execute_entity_task(add_consensus_thickness, gdirs_cent)

            # Elev bands with var data
            vn = 'consensus_ice_thickness'
            workflow.execute_entity_task(tasks.elevation_band_flowline,
                                         gdirs_band,
                                         bin_variables=vn)
            workflow.execute_entity_task(
                tasks.fixed_dx_elevation_band_flowline,
                gdirs_band,
                bin_variables=vn)
        else:
            # HH2015 method without it
            task_list = [
                tasks.elevation_band_flowline,
                tasks.fixed_dx_elevation_band_flowline,
            ]
            for task in task_list:
                workflow.execute_entity_task(task, gdirs_band)

        # Centerlines OGGM
        task_list = [
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            workflow.execute_entity_task(task, gdirs_cent)

        # Same for all glaciers
        if border >= 20:
            task_list = [
                tasks.compute_downstream_line,
                tasks.compute_downstream_bedshape,
            ]
            for task in task_list:
                workflow.execute_entity_task(task, gdirs)
        else:
            log.workflow("L2: for map border values < 20, won't compute "
                         "downstream lines.")

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L2', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # And for level 2: shapes
        if len(gdirs_cent) > 0:
            opath = os.path.join(sum_dir, 'centerlines_{}.shp'.format(rgi_reg))
            utils.write_centerlines_to_shape(gdirs_cent,
                                             to_tar=True,
                                             path=opath)

        # L2 OK - compress all in output directory
        log.workflow('L2 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L2')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 2:
            _time_log()
            return

    # L3 - Tasks
    sum_dir = os.path.join(output_base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)

    # Climate
    workflow.execute_entity_task(tasks.process_climate_data, gdirs)

    if cfg.PARAMS['climate_qc_months'] > 0:
        workflow.execute_entity_task(tasks.historical_climate_qc, gdirs)

    if match_geodetic_mb_per_glacier:
        utils.get_geodetic_mb_dataframe()  # Small optim to avoid concurrency
        workflow.execute_entity_task(
            tasks.mu_star_calibration_from_geodetic_mb, gdirs)
        workflow.execute_entity_task(tasks.apparent_mb_from_any_mb, gdirs)
    else:
        workflow.execute_entity_task(tasks.local_t_star, gdirs)
        workflow.execute_entity_task(tasks.mu_star_calibration, gdirs)

    # Inversion: we match the consensus
    do_filter = border >= 20  # avoid shadowing the built-in `filter`
    workflow.calibrate_inversion_from_consensus(
        gdirs,
        apply_fs_on_mismatch=True,
        error_on_mismatch=False,
        filter_inversion_output=do_filter)

    # Do we want to match geodetic estimates?
    # This affects only the bias so we can actually do this *after*
    # the inversion, but we really want to take calving into account here
    if match_regional_geodetic_mb:
        opath = os.path.join(
            sum_dir, 'fixed_geometry_mass_balance_'
            'before_match_{}.csv'.format(rgi_reg))
        utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)
        workflow.match_regional_geodetic_mb(gdirs,
                                            rgi_reg=rgi_reg,
                                            dataset=match_regional_geodetic_mb)

    # We get ready for modelling
    if border >= 20:
        workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)
    else:
        log.workflow(
            "L3: for map border values < 20, won't initialize glaciers "
            "for the run.")
    # Glacier stats
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir,
                         'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)

    # L3 OK - compress all in output directory
    log.workflow('L3 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 3:
        _time_log()
        return
    if border < 20:
        log.workflow("L3: for map border values < 20, won't compute L4 and L5.")
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir_L3 = sum_dir
    sum_dir = os.path.join(output_base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)

    # Copy L3 files for consistency
    for bn in [
            'glacier_statistics', 'climate_statistics',
            'fixed_geometry_mass_balance'
    ]:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Copy mini data to new dir
    mini_base_dir = os.path.join(working_dir, 'mini_perglacier',
                                 'RGI{}'.format(rgi_version),
                                 'b_{:03d}'.format(border))
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=mini_base_dir)

    # L4 OK - compress all in output directory
    log.workflow('L4 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L4')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 mini_gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 4:
        _time_log()
        return

    # L5 - spinup run in mini gdirs
    gdirs = mini_gdirs

    # Get end date. The first gdir might have blown up, try some others
    i = 0
    while True:
        if i >= len(gdirs):
            raise RuntimeError('Found no valid glaciers!')
        try:
            y0 = gdirs[i].get_climate_info()['baseline_hydro_yr_0']
            # add 1 because the run ends at the end of the year
            ye = gdirs[i].get_climate_info()['baseline_hydro_yr_1'] + 1
            break
        except BaseException:
            i += 1

    # Which model?
    if evolution_model == 'massredis':
        from oggm.core.flowline import MassRedistributionCurveModel
        evolution_model = MassRedistributionCurveModel
    else:
        from oggm.core.flowline import FluxBasedModel
        evolution_model = FluxBasedModel

    # OK - run
    if dynamic_spinup:
        workflow.execute_entity_task(
            tasks.run_dynamic_spinup,
            gdirs,
            evolution_model=evolution_model,
            minimise_for=dynamic_spinup,
            precision_percent=1,
            output_filesuffix='_dynamic_spinup',
        )
        workflow.execute_entity_task(tasks.run_from_climate_data,
                                     gdirs,
                                     min_ys=y0,
                                     ye=ye,
                                     evolution_model=evolution_model,
                                     init_model_filesuffix='_dynamic_spinup',
                                     output_filesuffix='_hist_spin')
        workflow.execute_entity_task(tasks.merge_consecutive_run_outputs,
                                     gdirs,
                                     input_filesuffix_1='_dynamic_spinup',
                                     input_filesuffix_2='_hist_spin',
                                     output_filesuffix='_historical_spinup',
                                     delete_input=True)

    workflow.execute_entity_task(tasks.run_from_climate_data,
                                 gdirs,
                                 min_ys=y0,
                                 ye=ye,
                                 evolution_model=evolution_model,
                                 output_filesuffix='_historical')

    # Now compile the output
    sum_dir = os.path.join(output_base_dir, 'L5', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, f'historical_run_output_{rgi_reg}.nc')
    utils.compile_run_output(gdirs, path=opath, input_filesuffix='_historical')

    if dynamic_spinup:
        opath = os.path.join(sum_dir,
                             f'historical_spinup_run_output_{rgi_reg}.nc')
        utils.compile_run_output(gdirs,
                                 path=opath,
                                 input_filesuffix='_historical_spinup')

    # Glacier statistics we recompute here for error analysis
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Other stats for consistency
    for bn in ['climate_statistics', 'fixed_geometry_mass_balance']:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Add the extended files
    pf = os.path.join(sum_dir, 'historical_run_output_{}.nc'.format(rgi_reg))
    mf = os.path.join(sum_dir,
                      'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    # This is crucial - extending calving only possible with L3 data!!!
    sf = os.path.join(sum_dir_L3, 'glacier_statistics_{}.csv'.format(rgi_reg))
    opath = os.path.join(
        sum_dir, 'historical_run_output_extended_{}.nc'.format(rgi_reg))
    utils.extend_past_climate_run(past_run_file=pf,
                                  fixed_geometry_mb_file=mf,
                                  glacier_statistics_file=sf,
                                  path=opath)

    # L5 OK - compress all in output directory
    log.workflow('L5 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L5')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)

    _time_log()
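
# --- Added usage sketch (not part of the original example) ---
# How this entry point might be invoked for a small test run; the paths
# below are placeholders, not values from the source.
if __name__ == '__main__':
    run_prepro_levels(rgi_version='62', rgi_reg='11', border=80,
                      output_folder='/path/to/output',
                      working_dir='/path/to/workdir',
                      elev_bands=True, max_level=2, is_test=True)
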
def equilibrium_run_fl(rgi_ids,
                       use_random_mb=True,
                       path=True,
                       temp_biases=(0, +0.5, -0.5),
                       use_bias_for_run=False,
                       suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                       store_individual_glaciers=True,
                       store_mean_sum=True,
                       tstar=None,
                       **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    flowline model. For details see docstring of `sensitivity_run_vas`.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=True
        Choose between the random and the constant mass balance model.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    use_bias_for_run: bool, optional, default=False
        Flag deciding whether or not the mass balance residual is used.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    store_individual_glaciers: bool, optional, default=True
        Flag deciding whether to keep the individual glacier time series in
        the output dataset. If False, only the mean and sum over all
        glaciers are stored.
    store_mean_sum: bool, optional, default=True
        Flag deciding whether to additionally store the mean and sum over
        all glaciers (only relevant if store_individual_glaciers is True).
    tstar: float
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the flowline module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from the RGI IDs,
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    WORKING_DIR = os.environ["WORKDIR"]
    utils.mkdir(WORKING_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    ref_df = pd.read_csv(
        utils.get_demo_file('oggm_ref_tstars_rgi6_histalp.csv'))
    workflow.execute_entity_task(climate.local_t_star,
                                 gdirs,
                                 ref_df=ref_df,
                                 tstar=tstar,
                                 bias=0)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)
    # disregard glaciers exceeding their domain boundaries
    # to not disrupt the entire run
    kwargs.setdefault('check_for_boundaries', True)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_random_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )
    else:
        # run ConstantMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature
        # bias of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_constant_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )

    ds = list()
    for suffix, temp_bias in zip(suffixes, temp_biases):
        # compile the output for each run and store to file
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       input_filesuffix=suffix,
                                       path=False)
        ds.append(ds_)

    # concat into one dataset with temperature bias as coordinate
    if ds:
        ds = xr.concat(ds, pd.Index(temp_biases, name='temp_bias'))
        # add model type as coordinate
        ds.coords['model'] = 'fl'
        # add mb model type as coordinate
        ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

        # fill NaN values (which happen for vanished glaciers) with zero
        ds = ds.fillna(0)

        if store_individual_glaciers:
            if store_mean_sum:
                # compute mean and sum over all glaciers and append them
                ds_mean = ds.mean(dim='rgi_id')
                ds_mean.coords['rgi_id'] = 'mean'
                ds_sum = ds.sum(dim='rgi_id')
                ds_sum.coords['rgi_id'] = 'sum'
                ds = xr.concat([ds, ds_mean, ds_sum], dim='rgi_id')
        else:
            # store only the mean and sum over all glaciers
            ds_mean = ds.mean(dim='rgi_id')
            ds_mean.coords['rgi_id'] = 'mean'
            ds_sum = ds.sum(dim='rgi_id')
            ds_sum.coords['rgi_id'] = 'sum'
            ds = xr.concat([ds_mean, ds_sum], dim='rgi_id')

        # normalize glacier geometries (length/area/volume) with start value
        ds_normal = normalize_ds_with_start(ds)
        # add coordinate to distinguish between normalized and absolute values
        ds.coords['normalized'] = int(False)
        ds_normal.coords['normalized'] = int(True)

        # combine datasets
        ds = xr.concat([ds, ds_normal], 'normalized')

        # store datasets
        if path:
            if path is True:
                mb = 'random' if use_random_mb else 'constant'
                path = os.path.join(cfg.PATHS['working_dir'],
                                    'run_output_{}_fl.nc'.format(mb))

            ds.to_netcdf(path)

    return ds
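
# --- Added usage sketch (not part of the original example) ---
# A possible call for a single glacier; WORKDIR must be set as required by
# the routine above, and the t* year is illustrative only.
import os

os.environ.setdefault('WORKDIR', '/tmp/eq_wdir')
ds = equilibrium_run_fl(['RGI60-11.00897'], use_random_mb=True,
                        tstar=1927, nyears=1000, path=False)
# normalized glacier volume under the zero-bias climate
print(ds.sel(temp_bias=0, normalized=1).volume)
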
Example No. 23
def climate_run_fl(rgi_ids,
                   path=True,
                   temp_biases=[0, +0.5, -0.5],
                   use_bias_for_run=False,
                   suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                   tstar=None,
                   nyears=None,
                   **kwargs):
    """Computes 'only' the massbalance in analogy to the `equilibrium_run_...`
    routines, without running the evolution (flowline) model.

    Dataset containing yearly values of specific mass balance is returned.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    use_bias_for_run: bool, optional, default=False
        Flag deciding whether or not the mass balance residual is used.
    tstar: float
        'Equilibrium year' used for the mass balance calibration.
    nyears: int, optional, default=None
        Number of years for which to compute the random mass balance.
    kwargs:
        Additional key word arguments for the mass balance model.

    Returns
    -------
    Dataset containing yearly values of specific mass balance.
    """

    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from the RGI IDs,
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'test_cluster'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    workflow.execute_entity_task(climate.local_t_star,
                                 gdirs,
                                 tstar=tstar,
                                 bias=0)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 10,000 years if not specified otherwise
    if nyears is None:
        nyears = 10000
    years = np.arange(0, nyears + 1)

    # create dataset
    ds = list()

    # run RandomMassBalance model centered around t*, once without
    # temperature bias and once with positive and negative temperature bias
    # of 0.5 °C each.
    for gdir in gdirs:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)
        kwargs.setdefault('halfsize', 15)
        kwargs.setdefault('mb_model_class', flowline.RandomMassBalance)
        kwargs.setdefault('filename', 'climate_historical')
        kwargs.setdefault('input_filesuffix', '')
        kwargs.setdefault('unique_samples', False)

        ds_ = list()

        fls = gdir.read_pickle('model_flowlines')

        for suffix, temp_bias in zip(suffixes, temp_biases):
            # instance mass balance model
            mb_mod = flowline.MultipleFlowlineMassBalance(gdir, **kwargs)

            if temp_bias is not None:
                # add given temperature bias to mass balance model
                mb_mod.temp_bias = temp_bias

            # create empty container
            spec_mb = list()
            # iterate over all years
            for yr in years:
                spec_mb.append(mb_mod.get_specific_mb(fls=fls, year=yr))

            # add to dataset
            da = xr.DataArray(spec_mb, dims=('year',), coords={'year': years})
            ds_.append(xr.Dataset({'spec_mb': da}))

        ds_ = xr.concat(ds_, pd.Index(temp_biases, name='temp_bias'))
        ds_.coords['rgi_id'] = gdir.rgi_id
        ds.append(ds_)

    ds = xr.concat(ds, 'rgi_id')

    # store datasets
    if path:
        if path is True:
            path = os.path.join(cfg.PATHS['working_dir'], 'mb_output_fl.nc')
        ds.to_netcdf(path)

    return ds
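
# --- Added usage sketch (not part of the original example) ---
# Inspecting the returned mass balance dataset; the RGI ID and the short
# nyears value are illustrative, and the hard-coded working directory in
# the routine above must exist or be adapted.
ds = climate_run_fl(['RGI60-11.00897'], nyears=100, path=False)
# mean specific mass balance for the +0.5 °C temperature bias
print(ds.spec_mb.sel(temp_bias=0.5).mean(dim='year'))
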
Example No. 24
def test_process_era5_daily_data():

    cfg.initialize()
    test_dir = '/home/lilianschuster/Schreibtisch/PhD/oggm_files/tests'
    if not os.path.exists(test_dir):
        test_dir = utils.gettempdir(dirname='OGGM_era5_daily_test', reset=True)
    cfg.PATHS['working_dir'] = test_dir
    b_url_ = 'https://cluster.klima.uni-bremen.de/~fmaussion'
    base_url = b_url_ + '/gdirs/prepro_l2_202010/elevbands_fl_with_consensus'

    df = utils.get_rgi_glacier_entities(['RGI60-11.00897'])
    gdirs = workflow.init_glacier_directories(df,
                                              from_prepro_level=2,
                                              prepro_border=40,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version='62')
    gdir = gdirs[0]

    process_era5_daily_data(gdir, y0=1979, y1=2018)

    filename = 'climate_historical_daily'
    fpath = gdir.get_filepath(filename)

    # check the climate files of an individual glacier (Hintereisferner)
    xr_nc = xr.open_dataset(fpath)
    assert np.all(xr_nc.prcp > 0)
    # to be sure that there are no erroneous filling values inside
    assert np.all(xr_nc.prcp < 10000)
    # temperature values are in °C and in the right range
    assert np.all(xr_nc.temp > -100)
    assert np.all(xr_nc.temp < 100)

    # temperature gradient should be in the following range
    assert np.all(xr_nc.gradient > -0.015)
    assert np.all(xr_nc.gradient < -0.002)

    # all lapse rates / precipitation values in one month should be equal
    # because only the temperature is on a daily basis
    np.testing.assert_allclose(xr_nc.resample(time='1M').std().prcp,
                               0,
                               atol=1e-3)
    np.testing.assert_allclose(xr_nc.resample(time='1M').std().gradient,
                               0,
                               atol=1e-3)

    # summed up monthly precipitation from daily dataset
    xr_nc_prcp_m = xr_nc.prcp.resample(time='1M').sum()

    oggm.shop.ecmwf.process_ecmwf_data(gdir, dataset="ERA5", y0=1979, y1=2018)
    filename = 'climate_historical'

    fpath = gdir.get_filepath(filename)
    xr_nc_monthly = xr.open_dataset(fpath)

    # check if the summed-up monthly precipitation from the daily
    # dataset approximately equals the ERA5 monthly prcp
    np.testing.assert_allclose(xr_nc_prcp_m.values,
                               xr_nc_monthly.prcp.values,
                               rtol=1e-4)

    xr_nc_temp_m = xr_nc.temp.resample(time='1M').mean()
    # check if the mean temperature from the daily dataset approximately
    # equals the ERA5 monthly temp
    np.testing.assert_allclose(xr_nc_temp_m.values,
                               xr_nc_monthly.temp.values,
                               atol=0.05)

    with pytest.raises(InvalidParamsError):
        # dataset only goes from 1979--2018
        process_era5_daily_data(gdir, y0=1979, y1=2019)

    with pytest.raises(InvalidParamsError):
        # in the cfg.PARAMS initiated during testing,
        # cfg.PARAMS['hydro_month_nh'] is 10, which is in conflict with 8
        process_era5_daily_data(gdir, y0=1979, y1=2018, hydro_month_nh=8)
Example No. 25
    def setup_cache(self):

        setattr(full_workflow.setup_cache, "timeout", 360)

        utils.mkdir(self.testdir, reset=True)
        self.cfg_init()

        # Pre-download other files which will be needed later
        utils.get_cru_cl_file()
        utils.get_cru_file(var='tmp')
        utils.get_cru_file(var='pre')

        # Get the RGI glaciers for the run.
        rgi_list = ['RGI60-01.10299', 'RGI60-11.00897', 'RGI60-18.02342']
        rgidf = utils.get_rgi_glacier_entities(rgi_list)

        # We use intersects
        db = utils.get_rgi_intersects_region_file(version='61',
                                                  rgi_ids=rgi_list)
        cfg.set_intersects_db(db)

        # Sort for more efficient parallel computing
        rgidf = rgidf.sort_values('Area', ascending=False)

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.compute_downstream_line,
            tasks.compute_downstream_bedshape,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        execute_entity_task(tasks.local_mustar, gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.filter_inversion_output, gdirs)

        # Final preparation for the run
        execute_entity_task(tasks.init_present_time_glacier, gdirs)

        # Constant climate representative for the tstar climate, without bias
        # In an ideal world this would imply that the glaciers remain stable,
        # but it doesn't have to be so
        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            bias=0,
                            nyears=100,
                            output_filesuffix='_tstar')

        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            y0=1990,
                            nyears=100,
                            output_filesuffix='_pd')

        # Compile output
        utils.glacier_characteristics(gdirs)
        utils.compile_run_output(gdirs, filesuffix='_tstar')
        utils.compile_run_output(gdirs, filesuffix='_pd')
        utils.compile_climate_input(gdirs)

        return gdirs
Example No. 26
# Get the reference glacier ids (they are different for each RGI version)
rgi_dir = utils.get_rgi_dir(version=rgi_version)
df, _ = utils.get_wgms_files()
rids = df['RGI{}0_ID'.format(rgi_version[0])]

# We can't do Antarctica
rids = [rid for rid in rids if not ('-19.' in rid)]

# For HISTALP only RGI reg 11
if baseline == 'HISTALP':
    rids = [rid for rid in rids if '-11.' in rid]

# Make a new dataframe with those (this takes a while)
log.info('Reading the RGI shapefiles...')
rgidf = utils.get_rgi_glacier_entities(rids, version=rgi_version)
log.info('For RGIV{} we have {} candidate reference '
         'glaciers.'.format(rgi_version, len(rgidf)))

# We have to check which of them actually have enough mb data.
# Let OGGM do it:
gdirs = workflow.init_glacier_regions(rgidf)

# We need to know which period we have data for
log.info('Process the climate data...')
if baseline == 'CRU':
    execute_entity_task(tasks.process_cru_data, gdirs, print_log=False)
elif baseline == 'HISTALP':
    cfg.PARAMS['continue_on_error'] = True  # Some glaciers are not in Alps
    execute_entity_task(tasks.process_histalp_data, gdirs, print_log=False)
    cfg.PARAMS['continue_on_error'] = False
Example No. 27
import os
import tempfile

import oggm
from oggm import cfg, tasks, graphics
from gmd_analysis_scripts import PLOT_DIR
from oggm import utils

dir_path = os.path.join(tempfile.gettempdir(), 'fig_01')
fig_path = os.path.join(PLOT_DIR, 'workflow_tas.pdf')

cfg.initialize()
cfg.PARAMS['border'] = 20

cfg.PATHS['working_dir'] = dir_path
utils.mkdir(dir_path, reset=True)

rgidf = utils.get_rgi_glacier_entities(['RGI60-18.02342'])
entity = rgidf.iloc[0]

cfg.set_intersects_db(utils.get_rgi_intersects_entities(['RGI60-18.02342']))

gdir = oggm.GlacierDirectory(entity, base_dir=dir_path)

tasks.define_glacier_region(gdir, entity=entity)
tasks.glacier_masks(gdir)
tasks.compute_centerlines(gdir)
tasks.initialize_flowlines(gdir)
tasks.compute_downstream_line(gdir)
tasks.compute_downstream_bedshape(gdir)
tasks.catchment_area(gdir)
tasks.catchment_intersections(gdir)
tasks.catchment_width_geom(gdir)
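
# --- Possible continuation (added; not part of the original example) ---
# The snippet above stops before producing the figure. A plausible ending,
# saving to the fig_path defined earlier; the plot call is an assumption,
# not taken from the source.
import matplotlib.pyplot as plt

tasks.catchment_width_correction(gdir)
graphics.plot_catchment_width(gdir, corrected=True)
plt.savefig(fig_path)
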
Example No. 28
def climate_run_vas(rgi_ids, path=True, temp_biases=[0, +0.5, -0.5],
                    suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                    use_bias_for_run=False, use_default_tstar=True,
                    tstar=None, nyears=None, **kwargs):
    """Computes 'only' the massbalance in analogy to the `equilibrium_run_...`
    routines, without running the (volume/area scaling) evolution model.

    Dataset containing yearly values of specific mass balance is returned.

    Note: the task is not parallelized, hence it can take a long time if
    many glaciers are given. TODO: could/should be fixed sometime...
    TODO: add logging information

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    use_bias_for_run: bool, optional, default=False
        Flag deciding whether or not the mass balance residual is used.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration. Using the
        `ref_tstars.csv` table if not supplied.
    use_default_tstar : bool, optional, default=True
        Flag deciding whether or not to use the default ref_tstars.csv list.
        If `False`, the `oggm_ref_tstars_rgi6_histalp.csv` reference table
        is used.
    nyears: int, optional, default=None
        Number of years for which to compute the random mass balance.
    kwargs:
        Additional key word arguments for the mass balance model.

    Returns
    -------
    Dataset containing yearly values of specific mass balance.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from the RGI IDs,
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:-1]

    # load default parameter file
    cfg.initialize()

    # get environmental variables for working and output directories
    WORKING_DIR = os.environ["WORKDIR"]
    OUTPUT_DIR = os.environ["OUTDIR"]
    # create working directory
    utils.mkdir(WORKING_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 2.5
    cfg.PARAMS['temp_melt'] = -0.5
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = False

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    if tstar or use_default_tstar:
        # compute mustar from given tstar
        workflow.execute_entity_task(vascaling.local_t_star, gdirs, tstar=tstar, bias=0)
    else:
        # compute mustar from the reference table for the flowline model
        # RGI v6 and HISTALP baseline climate
        ref_df = pd.read_csv(utils.get_demo_file('oggm_ref_tstars_rgi6_histalp.csv'))
        workflow.execute_entity_task(vascaling.local_t_star, gdirs, ref_df=ref_df)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 10'000 years if not specified otherwise
    if nyears is None:
        nyears = 1e4
    years = np.arange(0, nyears + 1)

    # create dataset
    ds = list()

    # run ConstantVASMassBalance model centered around t*, once without
    # temperature bias and once with positive and negative temperature bias
    # of 0.5 °C each.
    for gdir in gdirs:
        # set mass balance model defaults
        kwargs.setdefault('halfsize', 15)
        kwargs.setdefault('filename', 'climate_historical')
        kwargs.setdefault('input_filesuffix', '')

        ds_ = list()

        for suffix, temp_bias in zip(suffixes, temp_biases):
            # instance mass balance model
            try:
                mb_mod = vascaling.ConstantVASMassBalance(gdir, **kwargs)
            except Exception:
                # continue with the next glacier or raise exception
                # depending on `continue_on_error` flag
                if cfg.PARAMS['continue_on_error']:
                    continue
                else:
                    raise

            if temp_bias is not None:
                # add given temperature bias to mass balance model
                mb_mod.temp_bias = temp_bias

            # where to store the model output
            diag_path = gdir.get_filepath('model_diagnostics',
                                          filesuffix='_vas',
                                          delete=True)

            # get minimum and maximum glacier elevation
            min_hgt, max_hgt = vascaling.get_min_max_elevation(gdir)
            # create empty container
            spec_mb = list()
            # iterate over all years
            for yr in years:
                spec_mb.append(mb_mod.get_specific_mb(min_hgt, max_hgt, yr))

            # add to dataset
            da = xr.DataArray(spec_mb, dims=('year',), coords={'year': years})
            ds_.append(xr.Dataset({'spec_mb': da}))

        if ds_:
            ds_ = xr.concat(ds_, pd.Index(temp_biases, name='temp_bias'))
            ds_.coords['rgi_id'] = gdir.rgi_id
            ds.append(ds_)

    if ds:
        # combine output from single glaciers into one dataset
        ds = xr.concat(ds, 'rgi_id')

        # store datasets
        if path:
            if path is True:
                path = os.path.join(OUTPUT_DIR, 'mb_output_vas.nc')
            ds.to_netcdf(path)

    return ds
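
# --- Added usage sketch (not part of the original example) ---
# A possible call mirroring the flowline variant; the directories and the
# RGI ID are placeholders, and WORKDIR/OUTDIR must be set beforehand.
import os

os.environ.setdefault('WORKDIR', '/tmp/vas_wdir')
os.environ.setdefault('OUTDIR', '/tmp/vas_out')
ds = climate_run_vas(['RGI60-11.00897'], nyears=100, path=False)
# mean specific mass balance per temperature bias
print(ds.spec_mb.mean(dim='year'))
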
Example No. 30
import os

import oggm
from oggm import cfg, tasks, graphics
from gmd_analysis_scripts import PLOT_DIR
from oggm.utils import (nicenumber, mkdir, get_rgi_glacier_entities,
                        get_rgi_intersects_entities)

cfg.initialize()
cfg.PARAMS['border'] = 10

fig_path = os.path.join(PLOT_DIR, 'grindelwald.pdf')

base_dir = os.path.join(os.path.expanduser('~/tmp'), 'OGGM_GMD', 'Grindelwald')
cfg.PATHS['working_dir'] = base_dir
mkdir(base_dir, reset=True)

rgidf = get_rgi_glacier_entities(['RGI60-11.01270'])
entity = rgidf.iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=base_dir)

cfg.set_intersects_db(get_rgi_intersects_entities(['RGI60-11.01270']))

tasks.define_glacier_region(gdir, entity=entity)
tasks.glacier_masks(gdir)
tasks.compute_centerlines(gdir)
tasks.initialize_flowlines(gdir)
tasks.compute_downstream_line(gdir)
tasks.catchment_area(gdir)
tasks.catchment_intersections(gdir)
tasks.catchment_width_geom(gdir)
tasks.catchment_width_correction(gdir)
Example No. 31
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      test_crudir=None,
                      disable_mp=False,
                      timeout=0,
                      max_level=4,
                      logging_level='WORKFLOW'):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    is_test : bool
        to test on a couple of glaciers only!
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    disable_mp : bool
        disable multiprocessing
    timeout : int
        a timeout for the entity tasks, in seconds (0 disables the timeout)
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Input check
    if max_level not in [1, 2, 3, 4]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level=logging_level)

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # Timeout
    cfg.PARAMS['task_timeout'] = timeout

    # For statistics
    climate_periods = [1920, 1960, 2000]

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    rgi_dir_name = 'RGI{}'.format(rgi_version)
    border_dir_name = 'b_{:03d}'.format(border)
    base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(4)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # L1 - initialize working directories
    # Which DEM source?
    if dem_source.upper() == 'ALL':
        # This is the complex one: just do the job and leave
        log.workflow('Running prepro on ALL sources')
        for i, s in enumerate(utils.DEM_SOURCES):
            rs = i == 0
            rgidf['DEM_SOURCE'] = s
            log.workflow('Running prepro on sources: {}'.format(s))
            gdirs = workflow.init_glacier_regions(rgidf, reset=rs, force=rs)
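            # note: _rename_dem_folder is a helper defined elsewhere in the
            # original script (not shown in this excerpt)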
            workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)

        # Compress all in output directory
        l_base_dir = os.path.join(base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=l_base_dir)
        utils.base_dir_to_tar(l_base_dir)

        _time_log()
        return

    if dem_source:
        # Force a given source
        rgidf['DEM_SOURCE'] = dem_source.upper()

    # L1 - go
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 1:
        _time_log()
        return

    # L2 - Tasks
    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    workflow.execute_entity_task(tasks.process_cru_data, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 2:
        _time_log()
        return

    # L3 - Tasks
    task_list = [
        tasks.glacier_masks, tasks.compute_centerlines,
        tasks.initialize_flowlines, tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape, tasks.catchment_area,
        tasks.catchment_intersections, tasks.catchment_width_geom,
        tasks.catchment_width_correction, tasks.local_t_star,
        tasks.mu_star_calibration, tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion, tasks.filter_inversion_output,
        tasks.init_present_time_glacier
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=climate_periods,
                                     path=opath)

    # L3 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 3:
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir = os.path.join(base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Copy mini data to new dir
    base_dir = os.path.join(base_dir, 'L4')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=base_dir)

    # L4 OK - compress all in output directory
    workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
    utils.base_dir_to_tar(base_dir)

    _time_log()
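
A minimal invocation sketch for this entry point; all argument values below are illustrative assumptions (in the full script they would presumably come from a command-line wrapper):

if __name__ == '__main__':
    # a sketch with illustrative arguments only
    run_prepro_levels(rgi_version='61', rgi_reg='11', border=80,
                      output_folder='./prepro_output',
                      working_dir='./prepro_wdir',
                      is_test=True, max_level=2)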
Example No. 32
# create a fresh test directory
wdir = os.path.join(os.path.abspath('.'), 'comparison_wdir')
if os.path.exists(wdir):
    shutil.rmtree(wdir)
os.makedirs(wdir)

# load default parameter file
cfg.initialize()

## RGI entity
# choose glacier
glacier_name = 'Oberer Grindelwaldgletscher'
rgi_id = 'RGI60-11.01270'
# get/download the RGI entity including the outline shapefile
rgi_df = utils.get_rgi_glacier_entities([rgi_id])
# set the name, since it is not delivered with the RGI file
# (this indexing assumes the dataframe keeps the original RGI region-file
# index, i.e. the RGI number minus one)
if rgi_df.loc[int(rgi_id[-5:]) - 1, 'Name'] is None:
    rgi_df.loc[int(rgi_id[-5:]) - 1, 'Name'] = glacier_name

# select single entry
rgi_entity = rgi_df.iloc[0]

## GlacierDirectory
# specify the working directory and define the glacier directory
cfg.PATHS['working_dir'] = wdir
gdir = oggm.GlacierDirectory(rgi_entity)

## DEM and GIS tasks
# get the DEM file for this location (note that utils.get_topo_file returns
# the file paths together with the source name, downloading if necessary)
dem = utils.get_topo_file(gdir.cenlon, gdir.cenlat)