Example #1
def test_multiple_inversion():

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_mdir')
    if not os.path.exists(testdir):
        os.makedirs(testdir)

    # Init
    cfg.initialize()
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PARAMS['border'] = 40
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['baseline_climate'] = 'CUSTOM'
    cfg.PATHS['working_dir'] = testdir

    # Get the RGI ID
    hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
    hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'

    gdirs = workflow.init_glacier_regions(hef_rgi)
    workflow.gis_prepro_tasks(gdirs)
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)

    fig, ax = plt.subplots()
    graphics.plot_inversion(gdirs, ax=ax)
    fig.tight_layout()
    shutil.rmtree(testdir)
    return fig
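For reference, Example #12 below achieves the same directory setup with the OGGM helper `utils.mkdir`, which creates the directory and empties it when it already exists; a minimal sketch:

testdir = os.path.join(get_test_dir(), 'tmp_mdir')
utils.mkdir(testdir, reset=True)  # create, or wipe if it already exists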
Example #2
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    gdirs = up_to_climate(reset=reset)

    with open(CLI_LOGF, 'rb') as f:
        clilog = pickle.load(f)

    if clilog != 'histalp':
        reset = True
    else:
        try:
            tasks.prepare_for_inversion(gdirs[0])
        except Exception:
            reset = True

    if reset:
        # Use histalp for the actual inversion test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PARAMS['baseline_climate'] = 'HISTALP'
        cru_dir = get_demo_file('HISTALP_precipitation_all_abs_1801-2014.nc')
        cfg.PATHS['cru_dir'] = os.path.dirname(cru_dir)
        workflow.climate_tasks(gdirs)
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('histalp', f)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
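On a first run the cache file may not exist yet and the 'rb' read above would fail; a hypothetical guard (not part of the original test code) could seed it beforehand:

if not os.path.exists(CLI_LOGF):
    with open(CLI_LOGF, 'wb') as f:
        pickle.dump('none', f)  # any sentinel other than 'histalp' forces a reset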
Example #3
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    gdirs = up_to_climate(reset=reset)

    with open(CLI_LOGF, 'rb') as f:
        clilog = pickle.load(f)

    if clilog != 'histalp':
        reset = True
    else:
        try:
            tasks.prepare_for_inversion(gdirs[0])
        except Exception:
            reset = True

    if reset:
        # Use histalp for the actual inversion test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PARAMS['baseline_climate'] = 'HISTALP'
        workflow.climate_tasks(gdirs)
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('histalp', f)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
Example #5
def test_thick_elev_bands():
    fig, ax = plt.subplots()
    gdir = init_columbia_eb(dir_name='test_thick_eb')
    workflow.inversion_tasks(utils.tolist(gdir))
    inversion.distribute_thickness_per_altitude(gdir)
    graphics.plot_distributed_thickness(gdir, ax=ax)
    fig.tight_layout()
    return fig
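The distributed thickness plotted above is written to the glacier directory's gridded data file; a minimal read-back sketch, assuming OGGM's default variable name `distributed_thickness`:

import xarray as xr
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
    thick = ds['distributed_thickness'].load()  # 2D ice thickness map [m]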
Example #6
    def test_init_present_time_glacier(self):

        gdirs = up_to_inversion()

        # Inversion Results
        cfg.PARAMS['invert_with_sliding'] = True
        cfg.PARAMS['optimize_thick'] = True
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        cfg.PARAMS['invert_with_sliding'] = False
        cfg.PARAMS['optimize_thick'] = False
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        # Init glacier
        d = gdirs[0].read_pickle('inversion_params')
        fs = d['fs']
        glen_a = d['glen_a']
        maxs = cfg.PARAMS['max_shape_param']
        for gdir in gdirs:
            flowline.init_present_time_glacier(gdir)
            mb_mod = massbalance.ConstantMassBalanceModel(gdir)
            fls = gdir.read_pickle('model_flowlines')
            model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
                                            fs=fs, glen_a=glen_a)
            _vol = model.volume_km3
            _area = model.area_km2
            if gdir.rgi_id in df.index:
                gldf = df.loc[gdir.rgi_id]
                # TODO: broken but should work
                # assert_allclose(gldf['oggm_volume_km3'], _vol, rtol=0.03)
                # assert_allclose(gldf['ref_area_km2'], _area, rtol=0.03)
                maxo = max([fl.order for fl in model.fls])
                for fl in model.fls:
                    self.assertTrue(np.all(fl.bed_shape > 0))
                    self.assertTrue(np.all(fl.bed_shape <= maxs))
                    if len(model.fls) > 1:
                        if fl.order == (maxo-1):
                            self.assertTrue(fl.flows_to is fls[-1])

        # Test the glacier characteristics
        dfc = utils.glacier_characteristics(gdirs)
        self.assertTrue(np.all(dfc.terminus_type == 'Land-terminating'))
        cc = dfc[['dem_mean_elev', 'clim_temp_avgh']].corr().values[0, 1]
        self.assertTrue(cc > 0.4)
Example #7
def preprocessing(gdirs):
    """
    oggm workflow for preparing initializing
    :param gdirs: list of oggm.GlacierDirectories from preprocessed level 2 onwards
    :return None, but creates required files
    """
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)
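A hypothetical call of this helper; the RGI ID is the Hintereisferner ID used elsewhere on this page, and the directories are assumed to come from OGGM's pre-processed files at level 2:

gdirs = workflow.init_glacier_directories(['RGI60-11.00897'],
                                          from_prepro_level=2)
preprocessing(gdirs)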
Example #8
def plot_issue(gdir, plot_dir):
    #plt.style.use('ggplot')

    workflow.gis_prepro_tasks([gdir])
    workflow.climate_tasks([gdir])
    workflow.inversion_tasks([gdir])
    tasks.init_present_time_glacier(gdir)

    # Observed length changes
    df = gdir.get_ref_length_data()
    df = df.loc[1855:2003]['dL']
    df = df - df.iloc[-1]

    tasks.run_from_climate_data(gdir,
                                ys=1855,
                                ye=2003,
                                output_filesuffix='hist_from_current')
    ds = xr.open_dataset(
        gdir.get_filepath('model_diagnostics', filesuffix='hist_from_current'))
    (ds.length_m.to_series().rolling(36, center=True).mean() -
     ds.length_m.to_series().iloc[0]).plot(c='C0', label='OGGM')
    #s = s - s.iloc[-1]
    #print(s)
    ax = df.plot(c='k', label='Observations')
    #s.plot(c='C0', label='OGGM');
    plt.legend()
    ax.set_ylabel('Glacier Length Change [m]')
    plt.title('Hintereisferner length changes Experiment 2')
    plt.tight_layout()
    plt.show()
    '''
    fls = gdir.read_pickle('model_flowlines')
    x = np.arange(fls[-1].nx) *fls[-1].dx * fls[-1].map_dx

    plt.figure(figsize=(13,10))

    rc('axes', linewidth=3)

    plt.plot(x,fls[-1].surface_h,linewidth=3, label='Surface Elevation')
    plt.plot(x,fls[-1].bed_h,'k',linewidth=3,label='Bed Topography')
    plt.ylabel('Altitude (m)',size=30)
    plt.xlabel('Distance along the Flowline (m)',size=30)
    plt.legend(loc='best',fontsize=30)
    #plt.annotate('?', xy=(5000, 2700), fontsize=40)

    plt.tick_params(axis='both', which='major', labelsize=30)


    plt.title(gdir.rgi_id+ ': '+gdir.name,size=35)
    plt.savefig(os.path.join(plot_dir, 'issue_today.png'),dpi=200)
    '''
    #plt.savefig(os.path.join(plot_dir, 'issue_1850.pdf'),dpi=200)
    plt.show()

    return
Example #9
    def test_init_present_time_glacier(self):

        gdirs = up_to_inversion()

        # Inversion Results
        cfg.PARAMS['invert_with_sliding'] = True
        cfg.PARAMS['optimize_thick'] = True
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        assert r1 < 0.1

        cfg.PARAMS['invert_with_sliding'] = False
        cfg.PARAMS['optimize_thick'] = False
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        assert r1 < 0.12

        # Init glacier
        d = gdirs[0].read_pickle('inversion_params')
        fs = d['fs']
        glen_a = d['glen_a']
        for gdir in gdirs:
            flowline.init_present_time_glacier(gdir)
            mb_mod = massbalance.ConstantMassBalance(gdir)
            fls = gdir.read_pickle('model_flowlines')
            model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
                                            fs=fs, glen_a=glen_a)
            _vol = model.volume_km3
            _area = model.area_km2
            if gdir.rgi_id in df.index:
                gldf = df.loc[gdir.rgi_id]
                assert_allclose(gldf['oggm_volume_km3'], _vol, rtol=0.05)
                assert_allclose(gldf['ref_area_km2'], _area, rtol=0.05)
                maxo = max([fl.order for fl in model.fls])
                for fl in model.fls:
                    if len(model.fls) > 1:
                        if fl.order == (maxo-1):
                            self.assertTrue(fl.flows_to is fls[-1])

        # Test the glacier characteristics
        dfc = utils.glacier_characteristics(gdirs)
        self.assertTrue(np.all(dfc.terminus_type == 'Land-terminating'))
        cc = dfc[['flowline_mean_elev',
                  'tstar_avg_temp_mean_elev']].corr().values[0, 1]
        assert cc < -0.8
        assert np.all(dfc.t_star > 1900)
        assert np.all(dfc.tstar_aar.mean() > 0.5)
Example #10
    def test_init_present_time_glacier(self):

        gdirs = up_to_inversion()

        # Inversion Results
        cfg.PARAMS['invert_with_sliding'] = True
        cfg.PARAMS['optimize_thick'] = True
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        cfg.PARAMS['invert_with_sliding'] = False
        cfg.PARAMS['optimize_thick'] = False
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        # Init glacier
        d = gdirs[0].read_pickle('inversion_params')
        fs = d['fs']
        glen_a = d['glen_a']
        maxs = cfg.PARAMS['max_shape_param']
        for gdir in gdirs:
            flowline.init_present_time_glacier(gdir)
            mb_mod = massbalance.ConstantMassBalanceModel(gdir)
            fls = gdir.read_pickle('model_flowlines')
            model = flowline.FluxBasedModel(fls,
                                            mb_model=mb_mod,
                                            y0=0.,
                                            fs=fs,
                                            glen_a=glen_a)
            _vol = model.volume_km3
            _area = model.area_km2
            gldf = df.loc[gdir.rgi_id]
            assert_allclose(gldf['oggm_volume_km3'], _vol, rtol=0.03)
            assert_allclose(gldf['ref_area_km2'], _area, rtol=0.03)
            maxo = max([fl.order for fl in model.fls])
            for fl in model.fls:
                self.assertTrue(np.all(fl.bed_shape > 0))
                self.assertTrue(np.all(fl.bed_shape <= maxs))
                if len(model.fls) > 1:
                    if fl.order == (maxo - 1):
                        self.assertTrue(fl.flows_to is fls[-1])
Example #11
    def test_init_present_time_glacier(self):

        gdirs = up_to_inversion()

        # Inversion Results
        cfg.PARAMS['invert_with_sliding'] = True
        cfg.PARAMS['optimize_thick'] = True
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        cfg.PARAMS['invert_with_sliding'] = False
        cfg.PARAMS['optimize_thick'] = False
        workflow.inversion_tasks(gdirs)

        fpath = os.path.join(cfg.PATHS['working_dir'],
                             'inversion_optim_results.csv')
        df = pd.read_csv(fpath, index_col=0)
        r1 = rmsd(df['ref_volume_km3'], df['oggm_volume_km3'])
        r2 = rmsd(df['ref_volume_km3'], df['vas_volume_km3'])
        self.assertTrue(r1 < r2)

        # Init glacier
        d = gdirs[0].read_pickle('inversion_params')
        fs = d['fs']
        glen_a = d['glen_a']
        maxs = cfg.PARAMS['max_shape_param']
        for gdir in gdirs:
            flowline.init_present_time_glacier(gdir)
            mb_mod = massbalance.TstarMassBalanceModel(gdir)
            fls = gdir.read_pickle('model_flowlines')
            model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
                                            fs=fs, glen_a=glen_a)
            _vol = model.volume_km3
            _area = model.area_km2
            gldf = df.loc[gdir.rgi_id]
            assert_allclose(gldf['oggm_volume_km3'], _vol, rtol=0.03)
            assert_allclose(gldf['ref_area_km2'], _area, rtol=0.03)
            maxo = max([fl.order for fl in model.fls])
            for fl in model.fls:
                self.assertTrue(np.all(fl.bed_shape > 0))
                self.assertTrue(np.all(fl.bed_shape <= maxs))
                if len(model.fls) > 1:
                    if fl.order == (maxo-1):
                        self.assertTrue(fl.flows_to is fls[-1])
Example #12
def test_multiple_models():

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_mdir')
    utils.mkdir(testdir, reset=True)

    # Init
    cfg.initialize()
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['baseline_climate'] = 'CUSTOM'
    cfg.PARAMS['trapezoid_lambdas'] = 1
    cfg.PARAMS['border'] = 40
    apply_test_ref_tstars()

    # Get the RGI ID
    hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
    hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'

    gdirs = workflow.init_glacier_directories(hef_rgi)
    workflow.gis_prepro_tasks(gdirs)
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)

    models = []
    for gdir in gdirs:
        flowline.init_present_time_glacier(gdir)
        fls = gdir.read_pickle('model_flowlines')
        models.append(flowline.FlowlineModel(fls))

    fig, ax = plt.subplots()
    graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
    fig.tight_layout()

    shutil.rmtree(testdir)
    return fig
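A usage sketch for the test above; saving the returned figure is plain matplotlib and the file name is arbitrary:

fig = test_multiple_models()
fig.savefig('multiple_models.png', dpi=150)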
Example #13
def test_multiple_models():

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_mdir')
    utils.mkdir(testdir, reset=True)

    # Init
    cfg.initialize()
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PARAMS['optimize_inversion_params'] = True
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['border'] = 40

    # Get the RGI ID
    hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
    hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'

    gdirs = workflow.init_glacier_regions(hef_rgi)
    workflow.gis_prepro_tasks(gdirs)
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)

    models = []
    for gdir in gdirs:
        flowline.init_present_time_glacier(gdir)
        fls = gdir.read_pickle('model_flowlines')
        models.append(flowline.FlowlineModel(fls))

    fig, ax = plt.subplots()
    graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
    fig.tight_layout()

    shutil.rmtree(testdir)
    return fig
Example #14
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = not ON_TRAVIS

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')

    # Set up the paths and other stuff
    cfg.set_divides_db(get_demo_file('divides_workflow.shp'))
    cfg.PATHS['wgms_rgi_links'] = get_demo_file('RGI_WGMS_oetztal.csv')
    cfg.PATHS['glathida_rgi_links'] = get_demo_file('RGI_GLATHIDA_oetztal.csv')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oetztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Be sure data is downloaded because lock doesn't work
    cl = utils.get_cru_cl_file()

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['use_optimized_inversion_params'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        flowline.init_present_time_glacier(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

        # Climate related tasks
        # See if CRU is running
        cfg.PARAMS['temp_use_local_gradient'] = False
        cfg.PATHS['climate_file'] = '~'
        cru_dir = get_demo_file('cru_ts3.23.1901.2014.tmp.dat.nc')
        cfg.PATHS['cru_dir'] = os.path.dirname(cru_dir)
        with warnings.catch_warnings():
            # There is a warning from salem
            warnings.simplefilter("ignore")
            workflow.execute_entity_task(tasks.distribute_cru_style, gdirs)
        tasks.compute_ref_t_stars(gdirs)
        tasks.distribute_t_stars(gdirs)

        # Use histalp for the actual test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oetztal.nc')
        cfg.PATHS['cru_dir'] = '~'
        workflow.climate_tasks(gdirs)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
Example #15
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = not ON_TRAVIS

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oeztal.tif')

    # Set up the paths and other stuff
    cfg.set_divides_db(get_demo_file('HEF_divided.shp'))
    cfg.PATHS['wgms_rgi_links'] = get_demo_file('RGI_WGMS_oeztal.csv')
    cfg.PATHS['glathida_rgi_links'] = get_demo_file('RGI_GLATHIDA_oeztal.csv')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oeztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['use_inversion_params'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        flowline.init_present_time_glacier(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

        # Climate related tasks
        # See if CRU is running
        cfg.PARAMS['temp_use_local_gradient'] = False
        cfg.PATHS['climate_file'] = '~'
        cru_dir = get_demo_file('cru_ts3.23.1901.2014.tmp.dat.nc')
        cfg.PATHS['cru_dir'] = os.path.dirname(cru_dir)
        workflow.climate_tasks(gdirs)

        # Use histalp for the actual test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oeztal.nc')
        cfg.PATHS['cru_dir'] = '~'
        workflow.climate_tasks(gdirs)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
Example #16
# Keep only the glaciers whose center point lies within the basin
in_bas = [
    basin.geometry.contains(shpg.Point(x, y))[0]
    for (x, y) in zip(rgidf.CenLon, rgidf.CenLat)
]
rgidf = rgidf.loc[in_bas]
# Store them for later
rgidf.to_file(os.path.join(WORKING_DIR, 'rgi_rofental.shp'))

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

print('Starting OGGM run')
print('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize glacier directories
gdirs = workflow.init_glacier_regions(rgidf)

# Tasks shortcuts - see the next examples for more details
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)

# Compile output
print('Compiling output')
utils.compile_glacier_statistics(gdirs)
utils.write_centerlines_to_shape(gdirs)

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
print('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))
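The compiled statistics can be inspected afterwards; a sketch assuming pandas is imported as `pd` and the default file name written by `compile_glacier_statistics` (column names may vary with the OGGM version):

df = pd.read_csv(os.path.join(WORKING_DIR, 'glacier_statistics.csv'),
                 index_col=0)
print(df[['rgi_area_km2', 'inv_volume_km3']].head())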
Example #17
def climate_run_fl(rgi_ids,
                   path=True,
                   temp_biases=[0, +0.5, -0.5],
                   use_bias_for_run=False,
                   suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                   tstar=None,
                   nyears=None,
                   **kwargs):
    """Computes 'only' the massbalance in analogy to the `equilibrium_run_...`
    routines, without running the evolution (flowline) model.

    Dataset containing yearly values of specific mass balance is returned.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    tstar: float
        'Equilibrium year' used for the mass balance calibration.
    nyears: int, optional, default=None
        Number of years for which to compute the random mass balance.
    kwargs:
        Additional key word arguments for massbalance model.

    Returns
    -------
    Dataset containing yearly values of specific massbalance.
    """

    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'test_cluster'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # shutil.rmtree(wdir)
    # os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    workflow.execute_entity_task(climate.local_t_star,
                                 gdirs,
                                 tstar=tstar,
                                 bias=0)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 10,000 years if not specified otherwise
    if nyears is None:
        nyears = 10000
    years = np.arange(0, nyears + 1)

    # create dataset
    ds = list()

    # run RandomMassBalance model centered around t*, once without
    # temperature bias and once with positive and negative temperature bias
    # of 0.5 °C each.
    for gdir in gdirs:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)
        kwargs.setdefault('halfsize', 15)
        kwargs.setdefault('mb_model_class', flowline.RandomMassBalance)
        kwargs.setdefault('filename', 'climate_historical')
        kwargs.setdefault('input_filesuffix', '')
        kwargs.setdefault('unique_samples', False)

        ds_ = list()

        fls = gdir.read_pickle('model_flowlines')

        for suffix, temp_bias in zip(suffixes, temp_biases):
            # instance mass balance model
            mb_mod = flowline.MultipleFlowlineMassBalance(gdir, **kwargs)

            if temp_bias is not None:
                # add given temperature bias to mass balance model
                mb_mod.temp_bias = temp_bias

            # create empty container
            spec_mb = list()
            # iterate over all years
            for yr in years:
                spec_mb.append(mb_mod.get_specific_mb(fls=fls, year=yr))

            # add to dataset
            da = xr.DataArray(spec_mb, dims=('year'), coords={'year': years})
            ds_.append(xr.Dataset({'spec_mb': da}))

        ds_ = xr.concat(ds_, pd.Index(temp_biases, name='temp_bias'))
        ds_.coords['rgi_id'] = gdir.rgi_id
        ds.append(ds_)

    ds = xr.concat(ds, 'rgi_id')

    # store datasets
    if path:
        if path is True:
            path = os.path.join(cfg.PATHS['working_dir'], 'mb_output_fl.nc')
        ds.to_netcdf(path)
        # ds_normal.to_netcdf(path[1])

    # return ds, ds_normal
    return ds
Example #18
def equilibrium_run_fl(rgi_ids,
                       use_random_mb=True,
                       path=True,
                       temp_biases=(0, +0.5, -0.5),
                       use_bias_for_run=False,
                       suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                       tstar=None,
                       **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    flowline model. For details see docstring of `sensitivity_run_vas`.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=True
        Choose between random massbalance model and constant massbalance model.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    tstar: float
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'test_cluster'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # shutil.rmtree(wdir)
    # os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    workflow.execute_entity_task(climate.local_t_star,
                                 gdirs,
                                 tstar=tstar,
                                 bias=0)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)
    # disregard glaciers exceeding their domain boundaries
    # to not disrupt the entire run
    kwargs.setdefault('check_for_boundaries', True)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_random_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )
    else:
        # run ConstantMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature
        # bias of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_constant_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )

    ds = list()
    for suffix, temp_bias in zip(suffixes, temp_biases):
        # compile the output for each run and store to file
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       input_filesuffix=suffix,
                                       path=False)
        ds.append(ds_)
    # concat into one dataset with temperature bias as coordinate
    ds = xr.concat(ds, pd.Index(temp_biases, name='temp_bias'))
    # add model type as coordinate
    ds.coords['model'] = 'fl'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # compute mean and sum over all glaciers
    ds_mean = ds.mean(dim='rgi_id')
    ds_mean.coords['rgi_id'] = 'mean'
    ds_sum = ds.sum(dim='rgi_id')
    ds_sum.coords['rgi_id'] = 'sum'
    # add to dataset
    ds = xr.concat([ds, ds_mean, ds_sum], dim='rgi_id')

    # normalize glacier geometries (length/area/volume) with start value
    ds_normal = normalize_ds_with_start(ds)
    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = int(False)
    ds_normal.coords['normalized'] = int(True)

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store datasets
    if path:
        if path is True:
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_fl.nc'.format(mb))

        ds.to_netcdf(path)

    return ds
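A minimal, hypothetical invocation of the routine above; the RGI ID is the one used in the setup_cache example further down, all other arguments keep their defaults:

ds = equilibrium_run_fl(['RGI60-11.00897'], use_random_mb=True, path=True)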
Example #19
def climate_run_fl(rgi_ids, path=True, temp_biases=[0, +0.5, -0.5],
                   suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                   use_bias_for_run=False, use_default_tstar=True,
                   tstar=None, nyears=None, **kwargs):
    """Computes 'only' the massbalance in analogy to the `equilibrium_run_...`
    routines, without running the (flowline) evolution model.

    Dataset containing yearly values of specific mass balance is returned.

    Note: this task is not parallelized, hence it can take a long time if
    many glaciers are given. TODO: could/should be fixed sometime...
    TODO: add logging information

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    use_bias_for_run: bool, optional, default=False
        Flag deciding whether or not the mass balance residual is used.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration. The
        `ref_tstars.csv` table is used if not supplied.
    use_default_tstar: bool, optional, default=True
        Flag deciding whether or not to use the default `ref_tstars.csv` list.
        If `False`, the `oggm_ref_tstars_rgi6_histalp.csv` reference table is
        used.
    nyears: int, optional, default=None
        Number of years for which to compute the random mass balance
    kwargs:
        Additional key word arguments for massbalance model.

    Returns
    -------
    Dataset containing yearly values of specific massbalance.
    """

    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:-1]

    # load default parameter file
    cfg.initialize()

    # get environmental variables for working and output directories
    WORKING_DIR = os.environ["WORKDIR"]
    OUTPUT_DIR = os.environ["OUTDIR"]
    # create working directory
    utils.mkdir(WORKING_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = False

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    if tstar or use_default_tstar:
        # compute mustar from given tstar
        workflow.execute_entity_task(climate.local_t_star, gdirs,
                                     tstar=tstar, bias=0)
    else:
        # compute mustar from the reference table for the flowline model
        # RGI v6 and HISTALP baseline climate
        ref_df = pd.read_csv(utils.get_demo_file('oggm_ref_tstars_rgi6_histalp.csv'))
        workflow.execute_entity_task(climate.local_t_star, gdirs, ref_df=ref_df)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 10,000 years if not specified otherwise
    if nyears is None:
        nyears = 10000
    years = np.arange(0, nyears + 1)

    # create dataset
    ds = list()

    # compute the specific mass balance for each glacier and year, once
    # without temperature bias and once with positive and negative
    # temperature bias of 0.5 deg C each.
    for gdir in gdirs:
        kwargs.setdefault('halfsize', 15)
        kwargs.setdefault('mb_model_class', flowline.ConstantMassBalance)
        kwargs.setdefault('filename', 'climate_historical')
        kwargs.setdefault('input_filesuffix', '')

        ds_ = list()

        # open the flowline file if it exists
        try:
            fls = gdir.read_pickle('model_flowlines')
        except Exception:
            # continue with the next glacier or raise exception
            # depending on `continue_on_error` flag
            if cfg.PARAMS['continue_on_error']:
                continue
            else:
                raise

        for suffix, temp_bias in zip(suffixes, temp_biases):
            # instance mass balance model
            try:
                mb_mod = flowline.MultipleFlowlineMassBalance(gdir, **kwargs)
            except Exception:
                # continue with the next glacier or raise exception
                # depending on `continue_on_error` flag
                if cfg.PARAMS['continue_on_error']:
                    continue
                else:
                    raise

            if temp_bias is not None:
                # add given temperature bias to mass balance model
                mb_mod.temp_bias = temp_bias

            # create empty container
            spec_mb = list()
            # iterate over all years
            for yr in years:
                spec_mb.append(mb_mod.get_specific_mb(fls=fls, year=yr))

            # add to dataset
            da = xr.DataArray(spec_mb, dims=('year'), coords={'year': years})
            ds_.append(xr.Dataset({'spec_mb': da}))

        if ds_:
            ds_ = xr.concat(ds_, pd.Index(temp_biases, name='temp_bias'))
            ds_.coords['rgi_id'] = gdir.rgi_id
            ds.append(ds_)

    if ds:
        # combine output from single glaciers into one dataset
        ds = xr.concat(ds, 'rgi_id')

        # store dataset to file
        if path:
            if path is True:
                path = os.path.join(OUTPUT_DIR, 'mb_output_fl.nc')
            ds.to_netcdf(path)

    # return ds
    return ds
Example #21
    def setup_cache(self):

        setattr(full_workflow.setup_cache, "timeout", 360)

        utils.mkdir(self.testdir, reset=True)
        self.cfg_init()

        # Pre-download other files which will be needed later
        cru.get_cru_cl_file()
        cru.get_cru_file(var='tmp')
        cru.get_cru_file(var='pre')

        # Get the RGI glaciers for the run.
        rgi_list = ['RGI60-01.10299', 'RGI60-11.00897', 'RGI60-18.02342']
        rgidf = utils.get_rgi_glacier_entities(rgi_list)

        # We use intersects
        db = utils.get_rgi_intersects_entities(rgi_list, version='61')
        cfg.set_intersects_db(db)

        # Sort for more efficient parallel computing
        rgidf = rgidf.sort_values('Area', ascending=False)

        # Go - initialize glacier directories
        gdirs = workflow.init_glacier_directories(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.define_glacier_region,
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.compute_downstream_line,
            tasks.compute_downstream_bedshape,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        execute_entity_task(tasks.local_t_star, gdirs)
        execute_entity_task(tasks.mu_star_calibration, gdirs)

        # Inversion tasks
        workflow.inversion_tasks(gdirs)

        # Final preparation for the run
        execute_entity_task(tasks.init_present_time_glacier, gdirs)

        # Constant climate representative for the tstar climate, without bias
        # In an ideal world this would imply that the glaciers remain stable,
        # but it doesn't have to be so
        execute_entity_task(tasks.run_constant_climate, gdirs,
                            bias=0, nyears=100,
                            output_filesuffix='_tstar')

        execute_entity_task(tasks.run_constant_climate, gdirs,
                            y0=1990, nyears=100,
                            output_filesuffix='_pd')

        # Compile output
        utils.compile_glacier_statistics(gdirs)
        utils.compile_run_output(gdirs, input_filesuffix='_tstar')
        utils.compile_run_output(gdirs, input_filesuffix='_pd')
        utils.compile_climate_input(gdirs)

        return gdirs
Example #22
def compute_scaling_params(rgi_ids, path=None):
    """ The routine computes scaling parameters by fitting a linear regression
    to the volume/area and volume/length scatter in log-log space, using the
    inversion volume, the RGI area and the longest centerline as "observations"
    Thereby, the following two cases apply:
    - compute only scaling constants, since scaling exponents have a physical
        basis and should not be changed
    - compute only scaling constants and scaling exponents

    Returns parameters in a 2-level dictionary. The upper level differentiates
    between the two cases, the lower level indicates the parameters.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    path: bool or str, optional, default=None
        If a path is given (or True), the computed parameters are stored to
        file.

    Returns
    -------
    Dictionary containing the computed parameters.

    """
    log.info('Starting scaling parameter computation')

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:-1]

    # load default parameter file
    vascaling.initialize()

    # get environmental variables for working and output directories
    WORKING_DIR = os.environ["WORKDIR"]
    OUTPUT_DIR = os.environ["OUTDIR"]
    # create working directory
    utils.mkdir(WORKING_DIR)
    utils.mkdir(OUTPUT_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # set minimum ice thickness to include in glacier length computation
    # this reduces weird spikes in length records
    cfg.PARAMS['min_ice_thick_for_length'] = 0.1

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)
    # in contrast to the calibration (where the bias is zero by definition),
    # the mass balance residual (bias) is used for this run
    cfg.PARAMS['use_bias_for_run'] = True

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(climate.local_t_star, gdirs)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # create empty dictionary
    params = dict()

    # compute scaling constants for given (fixed) slope
    params['const_only'] = vascaling.get_scaling_constant(gdirs)

    # compute scaling constants and scaling exponent via linear regression
    params['const_expo'] = vascaling.get_scaling_constant_exponent(gdirs)

    # store to file
    if path:
        if not isinstance(path, str):
            # set default path and filename
            path = os.path.join(OUTPUT_DIR, 'scaling_params.json')
        with open(path, 'w') as f:
            json.dump(params, f)

    return params
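Reading the stored parameters back is plain stdlib JSON; a sketch assuming the same OUTDIR environment variable as in the function above:

import json, os
with open(os.path.join(os.environ["OUTDIR"], 'scaling_params.json')) as f:
    params = json.load(f)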