Example #1
    def test_random(self):

        gdirs = up_to_inversion()

        workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
        rand_glac = partial(flowline.random_glacier_evolution, nyears=200,
                            seed=0, filesuffix='_test')
        workflow.execute_entity_task(rand_glac, gdirs)

        for gd in gdirs:

            path = gd.get_filepath('past_model', filesuffix='_test')

            # See that we are running ok
            with flowline.FileModel(path) as model:
                vol = model.volume_km3_ts()
                area = model.area_km2_ts()
                length = model.length_m_ts()

                self.assertTrue(np.all(np.isfinite(vol) & (vol != 0.)))
                self.assertTrue(np.all(np.isfinite(area) & (area != 0.)))
                self.assertTrue(np.all(np.isfinite(length) & (length != 0.)))

        # Test output
        utils.compile_run_output(gdirs, filesuffix='_test')
        path = os.path.join(cfg.PATHS['working_dir'],
                            'run_output_test.nc')
        with xr.open_dataset(path) as ds:
            assert_allclose(vol, ds.volume.sel(rgi_id=gd.rgi_id) * 1e-9)
            assert_allclose(area, ds.area.sel(rgi_id=gd.rgi_id) * 1e-6)
            assert_allclose(length, ds.length.sel(rgi_id=gd.rgi_id))
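
The `* 1e-9` and `* 1e-6` factors above reflect the unit conventions: the compiled dataset stores volume in m^3 and area in m^2, while `FileModel.volume_km3_ts()` and `area_km2_ts()` return km^3 and km^2. A minimal standalone sketch of the same conversions (the RGI ID is hypothetical; the file name follows the `run_output_test.nc` convention used above):

import xarray as xr

with xr.open_dataset('run_output_test.nc') as ds:
    vol_km3 = ds.volume.sel(rgi_id='RGI50-11.00897') * 1e-9   # m^3 -> km^3
    area_km2 = ds.area.sel(rgi_id='RGI50-11.00897') * 1e-6    # m^2 -> km^2
    length_m = ds.length.sel(rgi_id='RGI50-11.00897')         # already in m
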
Example #2
    def test_run_random_climate(self):
        """ Test the run_random_climate task for a climate based on the
        equilibrium period centred around t*. Additionally a positive and a
        negative temperature bias are tested.

        Returns
        -------

        """
        # let's not use the mass balance bias since we want to reproduce
        # the results from the mass balance calibration
        cfg.PARAMS['use_bias_for_run'] = False

        # read the Hintereisferner outline
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        entity = gpd.read_file(hef_file).iloc[0]

        # initialize the GlacierDirectory
        gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
        # define the local grid and glacier mask
        gis.define_glacier_region(gdir, entity=entity)
        gis.glacier_masks(gdir)

        # process the given climate file
        climate.process_custom_climate_data(gdir)
        # compute mass balance parameters
        ref_df = cfg.PARAMS['vas_ref_tstars_rgi5_histalp']
        vascaling.local_t_star(gdir, ref_df=ref_df)

        # define some parameters for the random climate model
        nyears = 300
        seed = 1
        temp_bias = 0.5
        # read the equilibrium year used for the mass balance calibration
        t_star = gdir.read_json('vascaling_mustar')['t_star']
        # run model with random climate
        _ = vascaling.run_random_climate(gdir, nyears=nyears, y0=t_star,
                                         seed=seed)
        # run model with positive temperature bias
        _ = vascaling.run_random_climate(gdir, nyears=nyears, y0=t_star,
                                         seed=seed, temperature_bias=temp_bias,
                                         output_filesuffix='_bias_p')
        # run model with negative temperature bias
        _ = vascaling.run_random_climate(gdir, nyears=nyears, y0=t_star,
                                         seed=seed,
                                         temperature_bias=-temp_bias,
                                         output_filesuffix='_bias_n')

        # compile run outputs
        ds = utils.compile_run_output([gdir], input_filesuffix='')
        ds_p = utils.compile_run_output([gdir], input_filesuffix='_bias_p')
        ds_n = utils.compile_run_output([gdir], input_filesuffix='_bias_n')

        # the glacier should not change much under a random climate
        # based on the equilibrium period centered around t*
        assert abs(1 - ds.volume.mean() / ds.volume[0]) < 0.015
        # higher temperatures should result in a smaller glacier
        assert ds.volume.mean() > ds_p.volume.mean()
        # lower temperatures should result in a larger glacier
        assert ds.volume.mean() < ds_n.volume.mean()
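
The first assertion above is a stability check: the mean volume over the whole run must stay within 1.5% of the initial volume. A minimal restatement of the same metric (same `ds` as above, single-glacier case):

rel_change = abs(1 - float(ds.volume.mean()) / float(ds.volume[0]))
assert rel_change < 0.015  # within 1.5% of the initial volume
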
Example #3
    def test_run_constant_climate(self):
        """ Test the run_constant_climate task for a climate based on the
        equilibrium period centred around t*. Additionally a positive and a
        negative temperature bias are tested.

        """
        # let's not use the mass balance bias since we want to reproduce
        # the results from the mass balance calibration
        cfg.PARAMS['use_bias_for_run'] = False

        # read the Hintereisferner outline
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        entity = gpd.read_file(hef_file).iloc[0]

        # initialize the GlacierDirectory
        gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
        # define the local grid and glacier mask
        gis.define_glacier_region(gdir, entity=entity)
        gis.glacier_masks(gdir)

        # process the given climate file
        climate.process_custom_climate_data(gdir)
        # compute mass balance parameters
        ref_df = cfg.PARAMS['vas_ref_tstars_rgi5_histalp']
        vascaling.local_t_star(gdir, ref_df=ref_df)

        # define some parameters for the constant climate model
        nyears = 500
        temp_bias = 0.5
        _ = vascaling.run_constant_climate(gdir, nyears=nyears,
                                           output_filesuffix='')
        _ = vascaling.run_constant_climate(gdir, nyears=nyears,
                                           temperature_bias=+temp_bias,
                                           output_filesuffix='_bias_p')
        _ = vascaling.run_constant_climate(gdir, nyears=nyears,
                                           temperature_bias=-temp_bias,
                                           output_filesuffix='_bias_n')

        # compile run outputs
        ds = utils.compile_run_output([gdir], input_filesuffix='')
        ds_p = utils.compile_run_output([gdir], input_filesuffix='_bias_p')
        ds_n = utils.compile_run_output([gdir], input_filesuffix='_bias_n')

        # the glacier should not change under a constant climate
        # based on the equilibrium period centered around t*
        assert abs(1 - ds.volume.mean() / ds.volume[0]) < 1e-7
        # higher temperatures should result in a smaller glacier
        assert ds.volume.mean() > ds_p.volume.mean()
        # lower temperatures should result in a larger glacier
        assert ds.volume.mean() < ds_n.volume.mean()

        # compute volume change from one year to the next
        dV_p = (ds_p.volume[1:].values - ds_p.volume[:-1].values).flatten()
        dV_n = (ds_n.volume[1:].values - ds_n.volume[:-1].values).flatten()
        # compute relative volume change, with respect to the final volume
        rate_p = abs(dV_p / float(ds_p.volume.values[-1]))
        rate_n = abs(dV_n / float(ds_n.volume.values[-1]))
        # the glacier should be in a new equilibrium for the last 300 years
        assert max(rate_p[-300:]) < 0.001
        assert max(rate_n[-300:]) < 0.001
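
The year-over-year rate computation above can be written more compactly with `np.diff`; a minimal equivalent sketch for the single-glacier case (same `ds_p` as above):

dV_p = np.diff(ds_p.volume.values.flatten())
rate_p = np.abs(dV_p / float(ds_p.volume.values[-1]))
assert max(rate_p[-300:]) < 0.001
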
Example #4
    def test_random(self):

        # Fake Reset (all these tests are horribly coded)
        if not os.path.exists(TEST_DIR):
            os.makedirs(TEST_DIR)
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)
        gdirs = up_to_inversion()

        workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
        workflow.execute_entity_task(flowline.run_random_climate,
                                     gdirs,
                                     nyears=200,
                                     seed=0,
                                     store_monthly_step=True,
                                     output_filesuffix='_test')

        for gd in gdirs:

            path = gd.get_filepath('model_run', filesuffix='_test')
            # See that we are running ok
            with flowline.FileModel(path) as model:
                vol = model.volume_km3_ts()
                area = model.area_km2_ts()
                length = model.length_m_ts()

                self.assertTrue(np.all(np.isfinite(vol) & (vol != 0.)))
                self.assertTrue(np.all(np.isfinite(area) & (area != 0.)))
                self.assertTrue(np.all(np.isfinite(length) & (length != 0.)))

            ds_diag = gd.get_filepath('model_diagnostics', filesuffix='_test')
            ds_diag = xr.open_dataset(ds_diag)
            df = vol.to_frame('RUN')
            df['DIAG'] = ds_diag.volume_m3.to_series() * 1e-9
            assert_allclose(df.RUN, df.DIAG)
            df = area.to_frame('RUN')
            df['DIAG'] = ds_diag.area_m2.to_series() * 1e-6
            assert_allclose(df.RUN, df.DIAG)
            df = length.to_frame('RUN')
            df['DIAG'] = ds_diag.length_m.to_series()
            assert_allclose(df.RUN, df.DIAG)

        # Test output
        ds = utils.compile_run_output(gdirs, filesuffix='_test')
        assert_allclose(ds_diag.volume_m3, ds.volume.sel(rgi_id=gd.rgi_id))
        assert_allclose(ds_diag.area_m2, ds.area.sel(rgi_id=gd.rgi_id))
        assert_allclose(ds_diag.length_m, ds.length.sel(rgi_id=gd.rgi_id))
        df = ds.volume.sel(rgi_id=gd.rgi_id).to_series().to_frame('OUT')
        df['RUN'] = ds_diag.volume_m3.to_series()
        assert_allclose(df.RUN, df.OUT)
Example #5
    def test_random(self):

        # Fake Reset (all these tests are horribly coded)
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)
        gdirs = up_to_inversion()

        workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
        rand_glac = partial(flowline.random_glacier_evolution,
                            nyears=200,
                            seed=0,
                            filesuffix='_test')
        workflow.execute_entity_task(rand_glac, gdirs)

        for gd in gdirs:

            path = gd.get_filepath('past_model', filesuffix='_test')

            # See that we are running ok
            with flowline.FileModel(path) as model:
                vol = model.volume_km3_ts()
                area = model.area_km2_ts()
                length = model.length_m_ts()

                self.assertTrue(np.all(np.isfinite(vol) & (vol != 0.)))
                self.assertTrue(np.all(np.isfinite(area) & (area != 0.)))
                self.assertTrue(np.all(np.isfinite(length) & (length != 0.)))

        # Test output
        ds = utils.compile_run_output(gdirs, filesuffix='_test')
        assert_allclose(vol, ds.volume.sel(rgi_id=gd.rgi_id) * 1e-9)
        assert_allclose(area, ds.area.sel(rgi_id=gd.rgi_id) * 1e-6)
        assert_allclose(length, ds.length.sel(rgi_id=gd.rgi_id))
Example #6
def single_node_example(run_for_test=False):
    y0 = 2008
    nyears = 100
    halfsize = 0
    mean_years = (2002, 2012)
    mtypes = ['scenew_ctl_3', 'sce_ctl_3']
    outpath = utils.mkdir(os.path.join(cluster_dir, 'Climate_3'))
    gdirs = pre_process_tasks(run_for_test=run_for_test)
    workflow.execute_entity_task(run_my_random_climate, gdirs, nyears=nyears,
                                 y0=y0, seed=1, halfsize=halfsize,
                                 output_filesuffix=f'_origin_hf{halfsize}',
                                 mean_years=mean_years)
    for mtype in mtypes:
        fpath_prcp_diff = os.path.join(data_dir, f'Precip_diff_{mtype}.nc')
        fpath_temp_diff = os.path.join(data_dir, f'T2m_diff_{mtype}.nc')
        workflow.execute_entity_task(run_my_random_climate, gdirs,
                                     nyears=nyears, y0=y0, seed=1,
                                     halfsize=halfsize,
                                     output_filesuffix=f'_exper_{mtype}_hf{halfsize}',
                                     fpath_temp_diff=fpath_temp_diff,
                                     fpath_prcp_diff=fpath_prcp_diff,
                                     mean_years=mean_years)

    output_list = []
    suffixes = [f'_origin_hf{halfsize}', f'_exper_{mtypes[0]}_hf{halfsize}',
                f'_exper_{mtypes[1]}_hf{halfsize}']
    for suffix in suffixes:
        path = os.path.join(outpath, 'result'+suffix+'.nc')
        output_list.append(utils.compile_run_output(gdirs, input_filesuffix=suffix, 
                                                    path=path, use_compression=True))
    
    # TODO: Test!
    a = output_list[0].volume.values
    print(a[-1, 2])
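
The TODO above could be turned into an explicit check; a hedged sketch of what such a test might look like (the conditions are illustrative, not from the original script):

import numpy as np

vol = output_list[0].volume.values  # shape: (time, rgi_id)
assert np.all(np.isfinite(vol[-1, :]))  # final-year volumes are finite
assert np.all(vol >= 0)                 # volumes never go negative
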
Example #7
    def test_ensemble_workflow(self, case_dir):

        cfg.initialize()
        cfg.PARAMS['prcp_scaling_factor'] = 1.6
        cfg.PATHS['working_dir'] = case_dir
        cfg.PARAMS['use_multiprocessing'] = True
        cfg.PARAMS['store_diagnostic_variables'] = ['volume', 'area']

        # Go - get the pre-processed glacier directories
        rgi_ids = ['RGI60-03.04384']
        gdirs = workflow.init_glacier_directories(
            rgi_ids,
            from_prepro_level=5,
            prepro_base_url=prepro_base_url,
            prepro_border=80,
            prepro_rgi_version='62')
        workflow.execute_entity_task(parse_dt_per_dt, gdirs)

        exp = 'netzero_py2050_fac1.0_decr0.3'
        magicc_file = magicc_dir + exp + '.nc'
        with xr.open_dataset(utils.file_downloader(magicc_file),
                             decode_times=False) as ds:
            ds = ds.load()

        odf = pd.DataFrame()
        for q in ds['quantile'].data:

            df = ds.global_mean_temperature.sel(quantile=q).to_series()

            workflow.execute_entity_task(tasks.run_with_hydro,
                                         gdirs,
                                         run_task=run_from_magicc_data,
                                         magicc_ts=df,
                                         init_model_filesuffix='_historical',
                                         output_filesuffix='_{:.2f}'.format(q))

            ods = utils.compile_run_output(gdirs,
                                           filesuffix='_{:.2f}'.format(q))
            odf[q] = ods.volume.isel(rgi_id=0).to_series()

        odf = odf / odf.iloc[0, 0]

        assert odf.loc[2150].std() > 0.05
        assert_allclose(odf.loc[2010].std(), 0, atol=1e-3)
        assert_allclose(odf.loc[2300].std(), 0, atol=1e-2)

        if DO_PLOT:
            odf.plot(title='Ensemble stuff')
            plt.show()
Example #8
def run_with_job_array(y0, nyears, halfsize, mtype, prcp_prefix=None,
                       temp_prefix=None, run_for_test=False, mean_years=None,
                       output_dir=None, output_filesuffix=None):

    if output_dir is None:
        outpath = utils.mkdir(cluster_dir, reset=False)
    else:
        outpath = utils.mkdir(os.path.join(cluster_dir, output_dir),
                              reset=False)
    gdirs = pre_process_tasks(run_for_test=run_for_test)
    if mtype == 'origin':
        if output_filesuffix is None:
            output_filesuffix = f'_origin_hf{halfsize}'
        workflow.execute_entity_task(run_my_random_climate, gdirs,
                                     nyears=nyears, y0=y0, seed=1,
                                     halfsize=halfsize,
                                     output_filesuffix=output_filesuffix,
                                     mean_years=mean_years)
    else:
        if CLIMATE_DATA == '2':
            mtype = '_' + mtype
        if prcp_prefix:
            fpath_prcp_diff = os.path.join(data_dir, f'{prcp_prefix}{mtype}.nc')
        else:
            fpath_prcp_diff = None

        if temp_prefix:
            fpath_temp_diff = os.path.join(data_dir, f'{temp_prefix}{mtype}.nc')
        else:
            fpath_temp_diff = None

        if output_filesuffix is None:
            output_filesuffix = f'_exper_{mtype}_hf{halfsize}'
        workflow.execute_entity_task(run_my_random_climate, gdirs, nyears=nyears,
                                     y0=y0, seed=1, halfsize=halfsize,
                                     output_filesuffix=output_filesuffix,
                                     mean_years=mean_years,
                                     fpath_temp_diff=fpath_temp_diff,
                                     fpath_prcp_diff=fpath_prcp_diff)

    ds = utils.compile_run_output(gdirs, input_filesuffix=output_filesuffix,
                                  path=False)
    # load into memory before writing, to avoid the cluster stall problem
    # reported in:
    # https://github.com/OGGM/oggm/pull/1122 and
    # https://github.com/pydata/xarray/issues/4710
    print(f"Save result{output_filesuffix}.nc")

    ds.load().to_netcdf(path=os.path.join(outpath, 'result'+output_filesuffix+'.nc'))
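
A hypothetical invocation of the runner above, mirroring the argument values of the single-node example earlier (all values illustrative):

run_with_job_array(y0=2008, nyears=100, halfsize=0, mtype='origin',
                   run_for_test=True, mean_years=(2002, 2012),
                   output_dir='Climate_3')
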
Example #9
gdirs = workflow.init_glacier_regions(rgidf, from_prepro_level=4)

# Init glaciers
workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

# Runs
nyears = 300
task_names = []

fsuf = '_rdn_tstar_noseed'
log.info('Start experiment ' + fsuf)
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=nyears,
                             output_filesuffix=fsuf)
log.info('Compiling output ' + fsuf + ' ...')
utils.compile_run_output(gdirs, filesuffix=fsuf)
task_names.append('run_random_climate' + fsuf)

fsuf = '_rdn_2000_noseed'
log.info('Start experiment ' + fsuf)
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=nyears, y0=2000,
                             output_filesuffix=fsuf)
log.info('Compiling output ' + fsuf + ' ...')
utils.compile_run_output(gdirs, filesuffix=fsuf)
task_names.append('run_random_climate' + fsuf)

fsuf = '_rdn_2000_tbias_p05_noseed'
log.info('Start experiment ' + fsuf)
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=nyears, y0=2000,
Example #10
    cfg.set_intersects_db(intersects_db)

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = False

    # Module logger
    log = logging.getLogger(__name__)
    log.workflow('Starting run for RGI reg {}'.format(rgi_reg))

    # Go - get the pre-processed glacier directories
    base_url = 'https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/' \
               'exps/thesis_vas/'
    gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version=rgi_version)

    # run vascaling climate tasks
    workflow.execute_entity_task(vascaling.local_t_star, gdirs)
    # adjust mass balance residual with geodetic observations
    vascaling.match_regional_geodetic_mb(gdirs=gdirs, rgi_reg=rgi_reg)
    # prepare historic "spinup"
    workflow.execute_entity_task(vascaling.run_historic_from_climate_data,
                                 gdirs,
                                 ys=2000, ye=2020,
                                 output_filesuffix='_historical_2000')

    utils.compile_run_output(gdirs, input_filesuffix='_historical_2000')

    log.workflow('OGGM Done')
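
A minimal follow-up sketch for inspecting the compiled historical run. It assumes OGGM's default `run_output{input_filesuffix}.nc` naming convention in the working directory (the same convention as in the first example), with `cfg` configured as in the snippet above:

import os
import xarray as xr

path = os.path.join(cfg.PATHS['working_dir'], 'run_output_historical_2000.nc')
with xr.open_dataset(path) as ds:
    # total regional ice volume (m^3) at the end of the run
    print(float(ds.volume.isel(time=-1).sum()))
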

Example #11
def equilibrium_run_vas(rgi_ids,
                        use_random_mb=True,
                        path=True,
                        temp_biases=(0, +0.5, -0.5),
                        use_bias_for_run=False,
                        suffixes=('_bias_zero', '_bias_p', '_bias_n'),
                        tstar=None,
                        vas_c_length_m=None,
                        vas_c_area_m2=None,
                        **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model:
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 3000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. If a path is given,
    it is also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=True
        Choose between random massbalance model and constant massbalance model.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    suffixes: array-like, optional, default=('_bias_zero', '_bias_p', '_bias_n')
        Descriptive suffixes corresponding to the given temperature biases.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    vas_c_length_m: float, optional, default=None
        Scaling constant for volume/length scaling
    vas_c_area_m2: float, optional, default=None
        Scaling constant for volume/area scaling
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'test_cluster'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # shutil.rmtree(wdir)
    # os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # change scaling constants for length and area
    if vas_c_length_m:
        cfg.PARAMS['vas_c_length_m'] = vas_c_length_m
    if vas_c_area_m2:
        cfg.PARAMS['vas_c_area_m2'] = vas_c_area_m2
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.define_glacier_region, gdirs)
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(vascaling.local_t_star,
                                 gdirs,
                                 tstar=tstar,
                                 bias=0)

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each (per default).
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(vascaling.run_random_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)
    else:
        # run ConstantMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each (per default).
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(vascaling.run_constant_climate,
                                         gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix,
                                         **kwargs)

    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all temperature biases/suffixes
    for suffix in suffixes:
        # compile the output for each run
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       input_filesuffix=suffix,
                                       path=False)
        # add to container
        ds.append(ds_)

    # concat the single output datasets into one,
    # with temperature bias as coordinate
    ds = xr.concat(ds, pd.Index(temp_biases, name='temp_bias'))
    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # compute mean and sum over all glaciers
    ds_mean = ds.mean(dim='rgi_id')
    ds_mean.coords['rgi_id'] = 'mean'
    ds_sum = ds.sum(dim='rgi_id')
    ds_sum.coords['rgi_id'] = 'sum'
    # add to dataset
    ds = xr.concat([ds, ds_mean, ds_sum], dim='rgi_id')

    # normalize glacier geometries (length/area/volume) with start value
    ds_normal = normalize_ds_with_start(ds)
    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = int(False)
    ds_normal.coords['normalized'] = int(True)

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store datasets
    if path:
        if path is True:
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_vas.nc'.format(mb))

        ds.to_netcdf(path)

    return ds
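
A hypothetical call to the routine above (the RGI ID and bias values are illustrative; the suffixes must match the temperature biases one-to-one, as enforced at the top of the function):

ds = equilibrium_run_vas(['RGI60-11.00897'],
                         use_random_mb=True,
                         temp_biases=(0, 0.5, -0.5),
                         suffixes=('_bias_zero', '_bias_p', '_bias_n'),
                         nyears=1000)
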
Example #12
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      test_ids=None,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      disable_mp=False,
                      params_file=None,
                      elev_bands=False,
                      match_regional_geodetic_mb=False,
                      match_geodetic_mb_per_glacier=False,
                      evolution_model='fl_sia',
                      centerlines_only=False,
                      override_params=None,
                      add_consensus=False,
                      start_level=None,
                      start_base_url=None,
                      max_level=5,
                      ref_tstars_base_url='',
                      logging_level='WORKFLOW',
                      disable_dl_verify=False,
                      dynamic_spinup=False,
                      continue_on_error=True):
    """Generate the preprocessed OGGM glacier directories for this OGGM version

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    working_dir : str
        path to the OGGM working directory
    ref_tstars_base_url : str
        url where to find the pre-calibrated reference tstar list.
        Required as of v1.4.
    params_file : str
        path to the OGGM parameter file (to override defaults)
    is_test : bool
        to test on a couple of glaciers only!
    test_ids : list
        if is_test: list of ids to process
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    disable_mp : bool
        disable multiprocessing
    elev_bands : bool
        compute all flowlines based on the Huss & Hock (2015) method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    centerlines_only : bool
        compute all flowlines based on the OGGM centerline(s) method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    match_regional_geodetic_mb : str
        match the regional mass-balance estimates at the regional level
        ('hugonnet': Hugonnet et al., 2020 or 'zemp': Zemp et al., 2019).
    match_geodetic_mb_per_glacier : str
        match the mass-balance estimates at the glacier level
        (currently only 'hugonnet': Hugonnet et al., 2020).
    evolution_model : str
        which geometry evolution model to use: `fl_sia` (default),
        or `massredis` (mass redistribution curve).
    add_consensus : bool
        adds (reprojects) the consensus estimates thickness to the glacier
        directories. With elev_bands=True, the data will also be binned.
    start_level : int
        the pre-processed level to start from (default is to start from
        scratch). If set, you'll need to indicate start_base_url as well.
    start_base_url : str
        the pre-processed base-url to fetch the data from.
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    override_params : dict
        a dict of parameters to override.
    disable_dl_verify : bool
        disable the hash verification of OGGM downloads
    dynamic_spinup: str
        include a dynamic spinup matching 'area' OR 'volume' at the RGI-date
    """

    # Input check
    if max_level not in [1, 2, 3, 4, 5]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4, 5]')

    if start_level is not None:
        if start_level not in [0, 1, 2]:
            raise InvalidParamsError('start_level should be one of [0, 1, 2]')
        if start_level > 0 and start_base_url is None:
            raise InvalidParamsError('With start_level, please also indicate '
                                     'start_base_url')
    else:
        start_level = 0

    if match_regional_geodetic_mb and match_geodetic_mb_per_glacier:
        raise InvalidParamsError(
            'match_regional_geodetic_mb incompatible with '
            'match_geodetic_mb_per_glacier!')

    if match_geodetic_mb_per_glacier and match_geodetic_mb_per_glacier != 'hugonnet':
        raise InvalidParamsError('Currently only `hugonnet` is available for '
                                 'match_geodetic_mb_per_glacier.')

    if evolution_model not in ['fl_sia', 'massredis']:
        raise InvalidParamsError('evolution_model should be one of '
                                 "['fl_sia', 'massredis'].")

    if dynamic_spinup and dynamic_spinup not in ['area', 'volume']:
        raise InvalidParamsError(f"Dynamic spinup option '{dynamic_spinup}' "
                                 "not supported")

    if dynamic_spinup and evolution_model == 'massredis':
        raise InvalidParamsError("Dynamic spinup is not working/tested"
                                 "with massredis!")

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Local paths
    if override_params is None:
        override_params = {}

    utils.mkdir(working_dir)
    override_params['working_dir'] = working_dir

    # Initialize OGGM and set up the run parameters
    cfg.initialize(file=params_file,
                   params=override_params,
                   logging_level=logging_level,
                   future=True)

    if match_geodetic_mb_per_glacier and (cfg.PARAMS['hydro_month_nh'] != 1 or
                                          cfg.PARAMS['hydro_month_sh'] != 1):
        raise InvalidParamsError('We recommend to set hydro_month_nh and sh '
                                 'to 1 for the geodetic MB calibration per '
                                 'glacier.')

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = continue_on_error

    # Check for the integrity of the files OGGM downloads at run time
    # For large files (e.g. when using a single-tif DEM like ALASKA)
    # calculating the hash takes a long time, so deactivating this can make
    # sense
    cfg.PARAMS['dl_verify'] = not disable_dl_verify

    # Other things that make sense
    cfg.PARAMS['store_model_geometry'] = True

    # Log the parameters
    msg = '# OGGM Run parameters:'
    for k, v in cfg.PARAMS.items():
        if type(v) in [pd.DataFrame, dict]:
            continue
        msg += '\n    {}: {}'.format(k, v)
    log.workflow(msg)

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    output_base_dir = os.path.join(output_folder, 'RGI{}'.format(rgi_version),
                                   'b_{:03d}'.format(border))

    # Add a package version file
    utils.mkdir(output_base_dir)
    opath = os.path.join(output_base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)

        # Some RGI input quality checks - this is based on visual checks
        # of large glaciers in the RGI
        ids_to_ice_cap = [
            'RGI60-05.10315',  # huge Greenland ice cap
            'RGI60-03.01466',  # strange thing next to Devon
            'RGI60-09.00918',  # Academy of sciences Ice cap
            'RGI60-09.00969',
            'RGI60-09.00958',
            'RGI60-09.00957',
        ]
        rgidf.loc[rgidf.RGIId.isin(ids_to_ice_cap), 'Form'] = '1'

        # In AA almost all large ice bodies are actually ice caps
        if rgi_reg == '19':
            rgidf.loc[rgidf.Area > 100, 'Form'] = '1'

        # For greenland we omit connectivity level 2
        if rgi_reg == '05':
            rgidf = rgidf.loc[rgidf['Connect'] != 2]
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        if test_ids is not None:
            rgidf = rgidf.loc[rgidf.RGIId.isin(test_ids)]
        else:
            rgidf = rgidf.sample(4)

        if max_level > 2:
            # Also use ref tstars
            utils.apply_test_ref_tstars()

    if max_level > 2 and ref_tstars_base_url:
        workflow.download_ref_tstars(base_url=ref_tstars_base_url)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # L0 - go
    if start_level == 0:
        gdirs = workflow.init_glacier_directories(rgidf,
                                                  reset=True,
                                                  force=True)

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L0', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # L0 OK - compress all in output directory
        log.workflow('L0 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L0')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 0:
            _time_log()
            return
    else:
        gdirs = workflow.init_glacier_directories(
            rgidf,
            reset=True,
            force=True,
            from_prepro_level=start_level,
            prepro_border=border,
            prepro_rgi_version=rgi_version,
            prepro_base_url=start_base_url)

    # L1 - Add dem files
    if start_level == 0:
        if test_topofile:
            cfg.PATHS['dem_file'] = test_topofile

        # Which DEM source?
        if dem_source.upper() == 'ALL':
            # This is the complex one, just do the job and leave
            log.workflow('Running prepro on ALL sources')
            for i, s in enumerate(utils.DEM_SOURCES):
                rs = i == 0
                log.workflow('Running prepro on sources: {}'.format(s))
                gdirs = workflow.init_glacier_directories(rgidf,
                                                          reset=rs,
                                                          force=rs)
                workflow.execute_entity_task(tasks.define_glacier_region,
                                             gdirs,
                                             source=s)
                workflow.execute_entity_task(_rename_dem_folder,
                                             gdirs,
                                             source=s)

            # make a GeoTiff mask of the glacier, choose any source
            workflow.execute_entity_task(gis.rasterio_glacier_mask,
                                         gdirs,
                                         source='ALL')

            # Compress all in output directory
            level_base_dir = os.path.join(output_base_dir, 'L1')
            workflow.execute_entity_task(utils.gdir_to_tar,
                                         gdirs,
                                         delete=False,
                                         base_dir=level_base_dir)
            utils.base_dir_to_tar(level_base_dir)

            _time_log()
            return

        # Force a given source
        source = dem_source.upper() if dem_source else None

        # L1 - go
        workflow.execute_entity_task(tasks.define_glacier_region,
                                     gdirs,
                                     source=source)

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L1', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # L1 OK - compress all in output directory
        log.workflow('L1 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 1:
            _time_log()
            return

    # L2 - Tasks
    if start_level <= 1:
        # Check which glaciers will be processed as what
        if elev_bands:
            gdirs_band = gdirs
            gdirs_cent = []
        elif centerlines_only:
            gdirs_band = []
            gdirs_cent = gdirs
        else:
            # Default is centerlines_only, but it used to be a mix
            # (e.g. bands for ice caps, etc.)
            # Keep this logic here in case we want to mix again
            gdirs_band = []
            gdirs_cent = gdirs

        log.workflow('Start flowline processing with: '
                     'N centerline type: {}, '
                     'N elev bands type: {}.'
                     ''.format(len(gdirs_cent), len(gdirs_band)))

        # HH2015 method
        workflow.execute_entity_task(tasks.simple_glacier_masks, gdirs_band)

        # Centerlines OGGM
        workflow.execute_entity_task(tasks.glacier_masks, gdirs_cent)

        if add_consensus:
            from oggm.shop.bedtopo import add_consensus_thickness
            workflow.execute_entity_task(add_consensus_thickness, gdirs_band)
            workflow.execute_entity_task(add_consensus_thickness, gdirs_cent)

            # Elev bands with var data
            vn = 'consensus_ice_thickness'
            workflow.execute_entity_task(tasks.elevation_band_flowline,
                                         gdirs_band,
                                         bin_variables=vn)
            workflow.execute_entity_task(
                tasks.fixed_dx_elevation_band_flowline,
                gdirs_band,
                bin_variables=vn)
        else:
            # HH2015 method without it
            task_list = [
                tasks.elevation_band_flowline,
                tasks.fixed_dx_elevation_band_flowline,
            ]
            for task in task_list:
                workflow.execute_entity_task(task, gdirs_band)

        # Centerlines OGGM
        task_list = [
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            workflow.execute_entity_task(task, gdirs_cent)

        # Same for all glaciers
        if border >= 20:
            task_list = [
                tasks.compute_downstream_line,
                tasks.compute_downstream_bedshape,
            ]
            for task in task_list:
                workflow.execute_entity_task(task, gdirs)
        else:
            log.workflow("L2: for map border values < 20, won't compute "
                         "downstream lines.")

        # Glacier stats
        sum_dir = os.path.join(output_base_dir, 'L2', 'summary')
        utils.mkdir(sum_dir)
        opath = os.path.join(sum_dir,
                             'glacier_statistics_{}.csv'.format(rgi_reg))
        utils.compile_glacier_statistics(gdirs, path=opath)

        # And for level 2: shapes
        if len(gdirs_cent) > 0:
            opath = os.path.join(sum_dir, 'centerlines_{}.shp'.format(rgi_reg))
            utils.write_centerlines_to_shape(gdirs_cent,
                                             to_tar=True,
                                             path=opath)

        # L2 OK - compress all in output directory
        log.workflow('L2 done. Writing to tar...')
        level_base_dir = os.path.join(output_base_dir, 'L2')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)
        if max_level == 2:
            _time_log()
            return

    # L3 - Tasks
    sum_dir = os.path.join(output_base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)

    # Climate
    workflow.execute_entity_task(tasks.process_climate_data, gdirs)

    if cfg.PARAMS['climate_qc_months'] > 0:
        workflow.execute_entity_task(tasks.historical_climate_qc, gdirs)

    if match_geodetic_mb_per_glacier:
        utils.get_geodetic_mb_dataframe()  # Small optim to avoid concurrency
        workflow.execute_entity_task(
            tasks.mu_star_calibration_from_geodetic_mb, gdirs)
        workflow.execute_entity_task(tasks.apparent_mb_from_any_mb, gdirs)
    else:
        workflow.execute_entity_task(tasks.local_t_star, gdirs)
        workflow.execute_entity_task(tasks.mu_star_calibration, gdirs)

    # Inversion: we match the consensus
    do_filter = border >= 20
    workflow.calibrate_inversion_from_consensus(gdirs,
                                                apply_fs_on_mismatch=True,
                                                error_on_mismatch=False,
                                                filter_inversion_output=do_filter)

    # Do we want to match geodetic estimates?
    # This affects only the bias so we can actually do this *after*
    # the inversion, but we really want to take calving into account here
    if match_regional_geodetic_mb:
        opath = os.path.join(
            sum_dir, 'fixed_geometry_mass_balance_'
            'before_match_{}.csv'.format(rgi_reg))
        utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)
        workflow.match_regional_geodetic_mb(gdirs,
                                            rgi_reg=rgi_reg,
                                            dataset=match_regional_geodetic_mb)

    # We get ready for modelling
    if border >= 20:
        workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)
    else:
        log.workflow(
            "L3: for map border values < 20, won't initialize glaciers "
            "for the run.")
    # Glacier stats
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir,
                         'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)

    # L3 OK - compress all in output directory
    log.workflow('L3 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 3:
        _time_log()
        return
    if border < 20:
        log.workflow("L3: for map border values < 20, won't compute L4 and L5.")
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir_L3 = sum_dir
    sum_dir = os.path.join(output_base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)

    # Copy L3 files for consistency
    for bn in [
            'glacier_statistics', 'climate_statistics',
            'fixed_geometry_mass_balance'
    ]:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Copy mini data to new dir
    mini_base_dir = os.path.join(working_dir, 'mini_perglacier',
                                 'RGI{}'.format(rgi_version),
                                 'b_{:03d}'.format(border))
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=mini_base_dir)

    # L4 OK - compress all in output directory
    log.workflow('L4 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L4')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 mini_gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 4:
        _time_log()
        return

    # L5 - spinup run in mini gdirs
    gdirs = mini_gdirs

    # Get end date. The first gdir might have blown up, try some others
    i = 0
    while True:
        if i >= len(gdirs):
            raise RuntimeError('Found no valid glaciers!')
        try:
            y0 = gdirs[i].get_climate_info()['baseline_hydro_yr_0']
            # Add 1 because the run ends at the end of the year
            ye = gdirs[i].get_climate_info()['baseline_hydro_yr_1'] + 1
            break
        except BaseException:
            i += 1

    # Which model?
    if evolution_model == 'massredis':
        from oggm.core.flowline import MassRedistributionCurveModel
        evolution_model = MassRedistributionCurveModel
    else:
        from oggm.core.flowline import FluxBasedModel
        evolution_model = FluxBasedModel

    # OK - run
    if dynamic_spinup:
        workflow.execute_entity_task(
            tasks.run_dynamic_spinup,
            gdirs,
            evolution_model=evolution_model,
            minimise_for=dynamic_spinup,
            precision_percent=1,
            output_filesuffix='_dynamic_spinup',
        )
        workflow.execute_entity_task(tasks.run_from_climate_data,
                                     gdirs,
                                     min_ys=y0,
                                     ye=ye,
                                     evolution_model=evolution_model,
                                     init_model_filesuffix='_dynamic_spinup',
                                     output_filesuffix='_hist_spin')
        workflow.execute_entity_task(tasks.merge_consecutive_run_outputs,
                                     gdirs,
                                     input_filesuffix_1='_dynamic_spinup',
                                     input_filesuffix_2='_hist_spin',
                                     output_filesuffix='_historical_spinup',
                                     delete_input=True)

    workflow.execute_entity_task(tasks.run_from_climate_data,
                                 gdirs,
                                 min_ys=y0,
                                 ye=ye,
                                 evolution_model=evolution_model,
                                 output_filesuffix='_historical')

    # Now compile the output
    sum_dir = os.path.join(output_base_dir, 'L5', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, f'historical_run_output_{rgi_reg}.nc')
    utils.compile_run_output(gdirs, path=opath, input_filesuffix='_historical')

    if dynamic_spinup:
        opath = os.path.join(sum_dir,
                             f'historical_spinup_run_output_{rgi_reg}.nc')
        utils.compile_run_output(gdirs,
                                 path=opath,
                                 input_filesuffix='_historical_spinup')

    # Glacier statistics we recompute here for error analysis
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Other stats for consistency
    for bn in ['climate_statistics', 'fixed_geometry_mass_balance']:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Add the extended files
    pf = os.path.join(sum_dir, 'historical_run_output_{}.nc'.format(rgi_reg))
    mf = os.path.join(sum_dir,
                      'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    # This is crucial - extending calving only possible with L3 data!!!
    sf = os.path.join(sum_dir_L3, 'glacier_statistics_{}.csv'.format(rgi_reg))
    opath = os.path.join(
        sum_dir, 'historical_run_output_extended_{}.nc'.format(rgi_reg))
    utils.extend_past_climate_run(past_run_file=pf,
                                  fixed_geometry_mb_file=mf,
                                  glacier_statistics_file=sf,
                                  path=opath)

    # L5 OK - compress all in output directory
    log.workflow('L5 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L5')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)

    _time_log()
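
# A hypothetical smoke-test invocation of run_prepro_levels (a sketch only;
# the region, border and directories below are illustrative, not from the
# original script):
#
#     run_prepro_levels(rgi_version='62', rgi_reg='11', border=80,
#                       output_folder='prepro_out', working_dir='wd',
#                       is_test=True, max_level=2)
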
def equilibrium_run_fl(rgi_ids,
                       use_random_mb=True,
                       path=True,
                       temp_biases=(0, +0.5, -0.5),
                       use_bias_for_run=False,
                       suffixes=['_bias_zero', '_bias_p', '_bias_n'],
                       store_individual_glaciers=True,
                       store_mean_sum=True,
                       tstar=None,
                       **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    flowline model. For details see docstring of `sensitivity_run_vas`.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=True
        Choose between the random mass balance model and the constant
        mass balance model.
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_biases: array-like, optional, default=(0, +0.5, -0.5)
        List of temperature biases (float, in degC) for the mass balance model.
    use_bias_for_run: bool, optional, default=False
        Whether to use the mass balance residual (bias) during the model run.
    suffixes: array-like, optional, default=['_bias_zero', '_bias_p', '_bias_n']
        Descriptive suffixes corresponding to the given temperature biases.
    store_individual_glaciers: bool, optional, default=True
        Whether to store the records of each individual glacier in the
        output dataset.
    store_mean_sum: bool, optional, default=True
        Whether to add the mean and sum over all glaciers to the output
        dataset.
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional keyword arguments for the `run_random_climate` or
        `run_constant_climate` routines of the flowline module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(temp_biases) != len(suffixes):
        raise RuntimeError("Each given temperature bias must have its "
                           "corresponding suffix")

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    WORKING_DIR = os.environ["WORKDIR"]
    utils.mkdir(WORKING_DIR)
    # set path to working directory
    cfg.PATHS['working_dir'] = WORKING_DIR
    # set RGI version and region
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 120
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = use_bias_for_run

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # sort by area for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    cfg.PARAMS['use_multiprocessing'] = True
    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_directories(rgidf, reset=False, force=True)

    # run gis tasks
    workflow.gis_prepro_tasks(gdirs)
    # run climate tasks
    workflow.execute_entity_task(climate.process_climate_data, gdirs)
    ref_df = pd.read_csv(
        utils.get_demo_file('oggm_ref_tstars_rgi6_histalp.csv'))
    workflow.execute_entity_task(climate.local_t_star,
                                 gdirs,
                                 ref_df=ref_df,
                                 tstar=tstar,
                                 bias=0)
    workflow.execute_entity_task(climate.mu_star_calibration, gdirs)
    # run inversion tasks
    workflow.inversion_tasks(gdirs)
    # finalize preprocessing
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)
    # disregard glaciers exceeding their domain boundaries
    # to not disrupt the entire run
    kwargs.setdefault('check_for_boundaries', True)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature bias
        # of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_random_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )
    else:
        # run ConstantMassBalance model centered around t*, once without
        # temperature bias and once with positive and negative temperature
        # bias of 0.5 °C each.
        for suffix, temp_bias in zip(suffixes, temp_biases):
            workflow.execute_entity_task(
                flowline.run_constant_climate,
                gdirs,
                temperature_bias=temp_bias,
                output_filesuffix=suffix,
                **kwargs,
            )

    ds = list()
    for suffix, temp_bias in zip(suffixes, temp_biases):
        # compile the output for each run and store to file
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       input_filesuffix=suffix,
                                       path=False)
        ds.append(ds_)

    # concat into one dataset with temperature bias as coordinate
    if ds:
        ds = xr.concat(ds, pd.Index(temp_biases, name='temp_bias'))
        # add model type as coordinate
        ds.coords['model'] = 'fl'
        # add mb model type as coordinate
        ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

        # fill NaN values (which happen for vanished glaciers) with zero
        ds = ds.fillna(0)

        if not store_individual_glaciers or store_mean_sum:
            # compute mean and sum over all glaciers
            ds_mean = ds.mean(dim='rgi_id')
            ds_mean.coords['rgi_id'] = 'mean'
            ds_sum = ds.sum(dim='rgi_id')
            ds_sum.coords['rgi_id'] = 'sum'
            if store_individual_glaciers:
                # keep the individual glacier records and append the
                # aggregates to the dataset
                ds = xr.concat([ds, ds_mean, ds_sum], dim='rgi_id')
            else:
                # store only the aggregates
                ds = xr.concat([ds_mean, ds_sum], dim='rgi_id')

        # normalize glacier geometries (length/area/volume) with start value
        ds_normal = normalize_ds_with_start(ds)
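        # (normalize_ds_with_start is defined elsewhere in this script; it
        # is assumed to divide each variable by its value in the first
        # year, so that all normalized series start at 1.)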
        # add coordinate to distinguish between normalized and absolute values
        ds.coords['normalized'] = int(False)
        ds_normal.coords['normalized'] = int(True)

        # combine datasets
        ds = xr.concat([ds, ds_normal], 'normalized')

        # store datasets
        if path:
            if path is True:
                mb = 'random' if use_random_mb else 'constant'
                path = os.path.join(cfg.PATHS['working_dir'],
                                    'run_output_{}_fl.nc'.format(mb))

            ds.to_netcdf(path)

    return ds
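

# A minimal usage sketch for the routine above (added for illustration, not
# part of the original script): the RGI ID, the WORKDIR environment variable,
# and the t* value are placeholders.
if __name__ == '__main__':
    os.environ.setdefault('WORKDIR', '/tmp/oggm_equilibrium')
    ds = equilibrium_run_fl(['RGI60-11.00897'],
                            use_random_mb=True,
                            tstar=1927,
                            nyears=1000)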
Example #15
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

# rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-01.10299'])]

print('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
# -----------------------------------

# you can use the command below to reset your run -- use with caution!
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)

utils.glacier_characteristics(gdirs)
utils.compile_run_output(gdirs, filesuffix='_fromzero')
utils.compile_run_output(gdirs, filesuffix='_fromzero_newparams')
utils.compile_run_output(gdirs, filesuffix='_fromtoday')
utils.compile_run_output(gdirs, filesuffix='_fromtoday_newparams')

exit()

# Prepro tasks
task_list = [
    # tasks.glacier_masks,
    # tasks.compute_centerlines,
    # tasks.compute_downstream_lines,
    # tasks.initialize_flowlines,
    # tasks.compute_downstream_bedshape,
    # tasks.catchment_area,
    # tasks.catchment_intersections,
Example #16
# Run the spinup simulation - t* climate with a cold temperature bias
execute_entity_task(tasks.run_constant_climate, gdirs,
                    nyears=100, bias=0, temperature_bias=-0.5,
                    output_filesuffix='_spinup')
# Run a past climate run based on this spinup
execute_entity_task(tasks.run_from_climate_data, gdirs,
                    climate_filename='gcm_data',
                    ys=1801, ye=2000,
                    init_model_filesuffix='_spinup',
                    output_filesuffix='_with_spinup')

# Compile output
log.info('Compiling output')
utils.compile_glacier_statistics(gdirs)
ds1 = utils.compile_run_output(gdirs, filesuffix='_no_spinup')
ds2 = utils.compile_run_output(gdirs, filesuffix='_with_spinup')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))

# Plot
f, ax = plt.subplots(figsize=(9, 4))
(ds1.volume.sum(dim='rgi_id') * 1e-9).plot(ax=ax, label='No spinup')
(ds2.volume.sum(dim='rgi_id') * 1e-9).plot(ax=ax, label='With spinup')
ax.set_ylabel('Volume (km$^3$)')
ax.set_xlabel('')
ax.set_title('Hintereisferner volume under CESM forcing')
plt.legend()
Example #17
    def test_hydro_workflow(self, case_dir):

        cfg.initialize()
        cfg.PARAMS['prcp_scaling_factor'] = 1.6
        cfg.PATHS['working_dir'] = case_dir
        cfg.PARAMS['use_multiprocessing'] = True
        cfg.PARAMS['store_diagnostic_variables'] = ALL_DIAGS

        # Go - get the pre-processed glacier directories
        rgi_ids = ['RGI60-14.06794', 'RGI60-11.00897']
        gdirs = workflow.init_glacier_directories(
            rgi_ids,
            from_prepro_level=5,
            prepro_base_url=prepro_base_url,
            prepro_border=80,
            prepro_rgi_version='62')
        workflow.execute_entity_task(parse_dt_per_dt, gdirs)

        exp = 'netzero_py2050_fac1.0_decr0.3'
        magicc_file = magicc_dir + exp + '.nc'
        with xr.open_dataset(utils.file_downloader(magicc_file),
                             decode_times=False) as ds:
            ds = ds.load()
        df = ds['ens_avg'].to_dataframe()

        workflow.execute_entity_task(tasks.run_with_hydro,
                                     gdirs,
                                     ref_area_from_y0=True,
                                     run_task=run_from_magicc_data,
                                     magicc_ts=df['ens_avg'],
                                     use_dp_per_dt=False,
                                     init_model_filesuffix='_historical',
                                     output_filesuffix='_' + exp)

        ds = utils.compile_run_output(gdirs, filesuffix='_' + exp)

        for rgi_id in rgi_ids:
            odf = ds.sel(rgi_id=rgi_id).to_dataframe().iloc[:-1]

            # Sanity checks
            # Tot prcp here is constant (constant climate)
            odf['tot_prcp'] = (odf['liq_prcp_off_glacier'] +
                               odf['liq_prcp_on_glacier'] +
                               odf['snowfall_off_glacier'] +
                               odf['snowfall_on_glacier'])
            assert_allclose(odf['tot_prcp'],
                            odf['tot_prcp'].iloc[0],
                            rtol=1e-4)

            # Glacier area is the same (remove on_area?)
            assert_allclose(odf['on_area'], odf['area'])

            # Our MB is the same as the glacier dyn one
            reconstructed_vol = (
                odf['model_mb'].cumsum() / cfg.PARAMS['ice_density'] +
                odf['volume'].iloc[0])
            assert_allclose(odf['volume'].iloc[1:],
                            reconstructed_vol.iloc[:-1],
                            atol=1e-2)

            # Mass-conservation
            odf['runoff'] = (odf['melt_on_glacier'] + odf['melt_off_glacier'] +
                             odf['liq_prcp_on_glacier'] +
                             odf['liq_prcp_off_glacier'])

            mass_in_glacier_end = odf['volume'].iloc[-1] * cfg.PARAMS[
                'ice_density']
            mass_in_glacier_start = odf['volume'].iloc[0] * cfg.PARAMS[
                'ice_density']

            mass_in_snow = odf['snow_bucket'].iloc[-1]
            mass_in = odf['tot_prcp'].iloc[:-1].sum()
            mass_out = odf['runoff'].iloc[:-1].sum()
            assert_allclose(mass_in_glacier_end,
                            mass_in_glacier_start + mass_in - mass_out -
                            mass_in_snow,
                            atol=1e-2)  # 0.01 kg is OK as numerical error

            # Qualitative assessments
            assert odf['melt_on_glacier'].iloc[
                -1] < odf['melt_on_glacier'].iloc[0] * 0.7
            assert odf['liq_prcp_off_glacier'].iloc[-1] > odf[
                'liq_prcp_on_glacier'].iloc[-1]
            assert odf['liq_prcp_off_glacier'].iloc[0] < odf[
                'liq_prcp_on_glacier'].iloc[0]

            # Residual MB should not be crazy large
            frac = odf['residual_mb'] / odf['runoff']
            assert_allclose(frac, 0, atol=0.13)

            if DO_PLOT:
                plt.figure()
                odf[['volume']].plot(title=rgi_id)

                plt.figure()
                odf[[
                    'melt_on_glacier', 'melt_off_glacier',
                    'liq_prcp_on_glacier', 'liq_prcp_off_glacier'
                ]].plot.area(title=rgi_id)

                plt.figure()
                frac.plot(title=rgi_id)

                plt.show()
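
# The mass-conservation check above reduces to a simple bucket identity; a
# standalone sketch with made-up numbers (all in kg, not from the test):
mass_start = 1.0e12      # glacier mass at the start of the run
tot_prcp = 5.0e10        # total precipitation input over the run
runoff = 6.0e10          # total runoff leaving the system
snow_bucket = 1.0e9      # mass still stored in the off-glacier snowpack
mass_end = mass_start + tot_prcp - runoff - snow_bucket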
Example #18
    )

# DataFrames to store modelling results
rcp26_result = pd.DataFrame()
rcp45_result = pd.DataFrame()
rcp60_result = pd.DataFrame()
rcp85_result = pd.DataFrame()

# plot model results
for gdir in gdirs:

    # plot modelling results
    #    f, ax1 = plt.subplots(1, 1, figsize=(14, 4))
    for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
        rid = '_CCSM4_{}'.format(rcp)
        ds = utils.compile_run_output(gdir, input_filesuffix=rid)
        # store modelling results to dataframe
        temp_df = ds.to_dataframe()
        temp_df = temp_df[['calendar_year', 'volume', 'area',
                           'length']]  # keep only relevant columns
        # (DataFrame.append was removed in pandas 2.0; pd.concat is the
        # drop-in replacement)
        if rcp == 'rcp26':
            rcp26_result = pd.concat([rcp26_result, temp_df])
        elif rcp == 'rcp45':
            rcp45_result = pd.concat([rcp45_result, temp_df])
        elif rcp == 'rcp60':
            rcp60_result = pd.concat([rcp60_result, temp_df])
        elif rcp == 'rcp85':
            rcp85_result = pd.concat([rcp85_result, temp_df])

#        ds.isel(rgi_id=0).volume.plot(ax=ax1, label=rcp);
#ds.isel(rgi_id=1).volume.plot(ax=ax2, label=rcp);
Example #19
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      test_ids=None,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      disable_mp=False,
                      params_file=None,
                      elev_bands=False,
                      match_geodetic_mb=False,
                      centerlines_only=False,
                      add_consensus=False,
                      max_level=5,
                      logging_level='WORKFLOW',
                      disable_dl_verify=False):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    working_dir : str
        path to the OGGM working directory
    params_file : str
        path to the OGGM parameter file (to override defaults)
    is_test : bool
        to test on a couple of glaciers only!
    test_ids : list
        if is_test: list of ids to process
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    disable_mp : bool
        disable multiprocessing
    elev_bands : bool
        compute all flowlines based on the Huss&Hock 2015 method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    centerlines_only : bool
        compute all flowlines based on the OGGM centerline(s) method instead
        of the OGGM default, which is a mix of elev_bands and centerlines.
    match_geodetic_mb : bool
        match the regional mass-balance estimates at the regional level
        (currently Hugonnet et al., 2020).
    add_consensus : bool
        adds (reprojects) the consensus estimates thickness to the glacier
        directories. With elev_bands=True, the data will also be binned.
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    disable_dl_verify : bool
        disable the hash verification of OGGM downloads
    """

    # TODO: temporarily silence Fiona and other deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Input check
    if max_level not in [1, 2, 3, 4, 5]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4, 5]')

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Config Override Params
    params = {}

    # Local paths
    utils.mkdir(working_dir)
    params['working_dir'] = working_dir

    # Initialize OGGM and set up the run parameters
    cfg.initialize(file=params_file,
                   params=params,
                   logging_level=logging_level,
                   future=True)

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # Check the integrity of the files OGGM downloads at run time
    # For large files (e.g. a single-tif DEM like ALASKA) calculating the
    # hash takes a long time, so deactivating this can make sense
    cfg.PARAMS['dl_verify'] = not disable_dl_verify

    # Log the parameters
    msg = '# OGGM Run parameters:'
    for k, v in cfg.PARAMS.items():
        if type(v) in [pd.DataFrame, dict]:
            continue
        msg += '\n    {}: {}'.format(k, v)
    log.workflow(msg)

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    output_base_dir = os.path.join(output_folder, 'RGI{}'.format(rgi_version),
                                   'b_{:03d}'.format(border))

    # Add a package version file
    utils.mkdir(output_base_dir)
    opath = os.path.join(output_base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)

        # Some RGI input quality checks - this is based on visual checks
        # of large glaciers in the RGI
        ids_to_ice_cap = [
            'RGI60-05.10315',  # huge Greenland ice cap
            'RGI60-03.01466',  # strange thing next to Devon
            'RGI60-09.00918',  # Academy of Sciences Ice Cap
            'RGI60-09.00969',
            'RGI60-09.00958',
            'RGI60-09.00957',
        ]
        rgidf.loc[rgidf.RGIId.isin(ids_to_ice_cap), 'Form'] = '1'

        # In AA almost all large ice bodies are actually ice caps
        if rgi_reg == '19':
            rgidf.loc[rgidf.Area > 100, 'Form'] = '1'

        # For Greenland we omit connectivity level 2
        if rgi_reg == '05':
            rgidf = rgidf.loc[rgidf['Connect'] != 2]
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        if test_ids is not None:
            rgidf = rgidf.loc[rgidf.RGIId.isin(test_ids)]
        else:
            rgidf = rgidf.sample(4)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # L0 - go
    gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L0', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L0 OK - compress all in output directory
    log.workflow('L0 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L0')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 0:
        _time_log()
        return

    # L1 - Add dem files
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Which DEM source?
    if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
        log.workflow('Running prepro on ALL sources')
        for i, s in enumerate(utils.DEM_SOURCES):
            rs = i == 0
            log.workflow('Running prepro on sources: {}'.format(s))
            gdirs = workflow.init_glacier_directories(rgidf,
                                                      reset=rs,
                                                      force=rs)
            workflow.execute_entity_task(tasks.define_glacier_region,
                                         gdirs,
                                         source=s)
            workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)

        # make a GeoTiff mask of the glacier, choose any source
        workflow.execute_entity_task(gis.rasterio_glacier_mask,
                                     gdirs,
                                     source='ALL')

        # Compress all in output directory
        level_base_dir = os.path.join(output_base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=level_base_dir)
        utils.base_dir_to_tar(level_base_dir)

        _time_log()
        return

    # Force a given source
    source = dem_source.upper() if dem_source else None

    # L1 - go
    workflow.execute_entity_task(tasks.define_glacier_region,
                                 gdirs,
                                 source=source)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    log.workflow('L1 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 1:
        _time_log()
        return

    # L2 - Tasks
    # Check which glaciers will be processed as what
    if elev_bands:
        gdirs_band = gdirs
        gdirs_cent = []
    elif centerlines_only:
        gdirs_band = []
        gdirs_cent = gdirs
    else:
        # Default is to mix
        # Curated list of large (> 50 km2) glaciers that don't run
        # (CFL error), mostly because the centerlines are of poor quality.
        # This is a temporary fix until we have a better solution here
        ids_to_bands = [
            'RGI60-01.13696', 'RGI60-03.01710', 'RGI60-01.13635',
            'RGI60-01.14443', 'RGI60-03.01678', 'RGI60-03.03274',
            'RGI60-01.17566', 'RGI60-03.02849', 'RGI60-01.16201',
            'RGI60-01.14683', 'RGI60-07.01506', 'RGI60-07.01559',
            'RGI60-03.02687', 'RGI60-17.00172', 'RGI60-01.23649',
            'RGI60-09.00077', 'RGI60-03.00994', 'RGI60-01.26738',
            'RGI60-03.00283', 'RGI60-01.16121', 'RGI60-01.27108',
            'RGI60-09.00132', 'RGI60-13.43483', 'RGI60-09.00069',
            'RGI60-14.04404', 'RGI60-17.01218', 'RGI60-17.15877',
            'RGI60-13.30888', 'RGI60-17.13796', 'RGI60-17.15825',
            'RGI60-01.09783'
        ]
        if rgi_reg == '19':
            gdirs_band = gdirs
            gdirs_cent = []
        else:
            gdirs_band = []
            gdirs_cent = []
            for gdir in gdirs:
                if gdir.is_icecap or gdir.rgi_id in ids_to_bands:
                    gdirs_band.append(gdir)
                else:
                    gdirs_cent.append(gdir)

    log.workflow('Start flowline processing with: '
                 'N centerline type: {}, '
                 'N elev bands type: {}.'
                 ''.format(len(gdirs_cent), len(gdirs_band)))

    # HH2015 method
    workflow.execute_entity_task(tasks.simple_glacier_masks, gdirs_band)

    # Centerlines OGGM
    workflow.execute_entity_task(tasks.glacier_masks, gdirs_cent)

    if add_consensus:
        from oggm.shop.bedtopo import add_consensus_thickness
        workflow.execute_entity_task(add_consensus_thickness, gdirs_band)
        workflow.execute_entity_task(add_consensus_thickness, gdirs_cent)

        # Elev bands with var data
        vn = 'consensus_ice_thickness'
        workflow.execute_entity_task(tasks.elevation_band_flowline,
                                     gdirs_band,
                                     bin_variables=vn)
        workflow.execute_entity_task(tasks.fixed_dx_elevation_band_flowline,
                                     gdirs_band,
                                     bin_variables=vn)
    else:
        # HH2015 method without it
        task_list = [
            tasks.elevation_band_flowline,
            tasks.fixed_dx_elevation_band_flowline,
        ]
        for task in task_list:
            workflow.execute_entity_task(task, gdirs_band)

    # HH2015 method
    task_list = [
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs_band)

    # Centerlines OGGM
    task_list = [
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs_cent)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    log.workflow('L2 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 2:
        _time_log()
        return

    # L3 - Tasks
    task_list = [
        tasks.process_climate_data,
        tasks.historical_climate_qc,
        tasks.local_t_star,
        tasks.mu_star_calibration,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Inversion: we match the consensus
    workflow.calibrate_inversion_from_consensus(gdirs,
                                                apply_fs_on_mismatch=True,
                                                error_on_mismatch=False)

    # Do we want to match geodetic estimates?
    # This affects only the bias so we can actually do this *after*
    # the inversion, but we really want to take calving into account here
    if match_geodetic_mb:
        workflow.match_regional_geodetic_mb(gdirs, rgi_reg)

    # We get ready for modelling
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

    # Glacier stats
    sum_dir = os.path.join(output_base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir,
                         'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    utils.compile_fixed_geometry_mass_balance(gdirs, path=opath)

    # L3 OK - compress all in output directory
    log.workflow('L3 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 3:
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir_L3 = sum_dir
    sum_dir = os.path.join(output_base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)

    # Copy L3 files for consistency
    for bn in [
            'glacier_statistics', 'climate_statistics',
            'fixed_geometry_mass_balance'
    ]:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Copy mini data to new dir
    mini_base_dir = os.path.join(working_dir, 'mini_perglacier')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=mini_base_dir)

    # L4 OK - compress all in output directory
    log.workflow('L4 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L4')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 mini_gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)
    if max_level == 4:
        _time_log()
        return

    # L5 - spinup run in mini gdirs
    gdirs = mini_gdirs

    # Get end date. The first gdir might have blown up, try some others
    i = 0
    while True:
        if i >= len(gdirs):
            raise RuntimeError('Found no valid glaciers!')
        try:
            y0 = gdirs[i].get_climate_info()['baseline_hydro_yr_0']
            # Add 1 because the run ends at the end of the year
            ye = gdirs[i].get_climate_info()['baseline_hydro_yr_1'] + 1
            break
        except BaseException:
            i += 1

    # OK - run
    workflow.execute_entity_task(tasks.run_from_climate_data,
                                 gdirs,
                                 min_ys=y0,
                                 ye=ye,
                                 output_filesuffix='_historical')

    # Now compile the output
    sum_dir = os.path.join(output_base_dir, 'L5', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir,
                         'historical_run_output_{}.nc'.format(rgi_reg))
    utils.compile_run_output(gdirs, path=opath, input_filesuffix='_historical')

    # Glacier statistics: recomputed here for error analysis
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Other stats for consistency
    for bn in ['climate_statistics', 'fixed_geometry_mass_balance']:
        ipath = os.path.join(sum_dir_L3, bn + '_{}.csv'.format(rgi_reg))
        opath = os.path.join(sum_dir, bn + '_{}.csv'.format(rgi_reg))
        shutil.copyfile(ipath, opath)

    # Add the extended files
    pf = os.path.join(sum_dir, 'historical_run_output_{}.nc'.format(rgi_reg))
    mf = os.path.join(sum_dir,
                      'fixed_geometry_mass_balance_{}.csv'.format(rgi_reg))
    # This is crucial - extending calving is only possible with L3 data!
    sf = os.path.join(sum_dir_L3, 'glacier_statistics_{}.csv'.format(rgi_reg))
    opath = os.path.join(
        sum_dir, 'historical_run_output_extended_{}.nc'.format(rgi_reg))
    utils.extend_past_climate_run(past_run_file=pf,
                                  fixed_geometry_mb_file=mf,
                                  glacier_statistics_file=sf,
                                  path=opath)

    # L5 OK - compress all in output directory
    log.workflow('L5 done. Writing to tar...')
    level_base_dir = os.path.join(output_base_dir, 'L5')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=level_base_dir)
    utils.base_dir_to_tar(level_base_dir)

    _time_log()
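

# A hypothetical invocation sketch (added for illustration, not part of the
# original script): the region, border and paths are placeholders; in
# production this routine is usually driven by a command-line wrapper.
if __name__ == '__main__':
    run_prepro_levels(rgi_version='62', rgi_reg='11', border=80,
                      output_folder='/tmp/prepro_out',
                      working_dir='/tmp/prepro_wd',
                      is_test=True, max_level=3)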
Example #20
def run_cmip():
    # Initialize OGGM and set up the default run parameters
    vascaling.initialize(logging_level='WORKFLOW')
    rgi_version = '62'
    cfg.PARAMS['border'] = 80

    # CLUSTER paths
    wdir = os.environ.get('WORKDIR', '')
    cfg.PATHS['working_dir'] = wdir
    outdir = os.environ.get('OUTDIR', '')

    # define the baseline climate CRU or HISTALP
    cfg.PARAMS['baseline_climate'] = 'CRU'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 3
    cfg.PARAMS['temp_melt'] = 0
    cfg.PARAMS['temp_all_solid'] = 4
    cfg.PARAMS['run_mb_calibration'] = False
    # set minimum ice thickness to include in glacier length computation
    # this reduces weird spikes in length records
    cfg.PARAMS['min_ice_thick_for_length'] = 0.1

    # in contrast to the calibration runs, the mass balance residual (bias)
    # is used for this run
    cfg.PARAMS['use_bias_for_run'] = True

    # read the RGI entries for the region as a GeoDataFrame
    # containing the outlines as shapefile geometries
    rgi_reg = os.environ.get('OGGM_RGI_REG', '')
    if rgi_reg not in ['{:02d}'.format(r) for r in range(1, 20)]:
        raise RuntimeError('Need an RGI Region')
    rgi_ids = gpd.read_file(
        utils.get_rgi_region_file(rgi_reg, version=rgi_version))

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_reg)
    cfg.set_intersects_db(intersects_db)

    # operational run, all glaciers should run
    cfg.PARAMS['continue_on_error'] = True

    # Module logger
    log = logging.getLogger(__name__)
    log.workflow('Starting run for RGI reg {}'.format(rgi_reg))

    # Go - get the pre-processed glacier directories
    # base_url = 'https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/' \
    #            'L3-L5_files/RGIV62_fleb_qc3_CRU_pcp2.5'
    prepro_dir = '/home/users/moberrauch/run_output/vas_prepro/'
    gdirs = workflow.init_glacier_directories(rgi_ids, from_tar=prepro_dir)

    # # run vascaling climate tasks
    # workflow.execute_entity_task(vascaling.local_t_star, gdirs)
    # # adjust mass balance residual with geodetic observations
    # vascaling.match_regional_geodetic_mb(gdirs=gdirs, rgi_reg=rgi_reg)
    # # prepare historic "spinup"
    # workflow.execute_entity_task(vascaling.run_from_climate_data, gdirs,
    #                              ys=2003, ye=2020,
    #                              output_filesuffix='_historical')

    # read gcm list
    gcms = pd.read_csv('/home/www/oggm/cmip6/all_gcm_list.csv', index_col=0)

    # iterate over all specified gcms
    for gcm in sys.argv[1:]:
        # iterate over all SSPs (Shared Socioeconomic Pathways)
        df1 = gcms.loc[gcms.gcm == gcm]
        for ssp in df1.ssp.unique():
            df2 = df1.loc[df1.ssp == ssp]
            assert len(df2) == 2
            # get temperature projections
            ft = df2.loc[df2['var'] == 'tas'].iloc[0]
            # get precipitation projections
            fp = df2.loc[df2['var'] == 'pr'].iloc[0].path
            rid = ft.fname.replace('_r1i1p1f1_tas.nc', '')
            ft = ft.path

            log.workflow('Starting run for {}'.format(rid))

            workflow.execute_entity_task(
                gcm_climate.process_cmip_data,
                gdirs,
                # recognize the climate file for later
                filesuffix='_' + rid,
                # temperature projections
                fpath_temp=ft,
                # precipitation projections
                fpath_precip=fp,
                year_range=('1981', '2020'))

            workflow.execute_entity_task(vascaling.run_from_climate_data,
                                         gdirs,
                                         climate_filename='gcm_data',
                                         climate_input_filesuffix='_' + rid,
                                         init_model_filesuffix='_historical',
                                         output_filesuffix=rid,
                                         return_value=False)
            gcm_dir = os.path.join(outdir, 'RGI' + rgi_reg, gcm)
            utils.mkdir(gcm_dir)
            utils.compile_run_output(gdirs,
                                     input_filesuffix=rid,
                                     path=os.path.join(gcm_dir, rid + '.nc'))

    log.workflow('OGGM Done')
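

# Hypothetical invocation sketch (added for illustration, not part of the
# original script): the GCM names are passed on the command line, the region
# and paths via environment variables, e.g.:
#   OGGM_RGI_REG=11 WORKDIR=/tmp/wd OUTDIR=/tmp/out \
#       python run_cmip.py CESM2 MPI-ESM1-2-HR
if __name__ == '__main__':
    run_cmip()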
Example #21
                             nyears=300,
                             y0=2000,
                             seed=2,
                             temperature_bias=0.5,
                             output_filesuffix='_bias_p')
workflow.execute_entity_task(tasks.run_random_climate,
                             gdirs,
                             nyears=300,
                             y0=2000,
                             seed=3,
                             temperature_bias=-0.5,
                             output_filesuffix='_bias_m')

# Write the compiled output
utils.compile_glacier_statistics(gdirs)
utils.compile_run_output(gdirs, input_filesuffix='_commitment')
utils.compile_run_output(gdirs, input_filesuffix='_bias_p')
utils.compile_run_output(gdirs, input_filesuffix='_bias_m')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))

# Imports
import os
import xarray as xr
import matplotlib.pyplot as plt
from oggm.utils import get_demo_file, gettempdir

# Local working directory (where OGGM wrote its output)
Example #22
# Go - get the pre-processed glacier directories
gdirs = workflow.init_glacier_regions(rgidf, from_prepro_level=4)

# We can step directly to a new experiment!
# Random climate representative for the recent climate (1985-2015)
# This is a kind of "commitment" run
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=1,
                             output_filesuffix='_commitment')
# Now we add a positive and a negative bias to the random temperature series
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=2,
                             temperature_bias=0.5,
                             output_filesuffix='_bias_p')
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=3,
                             temperature_bias=-0.5,
                             output_filesuffix='_bias_m')

# Write the compiled output
utils.compile_glacier_statistics(gdirs)
utils.compile_run_output(gdirs, filesuffix='_commitment')
utils.compile_run_output(gdirs, filesuffix='_bias_p')
utils.compile_run_output(gdirs, filesuffix='_bias_m')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))
Example #23
# Read RGI
rgidf = salem.read_shapefile(
    path.join(WORKING_DIR, 'RGI_example_glaciers', 'RGI_example_glaciers.shp'))
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting OGGM run')
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Initialize from existing directories
gdirs = workflow.init_glacier_regions(rgidf)

# We can step directly to a new experiment!
# Random climate representative for the recent climate (1985-2015)
# This is a kind of "commitment" run
execute_entity_task(tasks.run_random_climate,
                    gdirs,
                    nyears=200,
                    y0=2000,
                    seed=1,
                    output_filesuffix='_commitment')

# Compile output
log.info('Compiling output')
utils.compile_run_output(gdirs, filesuffix='_commitment')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))
Example #24
                    bias=0,
                    temperature_bias=-0.5,
                    output_filesuffix='_spinup')
# Run a past climate run based on this spinup
execute_entity_task(tasks.run_from_climate_data,
                    gdirs,
                    climate_filename='gcm_data',
                    ys=1801,
                    ye=2000,
                    init_model_filesuffix='_spinup',
                    output_filesuffix='_with_spinup')

# Compile output
log.info('Compiling output')
utils.compile_glacier_statistics(gdirs)
ds1 = utils.compile_run_output(gdirs, input_filesuffix='_no_spinup')
ds2 = utils.compile_run_output(gdirs, input_filesuffix='_with_spinup')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))

# Plot
f, ax = plt.subplots(figsize=(9, 4))
(ds1.volume.sum(dim='rgi_id') * 1e-9).plot(ax=ax, label='No spinup')
(ds2.volume.sum(dim='rgi_id') * 1e-9).plot(ax=ax, label='With spinup')
ax.set_ylabel('Volume (km$^3$)')
ax.set_xlabel('')
ax.set_title('Hintereisferner volume under CESM forcing')
plt.legend()
Example #25
def run_benchmark(rgi_version=None,
                  rgi_reg=None,
                  border=None,
                  output_folder='',
                  working_dir='',
                  is_test=False,
                  test_rgidf=None,
                  test_intersects_file=None,
                  test_topofile=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    odf = pd.DataFrame()

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    base_dir = os.path.join(output_folder)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    # Read RGI
    start = time.time()
    if test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(2)
    _add_time_to_df(odf, 'Read RGI', time.time() - start)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Initialize working directories
    start = time.time()
    gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True)
    _add_time_to_df(odf, 'init_glacier_directories', time.time() - start)

    # Tasks
    task_list = [
        tasks.define_glacier_region,
        tasks.process_cru_data,
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        start = time.time()
        workflow.execute_entity_task(task, gdirs)
        _add_time_to_df(odf, task.__name__, time.time() - start)

    # Runs
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate,
                                 gdirs,
                                 nyears=250,
                                 bias=0,
                                 seed=0,
                                 output_filesuffix='_tstar')
    _add_time_to_df(odf, 'run_random_climate_tstar_250', time.time() - start)

    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate,
                                 gdirs,
                                 nyears=250,
                                 y0=1995,
                                 seed=0,
                                 output_filesuffix='_commit')
    _add_time_to_df(odf, 'run_random_climate_commit_250', time.time() - start)

    # Compile results
    start = time.time()
    utils.compile_glacier_statistics(gdirs)
    _add_time_to_df(odf, 'compile_glacier_statistics', time.time() - start)

    start = time.time()
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=[1920, 1960, 2000])
    _add_time_to_df(odf, 'compile_climate_statistics', time.time() - start)

    start = time.time()
    utils.compile_run_output(gdirs, input_filesuffix='_tstar')
    _add_time_to_df(odf, 'compile_run_output_tstar', time.time() - start)

    start = time.time()
    utils.compile_run_output(gdirs, input_filesuffix='_commit')
    _add_time_to_df(odf, 'compile_run_output_commit', time.time() - start)

    # Log
    opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
    odf.index.name = 'Task'
    odf.to_csv(opath)
    log.workflow('OGGM benchmarks is done!')
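

# _add_time_to_df is defined elsewhere in the benchmark script; a minimal
# sketch of what it presumably does, judging from its usage above:
def _add_time_to_df(df, name, t):
    # store the elapsed wall-clock time (in seconds) for a named step
    df.loc[name, 'time'] = t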
Example #26
    def setup_cache(self):

        setattr(full_workflow.setup_cache, "timeout", 360)

        utils.mkdir(self.testdir, reset=True)
        self.cfg_init()

        # Pre-download other files which will be needed later
        utils.get_cru_cl_file()
        utils.get_cru_file(var='tmp')
        utils.get_cru_file(var='pre')

        # Get the RGI glaciers for the run.
        rgi_list = ['RGI60-01.10299', 'RGI60-11.00897', 'RGI60-18.02342']
        rgidf = utils.get_rgi_glacier_entities(rgi_list)

        # We use intersects
        db = utils.get_rgi_intersects_region_file(version='61',
                                                  rgi_ids=rgi_list)
        cfg.set_intersects_db(db)

        # Sort for more efficient parallel computing
        rgidf = rgidf.sort_values('Area', ascending=False)

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.compute_downstream_line,
            tasks.compute_downstream_bedshape,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        execute_entity_task(tasks.local_mustar, gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.filter_inversion_output, gdirs)

        # Final preparation for the run
        execute_entity_task(tasks.init_present_time_glacier, gdirs)

        # Constant climate representative of the t* period, without bias.
        # In an ideal world this would imply that the glaciers remain
        # stable, but it doesn't have to be so
        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            bias=0,
                            nyears=100,
                            output_filesuffix='_tstar')

        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            y0=1990,
                            nyears=100,
                            output_filesuffix='_pd')

        # Compile output
        utils.glacier_characteristics(gdirs)
        utils.compile_run_output(gdirs, filesuffix='_tstar')
        utils.compile_run_output(gdirs, filesuffix='_pd')
        utils.compile_climate_input(gdirs)

        return gdirs
Example #27
def run_benchmark(rgi_version=None, rgi_reg=None, border=None,
                  output_folder='', working_dir='', is_test=False,
                  test_rgidf=None, test_intersects_file=None,
                  test_topofile=None, test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    odf = pd.DataFrame()

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    base_dir = os.path.join(output_folder)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    # Read RGI
    start = time.time()
    if test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                        version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(2)
    _add_time_to_df(odf, 'Read RGI', time.time()-start)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Initialize working directories
    start = time.time()
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
    _add_time_to_df(odf, 'init_glacier_regions', time.time()-start)

    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    # Tasks
    task_list = [
        tasks.process_cru_data,
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        start = time.time()
        workflow.execute_entity_task(task, gdirs)
        _add_time_to_df(odf, task.__name__, time.time()-start)

    # Runs
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, bias=0, seed=0,
                                 output_filesuffix='_tstar')
    _add_time_to_df(odf, 'run_random_climate_tstar_250', time.time()-start)

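    # 'Commitment'-style run: random climate sampled around y0=1995
    # (by default a 31-year window centred on y0)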
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, y0=1995, seed=0,
                                 output_filesuffix='_commit')
    _add_time_to_df(odf, 'run_random_climate_commit_250', time.time()-start)

    # Compile results
    start = time.time()
    utils.compile_glacier_statistics(gdirs)
    _add_time_to_df(odf, 'compile_glacier_statistics', time.time()-start)

    start = time.time()
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=[1920, 1960, 2000])
    _add_time_to_df(odf, 'compile_climate_statistics', time.time()-start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_tstar')
    _add_time_to_df(odf, 'compile_run_output_tstar', time.time()-start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_commit')
    _add_time_to_df(odf, 'compile_run_output_commit', time.time()-start)

    # Log
    opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
    odf.index.name = 'Task'
    odf.to_csv(opath)
    log.workflow('OGGM benchmarks are done!')
Example #28
    # Module logger
    log = logging.getLogger(__name__)
    log.workflow('Starting run for RGI reg {}'.format(rgi_reg))

    # Go - get the pre-processed glacier directories
    base_url = "https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/" \
               "L3-L5_files/CRU/elev_bands/qc3/pcp2.5/match_geod"
    gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version=rgi_version)

    # run vascaling climate tasks
    workflow.execute_entity_task(vascaling.local_t_star, gdirs)
    # adjust mass balance residual with geodetic observations
    vascaling.match_regional_geodetic_mb(gdirs=gdirs, rgi_reg=rgi_reg)
    # prepare historic "spinup"
    workflow.execute_entity_task(vascaling.run_historic_from_climate_data,
                                 gdirs, ys=2000, ye=2020,
                                 output_filesuffix='_historical')
    # store summary
    outpath = os.path.join(wdir, f'historical_run_output_{rgi_reg}.nc')
    utils.compile_run_output(gdirs, input_filesuffix='_historical',
                             path=outpath)

    # compress all gdirs
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False)
    # tar the directory bundles (grouped per 1000 glaciers) as well
    utils.base_dir_to_tar()
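    # (illustration) the tar'd directories can later be re-opened directly
    # from the archives, e.g. (hedged sketch, check your OGGM version's API):
    #
    #     gdirs = workflow.init_glacier_directories(rgi_ids, from_tar=True)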

    log.workflow('OGGM Done')
Example #29
cfg.PARAMS['continue_on_error'] = True

# Local working directory (where OGGM will write its output)
WORKING_DIR = utils.gettempdir('OGGM_Errors')
utils.mkdir(WORKING_DIR, reset=True)
cfg.PATHS['working_dir'] = WORKING_DIR

rgi_ids = ['RGI60-11.00897', 'RGI60-11.01450', 'RGI60-11.03295']

log.workflow('Starting OGGM run')
log.workflow('Number of glaciers: {}'.format(len(rgi_ids)))

# Go - get the pre-processed glacier directories
gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=4)

# We can step directly to the experiment!
# Random climate representative for the recent climate (1985-2015)
# with a negative bias added to the random temperature series
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=150, seed=0,
                             temperature_bias=-1)

# Write the compiled output
utils.compile_glacier_statistics(gdirs)
utils.compile_run_output(gdirs)
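# Sanity-check the compiled output (illustrative sketch; with no filesuffix,
# compile_run_output writes 'run_output.nc' to the working directory,
# with volumes stored in m3)
import os
import xarray as xr
with xr.open_dataset(os.path.join(WORKING_DIR, 'run_output.nc')) as ds:
    # final ice volume (m3) of the first glacier in the run
    print(float(ds.volume.sel(rgi_id=rgi_ids[0]).isel(time=-1)))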

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))
Example #30
rgidf = rgidf.sort_values('Area', ascending=False)

# rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-01.10299'])]

print('Number of glaciers: {}'.format(len(rgidf)))


# Go - initialize working directories
# -----------------------------------

# you can use the command below to reset your run -- use with caution!
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)

utils.glacier_characteristics(gdirs)
utils.compile_run_output(gdirs, filesuffix='_fromzero')
utils.compile_run_output(gdirs, filesuffix='_fromzero_newparams')
utils.compile_run_output(gdirs, filesuffix='_fromtoday')
utils.compile_run_output(gdirs, filesuffix='_fromtoday_newparams')

exit()

# Prepro tasks
task_list = [
    # tasks.glacier_masks,
    # tasks.compute_centerlines,
    # tasks.compute_downstream_lines,
    # tasks.initialize_flowlines,
    # tasks.compute_downstream_bedshape,
    # tasks.catchment_area,
    # tasks.catchment_intersections,
Example #31
    # Module logger
    log = logging.getLogger(__name__)
    log.workflow('Starting run for RGI reg {}'.format(rgi_reg))

    # Go - get the pre-processed glacier directories
    base_url = 'https://cluster.klima.uni-bremen.de/' \
               '~moberrauch/prepro_vas_paper/'
    gdirs = workflow.init_glacier_directories(rgi_ids,
                                              from_prepro_level=3,
                                              prepro_base_url=base_url,
                                              prepro_rgi_version=rgi_version)

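    # np.arange(-0.5, 5.5, .5) yields 12 temperature biases:
    # -0.5, 0.0, +0.5, ..., +5.0 degC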
    for temp_bias in np.arange(-0.5, 5.5, .5):
        filesuffix = "bias{:+.1f}".format(temp_bias)
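        # e.g. temp_bias=0.5  -> filesuffix 'bias+0.5'
        #      temp_bias=-0.5 -> filesuffix 'bias-0.5'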
        workflow.execute_entity_task(vascaling.run_constant_climate,
                                     gdirs,
                                     nyears=3000,
                                     y0=2009,
                                     halfsize=10,
                                     temperature_bias=temp_bias,
                                     init_model_filesuffix='_historical',
                                     output_filesuffix=filesuffix,
                                     return_value=False)
        eq_dir = os.path.join(outdir, 'RGI' + rgi_reg)
        utils.mkdir(eq_dir)
        utils.compile_run_output(gdirs,
                                 input_filesuffix=filesuffix,
                                 path=os.path.join(eq_dir, filesuffix + '.nc'))

    log.workflow('OGGM Done')
Example #32
def sensitivity_run_vas_old(rgi_ids, use_random_mb=False, use_mean=True,
                            path=True, temp_bias=0, tstar=None,
                            sensitivity_params=[[(4.5507, 0.191), 1]],
                            suffixes=[''], **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model (cf. `equilibrium_run_vas`) but for only one
    given temperature bias. However, it is possible to supply a list of
    sensitivity parameters (the scaling constants, and time scale factor) to
    alter the model behavior.
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 3000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. Given a path, it is
    also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=False
        Choose between random massbalance model and constant massbalance model.
    use_mean: bool, optional, default=True
        Choose between the mean or summation over all glaciers
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_bias: float, optional, default=0
        Temperature bias (degC) for the mass balance model.
    sensitivity_params: multi-dimensional array-like, optional,
        default=[[(4.5507, 0.191), 1]]
        list of parameter sets to be varied; each set contains a float tuple
        with the length and area scaling constants, followed by a float time
        scale factor
    suffixes: array-like, optional, default=['']
        Descriptive suffixes corresponding to the given sensitivity params
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional keyword arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # each given parameter set must have a corresponding output file suffix
    if len(sensitivity_params) != len(suffixes):
        raise RuntimeError("Each given parameter set must have its "
                           "corresponding suffix")

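    # Example call (hypothetical RGI ID and parameter values, for
    # illustration only):
    #
    #     ds = sensitivity_run_vas_old(
    #         ['RGI60-11.00897'],
    #         sensitivity_params=[[(4.5507, 0.191), 1],
    #                             [(4.5507, 0.191), 0.5]],
    #         suffixes=['_default', '_half_tau'])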
    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]
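    # e.g. 'RGI60-11.00897' -> rgi_region = '11', rgi_version = '60'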

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'sensitivity_vas_wdir'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 80
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = False

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_regions(rgidf)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_histalp_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(vascaling.local_t_star, gdirs,
                                 tstar=tstar, bias=0)

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, overriding any user-supplied value
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            # params = [(c_length, c_area), time_scale_factor]
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_random_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    else:
        # run ConstantMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_constant_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all temperature biases/suffixes
    for suffix, params in zip(suffixes, sensitivity_params):
        # compile the output for each run
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       filesuffix=suffix, path=False)
        # add sensitivity parameters as coordinates
        ds_.coords['length_scaling_const'] = params[0][0]
        ds_.coords['area_scaling_const'] = params[0][1]
        ds_.coords['time_scale_factor'] = params[1]
        # add to container
        ds.append(ds_)

    # concat the single output datasets into one, using 'sensitivity_params'
    # as the name of the new concatenated dimension
    ds = xr.combine_nested(ds, concat_dim='sensitivity_params')
    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

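    # `normalize_ds_with_start` is defined elsewhere in this module; a
    # minimal sketch of the assumed behaviour (divide every variable by its
    # value at the first time step):
    #
    #     def normalize_ds_with_start(ds):
    #         return ds / ds.isel(time=0).drop_vars('time')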
    # normalize glacier geometries (length/area/volume) with start value
    if use_mean:
        # compute average over all glaciers
        ds_normal = normalize_ds_with_start(ds).mean(dim='rgi_id')
        ds = ds.mean(dim='rgi_id')
    else:
        # compute sum over all glaciers
        ds_normal = normalize_ds_with_start(ds.sum(dim='rgi_id'))
        ds = ds.sum(dim='rgi_id')

    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = False
    ds_normal.coords['normalized'] = True

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store dataset to file
    if path:
        if path is True:
            # default file name in the working directory
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_vas.nc'.format(mb))
        ds.to_netcdf(path)

    # return ds, ds_normal
    return ds
Example #33
    def test_random(self):

        # Fake Reset (all these tests are horribly coded)
        if not os.path.exists(TEST_DIR):
            os.makedirs(TEST_DIR)
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)
        gdirs = up_to_inversion(reset=False)

        # First tests
        df = utils.compile_glacier_statistics(gdirs)
        df['volume_before_calving_km3'] = df['volume_before_calving'] * 1e-9
        assert np.sum(~df.volume_before_calving.isnull()) == 2
        dfs = df.iloc[:2]
        assert np.all(dfs['volume_before_calving_km3'] < dfs['inv_volume_km3'])
        assert_allclose(df['inv_flowline_glacier_area'] * 1e-6,
                        df['rgi_area_km2'])

        workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
        # Check init_present_time_glacier not messing around too much
        for gd in gdirs:
            from oggm.core.massbalance import LinearMassBalance
            from oggm.core.flowline import FluxBasedModel
            mb_mod = LinearMassBalance(ela_h=2500)
            fls = gd.read_pickle('model_flowlines')
            model = FluxBasedModel(fls, mb_model=mb_mod)
            df.loc[gd.rgi_id, 'start_area_km2'] = model.area_km2
            df.loc[gd.rgi_id, 'start_volume_km3'] = model.volume_km3
            df.loc[gd.rgi_id, 'start_length'] = model.length_m
        assert_allclose(df['rgi_area_km2'], df['start_area_km2'], rtol=0.01)
        assert_allclose(df['rgi_area_km2'].sum(),
                        df['start_area_km2'].sum(),
                        rtol=0.005)
        assert_allclose(df['inv_volume_km3'], df['start_volume_km3'])
        assert_allclose(df['inv_volume_km3'].sum(),
                        df['start_volume_km3'].sum())
        assert_allclose(df['main_flowline_length'], df['start_length'])

        workflow.execute_entity_task(flowline.run_random_climate,
                                     gdirs,
                                     nyears=100,
                                     seed=0,
                                     store_monthly_step=True,
                                     output_filesuffix='_test')

        for gd in gdirs:
            path = gd.get_filepath('model_run', filesuffix='_test')
            # See that we are running ok
            with flowline.FileModel(path) as model:
                vol = model.volume_km3_ts()
                area = model.area_km2_ts()
                length = model.length_m_ts()

                self.assertTrue(np.all(np.isfinite(vol) & (vol != 0.)))
                self.assertTrue(np.all(np.isfinite(area) & (area != 0.)))
                self.assertTrue(np.all(np.isfinite(length) & (length != 0.)))

            diag_path = gd.get_filepath('model_diagnostics', filesuffix='_test')
            ds_diag = xr.open_dataset(diag_path)
            df = vol.to_frame('RUN')
            df['DIAG'] = ds_diag.volume_m3.to_series() * 1e-9
            assert_allclose(df.RUN, df.DIAG)
            df = area.to_frame('RUN')
            df['DIAG'] = ds_diag.area_m2.to_series() * 1e-6
            assert_allclose(df.RUN, df.DIAG)
            df = length.to_frame('RUN')
            df['DIAG'] = ds_diag.length_m.to_series()
            assert_allclose(df.RUN, df.DIAG)

        # Test output
        ds = utils.compile_run_output(gdirs, input_filesuffix='_test')
        assert_allclose(ds_diag.volume_m3, ds.volume.sel(rgi_id=gd.rgi_id))
        assert_allclose(ds_diag.area_m2, ds.area.sel(rgi_id=gd.rgi_id))
        assert_allclose(ds_diag.length_m, ds.length.sel(rgi_id=gd.rgi_id))
        df = ds.volume.sel(rgi_id=gd.rgi_id).to_series().to_frame('OUT')
        df['RUN'] = ds_diag.volume_m3.to_series()
        assert_allclose(df.RUN, df.OUT)

        # Compare to statistics
        df = utils.compile_glacier_statistics(gdirs)
        df['y0_vol'] = ds.volume.sel(rgi_id=df.index, time=0) * 1e-9
        df['y0_area'] = ds.area.sel(rgi_id=df.index, time=0) * 1e-6
        df['y0_len'] = ds.length.sel(rgi_id=df.index, time=0)
        assert_allclose(df['rgi_area_km2'], df['y0_area'], 0.06)
        assert_allclose(df['inv_volume_km3'], df['y0_vol'], 0.04)
        assert_allclose(df['main_flowline_length'], df['y0_len'])

        # Calving stuff
        assert ds.isel(rgi_id=0).calving[-1] > 0
        assert ds.isel(rgi_id=0).calving_rate[-1] > 0
        assert ds.isel(rgi_id=0).volume_bsl[-1] == 0
        assert ds.isel(rgi_id=0).volume_bwl[-1] > 0
        assert ds.isel(rgi_id=1).calving[-1] > 0
        assert ds.isel(rgi_id=1).calving_rate[-1] > 0
        assert not np.isfinite(ds.isel(rgi_id=1).volume_bsl[-1])
Example #34
    execute_entity_task(task, gdirs)

# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.filter_inversion_output, gdirs)

# Final preparation for the run
execute_entity_task(tasks.init_present_time_glacier, gdirs)

# Random climate representative for the tstar climate, without bias
# In an ideal world this would imply that the glaciers remain stable,
# but it doesn't have to be so
execute_entity_task(tasks.run_random_climate,
                    gdirs,
                    nyears=200,
                    bias=0,
                    seed=1,
                    output_filesuffix='_tstar')

# Compile output
log.info('Compiling output')
utils.glacier_characteristics(gdirs)
utils.compile_run_output(gdirs, filesuffix='_tstar')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info('OGGM is done! Time needed: %d:%02d:%02d' % (h, m, s))
Example #35
            workflow.execute_entity_task(
                gcm_climate.process_cmip_data,
                gdirs,
                filesuffix='_' + rid,  # recognize the climate file for later
                fpath_temp=ft,  # temperature projections
                fpath_precip=fp,  # precip projections
                year_range=('1981', '2018'),
            )
            workflow.execute_entity_task(
                tasks.run_from_climate_data,
                gdirs,
                climate_filename='gcm_data',  # use gcm_data, not climate_historical
                climate_input_filesuffix='_' + rid,  # use a different scenario
                init_model_filesuffix='_historical',  # important! Start from the 2019 glacier
                output_filesuffix=rid,  # recognize the run for later
                return_value=False,
            )
            gcm_dir = os.path.join(OUTPUT_DIR, 'RGI' + rgi_reg, gcm)
            utils.mkdir(gcm_dir)
            utils.compile_run_output(gdirs,
                                     input_filesuffix=rid,
                                     path=os.path.join(gcm_dir, rid + '.nc'))
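            # resulting layout: <OUTPUT_DIR>/RGI<rgi_reg>/<gcm>/<rid>.nc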

    log.workflow('OGGM Done')