def init_glacier_dirs(workdir, RGI_file_path, error_file_path):

    cfg.initialize()
    cfg.PARAMS['use_multiprocessing'] = False
    cfg.PARAMS['use_tar_shapefiles'] = False
    cfg.PATHS['working_dir'] = workdir
    cfg.PARAMS['border'] = 20

    # Read RGI file
    rgidf = gpd.read_file(RGI_file_path)

    # Run only for Lake Terminating and Marine Terminating
    glac_type = [0]
    keep_glactype = [(i not in glac_type) for i in rgidf.TermType]
    rgidf = rgidf.iloc[keep_glactype]

    # Run only glaciers that have a weak connection, or no
    # connection, to the ice sheet
    connection = [2]
    keep_connection = [(i not in connection) for i in rgidf.Connect]
    rgidf = rgidf.iloc[keep_connection]

    # Run glaciers without errors
    de = pd.read_csv(error_file_path)
    keep_indexes = [(i not in de.RGIId.values) for i in rgidf.RGIId]
    rgidf = rgidf.iloc[keep_indexes]

    return workflow.init_glacier_regions(rgidf)
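
A minimal usage sketch for the helper above; the working directory and input file paths are hypothetical placeholders, not part of the original source:

# Hypothetical paths -- replace with your own setup.
gdirs = init_glacier_dirs(workdir='/tmp/oggm_wd',
                          RGI_file_path='rgi_outlines.shp',
                          error_file_path='glaciers_with_errors.csv')
print('Initialized {} glacier directories'.format(len(gdirs)))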
Example 2
def test_multiple_inversion():

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_mdir')
    if not os.path.exists(testdir):
        os.makedirs(testdir)

    # Init
    cfg.initialize()
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PARAMS['border'] = 40
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['baseline_climate'] = 'CUSTOM'
    cfg.PATHS['working_dir'] = testdir

    # Get the RGI ID
    hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
    hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'

    gdirs = workflow.init_glacier_regions(hef_rgi)
    workflow.gis_prepro_tasks(gdirs)
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)

    fig, ax = plt.subplots()
    graphics.plot_inversion(gdirs, ax=ax)
    fig.tight_layout()
    shutil.rmtree(testdir)
    return fig
Example 3
def test_ice_cap():

    testdir = os.path.join(get_test_dir(), 'tmp_icecap')
    utils.mkdir(testdir, reset=True)

    cfg.initialize()
    cfg.PARAMS['use_intersects'] = False
    cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-05.08389.tif')
    cfg.PARAMS['border'] = 60
    cfg.PATHS['working_dir'] = testdir

    df = gpd.read_file(get_demo_file('divides_RGI50-05.08389.shp'))
    df['Area'] = df.Area * 1e-6  # convert from m2 to km2
    df['RGIId'] = ['RGI50-05.08389_d{:02d}'.format(d + 1) for d in df.index]

    gdirs = workflow.init_glacier_regions(df)
    workflow.gis_prepro_tasks(gdirs)

    from salem import mercator_grid, Map
    smap = mercator_grid((gdirs[0].cenlon, gdirs[0].cenlat),
                         extent=[20000, 23000])
    smap = Map(smap)

    fig, ax = plt.subplots()
    graphics.plot_catchment_width(gdirs,
                                  ax=ax,
                                  add_intersects=True,
                                  add_touches=True,
                                  smap=smap)
    fig.tight_layout()
    shutil.rmtree(testdir)
    return fig
Example 4
def up_to_climate(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    if not os.path.exists(CLI_LOGF):
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)

    # Init
    cfg.initialize()

    # Use multiprocessing
    # We don't use multiprocessing on Travis because we are unsure
    # whether it is compatible with test coverage
    cfg.PARAMS['use_multiprocessing'] = not ON_TRAVIS

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oetztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Be sure data is downloaded because lock doesn't work
    cl = utils.get_cru_cl_file()

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['use_optimized_inversion_params'] = True
    cfg.PARAMS['tstar_search_window'] = [1902, 0]
    cfg.PARAMS['invert_with_rectangular'] = False

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    assert gdirs[14].name == 'Hintereisferner'

    try:
        tasks.catchment_width_correction(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

    return gdirs
Example 5
def get_mean_temps_2k(rgi, return_prcp):
    from oggm import cfg, utils, workflow, tasks
    from oggm.core.massbalance import PastMassBalance

    # Initialize OGGM
    cfg.initialize()
    wd = utils.gettempdir(reset=True)
    cfg.PATHS['working_dir'] = wd
    utils.mkdir(wd, reset=True)
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # and set standard histalp values
    cfg.PARAMS['temp_melt'] = -1.75
    cfg.PARAMS['prcp_scaling_factor'] = 1.75

    gdir = workflow.init_glacier_regions(rgidf=rgi.split('_')[0],
                                         from_prepro_level=3,
                                         prepro_border=10)[0]
    # run histalp climate on glacier!
    tasks.process_histalp_data(gdir)

    f = gdir.get_filepath('climate_historical')
    with utils.ncDataset(f) as nc:
        refhgt = nc.ref_hgt

    mb = PastMassBalance(gdir, check_calib_params=False)

    df = pd.DataFrame()
    df2 = pd.DataFrame()

    for y in np.arange(1870, 2015):
        for i in np.arange(9, 12):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.get_monthly_climate([refhgt], flyear)[0]
            df.loc[y, i] = tmp.mean()

        if return_prcp:
            for i in np.arange(3, 6):
                flyear = utils.date_to_floatyear(y, i)
                pcp = mb.get_monthly_climate([refhgt], flyear)[3]
                df2.loc[y, i] = pcp.mean()

    t99 = df.loc[1984:2014, :].mean().mean()
    t85 = df.loc[1870:1900, :].mean().mean()
    t2k = df.loc[1900:2000, :].mean().mean()

    if return_prcp:
        p99 = df2.loc[1984:2014, :].mean().mean()
        p85 = df2.loc[1870:1900, :].mean().mean()
        p2k = df2.loc[1900:2000, :].mean().mean()
        return t85, t99, t2k, p85, p99, p2k

    return t85, t99, t2k
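
A usage sketch for the routine above, assuming a glacier covered by the HISTALP dataset; the RGI ID is the Hintereisferner ID used elsewhere on this page:

# Mean September-November temperatures for the three periods;
# set return_prcp=True to additionally get the spring precipitation means.
t85, t99, t2k = get_mean_temps_2k('RGI60-11.00897', return_prcp=False)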
Example 6
def up_to_climate(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    if not os.path.exists(CLI_LOGF):
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = use_multiprocessing()

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oetztal.shp')
    rgidf = gpd.read_file(rgi_file)

    # Be sure data is downloaded
    cl = utils.get_cru_cl_file()

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['optimize_inversion_params'] = True
    cfg.PARAMS['use_optimized_inversion_params'] = True
    cfg.PARAMS['tstar_search_window'] = [1902, 0]
    cfg.PARAMS['invert_with_rectangular'] = False
    cfg.PARAMS['run_mb_calibration'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        tasks.catchment_width_correction(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

    return gdirs
Example 7
def up_to_climate(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    if not os.path.exists(CLI_LOGF):
        with open(CLI_LOGF, 'wb') as f:
            pickle.dump('none', f)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = use_multiprocessing()

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR
    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oetztal.shp')
    rgidf = gpd.read_file(rgi_file)

    # Be sure data is downloaded
    utils.get_cru_cl_file()

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['tstar_search_window'] = [1902, 0]
    cfg.PARAMS['run_mb_calibration'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        tasks.catchment_width_correction(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

    return gdirs
Example 8
def init_velocity(workdir):
    cfg.initialize()
    cfg.PARAMS['use_multiprocessing'] = False
    cfg.PATHS['working_dir'] = workdir
    cfg.PARAMS['border'] = 20

    # Read RGI file
    rgidf = gpd.read_file(RGI_FILE)

    # Run only for Marine terminating
    glac_type = [0, 2]
    keep_glactype = [(i not in glac_type) for i in rgidf.TermType]
    rgidf = rgidf.iloc[keep_glactype]

    return workflow.init_glacier_regions(rgidf)
Example 9
    def test_workflow(self):

        # This is a check that the inversion workflow works fine

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        tasks.distribute_t_stars(gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.volume_inversion, gdirs, glen_a=cfg.A, fs=0)
        execute_entity_task(tasks.filter_inversion_output, gdirs)

        df = utils.glacier_characteristics(gdirs)
        assert df.inv_thickness_m[0] < 100

        if do_plot:
            import matplotlib.pyplot as plt
            from oggm.graphics import plot_inversion
            plot_inversion(gdirs)
            plt.show()
Example 10
    def test_workflow(self):

        # This is a check that the inversion workflow works fine

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_t_star,
            tasks.mu_star_calibration,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.filter_inversion_output, gdirs)

        df = utils.compile_glacier_statistics(gdirs)
        assert df.inv_thickness_m[0] < 100

        if do_plot:
            import matplotlib.pyplot as plt
            from oggm.graphics import plot_inversion
            plot_inversion(gdirs)
            plt.show()
Example 11
def test_chhota_shigri():

    testdir = os.path.join(get_test_dir(), 'tmp_chhota')
    utils.mkdir(testdir, reset=True)

    # Init
    cfg.initialize()
    cfg.PATHS['dem_file'] = get_demo_file('dem_chhota_shigri.tif')
    cfg.PARAMS['border'] = 80
    cfg.PARAMS['use_intersects'] = False
    cfg.PATHS['working_dir'] = testdir

    hef_file = get_demo_file('divides_RGI50-14.15990.shp')
    df = gpd.read_file(hef_file)
    df['Area'] = df.Area * 1e-6  # convert from m2 to km2
    df['RGIId'] = ['RGI50-14.15990' + d for d in ['_d01', '_d02']]

    gdirs = workflow.init_glacier_regions(df)
    workflow.gis_prepro_tasks(gdirs)
    for gdir in gdirs:
        climate.apparent_mb_from_linear_mb(gdir)
    workflow.execute_entity_task(inversion.prepare_for_inversion, gdirs)
    workflow.execute_entity_task(inversion.volume_inversion,
                                 gdirs,
                                 glen_a=cfg.A,
                                 fs=0)
    workflow.execute_entity_task(inversion.filter_inversion_output, gdirs)
    workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)

    models = []
    for gdir in gdirs:
        flowline.init_present_time_glacier(gdir)
        fls = gdir.read_pickle('model_flowlines')
        models.append(flowline.FlowlineModel(fls))

    fig, ax = plt.subplots()
    graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
    fig.tight_layout()
    shutil.rmtree(testdir)
    return fig
Example 12
def process_cmip_for_merged_glacier(gdir, filesuffix, ft, fp):

    rgi = gdir.rgi_id.split('_')[0]

    rgis = merge_pair_dict(rgi)[0] + [rgi]

    gdirs = init_glacier_regions(rgis, prepro_border=10, from_prepro_level=1)
    execute_entity_task(tasks.process_histalp_data, gdirs)

    execute_entity_task(gcm_climate.process_cmip5_data,
                        gdirs,
                        filesuffix=filesuffix,
                        fpath_temp=ft,
                        fpath_precip=fp)

    for gd in gdirs:
        # copy climate files
        shutil.copyfile(
            gd.get_filepath('gcm_data', filesuffix=filesuffix),
            gdir.get_filepath('gcm_data',
                              filesuffix='_{}{}'.format(gd.rgi_id,
                                                        filesuffix)))
Example 13
def test_multiple_models():

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_mdir')
    utils.mkdir(testdir, reset=True)

    # Init
    cfg.initialize()
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['baseline_climate'] = 'CUSTOM'
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['border'] = 40

    # Get the RGI ID
    hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
    hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'

    gdirs = workflow.init_glacier_regions(hef_rgi)
    workflow.gis_prepro_tasks(gdirs)
    workflow.climate_tasks(gdirs)
    workflow.inversion_tasks(gdirs)

    models = []
    for gdir in gdirs:
        flowline.init_present_time_glacier(gdir)
        fls = gdir.read_pickle('model_flowlines')
        models.append(flowline.FlowlineModel(fls))

    fig, ax = plt.subplots()
    graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
    fig.tight_layout()

    shutil.rmtree(testdir)
    return fig
Example 14
cfg.initialize()

# Local working directory (where OGGM will write its output)
WORKING_DIR = utils.gettempdir('OGGM_spinup_run')
utils.mkdir(WORKING_DIR, reset=True)
cfg.PATHS['working_dir'] = WORKING_DIR

# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True

# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = 80

# Go - initialize glacier directories
gdirs = workflow.init_glacier_regions(['RGI60-11.00897'], from_prepro_level=4)

# Additional climate file (CESM)
cfg.PATHS['cesm_temp_file'] = get_demo_file('cesm.TREFHT.160001-200512'
                                            '.selection.nc')
cfg.PATHS['cesm_precc_file'] = get_demo_file('cesm.PRECC.160001-200512'
                                             '.selection.nc')
cfg.PATHS['cesm_precl_file'] = get_demo_file('cesm.PRECL.160001-200512'
                                             '.selection.nc')
execute_entity_task(tasks.process_cesm_data, gdirs)

# Run the last 200 years with the default starting point (current glacier)
# and CESM data as input
execute_entity_task(tasks.run_from_climate_data, gdirs,
                    climate_filename='gcm_data',
                    ys=1801, ye=2000,
Example 15
def sensitivity_run_vas_old(rgi_ids, use_random_mb=False, use_mean=True,
                            path=True, temp_bias=0, tstar=None,
                            sensitivity_params=[[(4.5507, 0.191), 1]], suffixes=[''],
                            **kwargs):
    """ The routine runs all steps for the equilibrium experiments using the
    volume/area scaling model (cf. `equilibrium_run_vas`) but for only one
    given temperature bias. However, it is possible to supply a list of
    sensitivity parameters (the scaling constants, and time scale factor) to
    alter the model behavior.
    - OGGM preprocessing, including initialization, GIS tasks, climate tasks and
      massbalance tasks.
    - Run model for all glaciers with constant (or random) massbalance model
      over 3000 years (default value).
    - Process the model output dataset(s), i.e. normalization, average/sum, ...

    The final dataset containing all results is returned. If a path is
    given, it is also stored to file.

    Parameters
    ----------
    rgi_ids: array-like
        List of RGI IDs for which the equilibrium experiments are performed.
    use_random_mb: bool, optional, default=False
        Choose between random massbalance model and constant massbalance model.
    use_mean: bool, optional, default=True
        Choose between the mean or summation over all glaciers
    path: bool or str, optional, default=True
        If a path is given (or True), the resulting dataset is stored to file.
    temp_bias: float, optional, default=0
        Temperature bias (degC) for the mass balance model.
    sensitivity_params: multi-dimensional array-like, optional,
        default=[[(4.5507, 0.191), 1]]
        list containing the parameters which are to be varied in the following
        order: float tuple with length and area scaling constant, float as time
        scale factor
    suffixes: array-like, optional, default=['']
        Descriptive suffixes corresponding to the given sensitivity params
    tstar: float, optional, default=None
        'Equilibrium year' used for the mass balance calibration.
    kwargs:
        Additional key word arguments for the `run_random_climate` or
        `run_constant_climate` routines of the vascaling module.

    Returns
    -------
    Dataset containing yearly values of all glacier geometries.

    """
    # assert correct output file suffixes for temp biases
    if len(sensitivity_params) != len(suffixes):
        raise RuntimeError("Each given parameter set must have its "
                           "corresponding suffix")

    # OGGM preprocessing
    # ------------------

    # compute RGI region and version from RGI IDs
    # assuming they are all the same
    rgi_region = (rgi_ids[0].split('-')[-1]).split('.')[0]
    rgi_version = (rgi_ids[0].split('-')[0])[-2:]

    # load default parameter file
    cfg.initialize()

    # create working directory
    wdir = '/Users/oberrauch/work/master/working_directories/'
    wdir += 'sensitivity_vas_wdir'
    if not os.path.exists(wdir):
        os.makedirs(wdir)
    # set path to working directory
    cfg.PATHS['working_dir'] = wdir
    # set RGI version
    cfg.PARAMS['rgi_version'] = rgi_version
    # define how many grid points to use around the glacier,
    # if you expect the glacier to grow large use a larger border
    cfg.PARAMS['border'] = 80
    # we use HistAlp climate data
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # set the mb hyper parameters accordingly
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    cfg.PARAMS['temp_melt'] = -1.75
    # the bias is defined to be zero during the calibration process,
    # which is why we don't use it here to reproduce the results
    cfg.PARAMS['use_bias_for_run'] = False

    # read RGI entry for the glaciers as DataFrame
    # containing the outline area as shapefile
    rgidf = utils.get_rgi_glacier_entities(rgi_ids)

    # get and set path to intersect shapefile
    intersects_db = utils.get_rgi_intersects_region_file(region=rgi_region)
    cfg.set_intersects_db(intersects_db)

    # initialize the GlacierDirectory
    gdirs = workflow.init_glacier_regions(rgidf)

    # define the local grid and glacier mask
    workflow.execute_entity_task(gis.glacier_masks, gdirs)
    # process the given climate file
    workflow.execute_entity_task(climate.process_histalp_data, gdirs)
    # compute local t* and the corresponding mu*
    workflow.execute_entity_task(vascaling.local_t_star, gdirs,
                                 tstar=tstar, bias=0)

    # Run model with constant/random mass balance model
    # -------------------------------------------------

    # use t* as center year, even if specified differently
    kwargs['y0'] = tstar
    # run for 3000 years if not specified otherwise
    kwargs.setdefault('nyears', 3000)

    if use_random_mb:
        # set random seed to get reproducible results
        kwargs.setdefault('seed', 12)

        # run RandomMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            # params: [(length const, area const), time scale factor]
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_random_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    else:
        # run ConstantMassBalance model centered around t* for each given
        # parameter set
        for suffix, params in zip(suffixes, sensitivity_params):
            cfg.PARAMS['vas_c_length_m'] = params[0][0]
            cfg.PARAMS['vas_c_area_m2'] = params[0][1]
            kwargs['time_scale_factor'] = params[1]
            workflow.execute_entity_task(vascaling.run_constant_climate, gdirs,
                                         temperature_bias=temp_bias,
                                         output_filesuffix=suffix, **kwargs)
    # Process output dataset(s)
    # -------------------------

    # create empty container
    ds = list()
    # iterate over all temperature biases/suffixes
    for suffix, params in zip(suffixes, sensitivity_params):
        # compile the output for each run
        ds_ = utils.compile_run_output(np.atleast_1d(gdirs),
                                       filesuffix=suffix, path=False)
        # add sensitivity parameters as coordinates
        ds_.coords['length_scaling_const'] = params[0][0]
        ds_.coords['area_scaling_const'] = params[0][1]
        ds_.coords['time_scale_factor'] = params[1]
        # add to container
        ds.append(ds_)

    # concat the single output datasets into one, using 'sensitivity_params'
    # as the name for the new concatenated dimension
    ds = xr.combine_nested(ds, concat_dim='sensitivity_params')
    # add model type as coordinate
    ds.coords['model'] = 'vas'
    # add mb model type as coordinate
    ds.coords['mb_model'] = 'random' if use_random_mb else 'constant'

    # normalize glacier geometries (length/area/volume) with start value
    if use_mean:
        # compute average over all glaciers
        ds_normal = normalize_ds_with_start(ds).mean(dim='rgi_id')
        ds = ds.mean(dim='rgi_id')
    else:
        # compute sum over all glaciers
        ds_normal = normalize_ds_with_start(ds.sum(dim='rgi_id'))
        ds = ds.sum(dim='rgi_id')

    # add coordinate to distinguish between normalized and absolute values
    ds.coords['normalized'] = False
    ds_normal.coords['normalized'] = True

    # combine datasets
    ds = xr.concat([ds, ds_normal], 'normalized')

    # store datasets
    if path:
        if path is True:
            mb = 'random' if use_random_mb else 'constant'
            path = os.path.join(cfg.PATHS['working_dir'],
                                'run_output_{}_vas.nc'.format(mb))
        ds.to_netcdf(path)

    # return ds, ds_normal
    return ds
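
A usage sketch for the sensitivity routine above; the RGI ID, the t* value and the parameter variations are purely illustrative assumptions:

# Keep the default scaling constants and vary only the time scale factor.
params = [[(4.5507, 0.191), 0.5],
          [(4.5507, 0.191), 1],
          [(4.5507, 0.191), 2]]
suffixes = ['_half_tau', '_default_tau', '_double_tau']
ds = sensitivity_run_vas_old(['RGI60-11.00897'], use_random_mb=False,
                             sensitivity_params=params, suffixes=suffixes,
                             tstar=1927)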
Example 16
cfg.PARAMS['border'] = 160

# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
cfg.PARAMS['auto_skip_task'] = False

# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg, version=rgi_version))

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

# Module logger
log = logging.getLogger(__name__)
log.info('Starting run for RGI reg: ' + rgi_reg)
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf, from_prepro_level=0)

# Tasks
workflow.execute_entity_task(tasks.glacier_masks, gdirs)

# End - compress all
workflow.execute_entity_task(utils.gdir_to_tar, gdirs)

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info("OGGM is done! Time needed: %02d:%02d:%02d" % (h, m, s))
Example 17

rgidf = salem.read_shapefile(rgif, cached=True)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

# rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-01.10299'])]

print('Number of glaciers: {}'.format(len(rgidf)))


# Go - initialize working directories
# -----------------------------------

# you can use the command below to reset your run -- use with caution!
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)

utils.glacier_characteristics(gdirs)
utils.compile_run_output(gdirs, filesuffix='_fromzero')
utils.compile_run_output(gdirs, filesuffix='_fromzero_newparams')
utils.compile_run_output(gdirs, filesuffix='_fromtoday')
utils.compile_run_output(gdirs, filesuffix='_fromtoday_newparams')

exit()

# Prepro tasks
task_list = [
    # tasks.glacier_masks,
    # tasks.compute_centerlines,
    # tasks.compute_downstream_lines,
    # tasks.initialize_flowlines,
Example 18
    def test_optimize_inversion(self):

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_mustar,
            tasks.apparent_mb,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Reference data
        gdir = gdirs[0]
        df = self.get_ref_data(gdir)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)

        glen_a = cfg.PARAMS['inversion_glen_a']
        fs = cfg.PARAMS['inversion_fs']

        def to_optimize(x):
            execute_entity_task(tasks.mass_conservation_inversion,
                                gdirs,
                                glen_a=glen_a * x[0],
                                fs=fs * x[1])
            execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)
            with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
                thick = ds.distributed_thickness.isel(x=('z', df['i']),
                                                      y=('z', df['j']))
            return (np.abs(thick - df.thick)).mean()

        opti = optimization.minimize(to_optimize, [1., 1.],
                                     bounds=((0.01, 10), (0.01, 10)),
                                     tol=0.1)
        # Check results and save.
        execute_entity_task(tasks.mass_conservation_inversion,
                            gdirs,
                            glen_a=glen_a * opti['x'][0],
                            fs=0)
        execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
            df['oggm'] = ds.distributed_thickness.isel(x=('z', df['i']),
                                                       y=('z', df['j']))
            ds['ref'] = xr.zeros_like(ds.distributed_thickness) * np.NaN
            ds['ref'].data[df['j'], df['i']] = df['thick']

        rmsd = ((df.oggm - df.thick)**2).mean()**.5
        assert rmsd < 60

        dfm = df.mean()
        np.testing.assert_allclose(dfm.thick, dfm.oggm, 10)
        if do_plot:
            import matplotlib.pyplot as plt
            df.plot(kind='scatter', x='oggm', y='thick')
            plt.axis('equal')
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
            ds.ref.plot(ax=ax1)
            ds.distributed_thickness.plot(ax=ax2)
            plt.tight_layout()
            plt.show()
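
The `optimization.minimize` call in this test presumably refers to SciPy's optimizer; a sketch of the imports such a snippet would rely on (an assumption, since the example does not show its import block):

# Presumed imports for the test above.
import numpy as np
import xarray as xr
from scipy import optimize as optimization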
Example 19
    def test_mb(self):

        # This is a function to produce the MB function needed by Anna

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        tasks.distribute_t_stars(gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)

        mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
        demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))

        mbref = mbref.get_vardata()
        mbref[mbref == -9999] = np.NaN
        demref = demref.get_vardata()[np.isfinite(mbref)]
        mbref = mbref[np.isfinite(mbref)] * 1000

        # compute the bias to make it 0 SMB on the 2D DEM
        mbmod = ConstantMassBalance(gdirs[0], bias=0)
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * cfg.RHO
        mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * cfg.RHO
        np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)

        # Same for ref
        mbref = mbref - np.average(mbref)
        np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)

        # Fit poly
        p = np.polyfit(demref, mbref, deg=2)
        poly = np.poly1d(p)
        myfit = poly(demref)
        np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)

        if do_plot:
            import matplotlib.pyplot as plt
            plt.scatter(mbref, demref, s=5, label='Obs (2007-2012), shifted to '
                                                   'Avg(SMB) = 0')
            plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
            plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
            plt.xlabel('MB (mm w.e. yr-1)')
            plt.ylabel('Altitude (m)')
            plt.legend()
            plt.show()
Example 20
def run_benchmark(rgi_version=None,
                  rgi_reg=None,
                  border=None,
                  output_folder='',
                  working_dir='',
                  is_test=False,
                  test_rgidf=None,
                  test_intersects_file=None,
                  test_topofile=None,
                  test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    odf = pd.DataFrame()

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    base_dir = os.path.join(output_folder)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    # Read RGI
    start = time.time()
    if test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(2)
    _add_time_to_df(odf, 'Read RGI', time.time() - start)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting benchmark run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Initialize working directories
    start = time.time()
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
    _add_time_to_df(odf, 'init_glacier_regions', time.time() - start)

    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    # Tasks
    task_list = [
        tasks.process_cru_data,
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        start = time.time()
        workflow.execute_entity_task(task, gdirs)
        _add_time_to_df(odf, task.__name__, time.time() - start)

    # Runs
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate,
                                 gdirs,
                                 nyears=250,
                                 bias=0,
                                 seed=0,
                                 output_filesuffix='_tstar')
    _add_time_to_df(odf, 'run_random_climate_tstar_250', time.time() - start)

    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate,
                                 gdirs,
                                 nyears=250,
                                 y0=1995,
                                 seed=0,
                                 output_filesuffix='_commit')
    _add_time_to_df(odf, 'run_random_climate_commit_250', time.time() - start)

    # Compile results
    start = time.time()
    utils.compile_glacier_statistics(gdirs)
    _add_time_to_df(odf, 'compile_glacier_statistics', time.time() - start)

    start = time.time()
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=[1920, 1960, 2000])
    _add_time_to_df(odf, 'compile_climate_statistics', time.time() - start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_tstar')
    _add_time_to_df(odf, 'compile_run_output_tstar', time.time() - start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_commit')
    _add_time_to_df(odf, 'compile_run_output_commit', time.time() - start)

    # Log
    opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
    odf.index.name = 'Task'
    odf.to_csv(opath)
    log.workflow('OGGM benchmark run is done!')
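
A usage sketch for the benchmark driver above; the region, border and output paths are illustrative placeholders:

# Benchmark a random sample of glaciers from RGI region 11 (Central Europe).
run_benchmark(rgi_version='61', rgi_reg='11', border=80,
              output_folder='/tmp/oggm_benchmarks',
              working_dir='/tmp/oggm_wd',
              is_test=True)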
Example 21
def hypsometries(rgi_df, to_file='', job_id='', oggm_working_dir='',
                 set_oggm_params=None):
    """
    Create hypsometries for glacier geometries using the best available DEM.

    We use the same convention as documented in RGIV6: 50 m bins,
    from 0 m a.s.l. up to the maximum elevation.

    The DEM choice and grid resolution is managed by OGGM.

    Parameters
    ----------
    rgi_df : str or geopandas.GeoDataFrame
        the RGI shapefile
    to_file : str, optional
        set to a valid path to write the file on disk
        For this task: the file name should have no ending, as two files
        are written to disk
    job_id : str, optional
        if you want to log what happens, give a name to this job
    oggm_working_dir: str, optional
        path to the folder where oggm will write its GlacierDirectories.
        Default is to use a temporary folder (not recommended)
    set_oggm_params : callable, optional
        a function which sets the desired OGGM parameters
    """

    if to_file:
        _, ext = os.path.splitext(to_file)
        if ext != '':
            raise ValueError('to_file should not have an extension!')
        if os.path.exists(to_file + '.csv'):
            raise RuntimeError("Won't overwrite existing file: " +
                               to_file + '.csv')
        if os.path.exists(to_file + '.shp'):
            raise RuntimeError("Won't overwrite existing file: " +
                               to_file + '.shp')

    from oggm import cfg, workflow, tasks
    cfg.initialize()

    if set_oggm_params is not None:
        set_oggm_params(cfg)

    del_dir = False
    if not oggm_working_dir:
        del_dir = True
        oggm_working_dir = tempfile.mkdtemp()
    cfg.PATHS['working_dir'] = oggm_working_dir

    # Get the DEM job done by OGGM
    cfg.PARAMS['use_intersects'] = False
    cfg.PARAMS['continue_on_error'] = True
    cfg.PARAMS['use_multiprocessing'] = False
    gdirs = workflow.init_glacier_regions(rgi_df)
    workflow.execute_entity_task(tasks.simple_glacier_masks, gdirs)
    compile_glacier_statistics(gdirs,
                               filesuffix='_{}'.format(gdirs[0].rgi_region))

    out_gdf = rgi_df.copy().set_index('RGIId')
    try:
        is_nominal = np.array([int(s[0]) == 2 for s in out_gdf.RGIFlag])
    except AttributeError:
        is_nominal = np.array([int(s) == 2 for s in out_gdf.Status])
    cols = ['Zmed', 'Zmin', 'Zmax', 'Slope', 'Aspect']
    out_gdf.loc[~is_nominal, cols] = np.NaN

    df = pd.DataFrame()
    for gdir in gdirs:

        rid = gdir.rgi_id
        df.loc[rid, 'RGIId'] = gdir.rgi_id
        df.loc[rid, 'GLIMSId'] = gdir.glims_id
        df.loc[rid, 'Area'] = gdir.rgi_area_km2

        if not gdir.has_file('hypsometry') or gdir.is_nominal:
            continue

        idf = pd.read_csv(gdir.get_filepath('hypsometry')).iloc[0]
        for c in idf.index:
            try:
                int(c)
            except ValueError:
                continue
            df.loc[rid, c] = idf[c]

        out_gdf.loc[rid, 'Zmed'] = idf.loc['Zmed']
        out_gdf.loc[rid, 'Zmin'] = idf.loc['Zmin']
        out_gdf.loc[rid, 'Zmax'] = idf.loc['Zmax']
        out_gdf.loc[rid, 'Slope'] = idf.loc['Slope']
        out_gdf.loc[rid, 'Aspect'] = idf.loc['Aspect']

    out_gdf = out_gdf.reset_index()
    df = df.reset_index(drop=True)
    bdf = df[df.columns[3:]].fillna(0).astype(int)
    ok = bdf.sum(axis=1)
    bdf.loc[ok < 1000, :] = -9
    df[df.columns[3:]] = bdf

    # Sort columns
    df = df[np.append(df.columns[:3], sorted(df.columns[3:]))]

    if del_dir:
        shutil.rmtree(oggm_working_dir)

    # write output to disk
    if to_file:
        out_gdf.crs = wgs84.srs
        out_gdf.to_file(to_file + '.shp')
        df.to_csv(to_file + '_hypso.csv', index=False)

    return df, out_gdf.reset_index()
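
A usage sketch for the hypsometry task above, assuming an RGI shapefile on disk; the file names are placeholders. Note that `to_file` must be given without an extension, since both a shapefile and a hypsometry CSV are written:

import geopandas as gpd

rgi_df = gpd.read_file('rgi_subset.shp')  # hypothetical input file
df, out_gdf = hypsometries(rgi_df, to_file='rgi_subset_with_hypso',
                           job_id='demo', oggm_working_dir='/tmp/oggm_wd')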
Example 22
path = utils.get_rgi_region_file(rgi_region, version=rgi_version)
rgidf = gpd.read_file(path)

# Select the glaciers in the Pyrenees
rgidf = rgidf.loc[rgidf['O2Region'] == '2']

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.workflow('Starting OGGM inversion run')
log.workflow('Number of glaciers: {}'.format(len(rgidf)))

# Go - get the pre-processed glacier directories
# We start at level 3, because we need all data for the inversion
gdirs = workflow.init_glacier_regions(rgidf,
                                      from_prepro_level=3,
                                      prepro_border=10)

# Default parameters
# Deformation: from Cuffey and Patterson 2010
glen_a = 2.4e-24
# Sliding: from Oerlemans 1997
fs = 5.7e-20

# Correction factors
factors = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
factors += [1.1, 1.2, 1.3, 1.5, 1.7, 2, 2.5, 3, 4, 5]
factors += [6, 7, 8, 9, 10]

# Run the inversions tasks with the given factors
for f in factors:
Example 23
def configure(workdir, glclist, baselineclimate='HISTALP', resetwd=False):
    global MERGEDICT
    global GLCDICT
    global ADDITIONAL_REFERENCE_GLACIERS

    # Initialize OGGM
    cfg.initialize()
    cfg.PATHS['working_dir'] = workdir

    # Local working directory (where OGGM will write its output)
    utils.mkdir(workdir, reset=resetwd)

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = False

    # We use intersects
    cfg.PARAMS['use_intersects'] = True
    rgif = utils.get_rgi_intersects_region_file('11', version='60')
    cfg.set_intersects_db(rgif)

    cfg.PARAMS['use_rgi_area'] = True

    # set negative flux filtering to false. should be standard soon
    cfg.PARAMS['filter_for_neg_flux'] = False
    cfg.PARAMS['correct_for_neg_flux'] = True

    # here in relic we want to run the mb calibration every time
    cfg.PARAMS['run_mb_calibration'] = True

    # glacier length
    cfg.PARAMS['min_ice_thick_for_length'] = 1.0
    cfg.PARAMS['glacier_length_method'] = 'consecutive'

    # check if we want to merge a glacier
    mglclist = []
    for glc in glclist:
        mglc = merge_pair_dict(glc)
        if mglc is not None:
            mglclist += mglc[0]

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = 160

    gdirs = workflow.init_glacier_regions(glclist + mglclist,
                                          from_prepro_level=3)

    # and we want to use all glaciers for the MB calibration
    refids = get_ref_mb_glaciers_candidates()
    # right now we only do Alpine glaciers
    refids = [rid for rid in refids if '-11.' in rid]
    # but do leave out the actual glaciers
    refids = [rid for rid in refids if rid not in glclist + mglclist]
    # I SAID ALPS, NOT PYRENEES
    refids.remove('RGI60-11.03232')
    refids.remove('RGI60-11.03209')
    refids.remove('RGI60-11.03241')
    # initialize the reference glaciers with a small border
    ref_gdirs = workflow.init_glacier_regions(rgidf=refids,
                                              from_prepro_level=3,
                                              prepro_border=10)
    # save these ids for later
    ADDITIONAL_REFERENCE_GLACIERS = refids

    # climate
    if baselineclimate == 'CRU':
        cfg.PARAMS['prcp_scaling_factor'] = 2.5
        cfg.PARAMS['temp_melt'] = -1.0

    if baselineclimate == 'HISTALP':
        cfg.PARAMS['baseline_climate'] = baselineclimate
        # and set standard histalp values
        cfg.PARAMS['prcp_scaling_factor'] = 1.75
        cfg.PARAMS['temp_melt'] = -1.75
        # run histalp climate on all glaciers!
        execute_entity_task(tasks.process_histalp_data,
                            gdirs + ref_gdirs,
                            y0=1849)

    # TODO: if I do use custom climate stuff like histalp_annual_mean:
    #   ->>>> look back at commits before 1.10.2019

    return gdirs
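
A usage sketch for the configuration helper above; the working directory is a placeholder and the glacier list is illustrative:

# Prepare a Hintereisferner run with HISTALP climate.
gdirs = configure('/tmp/relic_wd', ['RGI60-11.00897'],
                  baselineclimate='HISTALP', resetwd=True)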
Example 24
    # add to BASENAMES
    _doc = ('contains the observed and searched glacier from the synthetic '
            'experiment to find the initial state')
    cfg.BASENAMES['synthetic_experiment'] = ('synthetic_experiment.pkl', _doc)
    _doc = 'output of reconstruction'
    cfg.BASENAMES['reconstruction_output'] = ('reconstruction_output.pkl',
                                              _doc)

    plt.rcParams['figure.figsize'] = (8, 8)  # Default plot size

    # get rgi file
    rgi = get_demo_file('rgi_oetztal.shp')
    rgidf = salem.read_shapefile(rgi)

    # Initialize working directories
    gdir = workflow.init_glacier_regions(
        rgidf[rgidf.RGIId == 'RGI50-11.00897'])[0]
    #prepare_for_initializing([gdir])

    result = pd.Series()
    fls = gdir.read_pickle('model_flowlines')
    '''
    fls_obs = deepcopy(fls)
    i = 1
    for yr in np.arange(2000,1850,-50):
        t0 = yr-50
        te = yr
        run_optimization(gdir, t0, te, fls_obs)
        df, best = find_best_objective(gdir,fls,t0,te)
        result = result.append(df,ignore_index=True)
        pickle.dump(result, open(os.path.join(gdir.dir,'result_multistep'+str(i)),'wb'))
        fls_obs = df.loc[best,str(t0)].fls
    '''

if __name__ == '__main__':

    cfg.initialize()

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')
    cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oetztal.nc')
    cfg.PATHS['working_dir'] = \
        '/home/juliaeis/PycharmProjects/find_inital_state/test_HEF'
    #cfg.PATHS['working_dir'] = os.environ.get("S_WORKDIR")
    cfg.PARAMS['border'] = 80
    cfg.PARAMS['prcp_scaling_factor']  # note: no-op, no value is assigned here
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['optimize_inversion_params'] = True
    cfg.PARAMS['use_multiprocessing'] = True
    plt.rcParams['figure.figsize'] = (8, 8)  # Default plot size

    rgi = get_demo_file('rgi_oetztal.shp')
    gdirs = workflow.init_glacier_regions(salem.read_shapefile(rgi))
    workflow.execute_entity_task(tasks.glacier_masks, gdirs)

    for gdir in gdirs:
        if gdir.rgi_id.endswith('00897'):
            prepare_for_initializing([gdir])
            find_initial_state(gdir)
    '''
    pool = mp.Pool()
    pool.map(find_initial_state,gdirs)
    pool.close()
    pool.join()
    '''
Example 26
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
                      output_folder='', working_dir='', is_test=False,
                      demo=False, test_rgidf=None, test_intersects_file=None,
                      test_topofile=None, test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Time
    start = time.time()

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    climate_periods = [1920, 1960, 2000]

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    rgi_dir_name = 'RGI{}'.format(rgi_version)
    border_dir_name = 'b_{:03d}'.format(border)
    base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DEMO_GLACIERS.index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                        version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(4)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # L1 - initialize working directories
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L2 - Tasks
    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    workflow.execute_entity_task(tasks.process_cru_data, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L3 - Tasks
    task_list = [
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
                                     path=opath)

    # L3 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)

    # L4 - Tasks
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Copy mini data to new dir
    base_dir = os.path.join(base_dir, 'L4')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
                                              base_dir=base_dir)

    # L4 OK - compress all in output directory
    workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
    utils.base_dir_to_tar(base_dir)

    # Log
    m, s = divmod(time.time() - start, 60)
    h, m = divmod(m, 60)
    log.workflow('OGGM prepro_levels is done! Time needed: '
                 '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
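
A usage sketch for the prepro driver above, mirroring the benchmark call; all paths and the region choice are illustrative:

# Produce prepro levels L1-L4 for a small test sample of RGI region 11.
run_prepro_levels(rgi_version='61', rgi_reg='11', border=40,
                  output_folder='/tmp/oggm_prepro',
                  working_dir='/tmp/oggm_wd',
                  is_test=True)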
Example 27
# Read in the Benchmark RGI file
rgif = 'https://dl.dropboxusercontent.com/u/20930277/rgi_benchmark.zip'
rgif = utils.file_downloader(rgif)
with zipfile.ZipFile(rgif) as zf:
    zf.extractall(WORKING_DIR)
rgif = os.path.join(WORKING_DIR, 'rgi_benchmark.shp')
rgidf = salem.read_shapefile(rgif, cached=True)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf)  # reset=True, force=True

# Prepro tasks
task_list = [
    tasks.glacier_masks,
    tasks.compute_centerlines,
    tasks.compute_downstream_line,
    tasks.initialize_flowlines,
    tasks.compute_downstream_bedshape,
    tasks.catchment_area,
    tasks.catchment_intersections,
    tasks.catchment_width_geom,
    tasks.catchment_width_correction,
]
for task in task_list:
    execute_entity_task(task, gdirs)
Example 28
path = utils.get_demo_file('rofental_hydrosheds.shp')
basin = gpd.read_file(path)

# Take all glaciers in the Rofental Basin
in_bas = [basin.geometry.contains(shpg.Point(x, y))[0] for
          (x, y) in zip(rgidf.CenLon, rgidf.CenLat)]
rgidf = rgidf.loc[in_bas]

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.workflow('Starting OGGM run')
log.workflow('Number of glaciers: {}'.format(len(rgidf)))

# Go - get the pre-processed glacier directories
gdirs = workflow.init_glacier_regions(rgidf, from_prepro_level=4)

# We can step directly to a new experiment!
# Random climate representative for the recent climate (1985-2015)
# This is a kind of "commitment" run
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=1,
                             output_filesuffix='_commitment')
# Now we add a positive and a negative bias to the random temperature series
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=2,
                             temperature_bias=0.5,
                             output_filesuffix='_bias_p')
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                             nyears=300, y0=2000, seed=3,
                             temperature_bias=-0.5,
                             output_filesuffix='_bias_m')
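# The compile step below is not part of the original snippet; it is the
# standard OGGM follow-up that gathers each experiment (one per
# output_filesuffix) into a region-wide output file.
for suffix in ['_commitment', '_bias_p', '_bias_m']:
    utils.compile_run_output(gdirs, filesuffix=suffix)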
Example 29
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = not ON_TRAVIS

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')

    # Set up the paths and other stuff
    cfg.set_divides_db(get_demo_file('divides_workflow.shp'))
    cfg.PATHS['wgms_rgi_links'] = get_demo_file('RGI_WGMS_oetztal.csv')
    cfg.PATHS['glathida_rgi_links'] = get_demo_file('RGI_GLATHIDA_oetztal.csv')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oetztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Make sure the data is downloaded because the lock doesn't work
    cl = utils.get_cru_cl_file()

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['use_optimized_inversion_params'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        flowline.init_present_time_glacier(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

        # Climate related tasks
        # See if CRU is running
        cfg.PARAMS['temp_use_local_gradient'] = False
        cfg.PATHS['climate_file'] = '~'
        cru_dir = get_demo_file('cru_ts3.23.1901.2014.tmp.dat.nc')
        cfg.PATHS['cru_dir'] = os.path.dirname(cru_dir)
        with warnings.catch_warnings():
            # There is a warning from salem
            warnings.simplefilter("ignore")
            workflow.execute_entity_task(tasks.distribute_cru_style, gdirs)
        tasks.compute_ref_t_stars(gdirs)
        tasks.distribute_t_stars(gdirs)

        # Use histalp for the actual test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oetztal.nc')
        cfg.PATHS['cru_dir'] = '~'
        workflow.climate_tasks(gdirs)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
Example 30
# RGI file
path = utils.get_rgi_region_file(rgi_region, version=rgi_version)
rgidf = gpd.read_file(path)

# Select the glaciers in the Pyrenees
rgidf = rgidf.loc[rgidf['O2Region'] == '2']

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.workflow('Starting OGGM inversion run')
log.workflow('Number of glaciers: {}'.format(len(rgidf)))

# Go - get the pre-processed glacier directories
# We start at level 3, because we need all data for the inversion
gdirs = workflow.init_glacier_regions(rgidf, from_prepro_level=3,
                                      prepro_border=10)

# Default parameters
# Deformation: from Cuffey and Paterson 2010
glen_a = 2.4e-24
# Sliding: from Oerlemans 1997
fs = 5.7e-20

# Correction factors
factors = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
factors += [1.1, 1.2, 1.3, 1.5, 1.7, 2, 2.5, 3, 4, 5]
factors += [6, 7, 8, 9, 10]

# Run the inversion tasks with the given factors
for f in factors:
    # Without sliding (the snippet is cut off here; this completion
    # follows the pattern of the surrounding examples)
    suf = '_{:03d}_without_fs'.format(int(f * 10))
    workflow.execute_entity_task(tasks.mass_conservation_inversion, gdirs,
                                 glen_a=glen_a * f, fs=0,
                                 filesuffix=suf)
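    # Assumed continuation (sketch, not in the original snippet): the
    # same factor with sliding switched on, using the fs value above
    suf = '_{:03d}_with_fs'.format(int(f * 10))
    workflow.execute_entity_task(tasks.mass_conservation_inversion, gdirs,
                                 glen_a=glen_a * f, fs=fs,
                                 filesuffix=suf)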
Example 31
    def test_inversion(self):

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_mustar,
            tasks.apparent_mb,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.distribute_thickness_per_altitude,
                            gdirs,
                            varname_suffix='_alt')
        execute_entity_task(tasks.distribute_thickness_interp,
                            gdirs,
                            varname_suffix='_int')

        # Reference data
        gdir = gdirs[0]
        df = self.get_ref_data(gdir)

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:

            v = ds.distributed_thickness_alt
            df['oggm_alt'] = v.isel(x=('z', df['i']), y=('z', df['j']))
            v = ds.distributed_thickness_int
            df['oggm_int'] = v.isel(x=('z', df['i']), y=('z', df['j']))

            ds['ref'] = xr.zeros_like(ds.distributed_thickness_int) * np.NaN
            ds['ref'].data[df['j'], df['i']] = df['thick']

        rmsd_int = ((df.oggm_int - df.thick)**2).mean()**.5
        rmsd_alt = ((df.oggm_alt - df.thick)**2).mean()**.5
        assert rmsd_int < 80
        assert rmsd_alt < 80

        dfm = df.mean()
        np.testing.assert_allclose(dfm.thick, dfm.oggm_int, 50)
        np.testing.assert_allclose(dfm.thick, dfm.oggm_alt, 50)

        if do_plot:
            import matplotlib.pyplot as plt
            df.plot(kind='scatter', x='oggm_int', y='thick')
            plt.axis('equal')
            df.plot(kind='scatter', x='oggm_alt', y='thick')
            plt.axis('equal')
            f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3))
            ds.ref.plot(ax=ax1)
            ds.distributed_thickness_int.plot(ax=ax2)
            ds.distributed_thickness_alt.plot(ax=ax3)
            plt.tight_layout()
            plt.show()
Example 32
def up_to_inversion():
    """Run the tasks you want."""

    # test directory
    testdir = os.path.join(current_dir, 'tmp')
    if not os.path.exists(testdir):
        os.makedirs(testdir)
    clean_dir(testdir)

    # Init
    cfg.initialize()

    # Prevent multiprocessing
    cfg.use_mp = False

    # Working dir
    cfg.paths['working_dir'] = testdir

    cfg.paths['srtm_file'] = get_demo_file('srtm_oeztal.tif')

    # Set up the paths and other stuff
    cfg.set_divides_db(get_demo_file('HEF_divided.shp'))
    cfg.paths['histalp_file'] = get_demo_file('HISTALP_oeztal.nc')

    # Get test glaciers (all glaciers with MB or Thickness data)
    cfg.paths['wgms_rgi_links'] = get_demo_file('RGI_WGMS_oeztal.csv')
    cfg.paths['glathida_rgi_links'] = get_demo_file('RGI_GLATHIDA_oeztal.csv')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oeztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    # First preprocessing tasks
    workflow.gis_prepro_tasks(gdirs)

    # Climate related tasks
    workflow.climate_tasks(gdirs)

    # Merge climate and catchments
    workflow.execute_task(inversion.prepare_for_inversion, gdirs)
    fs, fd = inversion.optimize_inversion_params(gdirs)

    # Tests
    dfids = cfg.paths['glathida_rgi_links']
    gtd_df = pd.read_csv(dfids).sort_values(by=['RGI_ID'])
    dfids = gtd_df['RGI_ID'].values
    ref_gdirs = [gdir for gdir in gdirs if gdir.rgi_id in dfids]

    # Account for area differences between glathida and rgi
    ref_area_km2 = gtd_df.RGI_AREA.values
    ref_cs = gtd_df.VOLUME.values / (gtd_df.GTD_AREA.values**1.375)
    ref_volume_km3 = ref_cs * ref_area_km2**1.375

    vol = []
    area = []
    rgi = []
    for gdir in ref_gdirs:
        v, a = inversion.inversion_parabolic_point_slope(gdir, fs=fs, fd=fd,
                                                         write=True)
        vol.append(v)
        area.append(a)
        rgi.append(gdir.rgi_id)

    df = pd.DataFrame()
    df['rgi'] = rgi
    df['area'] = area
    df['ref_vol'] = ref_volume_km3
    df['oggm_vol'] = np.array(vol) * 1e-9
    df['vas_vol'] = 0.034*(ref_area_km2**1.375)
    df = df.set_index('rgi')

    shutil.rmtree(testdir)

    return df
Example 33
    def test_inversion(self):

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_t_star,
            tasks.mu_star_calibration,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs,
                            varname_suffix='_alt')
        execute_entity_task(tasks.distribute_thickness_interp, gdirs,
                            varname_suffix='_int')

        # Reference data
        gdir = gdirs[0]
        df = self.get_ref_data(gdir)

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:

            v = ds.distributed_thickness_alt
            df['oggm_alt'] = v.isel(x=('z', df['i']), y=('z', df['j']))
            v = ds.distributed_thickness_int
            df['oggm_int'] = v.isel(x=('z', df['i']), y=('z', df['j']))

            ds['ref'] = xr.zeros_like(ds.distributed_thickness_int) * np.NaN
            ds['ref'].data[df['j'], df['i']] = df['thick']

        rmsd_int = ((df.oggm_int - df.thick) ** 2).mean() ** .5
        rmsd_alt = ((df.oggm_alt - df.thick) ** 2).mean() ** .5
        assert rmsd_int < 80
        assert rmsd_alt < 80

        dfm = df.mean()
        np.testing.assert_allclose(dfm.thick, dfm.oggm_int, 50)
        np.testing.assert_allclose(dfm.thick, dfm.oggm_alt, 50)

        if do_plot:
            import matplotlib.pyplot as plt
            df.plot(kind='scatter', x='oggm_int', y='thick')
            plt.axis('equal')
            df.plot(kind='scatter', x='oggm_alt', y='thick')
            plt.axis('equal')
            f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3))
            ds.ref.plot(ax=ax1)
            ds.distributed_thickness_int.plot(ax=ax2)
            ds.distributed_thickness_alt.plot(ax=ax3)
            plt.tight_layout()
            plt.show()
Example 34
    def test_mb(self):

        # This is a function to produce the MB function needed by Anna

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_t_star,
            tasks.mu_star_calibration,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
        demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))

        mbref = mbref.get_vardata()
        mbref[mbref == -9999] = np.NaN
        demref = demref.get_vardata()[np.isfinite(mbref)]
        mbref = mbref[np.isfinite(mbref)] * 1000

        # compute the bias to make it 0 SMB on the 2D DEM
        rho = cfg.PARAMS['ice_density']
        mbmod = ConstantMassBalance(gdirs[0], bias=0)
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
        mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
        mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
        np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)

        # Same for ref
        mbref = mbref - np.average(mbref)
        np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)

        # Fit poly
        p = np.polyfit(demref, mbref, deg=2)
        poly = np.poly1d(p)
        myfit = poly(demref)
        np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)

        if do_plot:
            import matplotlib.pyplot as plt
            plt.scatter(mbref, demref, s=5,
                        label='Obs (2007-2012), shifted to Avg(SMB) = 0')
            plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
            plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
            plt.xlabel('MB (mm w.e. yr-1)')
            plt.ylabel('Altitude (m)')
            plt.legend()
            plt.show()
Example 35
def run_prepro_levels(rgi_version=None,
                      rgi_reg=None,
                      border=None,
                      output_folder='',
                      working_dir='',
                      dem_source='',
                      is_test=False,
                      demo=False,
                      test_rgidf=None,
                      test_intersects_file=None,
                      test_topofile=None,
                      test_crudir=None,
                      disable_mp=False,
                      timeout=0,
                      max_level=4,
                      logging_level='WORKFLOW'):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    dem_source : str
        which DEM source to use: default, SOURCE_NAME or ALL
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    demo : bool
        to run the prepro for the list of demo glaciers
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    disable_mp : bool
        disable multiprocessing
    max_level : int
        the maximum pre-processing level before stopping
    logging_level : str
        the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Input check
    if max_level not in [1, 2, 3, 4]:
        raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')

    # Time
    start = time.time()

    def _time_log():
        # Log util
        m, s = divmod(time.time() - start, 60)
        h, m = divmod(m, 60)
        log.workflow('OGGM prepro_levels is done! Time needed: '
                     '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level=logging_level)

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = not disable_mp

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # Timeout
    cfg.PARAMS['task_timeout'] = timeout

    # For statistics
    climate_periods = [1920, 1960, 2000]

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    rgi_dir_name = 'RGI{}'.format(rgi_version)
    border_dir_name = 'b_{:03d}'.format(border)
    base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    if demo:
        rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
    elif test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(
            utils.get_rgi_region_file(rgi_reg, version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(4)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # L1 - initialize working directories
    # Which DEM source?
    if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
        log.workflow('Running prepro on ALL sources')
        for i, s in enumerate(utils.DEM_SOURCES):
            rs = i == 0
            rgidf['DEM_SOURCE'] = s
            log.workflow('Running prepro on sources: {}'.format(s))
            gdirs = workflow.init_glacier_regions(rgidf, reset=rs, force=rs)
            workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)

        # Compress all in output directory
        l_base_dir = os.path.join(base_dir, 'L1')
        workflow.execute_entity_task(utils.gdir_to_tar,
                                     gdirs,
                                     delete=False,
                                     base_dir=l_base_dir)
        utils.base_dir_to_tar(l_base_dir)

        _time_log()
        return

    if dem_source:
        # Force a given source
        rgidf['DEM_SOURCE'] = dem_source.upper()

    # L1 - go
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L1', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L1 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 1:
        _time_log()
        return

    # L2 - Tasks
    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    workflow.execute_entity_task(tasks.process_cru_data, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L2', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # L2 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L2')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 2:
        _time_log()
        return

    # L3 - Tasks
    task_list = [
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)

    # Glacier stats
    sum_dir = os.path.join(base_dir, 'L3', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)
    opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=climate_periods,
                                     path=opath)

    # L3 OK - compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L3')
    workflow.execute_entity_task(utils.gdir_to_tar,
                                 gdirs,
                                 delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
    if max_level == 3:
        _time_log()
        return

    # L4 - No tasks: add some stats for consistency and make the dirs small
    sum_dir = os.path.join(base_dir, 'L4', 'summary')
    utils.mkdir(sum_dir)
    opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
    utils.compile_glacier_statistics(gdirs, path=opath)

    # Copy mini data to new dir
    base_dir = os.path.join(base_dir, 'L4')
    mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir,
                                              gdirs,
                                              base_dir=base_dir)

    # L4 OK - compress all in output directory
    workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
    utils.base_dir_to_tar(base_dir)

    _time_log()
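
# Hypothetical usage sketch for run_prepro_levels (all arguments below
# are invented for illustration; adapt region, border and paths):
if __name__ == '__main__':
    run_prepro_levels(rgi_version='61', rgi_reg='11', border=80,
                      output_folder='/path/to/output',
                      working_dir='/path/to/workdir',
                      max_level=4)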
Example 36
def run_benchmark(rgi_version=None, rgi_reg=None, border=None,
                  output_folder='', working_dir='', is_test=False,
                  test_rgidf=None, test_intersects_file=None,
                  test_topofile=None, test_crudir=None):
    """Does the actual job.

    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    test_crudir : str
        for testing purposes only
    """

    # TODO: temporarily silence Fiona deprecation warnings
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Module logger
    log = logging.getLogger(__name__)

    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW')

    # Local paths
    utils.mkdir(working_dir)
    cfg.PATHS['working_dir'] = working_dir

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border

    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True

    # For statistics
    odf = pd.DataFrame()

    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    base_dir = os.path.join(output_folder)

    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))

    # Read RGI
    start = time.time()
    if test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                        version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)

    if is_test:
        # Just for fun
        rgidf = rgidf.sample(2)
    _add_time_to_df(odf, 'Read RGI', time.time()-start)

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))

    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile

    # Initialize working directories
    start = time.time()
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
    _add_time_to_df(odf, 'init_glacier_regions', time.time()-start)

    # Pre-download other files just in case
    if test_crudir is None:
        _ = utils.get_cru_file(var='tmp')
        _ = utils.get_cru_file(var='pre')
    else:
        cfg.PATHS['cru_dir'] = test_crudir

    # Tasks
    task_list = [
        tasks.process_cru_data,
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        start = time.time()
        workflow.execute_entity_task(task, gdirs)
        _add_time_to_df(odf, task.__name__, time.time()-start)

    # Runs
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, bias=0, seed=0,
                                 output_filesuffix='_tstar')
    _add_time_to_df(odf, 'run_random_climate_tstar_250', time.time()-start)

    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, y0=1995, seed=0,
                                 output_filesuffix='_commit')
    _add_time_to_df(odf, 'run_random_climate_commit_250', time.time()-start)

    # Compile results
    start = time.time()
    utils.compile_glacier_statistics(gdirs)
    _add_time_to_df(odf, 'compile_glacier_statistics', time.time()-start)

    start = time.time()
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=[1920, 1960, 2000])
    _add_time_to_df(odf, 'compile_climate_statistics', time.time()-start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_tstar')
    _add_time_to_df(odf, 'compile_run_output_tstar', time.time()-start)

    start = time.time()
    utils.compile_run_output(gdirs, filesuffix='_commit')
    _add_time_to_df(odf, 'compile_run_output_commit', time.time()-start)

    # Log
    opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
    odf.index.name = 'Task'
    odf.to_csv(opath)
    log.workflow('OGGM benchmarks is done!')
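
# Hypothetical usage sketch for run_benchmark (paths are placeholders,
# not from the original script):
run_benchmark(rgi_version='61', rgi_reg='11', border=80,
              output_folder='/path/to/output',
              working_dir='/path/to/workdir',
              is_test=True)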
Example 37
cfg.PARAMS['invert_with_sliding'] = False
cfg.PARAMS['min_slope'] = 2
cfg.PARAMS['max_shape_param'] = 0.006
cfg.PARAMS['max_thick_to_width_ratio'] = 0.5
cfg.PARAMS['base_binsize'] = 100.
cfg.PARAMS['temp_use_local_gradient'] = False

# Either do calibration (takes a long time) or do itmix
do_calib = True
do_itmix = True

log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)

# For calibration
if do_calib:
    # gdirs = [gd for gd in gdirs if gd.glacier_type != 'Ice cap']
    # gdirs = [gd for gd in gdirs if gd.terminus_type == 'Land-terminating']

    # Basic tasks
    task_list = [
        itmix.glacier_masks_itmix,
        tasks.compute_centerlines,
        tasks.catchment_area,
        tasks.initialize_flowlines,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
    ]
    # Completion of the truncated snippet, following the pattern of
    # the surrounding examples:
    for task in task_list:
        workflow.execute_entity_task(task, gdirs)
Example 38
    def setup_cache(self):

        setattr(full_workflow.setup_cache, "timeout", 360)

        utils.mkdir(self.testdir, reset=True)
        self.cfg_init()

        # Pre-download other files which will be needed later
        utils.get_cru_cl_file()
        utils.get_cru_file(var='tmp')
        utils.get_cru_file(var='pre')

        # Get the RGI glaciers for the run.
        rgi_list = ['RGI60-01.10299', 'RGI60-11.00897', 'RGI60-18.02342']
        rgidf = utils.get_rgi_glacier_entities(rgi_list)

        # We use intersects
        db = utils.get_rgi_intersects_region_file(version='61',
                                                  rgi_ids=rgi_list)
        cfg.set_intersects_db(db)

        # Sort for more efficient parallel computing
        rgidf = rgidf.sort_values('Area', ascending=False)

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.compute_downstream_line,
            tasks.compute_downstream_bedshape,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Climate tasks -- only data IO and tstar interpolation!
        execute_entity_task(tasks.process_cru_data, gdirs)
        execute_entity_task(tasks.local_mustar, gdirs)
        execute_entity_task(tasks.apparent_mb, gdirs)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)
        # We use the default parameters for this run
        execute_entity_task(tasks.mass_conservation_inversion, gdirs)
        execute_entity_task(tasks.filter_inversion_output, gdirs)

        # Final preparation for the run
        execute_entity_task(tasks.init_present_time_glacier, gdirs)

        # Random climate representative for the tstar climate, without bias
        # In an ideal world this would imply that the glaciers remain stable,
        # but it doesn't have to be so
        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            bias=0,
                            nyears=100,
                            output_filesuffix='_tstar')

        execute_entity_task(tasks.run_constant_climate,
                            gdirs,
                            y0=1990,
                            nyears=100,
                            output_filesuffix='_pd')

        # Compile output
        utils.glacier_characteristics(gdirs)
        utils.compile_run_output(gdirs, filesuffix='_tstar')
        utils.compile_run_output(gdirs, filesuffix='_pd')
        utils.compile_climate_input(gdirs)

        return gdirs
Example 39
def single_flowline_glacier_directory(rgi_id, reset=False, prepro_border=80):
    """Prepare a GlacierDirectory for PyGEM (single flowline to start with)

    Parameters
    ----------
    rgi_id : str
        the rgi id of the glacier
    reset : bool
        set to True to delete any pre-existing files. If False (the default),
        the directory won't be re-downloaded if already available locally,
        in order to save time.
    prepro_border : int
        the size of the glacier map: 10, 80, 160, 250

    Returns
    -------
    a GlacierDirectory object
    """

    if not isinstance(rgi_id, str):
        raise ValueError('We expect rgi_id to be a string')
    if 'RGI60-' not in rgi_id:
        raise ValueError('OGGM currently expects IDs to start with RGI60-')

    cfg.initialize()
    wd = utils.gettempdir(dirname='pygem-{}-b{}'.format(rgi_id, prepro_border),
                          reset=reset)
    cfg.PATHS['working_dir'] = wd
    cfg.PARAMS['use_multiple_flowlines'] = False

    # Check if folder is already processed
    try:
        gdir = utils.GlacierDirectory(rgi_id)
        gdir.read_pickle('model_flowlines')
        # If the above works the directory is already processed, return
        return gdir
    except OSError:
        pass

    # If not ready, we download the preprocessed data for this glacier
    gdirs = workflow.init_glacier_regions([rgi_id],
                                          from_prepro_level=2,
                                          prepro_border=prepro_border)
    # Compute all the stuff
    task_list = [
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.catchment_area,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.compute_downstream_bedshape,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        # The order matters!
        workflow.execute_entity_task(task, gdirs)

    return gdirs[0]
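
# Usage sketch (assumption: Hintereisferner, the RGI id used elsewhere
# in these examples, with the default 80-pixel border):
gdir = single_flowline_glacier_directory('RGI60-11.00897')
print(gdir.rgi_id, gdir.rgi_area_km2)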
Example 40
    def test_optimize_inversion(self):

        # Download the RGI file for the run
        # Make a new dataframe of those
        rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))

        # Go - initialize working directories
        gdirs = workflow.init_glacier_regions(rgidf)

        # Preprocessing tasks
        task_list = [
            tasks.glacier_masks,
            tasks.compute_centerlines,
            tasks.initialize_flowlines,
            tasks.catchment_area,
            tasks.catchment_intersections,
            tasks.catchment_width_geom,
            tasks.catchment_width_correction,
            tasks.process_cru_data,
            tasks.local_t_star,
            tasks.mu_star_calibration,
        ]
        for task in task_list:
            execute_entity_task(task, gdirs)

        # Reference data
        gdir = gdirs[0]
        df = self.get_ref_data(gdir)

        # Inversion tasks
        execute_entity_task(tasks.prepare_for_inversion, gdirs)

        glen_a = cfg.PARAMS['inversion_glen_a']
        fs = cfg.PARAMS['inversion_fs']

        def to_optimize(x):
            tasks.mass_conservation_inversion(gdir,
                                              glen_a=glen_a * x[0],
                                              fs=fs * x[1])
            tasks.distribute_thickness_per_altitude(gdir)
            with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
                thick = ds.distributed_thickness.isel(x=('z', df['i']),
                                                      y=('z', df['j']))
                out = (np.abs(thick - df.thick)).mean()
            return out

        opti = optimization.minimize(to_optimize, [1., 1.],
                                     bounds=((0.01, 10), (0.01, 10)),
                                     tol=0.1)
        # Check results and save.
        execute_entity_task(tasks.mass_conservation_inversion, gdirs,
                            glen_a=glen_a*opti['x'][0],
                            fs=0)
        execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
            df['oggm'] = ds.distributed_thickness.isel(x=('z', df['i']),
                                                       y=('z', df['j']))
            ds['ref'] = xr.zeros_like(ds.distributed_thickness) * np.NaN
            ds['ref'].data[df['j'], df['i']] = df['thick']

        rmsd = ((df.oggm - df.thick) ** 2).mean() ** .5
        assert rmsd < 60

        dfm = df.mean()
        np.testing.assert_allclose(dfm.thick, dfm.oggm, 10)
        if do_plot:
            import matplotlib.pyplot as plt
            df.plot(kind='scatter', x='oggm', y='thick')
            plt.axis('equal')
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
            ds.ref.plot(ax=ax1)
            ds.distributed_thickness.plot(ax=ax2)
            plt.tight_layout()
            plt.show()
Example 41
def up_to_inversion(reset=False):
    """Run the tasks you want."""

    # test directory
    if not os.path.exists(TEST_DIR):
        os.makedirs(TEST_DIR)
    if reset:
        clean_dir(TEST_DIR)

    # Init
    cfg.initialize()

    # Use multiprocessing
    cfg.PARAMS['use_multiprocessing'] = not ON_TRAVIS

    # Working dir
    cfg.PATHS['working_dir'] = TEST_DIR

    cfg.PATHS['dem_file'] = get_demo_file('srtm_oeztal.tif')

    # Set up the paths and other stuff
    cfg.set_divides_db(get_demo_file('HEF_divided.shp'))
    cfg.PATHS['wgms_rgi_links'] = get_demo_file('RGI_WGMS_oeztal.csv')
    cfg.PATHS['glathida_rgi_links'] = get_demo_file('RGI_GLATHIDA_oeztal.csv')

    # Read in the RGI file
    rgi_file = get_demo_file('rgi_oeztal.shp')
    rgidf = gpd.GeoDataFrame.from_file(rgi_file)

    # Params
    cfg.PARAMS['border'] = 70
    cfg.PARAMS['use_inversion_params'] = True

    # Go
    gdirs = workflow.init_glacier_regions(rgidf)

    try:
        flowline.init_present_time_glacier(gdirs[0])
    except Exception:
        reset = True

    if reset:
        # First preprocessing tasks
        workflow.gis_prepro_tasks(gdirs)

        # Climate related tasks
        # See if CRU is running
        cfg.PARAMS['temp_use_local_gradient'] = False
        cfg.PATHS['climate_file'] = '~'
        cru_dir = get_demo_file('cru_ts3.23.1901.2014.tmp.dat.nc')
        cfg.PATHS['cru_dir'] = os.path.dirname(cru_dir)
        workflow.climate_tasks(gdirs)

        # Use histalp for the actual test
        cfg.PARAMS['temp_use_local_gradient'] = True
        cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oeztal.nc')
        cfg.PATHS['cru_dir'] = '~'
        workflow.climate_tasks(gdirs)

        # Inversion
        workflow.inversion_tasks(gdirs)

    return gdirs
Example 42
def initialization_selection():
    # -------------
    # Initialization
    # -------------
    cfg.initialize()

    # working directories
    cfg.PATHS['working_dir'] = mbcfg.PATHS['working_dir']

    cfg.PATHS['rgi_version'] = mbcfg.PARAMS['rgi_version']

    # We are running the calibration ourselves
    cfg.PARAMS['run_mb_calibration'] = True

    # No need for intersects since this has an effect on the inversion only
    cfg.PARAMS['use_intersects'] = False

    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True

    # Set to True for operational runs
    # maybe also here?
    cfg.PARAMS['continue_on_error'] = False

    # set negative flux filtering to false. should be standard soon
    cfg.PARAMS['filter_for_neg_flux'] = False

    # Pre-download other files which will be needed later
    _ = utils.get_cru_file(var='tmp')
    _ = utils.get_cru_file(var='pre')
    rgi_dir = utils.get_rgi_dir(version=cfg.PATHS['rgi_version'])

    # Get the reference glacier ids (they are different for each RGI version)
    df, _ = utils.get_wgms_files()
    rids = df['RGI{}0_ID'.format(cfg.PATHS['rgi_version'])]

    # Make a new dataframe with those (this takes a while)
    rgidf = []
    for reg in df['RGI_REG'].unique():
        if reg == '19':
            continue  # we have no climate data in Antarctica
        if mbcfg.PARAMS['region'] is not None\
                and reg != mbcfg.PARAMS['region']:
            continue

        fn = '*' + reg + '_rgi{}0_*.shp'.format(cfg.PATHS['rgi_version'])
        fs = list(sorted(glob(os.path.join(rgi_dir, '*', fn))))[0]
        sh = gpd.read_file(fs)
        rgidf.append(sh.loc[sh.RGIId.isin(rids)])
    rgidf = pd.concat(rgidf)
    rgidf.crs = sh.crs  # for geolocalisation

    # reduce Europe to Histalp area (exclude Pyrenees, etc...)
    if mbcfg.PARAMS['histalp']:
        rgidf = rgidf.loc[(rgidf.CenLon >= 4) & (rgidf.CenLon < 20) &
                          (rgidf.CenLat >= 43) & (rgidf.CenLat < 47)]

    # We have to check which of them actually have enough mb data.
    # Let OGGM do it:
    gdirs = workflow.init_glacier_regions(rgidf)
    # We need to know which period we have data for

    if mbcfg.PARAMS['histalp']:
        cfg.PATHS['climate_file'] = mbcfg.PATHS['histalpfile']
        execute_entity_task(tasks.process_custom_climate_data, gdirs)
    else:
        execute_entity_task(tasks.process_cru_data, gdirs, print_log=False)

    gdirs = utils.get_ref_mb_glaciers(gdirs)
    # Keep only these
    rgidf = rgidf.loc[rgidf.RGIId.isin([g.rgi_id for g in gdirs])]

    # Save
    rgidf.to_file(os.path.join(cfg.PATHS['working_dir'],
                               'mb_ref_glaciers.shp'))

    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)

    # Go - initialize working directories
    gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)

    return gdirs
Example 43
With_calving = True

# Run only for Marine terminating
glac_type = [0, 2]
keep_glactype = [(i not in glac_type) for i in rgidf.TermType]
rgidf = rgidf.iloc[keep_glactype]

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting run for RGI reg: ' + rgi_region)
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
# -----------------------------------
gdirs = workflow.init_glacier_regions(rgidf)

k = 2.4

# Get terminus widths
data_link = os.path.join(MAIN_PATH,
                         'input_data/observations_widths_depths.csv')
dfmac = pd.read_csv(data_link, index_col=0)


# Defining a calving function
def calving_from_depth(gdir, k):
    """ Finds a calving flux based on the approaches proposed by
        Huss and Hock (2015) and Oerlemans and Nick (2005).
        We take the initial output of the model and surface elevation data
        to calculate the water depth of the calving front.
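
# The body of calving_from_depth is cut off in this snippet. A minimal
# sketch of the water-depth calving law the docstring describes
# (Oerlemans and Nick, 2005); names and units below are assumptions,
# not the original implementation:
def _calving_flux_sketch(water_depth, thick, width, k=2.4):
    # q = k * d * H * w, with k [yr-1] the tuning parameter defined
    # above, d [m] the water depth at the calving front, H [m] the
    # frontal ice thickness and w [m] the terminus width.
    # Inputs in meters give m3 yr-1; convert to km3 yr-1.
    return k * water_depth * thick * width * 1e-9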
Example 44
                               'glaciers_with_no_racmo_data.csv')

    d_no_data = pd.read_csv(no_data)
    ids_no_data = d_no_data.RGIId.values
    keep_no_data = [(i not in ids_no_data) for i in rgidf.RGIId]
    rgidf = rgidf.iloc[keep_no_data]

    cfg.PATHS['working_dir'] = path
    print(cfg.PATHS['working_dir'])
    cfg.PARAMS['border'] = 20
    cfg.PARAMS['use_tar_shapefiles'] = False
    cfg.PARAMS['use_intersects'] = True
    cfg.PARAMS['use_compression'] = False
    cfg.PARAMS['compress_climate_netcdf'] = False

    gdirs = workflow.init_glacier_regions(rgidf, reset=False)

    for gdir in gdirs:

        # Get inversion output
        inv_c = gdir.read_pickle('inversion_output')[-1]

        d = {'thick_end_fls': inv_c['thick'][-5:],
             'width_end_fls': inv_c['width'][-5:],
             'is_rectangular': inv_c['is_rectangular'][-5:],
             'slope': inv_c['slope_angle'][-5:]}

        data_frame = pd.DataFrame(data=d)

        data_frame.to_csv(os.path.join(exp_dir_output, gdir.rgi_id + '.csv'))