Example #1
    def _get_2d_annual_climate(self, heights, year):
        # Avoid code duplication with a getter routine
        year = np.floor(year)
        if self.repeat:
            year = self.ys + (year - self.ys) % (self.ye - self.ys + 1)
        if year < self.ys or year > self.ye:
            raise ValueError('year {} out of the valid time bounds: '
                             '[{}, {}]'.format(year, self.ys, self.ye))
        pok = np.where(self.years == year)[0]
        if len(pok) < 1:
            raise ValueError('Year {} not in record'.format(int(year)))

        # Read timeseries
        itemp = self.temp[pok] + self.temp_bias
        iprcp = self.prcp[pok] * self.prcp_bias
        igrad = self.grad[pok]

        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        heights = np.asarray(heights)
        npix = len(heights)
        grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
        grad_temp *= (heights.repeat(12).reshape(grad_temp.shape) -
                      self.ref_hgt)
        temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
        temp2dformelt = temp2d - self.t_melt
        clip_min(temp2dformelt, 0, out=temp2dformelt)

        # Compute solid precipitation from total precipitation
        prcp = np.atleast_2d(iprcp).repeat(npix, 0)
        fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)
        prcpsol = prcp * clip_array(fac, 0, 1)

        return temp2d, temp2dformelt, prcp, prcpsol
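A minimal sketch of the downscaling pattern above, with hypothetical numbers and NumPy's own clip in place of OGGM's clip_min: each monthly temperature is lapsed to every height with the local gradient, giving a (npix, nt) array.

import numpy as np

heights = np.array([2500., 3000., 3500.])   # hypothetical flowline heights (m)
itemp = np.array([-5., -2., 1.])            # monthly temps at ref_hgt (deg C)
igrad = np.full(3, -0.0065)                 # monthly temp gradients (K m-1)
ref_hgt, t_melt = 2800., 0.                 # assumed reference height and melt threshold

# Broadcasting gives the same result as the repeat/reshape pattern above
temp2d = itemp[np.newaxis, :] + igrad * (heights[:, np.newaxis] - ref_hgt)
temp2dformelt = np.clip(temp2d - t_melt, 0, None)   # shape (npix, nt)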
Example #2
def add_consensus_thickness(gdir, base_url=None):
    """Add the consensus thickness estimate to the gridded_data file.

    varname: consensus_ice_thickness

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    base_url : str
        where to find the thickness data. Default is
        https://cluster.klima.uni-bremen.de/~fmaussion/icevol/composite
    """

    if base_url is None:
        base_url = default_base_url
    if not base_url.endswith('/'):
        base_url += '/'

    rgi_str = gdir.rgi_id
    rgi_reg_str = rgi_str[:8]

    url = base_url + rgi_reg_str + '/' + rgi_str + '_thickness.tif'
    input_file = utils.file_downloader(url)

    dsb = salem.GeoTiff(input_file)
    thick = utils.clip_min(dsb.get_vardata(), 0)
    in_volume = thick.sum() * dsb.grid.dx**2
    thick = gdir.grid.map_gridded_data(thick, dsb.grid, interp='linear')

    # Correct for volume
    thick = utils.clip_min(thick.filled(0), 0)
    out_volume = thick.sum() * gdir.grid.dx**2
    if out_volume > 0:
        thick *= in_volume / out_volume

    # We mask zero ice as nodata
    thick = np.where(thick == 0, np.NaN, thick)

    # Write
    with utils.ncDataset(gdir.get_filepath('gridded_data'), 'a') as nc:

        vn = 'consensus_ice_thickness'
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', (
                'y',
                'x',
            ), zlib=True)
        v.units = 'm'
        ln = 'Ice thickness from the consensus estimate'
        v.long_name = ln
        v.base_url = base_url
        v[:] = thick
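The volume correction can be looked at in isolation. A sketch with made-up numbers, assuming square grid cells: the regridded field is rescaled so that its total volume matches the source raster.

import numpy as np

thick_src = np.array([[10., 20.], [0., 30.]])   # source thickness (m)
dx_src, dx_dst = 100., 130.                     # hypothetical cell sizes (m)
in_volume = thick_src.sum() * dx_src**2

thick_dst = np.array([[12., 18.]])              # thickness after (imaginary) regridding
out_volume = thick_dst.sum() * dx_dst**2
if out_volume > 0:
    thick_dst *= in_volume / out_volume         # total ice volume is conserved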
Example #3
def filter_inversion_output(gdir):
    """Filters the last few grid points after the physically-based inversion.

    For various reasons (but mostly: the equilibrium assumption), the last few
    grid points on a glacier flowline are often noisy and create unphysical
    depressions. Here we try to correct for that. It is not volume conserving,
    but area conserving.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    """

    if gdir.is_tidewater:
        # No need for filter in tidewater case
        return

    if not gdir.has_file('downstream_line'):
        raise InvalidWorkflowError('filter_inversion_output now needs a '
                                   'previous call to the '
                                   'compute_downstream_line and '
                                   'compute_downstream_bedshape tasks')

    dic_ds = gdir.read_pickle('downstream_line')
    bs = np.average(dic_ds['bedshapes'][:3])

    n = -5

    cls = gdir.read_pickle('inversion_output')
    cl = cls[-1]

    # First guess thickness based on width
    w = cl['width'][n:]
    s = w**3 * bs / 6
    h = 3 / 2 * s / w

    # Smoothing things out a bit
    hts = np.append(np.append(cl['thick'][n - 3:n], h), 0)
    h = utils.smooth1d(hts, 3)[n - 1:-1]

    # Recompute bedshape based on that
    bs = utils.clip_min(4 * h / w**2, cfg.PARAMS['mixed_min_shape'])

    # OK, done
    s = w**3 * bs / 6

    cl['thick'][n:] = 3 / 2 * s / w
    cl['volume'][n:] = s * cl['dx']
    cl['is_trapezoid'][n:] = False
    cl['is_rectangular'][n:] = False

    gdir.write_pickle(cls, 'inversion_output')

    # output the volume here - this simplifies code for some downstream funcs
    return np.sum([np.sum(cl['volume']) for cl in cls])
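The width-thickness relations used here follow from a parabolic bed y = bs * x**2: the section area is S = 2/3 * w * h with a centre depth h = bs * w**2 / 4, which combine to S = bs * w**3 / 6 and bs = 4 * h / w**2. A quick numerical check with hypothetical values:

import numpy as np

w, bs = 400., 1e-3        # width (m) and bedshape parameter (m-1), made up
s = w**3 * bs / 6         # parabolic section area
h = 3 / 2 * s / w         # thickness from S = 2/3 * w * h
assert np.isclose(bs, 4 * h / w**2)   # recovers the bedshape used in the filter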
Example #4
    def step(self, dt):
        """Advance one step."""

        div_q, dt_cfl = self.diffusion_upstream_2d()

        dt_use = utils.clip_scalar(np.min([dt_cfl, dt]), 0, self.max_dt)

        self.ice_thick = utils.clip_min(
            self.surface_h + (self.get_mb() + div_q) * dt_use - self.bed_topo,
            0)

        # Next step
        self.t += dt_use
        return dt
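The time-step clipping can be sketched on its own (a hypothetical max_dt default; in the class it is self.max_dt): the step actually taken is the requested step, limited by the CFL-stable step and a hard maximum.

import numpy as np

def cfl_limited_step(requested_dt, dt_cfl, max_dt=31 * 86400):
    # Use the smaller of the requested and CFL-stable steps, clipped to [0, max_dt]
    return float(np.clip(min(dt_cfl, requested_dt), 0, max_dt))

cfl_limited_step(5 * 86400, 2 * 86400)   # -> 172800.0: the CFL limit wins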
Example #5
    def get_monthly_climate(self, heights, year=None):
        """Monthly climate information at given heights.

        Note that prcp is corrected with the precipitation factor and that
        all other model biases (temp and prcp) are applied.

        Returns
        -------
        (temp, tempformelt, prcp, prcpsol)
        """

        y, m = floatyear_to_date(year)
        if self.repeat:
            y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)
        if y < self.ys or y > self.ye:
            raise ValueError('year {} out of the valid time bounds: '
                             '[{}, {}]'.format(y, self.ys, self.ye))
        pok = np.where((self.years == y) & (self.months == m))[0][0]

        # Read timeseries
        itemp = self.temp[pok] + self.temp_bias
        iprcp = self.prcp[pok] * self.prcp_bias
        igrad = self.grad[pok]

        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        npix = len(heights)
        temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)
        tempformelt = temp - self.t_melt
        clip_min(tempformelt, 0, out=tempformelt)

        # Compute solid precipitation from total precipitation
        prcp = np.ones(npix) * iprcp
        fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)
        prcpsol = prcp * clip_array(fac, 0, 1)

        return temp, tempformelt, prcp, prcpsol
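The solid-precipitation factor is a linear ramp between the all-solid and all-liquid temperature thresholds. A sketch with assumed thresholds:

import numpy as np

temp = np.array([-2., 0.5, 1.5, 3.])   # hypothetical monthly temps (deg C)
t_solid, t_liq = 0., 2.                # assumed thresholds
fac = np.clip(1 - (temp - t_solid) / (t_liq - t_solid), 0, 1)
# fac -> [1., 0.75, 0.25, 0.]: all solid below t_solid, all liquid above t_liq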
Example #6
def process_lmr_data(gdir,
                     fpath_temp=None,
                     fpath_precip=None,
                     year_range=('1951', '1980'),
                     filesuffix='',
                     **kwargs):
    """Read, process and store the Last Millennium Reanalysis (LMR) data for this glacier.

    LMR data: https://atmos.washington.edu/~hakim/lmr/LMRv2/

    LMR data is annualised in anomaly format relative to 1951-1980. We
    create synthetic timeseries from the reference data.

    It stores the data in a format that can be used by the OGGM mass balance
    model and in the glacier directory.

    Parameters
    ----------
    fpath_temp : str
        path to the temp file (default: LMR v2.1 from server above)
    fpath_precip : str
        path to the precip file (default: LMR v2.1 from server above)
    year_range : tuple of str
        the year range for which you want to compute the anomalies. Default
        for LMR is `('1951', '1980')`
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).

    **kwargs: any kwarg to be passed to :py:func:`process_gcm_data`
    """

    # Get the path of GCM temperature & precipitation data
    base_url = 'https://atmos.washington.edu/%7Ehakim/lmr/LMRv2/'
    if fpath_temp is None:
        with utils.get_lock():
            fpath_temp = utils.file_downloader(
                base_url + 'air_MCruns_ensemble_mean_LMRv2.1.nc')
    if fpath_precip is None:
        with utils.get_lock():
            fpath_precip = utils.file_downloader(
                base_url + 'prate_MCruns_ensemble_mean_LMRv2.1.nc')

    # Glacier location
    glon = gdir.cenlon
    glat = gdir.cenlat

    # Read the GCM files
    with xr.open_dataset(fpath_temp, use_cftime=True) as tempds, \
            xr.open_dataset(fpath_precip, use_cftime=True) as precipds:

        # Check longitude conventions
        if tempds.lon.min() >= 0 and glon <= 0:
            glon += 360

        # Take the closest to the glacier
        # Should we consider GCM interpolation?
        temp = tempds.air.sel(lat=glat, lon=glon, method='nearest')
        precip = precipds.prate.sel(lat=glat, lon=glon, method='nearest')

        # Currently we just take the mean of the ensemble, although
        # this is probably not advised. The GCM climate will be
        # corrected anyway
        temp = temp.mean(dim='MCrun')
        precip = precip.mean(dim='MCrun')

        # Precip unit is kg m-2 s-1: convert to mm per month (30.5 is the
        # average number of days per month), since the anomaly is applied
        # afterwards
        precip = precip * 30.5 * (60 * 60 * 24)

        # Back to [-180, 180] for OGGM
        temp.lon.values = temp.lon if temp.lon <= 180 else temp.lon - 360
        precip.lon.values = precip.lon if precip.lon <= 180 else precip.lon - 360

    # OK now we have to turn these annual timeseries into monthly data
    # We take the ref climate
    fpath = gdir.get_filepath('climate_historical')
    with xr.open_dataset(fpath) as ds_ref:
        ds_ref = ds_ref.sel(time=slice(*year_range))

        loc_tmp = ds_ref.temp.groupby('time.month').mean()
        loc_pre = ds_ref.prcp.groupby('time.month').mean()

        # Make time coord
        t = np.cumsum([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] *
                      len(temp))
        t = cftime.num2date(np.append([0], t[:-1]),
                            'days since 0000-01-01 00:00:00',
                            calendar='noleap')

        temp = xr.DataArray(
            (loc_tmp.data + temp.data[:, np.newaxis]).flatten(),
            coords={
                'time': t,
                'lon': temp.lon,
                'lat': temp.lat
            },
            dims=('time', ))

        # For precip the std dev is very small - let's keep it as is for now
        # but this is a bit ridiculous. We clip to zero here to be sure
        precip = utils.clip_min(
            (loc_pre.data + precip.data[:, np.newaxis]).flatten(), 0)
        precip = xr.DataArray(precip,
                              dims=('time', ),
                              coords={
                                  'time': t,
                                  'lon': temp.lon,
                                  'lat': temp.lat
                              })

    process_gcm_data(gdir,
                     filesuffix=filesuffix,
                     prcp=precip,
                     temp=temp,
                     year_range=year_range,
                     calendar='noleap',
                     source='lmr',
                     **kwargs)
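The annual-to-monthly conversion hinges on one broadcast: each annual anomaly is added to the 12-month reference climatology, and the (nyears, 12) result is flattened into a single monthly series. With hypothetical numbers:

import numpy as np

loc_tmp = np.linspace(-10, 10, 12)          # made-up monthly climatology (12,)
annual_anom = np.array([-0.5, 0.0, 0.8])    # made-up annual anomalies (nyears,)
monthly = (loc_tmp + annual_anom[:, np.newaxis]).flatten()
assert monthly.shape == (3 * 12,)           # one block of 12 months per year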
Example #7
def mb_climate_on_height(gdir, heights, *, time_range=None, year_range=None):
    """Mass-balance climate of the glacier at a specific height

    Reads the glacier's monthly climate data file and computes the
    temperature "energies" (temp above 0) and solid precipitation at the
    required height.

    All MB parameters are considered here! (i.e. melt temp, precip scaling
    factor, etc.)

    Parameters
    ----------
    gdir : GlacierDirectory
        the glacier directory
    heights: ndarray
        a 1D array of the heights (in meter) where you want the data
    time_range : [datetime, datetime], optional
        default is to read all data but with this you
        can provide a [t0, t1] bounds (inclusive).
    year_range : [int, int], optional
        Provide a [y0, y1] year range to get the data for specific
        (hydrological) years only. Easier to use than the time bounds above.

    Returns
    -------
    (time, tempformelt, prcpsol)::
        - time: array of shape (nt,)
        - tempformelt:  array of shape (len(heights), nt)
        - prcpsol:  array of shape (len(heights), nt)
    """

    if year_range is not None:
        sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
        em = sm - 1 if (sm > 1) else 12
        t0 = datetime.datetime(year_range[0] - 1, sm, 1)
        t1 = datetime.datetime(year_range[1], em, 1)
        return mb_climate_on_height(gdir, heights, time_range=[t0, t1])

    # Parameters
    temp_all_solid = cfg.PARAMS['temp_all_solid']
    temp_all_liq = cfg.PARAMS['temp_all_liq']
    temp_melt = cfg.PARAMS['temp_melt']
    prcp_fac = cfg.PARAMS['prcp_scaling_factor']
    default_grad = cfg.PARAMS['temp_default_gradient']
    g_minmax = cfg.PARAMS['temp_local_gradient_bounds']

    # Read file
    igrad = None
    with utils.ncDataset(gdir.get_filepath('climate_historical')) as nc:
        # time
        time = nc.variables['time']
        time = netCDF4.num2date(time[:], time.units)
        if time_range is not None:
            p0 = np.where(time == time_range[0])[0]
            try:
                p0 = p0[0]
            except IndexError:
                raise MassBalanceCalibrationError('time_range[0] not found in '
                                                  'file')
            p1 = np.where(time == time_range[1])[0]
            try:
                p1 = p1[0]
            except IndexError:
                raise MassBalanceCalibrationError('time_range[1] not found in '
                                                  'file')
        else:
            p0 = 0
            p1 = len(time) - 1

        time = time[p0:p1 + 1]

        # Read timeseries
        itemp = nc.variables['temp'][p0:p1 + 1]
        iprcp = nc.variables['prcp'][p0:p1 + 1]
        if 'gradient' in nc.variables:
            igrad = nc.variables['gradient'][p0:p1 + 1]
            # Safety net for non-finite values in local gradients
            igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
            igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
        ref_hgt = nc.ref_hgt

    # Default gradient?
    if igrad is None:
        igrad = itemp * 0 + default_grad

    # Correct precipitation
    iprcp *= prcp_fac

    # For each height pixel:
    # Compute temp and tempformelt (temperature above melting threshold)
    npix = len(heights)
    grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
    grad_temp *= (heights.repeat(len(time)).reshape(grad_temp.shape) - ref_hgt)
    temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
    temp2dformelt = temp2d - temp_melt
    temp2dformelt = utils.clip_min(temp2dformelt, 0)
    # Compute solid precipitation from total precipitation
    prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
    fac = 1 - (temp2d - temp_all_solid) / (temp_all_liq - temp_all_solid)
    fac = utils.clip_array(fac, 0, 1)
    prcpsol = prcpsol * fac

    return time, temp2dformelt, prcpsol
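The year_range branch maps hydrological years onto calendar time bounds. A sketch assuming a start month of October (sm = 10): hydrological year y0 starts in October of the previous calendar year, and y1 ends in September.

import datetime

sm = 10                                  # assumed hydrological start month
em = sm - 1 if sm > 1 else 12            # last hydrological month
year_range = (2000, 2002)
t0 = datetime.datetime(year_range[0] - 1, sm, 1)   # 1999-10-01
t1 = datetime.datetime(year_range[1], em, 1)       # 2002-09-01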
Example #8
def find_inversion_calving_loop(gdir,
                                initial_water_depth=None,
                                max_ite=30,
                                stop_after_convergence=True,
                                fixed_water_depth=False):
    """Iterative search for a calving flux compatible with the bed inversion.

    See Recinos et al 2019 for details.

    Parameters
    ----------
    initial_water_depth : float
        the initial water depth starting the loop (for sensitivity experiments
        or to fix it to an observed value). The default is to use 1/3 of the
        terminus elevation if > 10 m, and 10 m otherwise
    max_ite : int
        the maximal number of iterations allowed before raising an error
    stop_after_convergence : bool
        whether to stop the loop once convergence is reached. Set to
        False to keep looping (for sensitivity experiments)
    fixed_water_depth : bool
        fix the water depth and let the frontal altitude vary instead
    """

    # Shortcuts
    from oggm.core import climate, inversion
    from oggm.exceptions import MassBalanceCalibrationError

    # Input
    if initial_water_depth is None:
        fl = gdir.read_pickle('inversion_flowlines')[-1]
        initial_water_depth = utils.clip_min(fl.surface_h[-1] / 3, 10)

    rho = cfg.PARAMS['ice_density']

    # We accept values down to zero before stopping
    cfg.PARAMS['min_mu_star'] = 0

    # Start iteration
    i = 0
    cfg.PARAMS['clip_mu_star'] = False
    odf = pd.DataFrame()
    mu_is_zero = False
    while i < max_ite:

        # Calculates a calving flux from model output
        if i == 0:
            # First call we set to zero (it's just to be sure we start
            # from a non-calving glacier)
            f_calving = 0
        elif i == 1:
            # Second call, we set a small positive calving to start with

            # Default is to get the thickness from free board and
            # initial water depth
            thick = None
            if fixed_water_depth:
                # This leaves the free board open for change
                thick = initial_water_depth + 1
            out = calving_flux_from_depth(gdir,
                                          water_depth=initial_water_depth,
                                          thick=thick,
                                          fixed_water_depth=fixed_water_depth)
            f_calving = out['flux']
        elif cfg.PARAMS['clip_mu_star']:
            # If we had to clip mu, the inversion calving becomes the real
            # flux, i.e. not compatible with calving law but with the
            # inversion
            fl = gdir.read_pickle('inversion_flowlines')[-1]
            f_calving = fl.flux[-1] * (gdir.grid.dx**2) * 1e-9 / rho
            mu_is_zero = True
        else:
            # Otherwise it is parameterized by the calving law
            if fixed_water_depth:
                out = calving_flux_from_depth(gdir,
                                              water_depth=initial_water_depth,
                                              fixed_water_depth=True)
                f_calving = out['flux']
            else:
                f_calving = calving_flux_from_depth(gdir)['flux']

        # Give it back to the inversion and recompute
        gdir.inversion_calving_rate = f_calving

        # At this step we might raise a MassBalanceCalibrationError
        try:
            climate.local_t_star(gdir)
            df = gdir.read_json('local_mustar')
        except MassBalanceCalibrationError as e:
            assert 'mu* out of specified bounds' in str(e)
            # When this happens we clip mu* to zero and store the
            # bad value (just for plotting)
            cfg.PARAMS['clip_mu_star'] = True
            df = gdir.read_json('local_mustar')
            df['mu_star_glacierwide'] = float(str(e).split(':')[-1])
            climate.local_t_star(gdir)

        climate.mu_star_calibration(gdir)

        inversion.prepare_for_inversion(gdir, add_debug_var=True)
        v_inv, _ = inversion.mass_conservation_inversion(gdir)
        if fixed_water_depth:
            out = calving_flux_from_depth(gdir,
                                          water_depth=initial_water_depth,
                                          fixed_water_depth=True)
        else:
            out = calving_flux_from_depth(gdir)

        # Store the data
        odf.loc[i, 'calving_flux'] = f_calving
        odf.loc[i, 'mu_star'] = df['mu_star_glacierwide']
        odf.loc[i, 'calving_law_flux'] = out['flux']
        odf.loc[i, 'width'] = out['width']
        odf.loc[i, 'thick'] = out['thick']
        odf.loc[i, 'water_depth'] = out['water_depth']
        odf.loc[i, 'free_board'] = out['free_board']

        # Do we have to do another loop? Start testing at the 5th iteration
        calving_flux = odf.calving_flux.values
        if stop_after_convergence and i > 4:
            # We want to make sure that we don't converge by chance
            # so we test on last two iterations
            conv = (np.allclose(calving_flux[[-1, -2]],
                                [out['flux'], out['flux']],
                                rtol=0.01))
            if mu_is_zero or conv:
                break
        i += 1

    # Write output
    odf.index.name = 'iterations'
    odf.to_csv(gdir.get_filepath('calving_loop'))

    # Restore defaults
    cfg.PARAMS['min_mu_star'] = 1.
    cfg.PARAMS['clip_mu_star'] = False

    return odf
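The convergence test compares the two most recent modelled fluxes with the calving-law flux at 1% relative tolerance. In isolation, with a made-up series:

import numpy as np

calving_flux = np.array([0.0, 0.05, 0.09, 0.11, 0.1205, 0.1206])  # made up
law_flux = 0.1206
conv = np.allclose(calving_flux[[-1, -2]], [law_flux, law_flux], rtol=0.01)
# True only if both of the last two iterations are within 1% of the law flux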
Example #9
def process_gcm_data(gdir,
                     filesuffix='',
                     prcp=None,
                     temp=None,
                     year_range=('1961', '1990'),
                     scale_stddev=True,
                     time_unit=None,
                     calendar=None,
                     source=''):
    """ Applies the anomaly method to GCM climate data

    This function can be applied to any GCM data, if it is provided in a
    suitable :py:class:`xarray.DataArray`. See Parameter description for
    format details.

    For CESM-LME a specific function :py:func:`tasks.process_cesm_data` is
    available which does the preprocessing of the data and subsequently calls
    this function.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).
    prcp : :py:class:`xarray.DataArray`
        | monthly total precipitation [mm month-1]
        | Coordinates:
        | lat float64
        | lon float64
        | time: cftime object
    temp : :py:class:`xarray.DataArray`
        | monthly temperature [K]
        | Coordinates:
        | lat float64
        | lon float64
        | time cftime object
    year_range : tuple of str
        the year range for which you want to compute the anomalies. Default
        is `('1961', '1990')`
    scale_stddev : bool
        whether or not to scale the temperature standard deviation as well
    time_unit : str
        The unit conversion for NetCDF files. It must be adapted to the
        length of the time series. The default is to choose
        it ourselves based on the starting year.
        For example: 'days since 0850-01-01 00:00:00'
    calendar : str
        If you use an exotic calendar (e.g. 'noleap')
    source : str
        For metadata: the source of the climate data
    """

    # Standard sanity checks
    months = temp['time.month']
    if months[0] != 1:
        raise ValueError('We expect the files to start in January!')
    if months[-1] < 10:
        raise ValueError('We expect the files to end in December!')

    if (np.abs(temp['lon']) > 180) or (np.abs(prcp['lon']) > 180):
        raise ValueError('We expect the longitude coordinates to be within '
                         '[-180, 180].')

    # from normal years to hydrological years
    sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
    if sm != 1:
        prcp = prcp[sm - 1:sm - 13].load()
        temp = temp[sm - 1:sm - 13].load()

    assert len(prcp) // 12 == len(
        prcp) / 12, 'Somehow we didn\'t get full years'
    assert len(temp) // 12 == len(
        temp) / 12, 'Somehow we didn\'t get full years'

    # Get the reference data to apply the anomaly to
    fpath = gdir.get_filepath('climate_historical')
    with xr.open_dataset(fpath) as ds_ref:

        ds_ref = ds_ref.sel(time=slice(*year_range))

        # compute monthly anomalies
        # of temp
        if scale_stddev:
            # This is a bit more arithmetic
            ts_tmp_sel = temp.sel(time=slice(*year_range))
            if len(ts_tmp_sel) // 12 != len(ts_tmp_sel) / 12:
                raise InvalidParamsError('year_range cannot contain the first '
                                         'or last calendar year in the series')
            if ((len(ts_tmp_sel) // 12) % 2) == 1:
                raise InvalidParamsError('We need an even number of years '
                                         'for this to work')
            ts_tmp_std = ts_tmp_sel.groupby('time.month').std(dim='time')
            std_fac = ds_ref.temp.groupby('time.month').std(
                dim='time') / ts_tmp_std
            if sm != 1:
                # Just to avoid useless roll
                std_fac = std_fac.roll(month=13 - sm, roll_coords=True)
            std_fac = np.tile(std_fac.data, len(temp) // 12)
            # We need an even number of years for this to work
            win_size = len(ts_tmp_sel) + 1

            def roll_func(x, axis=None):
                x = x[:, ::12]
                n = len(x[0, :]) // 2
                xm = np.nanmean(x, axis=axis)
                return xm + (x[:, n] - xm) * std_fac

            temp = temp.rolling(time=win_size, center=True,
                                min_periods=1).reduce(roll_func)

        ts_tmp_sel = temp.sel(time=slice(*year_range))
        if len(ts_tmp_sel.time) != len(ds_ref.time):
            raise InvalidParamsError('The reference climate period and the '
                                     'GCM period after window selection do '
                                     'not match.')
        ts_tmp_avg = ts_tmp_sel.groupby('time.month').mean(dim='time')
        ts_tmp = temp.groupby('time.month') - ts_tmp_avg
        # of precip -- scaled anomalies
        ts_pre_avg = prcp.sel(time=slice(*year_range))
        ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
        ts_pre_ano = prcp.groupby('time.month') - ts_pre_avg
        # scaled anomalies is the default. Standard anomalies above
        # are used later for where ts_pre_avg == 0
        ts_pre = prcp.groupby('time.month') / ts_pre_avg

        # for temp
        loc_tmp = ds_ref.temp.groupby('time.month').mean()
        ts_tmp = ts_tmp.groupby('time.month') + loc_tmp

        # for prcp
        loc_pre = ds_ref.prcp.groupby('time.month').mean()
        # scaled anomalies
        ts_pre = ts_pre.groupby('time.month') * loc_pre
        # standard anomalies
        ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
        # Correct infinite values with standard anomalies
        ts_pre.values = np.where(np.isfinite(ts_pre.values), ts_pre.values,
                                 ts_pre_ano.values)
        # The previous step might create negative values (unlikely). Clip them
        ts_pre.values = utils.clip_min(ts_pre.values, 0)

        assert np.all(np.isfinite(ts_pre.values))
        assert np.all(np.isfinite(ts_tmp.values))

        gdir.write_monthly_climate_file(temp.time.values,
                                        ts_pre.values,
                                        ts_tmp.values,
                                        float(ds_ref.ref_hgt),
                                        prcp.lon.values,
                                        prcp.lat.values,
                                        time_unit=time_unit,
                                        calendar=calendar,
                                        file_name='gcm_data',
                                        source=source,
                                        filesuffix=filesuffix)
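The precipitation handling mixes two anomaly flavours: scaled anomalies by default, standard anomalies as a fallback wherever the reference average is zero (the ratio is not finite), with a final clip to zero. That logic in isolation, with made-up numbers:

import numpy as np

prcp = np.array([10., 0., 30.])     # hypothetical GCM monthly precip
avg = np.array([20., 0., 15.])      # reference-period monthly means
loc = np.array([40., 5., 25.])      # baseline climatology
with np.errstate(divide='ignore', invalid='ignore'):
    scaled = prcp / avg * loc       # scaled anomalies (nan/inf where avg == 0)
standard = prcp - avg + loc         # standard anomalies as fallback
out = np.where(np.isfinite(scaled), scaled, standard)
out = np.clip(out, 0, None)         # no negative precipitation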
Example #10
def distribute_thickness_interp(gdir,
                                add_slope=True,
                                smooth_radius=None,
                                varname_suffix=''):
    """Compute a thickness map by interpolating between centerlines and border.

    IMPORTANT: this is NOT what has been used for ITMIX. We used
    distribute_thickness_per_altitude for ITMIX and global ITMIX.

    This is a rather cosmetic task, not relevant for OGGM but for ITMIX.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    add_slope : bool
        whether a corrective slope factor should be used or not
    smooth_radius : int
        pixel size of the gaussian smoothing. Default is to use
        cfg.PARAMS['smooth_window'] (i.e. a size in meters). Set to zero to
        suppress smoothing.
    varname_suffix : str
        add a suffix to the variable written in the file (for experiments)
    """

    # Variables
    grids_file = gdir.get_filepath('gridded_data')
    # See if we have the masks, else compute them
    with utils.ncDataset(grids_file) as nc:
        has_masks = 'glacier_ext_erosion' in nc.variables
    if not has_masks:
        from oggm.core.gis import gridded_attributes
        gridded_attributes(gdir)

    with utils.ncDataset(grids_file) as nc:
        glacier_mask = nc.variables['glacier_mask'][:]
        glacier_ext = nc.variables['glacier_ext_erosion'][:]
        ice_divides = nc.variables['ice_divides'][:]
        if add_slope:
            slope_factor = nc.variables['slope_factor'][:]
        else:
            slope_factor = 1.

    # Thickness to interpolate
    thick = glacier_ext * np.NaN
    thick[(glacier_ext - ice_divides) == 1] = 0.
    # TODO: domain border too, for convenience for a start
    thick[0, :] = 0.
    thick[-1, :] = 0.
    thick[:, 0] = 0.
    thick[:, -1] = 0.

    # Along the lines
    cls = gdir.read_pickle('inversion_output')
    fls = gdir.read_pickle('inversion_flowlines')
    vs = []
    for cl, fl in zip(cls, fls):
        vs.extend(cl['volume'])
        x, y = utils.tuple2int(fl.line.xy)
        thick[y, x] = cl['thick']
    init_vol = np.sum(vs)

    # Interpolate
    xx, yy = gdir.grid.ij_coordinates
    pnan = np.nonzero(~np.isfinite(thick))
    pok = np.nonzero(np.isfinite(thick))
    points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
    inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
    thick[pnan] = griddata(points, np.ravel(thick[pok]), inter, method='cubic')
    utils.clip_min(thick, 0, out=thick)

    # Slope
    thick *= slope_factor

    # Smooth
    dx = gdir.grid.dx
    if smooth_radius != 0:
        if smooth_radius is None:
            smooth_radius = np.rint(cfg.PARAMS['smooth_window'] / dx)
        thick = gaussian_blur(thick, int(smooth_radius))
        thick = np.where(glacier_mask, thick, 0.)

    # Re-mask
    thick[glacier_mask == 0] = np.NaN
    assert np.all(np.isfinite(thick[glacier_mask == 1]))

    # Conserve volume
    tmp_vol = np.nansum(thick * dx**2)
    thick *= init_vol / tmp_vol

    # write
    grids_file = gdir.get_filepath('gridded_data')
    with utils.ncDataset(grids_file, 'a') as nc:
        vn = 'distributed_thickness' + varname_suffix
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', (
                'y',
                'x',
            ), zlib=True)
        v.units = '-'
        v.long_name = 'Distributed ice thickness'
        v[:] = thick

    return thick
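The gap filling relies on scipy's griddata: finite pixels are the data points, NaN pixels the query locations (the task uses method='cubic'; 'linear' is shown here for a tiny example).

import numpy as np
from scipy.interpolate import griddata

thick = np.array([[0., np.nan, 0.],
                  [0., np.nan, 0.],
                  [0., 50., 0.]])     # NaNs are the pixels to fill
yy, xx = np.indices(thick.shape)
pok = np.isfinite(thick)
thick[~pok] = griddata(np.column_stack((yy[pok], xx[pok])), thick[pok],
                       np.column_stack((yy[~pok], xx[~pok])), method='linear')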
Example #11
def calving_flux_from_depth(gdir,
                            k=None,
                            water_depth=None,
                            thick=None,
                            fixed_water_depth=False):
    """Finds a calving flux from the calving front thickness.

    Approach based on Huss and Hock (2015) and Oerlemans and Nick (2005).
    We take the initial output of the model and surface elevation data
    to calculate the water depth of the calving front.

    Parameters
    ----------
    gdir : GlacierDirectory
    k : float
        calving constant
    water_depth : float
        the default is to compute the water depth from the ice thickness
        at the terminus and altitude. Set this to force the water depth
        to a certain value
    thick : float
        Set this to force the ice thickness to a certain value (for
        sensitivity experiments).
    fixed_water_depth : bool
        If we have the water depth from bathymetry, we fix the water depth
        and forget about the free board

    Returns
    -------
    A dictionary containing:
    - the calving flux in [km3 yr-1]
    - the frontal width in m
    - the frontal thickness in m
    - the frontal water depth in m
    - the frontal free board in m
    """

    # Defaults
    if k is None:
        k = cfg.PARAMS['inversion_calving_k']

    # Read inversion output
    cl = gdir.read_pickle('inversion_output')[-1]
    fl = gdir.read_pickle('inversion_flowlines')[-1]

    # Altitude at the terminus and frontal width
    t_altitude = utils.clip_min(fl.surface_h[-1], 0)
    width = fl.widths[-1] * gdir.grid.dx

    # Calving formula
    if thick is None:
        thick = cl['thick'][-1]
    if water_depth is None:
        water_depth = thick - t_altitude
    elif not fixed_water_depth:
        # Correct thickness with prescribed water depth
        # If fixed_water_depth=True then we forget about t_altitude
        thick = water_depth + t_altitude

    flux = k * thick * water_depth * width / 1e9

    if fixed_water_depth:
        # Recompute free board before returning
        t_altitude = thick - water_depth

    return {
        'flux': utils.clip_min(flux, 0),
        'width': width,
        'thick': thick,
        'water_depth': water_depth,
        'free_board': t_altitude
    }
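Plugging numbers into the calving law gives a feel for the magnitudes (all values hypothetical): flux = k * thickness * water depth * width, converted from m3 yr-1 to km3 yr-1.

k = 2.4            # assumed calving constant (yr-1)
thick = 120.       # frontal thickness (m)
water_depth = 80.  # m
width = 500.       # m
flux = k * thick * water_depth * width / 1e9   # -> 0.01152 km3 yr-1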
Example #12
def prepare_for_inversion(gdir,
                          add_debug_var=False,
                          invert_with_rectangular=True,
                          invert_all_rectangular=False):
    """Prepares the data needed for the inversion.

    Mostly the mass flux and slope angle, the rest (width, height) was already
    computed. It is then stored in a list of dicts in order to be faster.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    """

    # variables
    fls = gdir.read_pickle('inversion_flowlines')

    towrite = []
    for fl in fls:

        # Distance between two points
        dx = fl.dx * gdir.grid.dx

        # Widths
        widths = fl.widths * gdir.grid.dx

        # Heights
        hgt = fl.surface_h
        angle = -np.gradient(hgt, dx)  # beware the minus sign

        # Flux needs to be in [m3 s-1] (*ice* velocity * surface)
        # fl.flux is given in kg m-2 yr-1, rho in kg m-3, so this should be it:
        rho = cfg.PARAMS['ice_density']
        flux = fl.flux * (gdir.grid.dx**2) / cfg.SEC_IN_YEAR / rho

        # Clip flux to 0
        if np.any(flux < -0.1):
            log.warning('(%s) has negative flux somewhere', gdir.rgi_id)
        utils.clip_min(flux, 0, out=flux)

        if fl.flows_to is None and gdir.inversion_calving_rate == 0:
            if not np.allclose(flux[-1], 0., atol=0.1):
                # TODO: this test doesn't seem meaningful here
                msg = ('({}) flux at terminus should be zero, but is: '
                       '{:.4f} m3 ice s-1'.format(gdir.rgi_id, flux[-1]))
                raise RuntimeError(msg)
            flux[-1] = 0.

        # Shape
        is_rectangular = fl.is_rectangular
        if not invert_with_rectangular:
            is_rectangular[:] = False
        if invert_all_rectangular:
            is_rectangular[:] = True

        # Optimisation: we need to compute this term of a0 only once
        flux_a0 = np.where(is_rectangular, 1, 1.5)
        flux_a0 *= flux / widths

        # Add to output
        cl_dic = dict(dx=dx,
                      flux_a0=flux_a0,
                      width=widths,
                      slope_angle=angle,
                      is_rectangular=is_rectangular,
                      is_last=fl.flows_to is None)
        if add_debug_var:
            cl_dic['flux'] = flux
            cl_dic['hgt'] = hgt
        towrite.append(cl_dic)

    # Write out
    gdir.write_pickle(towrite, 'inversion_input')
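The unit conversion for the mass flux can be checked on its own (hypothetical numbers; in OGGM the constants come from cfg): kg m-2 yr-1 times the cell area gives kg yr-1, and dividing by seconds per year and the ice density yields m3 of ice per second.

SEC_IN_YEAR = 365 * 24 * 3600   # cfg.SEC_IN_YEAR in OGGM
rho = 900.                      # ice density (kg m-3), cfg.PARAMS['ice_density']
dx = 100.                       # grid spacing (m)
flux_mb = 2000.                 # hypothetical mass flux (kg m-2 yr-1)
flux = flux_mb * dx**2 / SEC_IN_YEAR / rho   # m3 of ice per second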
Example #13
def distribute_thickness_per_altitude(gdir,
                                      add_slope=True,
                                      smooth_radius=None,
                                      dis_from_border_exp=0.25,
                                      varname_suffix=''):
    """Compute a thickness map by redistributing mass along altitudinal bands.

    This is a rather cosmetic task, not relevant for OGGM but for ITMIX.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    add_slope : bool
        whether a corrective slope factor should be used or not
    smooth_radius : int
        pixel size of the gaussian smoothing. Default is to use
        cfg.PARAMS['smooth_window'] (i.e. a size in meters). Set to zero to
        suppress smoothing.
    dis_from_border_exp : float
        the exponent of the distance from border mask
    varname_suffix : str
        add a suffix to the variable written in the file (for experiments)
    """

    # Variables
    grids_file = gdir.get_filepath('gridded_data')
    # See if we have the masks, else compute them
    with utils.ncDataset(grids_file) as nc:
        has_masks = 'glacier_ext_erosion' in nc.variables
    if not has_masks:
        from oggm.core.gis import gridded_attributes
        gridded_attributes(gdir)

    with utils.ncDataset(grids_file) as nc:
        topo_smoothed = nc.variables['topo_smoothed'][:]
        glacier_mask = nc.variables['glacier_mask'][:]
        dis_from_border = nc.variables['dis_from_border'][:]
        if add_slope:
            slope_factor = nc.variables['slope_factor'][:]
        else:
            slope_factor = 1.

    # Along the lines
    cls = gdir.read_pickle('inversion_output')
    fls = gdir.read_pickle('inversion_flowlines')
    hs, ts, vs, xs, ys = [], [], [], [], []
    for cl, fl in zip(cls, fls):
        hs = np.append(hs, fl.surface_h)
        ts = np.append(ts, cl['thick'])
        vs = np.append(vs, cl['volume'])
        x, y = fl.line.xy
        xs = np.append(xs, x)
        ys = np.append(ys, y)
    init_vol = np.sum(vs)

    # Assign a first order thickness to the points
    # very inefficient inverse distance stuff
    thick = glacier_mask * np.NaN
    for y in range(thick.shape[0]):
        for x in range(thick.shape[1]):
            phgt = topo_smoothed[y, x]
            # take the ones in a 100m range
            starth = 100.
            while True:
                starth += 10
                pok = np.nonzero(np.abs(phgt - hs) <= starth)[0]
                if len(pok) != 0:
                    break
            sqr = np.sqrt((xs[pok] - x)**2 + (ys[pok] - y)**2)
            pzero = np.where(sqr == 0)
            if len(pzero[0]) == 0:
                thick[y, x] = np.average(ts[pok], weights=1 / sqr)
            elif len(pzero[0]) == 1:
                thick[y, x] = ts[pzero]
            else:
                raise RuntimeError('We should not be there')

    # Distance from border (normalized)
    dis_from_border = dis_from_border**dis_from_border_exp
    dis_from_border /= np.mean(dis_from_border[glacier_mask == 1])
    thick *= dis_from_border

    # Slope
    thick *= slope_factor

    # Smooth
    dx = gdir.grid.dx
    if smooth_radius != 0:
        if smooth_radius is None:
            smooth_radius = np.rint(cfg.PARAMS['smooth_window'] / dx)
        thick = gaussian_blur(thick, int(smooth_radius))
        thick = np.where(glacier_mask, thick, 0.)

    # Re-mask
    utils.clip_min(thick, 0, out=thick)
    thick[glacier_mask == 0] = np.NaN
    assert np.all(np.isfinite(thick[glacier_mask == 1]))

    # Conserve volume
    tmp_vol = np.nansum(thick * dx**2)
    thick *= init_vol / tmp_vol

    # write
    with utils.ncDataset(grids_file, 'a') as nc:
        vn = 'distributed_thickness' + varname_suffix
        if vn in nc.variables:
            v = nc.variables[vn]
        else:
            v = nc.createVariable(vn, 'f4', (
                'y',
                'x',
            ), zlib=True)
        v.units = '-'
        v.long_name = 'Distributed ice thickness'
        v[:] = thick

    return thick
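The per-pixel loop boils down to inverse-distance weighting of the flowline thicknesses (with a special case for exact hits). The weighting for a single pixel, with made-up points:

import numpy as np

xs = np.array([0., 3., 10.])    # flowline point coordinates (made up)
ys = np.array([0., 4., 0.])
ts = np.array([50., 80., 20.])  # thickness at those points (m)
x, y = 2, 2                     # target pixel
sqr = np.sqrt((xs - x)**2 + (ys - y)**2)
thick = np.average(ts, weights=1 / sqr)   # closer points dominate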
Example #14
def local_t_star(gdir, *, ref_df=None, tstar=None, bias=None):
    """Compute the local t* and associated glacier-wide mu*.

    If ``tstar`` and ``bias`` are not provided, they will be interpolated from
    the reference t* list.

    Note: the glacier-wide mu* is here just for indication. It might be
    different from the flowlines' mu* in some cases.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    ref_df : :py:class:`pandas.DataFrame`, optional
        replace the default calibration list with your own.
    tstar: int, optional
        the year where the glacier should be equilibrium
    bias: float, optional
        the associated reference bias
    """

    # Relevant mb params
    params = [
        'temp_default_gradient', 'temp_all_solid', 'temp_all_liq', 'temp_melt',
        'prcp_scaling_factor'
    ]

    if tstar is None or bias is None:
        # Do our own interpolation
        if ref_df is None:
            if not cfg.PARAMS['run_mb_calibration']:
                # Make some checks and use the default one
                climate_info = gdir.read_json('climate_info')
                source = climate_info['baseline_climate_source']
                ok_source = ['CRU TS4.01', 'CRU TS3.23', 'HISTALP']
                if not any(s in source.upper() for s in ok_source):
                    msg = ('If you are using a custom climate file you should '
                           'run your own MB calibration.')
                    raise MassBalanceCalibrationError(msg)
                v = gdir.rgi_version[0]  # major version relevant

                # Check that the params are fine
                s = 'cru4' if 'CRU' in source else 'histalp'
                vn = 'oggm_ref_tstars_rgi{}_{}_calib_params'.format(v, s)
                for k in params:
                    if cfg.PARAMS[k] != cfg.PARAMS[vn][k]:
                        msg = ('The reference t* you are trying to use was '
                               'calibrated with different MB parameters. You '
                               'might have to run the calibration manually.')
                        raise MassBalanceCalibrationError(msg)
                ref_df = cfg.PARAMS['oggm_ref_tstars_rgi{}_{}'.format(v, s)]
            else:
                # Use the local calibration
                fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
                ref_df = pd.read_csv(fp)

        # Compute the distance to each glacier
        distances = utils.haversine(gdir.cenlon, gdir.cenlat, ref_df.lon,
                                    ref_df.lat)

        # Take the 10 closest
        aso = np.argsort(distances)[0:10]
        amin = ref_df.iloc[aso]
        distances = distances[aso]**2

        # If really close no need to divide, else weighted average
        if distances.iloc[0] <= 0.1:
            tstar = amin.tstar.iloc[0]
            bias = amin.bias.iloc[0]
        else:
            tstar = int(np.average(amin.tstar, weights=1. / distances))
            bias = np.average(amin.bias, weights=1. / distances)

    # Add the climate related params to the GlacierDir to make sure
    # other tools cannot fool around without re-calibration
    out = gdir.read_json('climate_info')
    out['mb_calib_params'] = {k: cfg.PARAMS[k] for k in params}
    gdir.write_json(out, 'climate_info')

    # We compute the overall mu* here but this is mostly for testing
    # Climate period
    mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
    yr = [tstar - mu_hp, tstar + mu_hp]

    # Do we have a calving glacier?
    cmb = calving_mb(gdir)

    log.info('(%s) local mu* computation for t*=%d', gdir.rgi_id, tstar)

    # Get the corresponding mu
    years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir, year_range=yr)
    assert len(years) == (2 * mu_hp + 1)

    # mustar is taking calving into account (units of specific MB)
    mustar = (np.mean(prcp_yr) - cmb) / np.mean(temp_yr)
    if not np.isfinite(mustar):
        raise MassBalanceCalibrationError('{} has a non finite '
                                          'mu'.format(gdir.rgi_id))

    # Clip it?
    if cfg.PARAMS['clip_mu_star']:
        mustar = utils.clip_min(mustar, 0)

    # If mu out of bounds, raise
    if not (cfg.PARAMS['min_mu_star'] <= mustar <= cfg.PARAMS['max_mu_star']):
        raise MassBalanceCalibrationError('mu* out of specified bounds: '
                                          '{:.2f}'.format(mustar))

    # Scalars in a small dict for later
    df = dict()
    df['rgi_id'] = gdir.rgi_id
    df['t_star'] = int(tstar)
    df['bias'] = bias
    df['mu_star_glacierwide'] = mustar
    gdir.write_json(df, 'local_mustar')
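The mu* computation itself is a one-liner: average solid precipitation over the window around t*, minus any calving mass balance, divided by the average melt temperature. With made-up yearly values:

import numpy as np

prcp_yr = np.array([2000., 2200., 1800.])  # yearly solid precip (made up)
temp_yr = np.array([400., 500., 450.])     # yearly sums of melt temperature
cmb = 0.                                   # no calving here
mustar = (np.mean(prcp_yr) - cmb) / np.mean(temp_yr)
# at t*, melt (mustar * temp) balances accumulation on average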
Example #15
def merged_glacier_masks(gdir, geometry):
    """Makes a gridded mask of a merged glacier outlines.

    This is a simplified version of glacier_masks. We don't need fancy
    corrections or smoothing here: The flowlines for the actual model run are
    based on a proper call of glacier_masks.

    This task is only to get outlines etc. for visualization!

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    geometry: shapely.geometry.multipolygon.MultiPolygon
        united outlines of the merged glaciers
    """

    # open srtm tif-file:
    dem = read_geotiff_dem(gdir)

    if np.min(dem) == np.max(dem):
        raise RuntimeError('({}) min equal max in the DEM.'
                           .format(gdir.rgi_id))

    # Clip topography to 0 m a.s.l.
    utils.clip_min(dem, 0, out=dem)

    # Interpolate shape to a regular path
    glacier_poly_hr = tolist(geometry)

    for nr, poly in enumerate(glacier_poly_hr):
        # transform geometry to map
        _geometry = salem.transform_geometry(poly, to_crs=gdir.grid.proj)
        glacier_poly_hr[nr] = _interp_polygon(_geometry, gdir.grid.dx)

    glacier_poly_hr = shpg.MultiPolygon(glacier_poly_hr)

    # Transform geometry into grid coordinates
    # It has to be in pix center coordinates because of how skimage works
    def proj(x, y):
        grid = gdir.grid.center_grid
        return grid.transform(x, y, crs=grid.proj)

    glacier_poly_hr = shapely.ops.transform(proj, glacier_poly_hr)

    # simple trick to correct invalid polys:
    # http://stackoverflow.com/questions/20833344/
    # fix-invalid-polygon-python-shapely
    glacier_poly_hr = glacier_poly_hr.buffer(0)
    if not glacier_poly_hr.is_valid:
        raise RuntimeError('This glacier geometry is not valid.')

    # Round the geometry to the nearest pixel
    # I cannot use _polygon_to_pix here:
    # glacier_poly_pix = _polygon_to_pix(glacier_poly_hr)
    def project(x, y):
        return np.rint(x).astype(np.int64), np.rint(y).astype(np.int64)

    glacier_poly_pix = shapely.ops.transform(project, glacier_poly_hr)
    glacier_poly_pix_iter = tolist(glacier_poly_pix)

    # Compute the glacier mask (currently: center pixels + touched)
    nx, ny = gdir.grid.nx, gdir.grid.ny
    glacier_mask = np.zeros((ny, nx), dtype=np.uint8)
    glacier_ext = np.zeros((ny, nx), dtype=np.uint8)

    for poly in glacier_poly_pix_iter:
        (x, y) = poly.exterior.xy
        glacier_mask[skdraw.polygon(np.array(y), np.array(x))] = 1
        for gint in poly.interiors:
            x, y = tuple2int(gint.xy)
            glacier_mask[skdraw.polygon(y, x)] = 0
            glacier_mask[y, x] = 0  # no glacier on the nunataks
        x, y = tuple2int(poly.exterior.xy)
        glacier_mask[y, x] = 1
        glacier_ext[y, x] = 1

    # Last sanity check based on the masked dem
    tmp_max = np.max(dem[np.where(glacier_mask == 1)])
    tmp_min = np.min(dem[np.where(glacier_mask == 1)])
    if tmp_max < (tmp_min + 1):
        raise RuntimeError('({}) min equal max in the masked DEM.'
                           .format(gdir.rgi_id))

    # write out the grids in the netcdf file
    with GriddedNcdfFile(gdir, reset=True) as nc:

        v = nc.createVariable('topo', 'f4', ('y', 'x', ), zlib=True)
        v.units = 'm'
        v.long_name = 'DEM topography'
        v[:] = dem

        v = nc.createVariable('glacier_mask', 'i1', ('y', 'x', ), zlib=True)
        v.units = '-'
        v.long_name = 'Glacier mask'
        v[:] = glacier_mask

        v = nc.createVariable('glacier_ext', 'i1', ('y', 'x', ), zlib=True)
        v.units = '-'
        v.long_name = 'Glacier external boundaries'
        v[:] = glacier_ext

        # add some meta stats and close
        nc.max_h_dem = np.max(dem)
        nc.min_h_dem = np.min(dem)
        dem_on_g = dem[np.where(glacier_mask)]
        nc.max_h_glacier = np.max(dem_on_g)
        nc.min_h_glacier = np.min(dem_on_g)

    geometries = dict()
    geometries['polygon_hr'] = glacier_poly_hr
    geometries['polygon_pix'] = glacier_poly_pix
    geometries['polygon_area'] = geometry.area
    gdir.write_pickle(geometries, 'geometries')
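The rasterization relies on skimage.draw.polygon, which returns the row/column indices of the pixels inside a polygon. A tiny sketch:

import numpy as np
from skimage import draw as skdraw

mask = np.zeros((5, 5), dtype=np.uint8)
y = np.array([1, 1, 3, 3])       # polygon vertices in pixel coordinates
x = np.array([1, 3, 3, 1])
mask[skdraw.polygon(y, x)] = 1   # fill the interior pixels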
Example #16
def prepare_for_inversion(gdir,
                          add_debug_var=False,
                          invert_with_rectangular=True,
                          invert_all_rectangular=False,
                          invert_with_trapezoid=True,
                          invert_all_trapezoid=False):
    """Prepares the data needed for the inversion.

    Mostly the mass flux and slope angle, the rest (width, height) was already
    computed. It is then stored in a list of dicts in order to be faster.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    """

    # variables
    fls = gdir.read_pickle('inversion_flowlines')

    towrite = []
    for fl in fls:

        # Distance between two points
        dx = fl.dx * gdir.grid.dx

        # Widths
        widths = fl.widths * gdir.grid.dx

        # Heights
        hgt = fl.surface_h
        angle = -np.gradient(hgt, dx)  # beware the minus sign

        # Flux needs to be in [m3 s-1] (*ice* velocity * surface)
        # fl.flux is given in kg m-2 yr-1, rho in kg m-3, so this should be it:
        rho = cfg.PARAMS['ice_density']
        flux = fl.flux * (gdir.grid.dx**2) / cfg.SEC_IN_YEAR / rho

        # Clip flux to 0
        if np.any(flux < -0.1):
            log.info('(%s) has negative flux somewhere', gdir.rgi_id)
        utils.clip_min(flux, 0, out=flux)

        if np.sum(flux <= 0) > 1 and len(fls) == 1:
            raise RuntimeError("More than one grid point has zero or "
                               "negative flux: this should not happen.")

        if fl.flows_to is None and gdir.inversion_calving_rate == 0:
            if not np.allclose(flux[-1], 0., atol=0.1):
                # TODO: this test doesn't seem meaningful here
                msg = ('({}) flux at terminus should be zero, but is: '
                       '{:.4f} m3 ice s-1'.format(gdir.rgi_id, flux[-1]))
                raise RuntimeError(msg)

            # This contradicts the check above, which has been around for
            # quite some time because it is a quality check: per
            # construction, the flux at the last grid point should be zero.
            # HOWEVER, it is also meaningful to have a non-zero ice thickness
            # at the last grid point. Therefore, we add some artificial
            # flux here (an alternative would be to compute the flux on a
            # staggered grid, but I actually like the QC and it's easier).
            # Note that this value will be ignored if one uses the filter
            # task afterwards
            flux[-1] = flux[-2] / 3  # this is totally arbitrary

        if fl.flows_to is not None and flux[-1] <= 0:
            # Same for tributaries
            flux[-1] = flux[-2] / 3  # this is totally arbitrary

        # Shape
        is_rectangular = fl.is_rectangular
        if not invert_with_rectangular:
            is_rectangular[:] = False
        if invert_all_rectangular:
            is_rectangular[:] = True

        # Trapezoid is new - might not be available
        is_trapezoid = getattr(fl, 'is_trapezoid', None)
        if is_trapezoid is None:
            is_trapezoid = fl.is_rectangular * False
        if not invert_with_trapezoid:
            is_trapezoid[:] = False
        if invert_all_trapezoid:
            is_trapezoid[:] = True

        # Optimisation: we need to compute this term of a0 only once
        flux_a0 = np.where(is_rectangular, 1, 1.5)
        flux_a0 *= flux / widths

        # Add to output
        cl_dic = dict(dx=dx,
                      flux_a0=flux_a0,
                      width=widths,
                      slope_angle=angle,
                      is_rectangular=is_rectangular,
                      is_trapezoid=is_trapezoid,
                      flux=flux,
                      is_last=fl.flows_to is None,
                      hgt=hgt,
                      invert_with_trapezoid=invert_with_trapezoid)
        towrite.append(cl_dic)

    # Write out
    gdir.write_pickle(towrite, 'inversion_input')
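The shape factor enters the inversion only through flux_a0: 1 for rectangular sections, 1.5 for parabolic ones, times the flux per unit width. In isolation:

import numpy as np

is_rectangular = np.array([True, False, False])
flux = np.array([0.5, 0.8, 0.2])        # hypothetical ice flux (m3 s-1)
widths = np.array([300., 400., 250.])   # section widths (m)
flux_a0 = np.where(is_rectangular, 1, 1.5) * flux / widths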
Example #17
def process_cru_data(gdir, tmp_file=None, pre_file=None, y0=None, y1=None,
                     output_filesuffix=None):
    """Processes and writes the CRU baseline climate data for this glacier.

    Interpolates the CRU TS data to the high-resolution CL2 climatologies
    (provided with OGGM) and writes everything to a NetCDF file.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    tmp_file : str
        path to the CRU temperature file (defaults to the current OGGM chosen
        CRU version)
    pre_file : str
        path to the CRU precip file (defaults to the current OGGM chosen
        CRU version)
    y0 : int
        the starting year of the timeseries to write. The default is to take
        the entire time period available in the file, but with this kwarg
        you can shorten it (to save space or to crop bad data)
    y1 : int
        the ending year of the timeseries to write. The default is to take
        the entire time period available in the file, but with this kwarg
        you can shorten it (to save space or to crop bad data)
    output_filesuffix : str
        this add a suffix to the output file (useful to avoid overwriting
        previous experiments)
    """

    if cfg.PATHS.get('climate_file', None):
        warnings.warn("You seem to have set a custom climate file for this "
                      "run, but are using the default CRU climate "
                      "file instead.")

    if cfg.PARAMS['baseline_climate'] != 'CRU':
        raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
                                 "set to CRU")

    # read the climatology
    ncclim = salem.GeoNetcdf(get_cru_cl_file())

    # and the TS data
    if tmp_file is None:
        tmp_file = get_cru_file('tmp')
    if pre_file is None:
        pre_file = get_cru_file('pre')
    nc_ts_tmp = salem.GeoNetcdf(tmp_file, monthbegin=True)
    nc_ts_pre = salem.GeoNetcdf(pre_file, monthbegin=True)

    # set temporal subset for the ts data (hydro years)
    sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
    em = sm - 1 if (sm > 1) else 12
    yrs = nc_ts_pre.time.year
    y0 = yrs[0] if y0 is None else y0
    y1 = yrs[-1] if y1 is None else y1

    nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
                         t1='{}-{:02d}-01'.format(y1, em))
    nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
                         t1='{}-{:02d}-01'.format(y1, em))
    time = nc_ts_pre.time
    ny, r = divmod(len(time), 12)
    assert r == 0

    lon = gdir.cenlon
    lat = gdir.cenlat

    # This is guaranteed to work because I prepared the file (I hope)
    ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)

    # get climatology data
    loc_hgt = ncclim.get_vardata('elev')
    loc_tmp = ncclim.get_vardata('temp')
    loc_pre = ncclim.get_vardata('prcp')
    loc_lon = ncclim.get_vardata('lon')
    loc_lat = ncclim.get_vardata('lat')

    # see if the center is ok
    if not np.isfinite(loc_hgt[1, 1]):
        # take another candidate where finite
        isok = np.isfinite(loc_hgt)

        # wait: some areas are entirely NaNs, make the subset larger
        _margin = 1
        while not np.any(isok):
            _margin += 1
            ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
            loc_hgt = ncclim.get_vardata('elev')
            isok = np.isfinite(loc_hgt)
        if _margin > 1:
            log.debug('(%s) I had to look up for far climate pixels: %s',
                      gdir.rgi_id, _margin)

        # Take the first candidate (doesn't matter which)
        lon, lat = ncclim.grid.ll_coordinates
        lon = lon[isok][0]
        lat = lat[isok][0]
        # Resubset
        ncclim.set_subset()
        ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
        loc_hgt = ncclim.get_vardata('elev')
        loc_tmp = ncclim.get_vardata('temp')
        loc_pre = ncclim.get_vardata('prcp')
        loc_lon = ncclim.get_vardata('lon')
        loc_lat = ncclim.get_vardata('lat')

    assert np.isfinite(loc_hgt[1, 1])
    isok = np.isfinite(loc_hgt)
    hgt_f = loc_hgt[isok].flatten()
    assert len(hgt_f) > 0

    # Should we compute the gradient?
    use_grad = cfg.PARAMS['temp_use_local_gradient']
    ts_grad = None
    if use_grad and len(hgt_f) >= 5:
        ts_grad = np.zeros(12) * np.NaN
        for i in range(12):
            loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
            slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
            ts_grad[i] = slope if (p_val < 0.01) else np.NaN
        # convert to a timeseries and hydrological years
        ts_grad = ts_grad.tolist()
        ts_grad = ts_grad[em:] + ts_grad[0:em]
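        # e.g. with em=9: [Jan, ..., Dec] -> [Oct, Nov, Dec, Jan, ..., Sep]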
        ts_grad = np.asarray(ts_grad * ny)

    # maybe this will throw out of bounds warnings
    nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
    nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)

    # compute monthly anomalies
    # of temp
    ts_tmp = nc_ts_tmp.get_vardata('tmp', as_xarray=True)
    ts_tmp_avg = ts_tmp.sel(time=slice('1961-01-01', '1990-12-01'))
    ts_tmp_avg = ts_tmp_avg.groupby('time.month').mean(dim='time')
    ts_tmp = ts_tmp.groupby('time.month') - ts_tmp_avg
    # of precip
    ts_pre = nc_ts_pre.get_vardata('pre', as_xarray=True)
    ts_pre_avg = ts_pre.sel(time=slice('1961-01-01', '1990-12-01'))
    ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
    ts_pre_ano = ts_pre.groupby('time.month') - ts_pre_avg
    # Scaled anomalies are the default; the standard anomalies above are
    # used later where the scaled version fails (e.g. where ts_pre_avg == 0)
    ts_pre = ts_pre.groupby('time.month') / ts_pre_avg
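    # i.e. the final precip will be clim * (ts / ts_avg) by default, and
    # clim + (ts - ts_avg) wherever the scaled version is not finite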

    # interpolate to HR grid
    if np.any(~np.isfinite(ts_tmp[:, 1, 1])):
        # Extreme case, middle pix is not valid
        # take any valid pix from the 3*3 (and hope there's one)
        found_it = False
        for idi in range(3):
            for idj in range(3):
                if np.all(np.isfinite(ts_tmp[:, idj, idi])):
                    ts_tmp[:, 1, 1] = ts_tmp[:, idj, idi]
                    ts_pre[:, 1, 1] = ts_pre[:, idj, idi]
                    ts_pre_ano[:, 1, 1] = ts_pre_ano[:, idj, idi]
                    found_it = True
        if not found_it:
            msg = '({}) there is no climate data'.format(gdir.rgi_id)
            raise MassBalanceCalibrationError(msg)
    elif np.any(~np.isfinite(ts_tmp)):
        # maybe the side is nan, but we can do nearest
        ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
                                              interp='nearest')
        ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
                                              interp='nearest')
        ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
                                                  nc_ts_pre.grid,
                                                  interp='nearest')
    else:
        # We can do bilinear
        ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
                                              interp='linear')
        ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
                                              interp='linear')
        ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
                                                  nc_ts_pre.grid,
                                                  interp='linear')

    # take the center pixel and add it to the CRU CL clim
    # for temp
    loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
                           coords={'month': ts_tmp_avg.month})
    ts_tmp = xr.DataArray(ts_tmp[:, 1, 1], dims=['time'],
                          coords={'time': time})
    ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
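    # (groupby('time.month') broadcasts the 12 climatological values onto
    # the matching months of the full time axis)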
    # for prcp
    loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
                           coords={'month': ts_pre_avg.month})
    ts_pre = xr.DataArray(ts_pre[:, 1, 1], dims=['time'],
                          coords={'time': time})
    ts_pre_ano = xr.DataArray(ts_pre_ano[:, 1, 1], dims=['time'],
                              coords={'time': time})
    # scaled anomalies
    ts_pre = ts_pre.groupby('time.month') * loc_pre
    # standard anomalies
    ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
    # Correct infinite values with standard anomalies
    ts_pre.values = np.where(np.isfinite(ts_pre.values),
                             ts_pre.values,
                             ts_pre_ano.values)
    # The last step might create negative values (unlikely). Clip them
    ts_pre.values = utils.clip_min(ts_pre.values, 0)

    # done
    loc_hgt = loc_hgt[1, 1]
    loc_lon = loc_lon[1]
    loc_lat = loc_lat[1]
    assert np.isfinite(loc_hgt)
    assert np.all(np.isfinite(ts_pre.values))
    assert np.all(np.isfinite(ts_tmp.values))

    gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
                                    loc_hgt, loc_lon, loc_lat,
                                    filesuffix=output_filesuffix,
                                    gradient=ts_grad,
                                    source=nc_ts_tmp._nc.title[:10])

    ncclim._nc.close()
    nc_ts_tmp._nc.close()
    nc_ts_pre._nc.close()
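
A minimal usage sketch for this task (not authoritative): the working
directory and the RGI id below are placeholders, and it assumes OGGM is
installed, that RGI and CRU input data are available, and that the glacier
directories have been pre-processed up to this point.

from oggm import cfg, tasks, workflow

cfg.initialize()
cfg.PATHS['working_dir'] = '/tmp/oggm_wd'  # hypothetical path
cfg.PARAMS['baseline_climate'] = 'CRU'     # required by this task

# hypothetical example id (Hintereisferner); any valid RGI id works
gdirs = workflow.init_glacier_directories(['RGI60-11.00897'])
workflow.execute_entity_task(tasks.process_cru_data, gdirs)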
Ejemplo n.º 18
0
def process_dummy_cru_file(gdir, sigma_temp=2, sigma_prcp=0.5, seed=None,
                           y0=None, y1=None, output_filesuffix=None):
    """Create a simple baseline climate file for this glacier - for testing!

    This simply reproduces the climatology with a little randomness in it.

    TODO: extend the functionality by allowing a monthly varying sigma

    Parameters
    ----------
    gdir : GlacierDirectory
        the glacier directory
    sigma_temp : float
        the standard deviation of the random temperature timeseries (set
        to 0 for a constant ts)
    sigma_prcp : float
        the standard deviation of the random precipitation timeseries (set
        to 0 for a constant ts)
    seed : int
        the RandomState seed
    y0 : int
        the starting year of the timeseries to write (default: 1901)
    y1 : int
        the ending year of the timeseries to write (default: 2018)
    output_filesuffix : str
        this adds a suffix to the output file (useful to avoid overwriting
        previous experiments)
    """

    # read the climatology
    clfile = get_cru_cl_file()
    ncclim = salem.GeoNetcdf(clfile)

    # set temporal subset for the ts data (hydro years)
    sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
    em = sm - 1 if (sm > 1) else 12

    y0 = 1901 if y0 is None else y0
    y1 = 2018 if y1 is None else y1

    time = pd.date_range(start='{}-{:02d}-01'.format(y0, sm),
                         end='{}-{:02d}-01'.format(y1, em),
                         freq='MS')
    ny, r = divmod(len(time), 12)
    assert r == 0

    lon = gdir.cenlon
    lat = gdir.cenlat

    # This is guaranteed to work because I prepared the file (I hope)
    ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)

    # get climatology data
    loc_hgt = ncclim.get_vardata('elev')
    loc_tmp = ncclim.get_vardata('temp')
    loc_pre = ncclim.get_vardata('prcp')
    loc_lon = ncclim.get_vardata('lon')
    loc_lat = ncclim.get_vardata('lat')

    # see if the center is ok
    if not np.isfinite(loc_hgt[1, 1]):
        # take another candidate where finite
        isok = np.isfinite(loc_hgt)

        # wait: some areas are entirely NaNs, make the subset larger
        _margin = 1
        while not np.any(isok):
            _margin += 1
            ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
            loc_hgt = ncclim.get_vardata('elev')
            isok = np.isfinite(loc_hgt)
        if _margin > 1:
            log.debug('(%s) I had to search further for valid climate '
                      'pixels: margin = %s', gdir.rgi_id, _margin)

        # Take the first candidate (doesn't matter which)
        lon, lat = ncclim.grid.ll_coordinates
        lon = lon[isok][0]
        lat = lat[isok][0]
        # Resubset
        ncclim.set_subset()
        ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
        loc_hgt = ncclim.get_vardata('elev')
        loc_tmp = ncclim.get_vardata('temp')
        loc_pre = ncclim.get_vardata('prcp')
        loc_lon = ncclim.get_vardata('lon')
        loc_lat = ncclim.get_vardata('lat')

    assert np.isfinite(loc_hgt[1, 1])
    isok = np.isfinite(loc_hgt)
    hgt_f = loc_hgt[isok].flatten()
    assert len(hgt_f) > 0

    # Should we compute the gradient?
    use_grad = cfg.PARAMS['temp_use_local_gradient']
    ts_grad = None
    if use_grad and len(hgt_f) >= 5:
        ts_grad = np.zeros(12) * np.NaN
        for i in range(12):
            loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
            slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
            ts_grad[i] = slope if (p_val < 0.01) else np.NaN
        # convert to a timeseries and hydrological years
        ts_grad = ts_grad.tolist()
        ts_grad = ts_grad[em:] + ts_grad[0:em]
        ts_grad = np.asarray(ts_grad * ny)

    # Make DataArrays
    rng = np.random.RandomState(seed)
    loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
                           coords={'month': np.arange(1, 13)})
    ts_tmp = rng.randn(len(time)) * sigma_temp
    ts_tmp = xr.DataArray(ts_tmp, dims=['time'],
                          coords={'time': time})

    loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
                           coords={'month': np.arange(1, 13)})
    ts_pre = utils.clip_min(rng.randn(len(time)) * sigma_prcp + 1, 0)
    ts_pre = xr.DataArray(ts_pre, dims=['time'],
                          coords={'time': time})

    # Create the time series
    ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
    ts_pre = ts_pre.groupby('time.month') * loc_pre

    # done
    loc_hgt = loc_hgt[1, 1]
    loc_lon = loc_lon[1]
    loc_lat = loc_lat[1]
    assert np.isfinite(loc_hgt)

    gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
                                    loc_hgt, loc_lon, loc_lat,
                                    gradient=ts_grad,
                                    filesuffix=output_filesuffix,
                                    source='CRU CL2 and some randomness')
    ncclim._nc.close()
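
A similarly hedged sketch for the dummy task, assuming `gdir` is an
already initialized glacier directory; the sigma values and years are
illustrative only.

process_dummy_cru_file(gdir, sigma_temp=1.0, sigma_prcp=0.25,
                       seed=42, y0=1951, y1=2000)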
Ejemplo n.º 19
0
    def _get_tempformelt(self, temp, pok):
        """ Helper function to compute tempformelt to avoid code duplication
        in get_monthly_climate() and _get2d_annual_climate()

        If using this again outside of this class, need to remove the "self",
        such as for 'mb_climate_on_height' in climate.py, that has no self....
        (would need to change temp, t_melt ,temp_std, mb_type, N, loop)

        Input: stuff that is different for the different methods
            temp: temperature time series
            pok: indices of time series

        Returns
        -------
        (tempformelt)
        """

        tempformelt_without_std = temp - self.t_melt

        # computations only change if mb_type is 'mb_daily'
        if self.mb_type == 'mb_monthly' or self.mb_type == 'mb_real_daily':
            tempformelt = tempformelt_without_std
        elif self.mb_type == 'mb_daily':

            itemp_std = self.temp_std[pok]

            tempformelt_with_std = np.full(np.shape(tempformelt_without_std),
                                           np.NaN)
            # N z-scores at the equal-probability quantile midpoints of the
            # standard normal: how much the pseudo-'daily' values deviate
            # from the monthly mean
            z_scores_mean = stats.norm.ppf(
                np.arange(1 / self.N - 1 / (2 * self.N), 1, 1 / self.N))

            z_std = np.matmul(
                np.atleast_2d(z_scores_mean).T, np.atleast_2d(itemp_std))
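            # z_std has shape (N, len(pok)): one column of pseudo-daily
            # deviations per time step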

            # two possibilities; avoiding the loop is usually faster
            if self.loop is False:
                # vectorized version (not much faster in practice)
                tempformelt_daily = np.atleast_3d(tempformelt_without_std).T + \
                                    np.atleast_3d(z_std)
                clip_min(tempformelt_daily, 0, out=tempformelt_daily)
                tempformelt_with_std = tempformelt_daily.mean(axis=0).T
            else:
                # loop over the heights, one row at a time
                for h in np.arange(0, np.shape(tempformelt_without_std)[0]):
                    h_tfm_daily_ = np.atleast_2d(tempformelt_without_std[h, :])
                    h_tempformelt_daily = h_tfm_daily_ + z_std
                    clip_min(h_tempformelt_daily, 0, out=h_tempformelt_daily)
                    tempformelt_with_std[h, :] = \
                        h_tempformelt_daily.mean(axis=0)
            tempformelt = tempformelt_with_std

        else:
            raise InvalidParamsError('mb_type can only be "mb_monthly", '
                                     '"mb_daily" or "mb_real_daily"')
        # replace all values below zero with zero
        clip_min(tempformelt, 0, out=tempformelt)

        return tempformelt
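
A self-contained sketch of the idea above (standalone, outside the class;
the values are illustrative): N z-scores at equal-probability quantile
midpoints approximate a standard normal, so averaging the clipped
pseudo-daily values gives the monthly melt temperature.

import numpy as np
from scipy import stats

N = 100                           # pseudo-daily values per time step
t_melt = 0.0                      # melt threshold (deg C)
temp = np.array([-1.5, 0.5])      # monthly mean temperatures
temp_std = np.array([2.0, 2.0])   # std of daily temperature per month

# quantile midpoints of the standard normal
z = stats.norm.ppf(np.arange(1 / N - 1 / (2 * N), 1, 1 / N))

# pseudo-daily temperatures around each monthly mean, shape (N, n_months)
daily = temp + z[:, None] * temp_std

# clip at the melt threshold, then average back to monthly values
tempformelt = np.clip(daily - t_melt, 0, None).mean(axis=0)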
Ejemplo n.º 20
0
def process_dem(gdir):
    """Reads the DEM from the tiff, attempts to fill voids and apply smooth.

    The data is then written to `gridded_data.nc`.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    """

    # open srtm tif-file:
    dem = read_geotiff_dem(gdir)

    # Grid
    nx = gdir.grid.nx
    ny = gdir.grid.ny

    # Correct the DEM
    valid_mask = np.isfinite(dem)
    if np.all(~valid_mask):
        raise InvalidDEMError('Not a single valid grid point in DEM')

    if np.any(~valid_mask):
        # We interpolate
        if np.sum(~valid_mask) > (0.25 * nx * ny):
            log.warning('({}) more than 25% NaNs in DEM'.format(gdir.rgi_id))
        xx, yy = gdir.grid.ij_coordinates
        pnan = np.nonzero(~valid_mask)
        pok = np.nonzero(valid_mask)
        points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
        inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
        try:
            dem[pnan] = griddata(points, np.ravel(dem[pok]), inter,
                                 method='linear')
        except ValueError:
            raise InvalidDEMError('DEM interpolation not possible.')
        log.warning(gdir.rgi_id + ': DEM needed interpolation.')
        gdir.add_to_diagnostics('dem_needed_interpolation', True)
        gdir.add_to_diagnostics('dem_invalid_perc', len(pnan[0]) / (nx * ny))

    isfinite = np.isfinite(dem)
    if np.any(~isfinite):
        # interpolation will still leave NaNs in DEM:
        # extrapolate with NN if needed (e.g. coastal areas)
        xx, yy = gdir.grid.ij_coordinates
        pnan = np.nonzero(~isfinite)
        pok = np.nonzero(isfinite)
        points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
        inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
        try:
            dem[pnan] = griddata(points, np.ravel(dem[pok]), inter,
                                 method='nearest')
        except ValueError:
            raise InvalidDEMError('DEM extrapolation not possible.')
        log.warning(gdir.rgi_id + ': DEM needed extrapolation.')
        gdir.add_to_diagnostics('dem_needed_extrapolation', True)
        gdir.add_to_diagnostics('dem_extrapol_perc', len(pnan[0]) / (nx * ny))

    if np.min(dem) == np.max(dem):
        raise InvalidDEMError('({}) min equal max in the DEM.'
                              .format(gdir.rgi_id))

    # Clip topography to 0 m a.s.l.
    utils.clip_min(dem, 0, out=dem)

    # Smooth DEM?
    if cfg.PARAMS['smooth_window'] > 0.:
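        # convert the smoothing radius from meters to grid pixels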
        gsize = np.rint(cfg.PARAMS['smooth_window'] / gdir.grid.dx)
        smoothed_dem = gaussian_blur(dem, int(gsize))
    else:
        smoothed_dem = dem.copy()

    # Write to file
    with GriddedNcdfFile(gdir, reset=True) as nc:

        v = nc.createVariable('topo', 'f4', ('y', 'x',), zlib=True)
        v.units = 'm'
        v.long_name = 'DEM topography'
        v[:] = dem

        v = nc.createVariable('topo_smoothed', 'f4', ('y', 'x',), zlib=True)
        v.units = 'm'
        v.long_name = ('DEM topography smoothed with radius: '
                       '{:.1f} m'.format(cfg.PARAMS['smooth_window']))
        v[:] = smoothed_dem

        # If there was some invalid data store this as well
        v = nc.createVariable('topo_valid_mask', 'i1', ('y', 'x',), zlib=True)
        v.units = '-'
        v.long_name = 'DEM validity mask according to geotiff input (1-0)'
        v[:] = valid_mask.astype(int)

        # add some meta stats and close
        nc.max_h_dem = np.max(dem)
        nc.min_h_dem = np.min(dem)