def merge_first_year_to_historic(future_fname):
    '''
    Read the last year of real sea-ice data and the first year of new data,
    and merge them over the first month.
    '''
    dir_in_hist = '/group_workspaces/jasmin2/primavera1/WP6/forcing/HadISST2_submit/v1.2/'
    fice = 'siconc_input4MIPs_SSTsAndSeaIce_HighResMIP_MOHC-HadISST-2-2-0-0-0_gn_' + str(
        FIRST_FUTURE_YEAR - 1) + '0101-' + str(FIRST_FUTURE_YEAR -
                                               1) + '1231.nc'

    sic = iris.load_cube(dir_in_hist + fice)
    icc.add_month_number(sic, 'time', name='month')
    month_constraint_dec = iris.Constraint(
        coord_values={'month': lambda l: l == 12})
    month_constraint_jan = iris.Constraint(
        coord_values={'month': lambda l: l == 1})
    sic_hist = sic.extract(month_constraint_dec)

    sic_future = iris.load_cube(future_fname)
    sic_future_month = sic_future.extract(month_constraint_jan)
    ndays = sic_future_month.shape[0]

    for iday in range(0, ndays):
        weight = float(iday) / float(ndays - 1)
        sic_future.data[iday, :, :] = sic_hist.data[-1, :, :] * (
            1.0 - weight) + sic_future_month.data[iday, :, :] * weight

    iris.save(sic_future, future_fname[:-3] + '_merged.nc')
    cmd = 'mv ' + future_fname + ' ' + future_fname[:-3] + '_unmerged.nc'
    os.system(cmd)
    cmd = 'mv ' + future_fname[:-3] + '_merged.nc' + ' ' + future_fname
    os.system(cmd)
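
As a sanity check, the day-weighted blend used above can be sketched in isolation with plain NumPy (the fields below are synthetic stand-ins for the historic December day and the future January month; all names are hypothetical):

import numpy as np

# hypothetical stand-ins: (lat, lon) last historic day, (day, lat, lon) future month
dec_last_day = np.full((180, 360), 0.8)
jan_future = np.random.default_rng(0).random((31, 180, 360))

ndays = jan_future.shape[0]
blended = np.empty_like(jan_future)
for iday in range(ndays):
    weight = iday / (ndays - 1)  # 0.0 on the first day, 1.0 on the last
    blended[iday] = dec_last_day * (1.0 - weight) + jan_future[iday] * weight

assert np.allclose(blended[0], dec_last_day)     # starts at the historic field
assert np.allclose(blended[-1], jan_future[-1])  # ends at the future field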
Example #2
def add_extra_time_coords(cube):
    """
    Adds new coordinate for indexing a given simulation based on model and
    ensemble and adds additional time coordinates for unit manipulation
    """
    if not cube.coords('year'):
        icc.add_year(cube, 'time')
    if not cube.coords('month'):
        icc.add_month(cube, 'time')
    if not cube.coords('month_number'):
        icc.add_month_number(cube, 'time')
    if not cube.coords('day_of_month'):
        icc.add_day_of_month(cube, 'time')
    if not cube.coords('hour'):
        icc.add_hour(cube, 'time')
    return cube
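
A minimal usage sketch for the helper above, assuming `icc` is `iris.coord_categorisation` as in the function body (the one-year daily cube here is synthetic):

import numpy as np
import cf_units
import iris
import iris.coord_categorisation as icc

time = iris.coords.DimCoord(
    np.arange(365, dtype=float), standard_name='time',
    units=cf_units.Unit('days since 2000-01-01', calendar='standard'))
cube = iris.cube.Cube(np.zeros(365), long_name='dummy',
                      dim_coords_and_dims=[(time, 0)])

cube = add_extra_time_coords(cube)
print([coord.name() for coord in cube.coords()])
# expect 'time' plus year, month, month_number, day_of_month and hour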
def select_certain_months(cubes, lbmon):
    """
    Select data from CubeList that matches the specified months.

    :param CubeList cubes: Iris CubeList.
    :param list lbmon: List with month numbers, e.g. lbmon=[5, 6, 7] for May,
        June, and July.
    :returns: CubeList with Cubes containing only data for the specified
        months.
    :rtype: CubeList
    :raises: `AssertionError` if `cubes` is not an `iris.cube.CubeList`.
    """
    # add 'month number' coordinate
    add_time_coord = {
        'monthly':
        lambda cube: coord_cat.add_month_number(
            cube, 'time', name='month_number'),
        'seasonal':
        lambda cube: coord_cat.add_season(cube, 'time', name='clim_season'),
        'annual':
        lambda cube: coord_cat.add_season_year(
            cube, 'time', name='season_year')
    }
    assert isinstance(cubes, iris.cube.CubeList)

    for cube in cubes:
        add_time_coord['monthly'](cube)

    # filter by month number
    month_constraint = iris.Constraint(month_number=lbmon)
    return cubes.extract(
        month_constraint)  # CubeList.extract always returns a CubeList
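
For example, keeping only May-July data from a CubeList (a sketch under the same assumptions, with `coord_cat` imported as `iris.coord_categorisation`; the two-year daily cube is synthetic):

import numpy as np
import cf_units
import iris

time = iris.coords.DimCoord(
    np.arange(730, dtype=float), standard_name='time',
    units=cf_units.Unit('days since 2000-01-01', calendar='standard'))
cube = iris.cube.Cube(np.zeros(730), long_name='dummy',
                      dim_coords_and_dims=[(time, 0)])

summer = select_certain_months(iris.cube.CubeList([cube]), [5, 6, 7])
print(set(summer[0].coord('month_number').points))  # month numbers 5, 6 and 7 only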
Example #4
    def test_basic(self):
        cube = self.cube
        time_coord = self.time_coord

        ccat.add_year(cube, time_coord, 'my_year')
        ccat.add_day_of_month(cube, time_coord, 'my_day_of_month')
        ccat.add_day_of_year(cube, time_coord, 'my_day_of_year')

        ccat.add_month(cube, time_coord, 'my_month')
        with warnings.catch_warnings(record=True):
            ccat.add_month_shortname(cube, time_coord, 'my_month_shortname')
        ccat.add_month_fullname(cube, time_coord, 'my_month_fullname')
        ccat.add_month_number(cube, time_coord, 'my_month_number')

        ccat.add_weekday(cube, time_coord, 'my_weekday')
        ccat.add_weekday_number(cube, time_coord, 'my_weekday_number')
        with warnings.catch_warnings(record=True):
            ccat.add_weekday_shortname(cube, time_coord,
                                       'my_weekday_shortname')
        ccat.add_weekday_fullname(cube, time_coord, 'my_weekday_fullname')

        ccat.add_season(cube, time_coord, 'my_season')
        ccat.add_season_number(cube, time_coord, 'my_season_number')
        with warnings.catch_warnings(record=True):
            ccat.add_season_month_initials(cube, time_coord,
                                           'my_season_month_initials')
        ccat.add_season_year(cube, time_coord, 'my_season_year')

        # also test 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(cube,
                                   'my_month_in_quarter',
                                   time_coord,
                                   _month_in_quarter)

        # To ensure consistent results between 32-bit and 64-bit
        # platforms, ensure all the numeric categorisation coordinates
        # are always stored as int64.
        for coord in cube.coords():
            if coord.long_name is not None and coord.points.dtype.kind == 'i':
                coord.points = coord.points.astype(np.int64)

        # check values
        self.assertCML(cube, ('categorisation', 'quickcheck.cml'))
def reinit_broken_time(cube_anom, cube_clim, climstart, climend):
    """ the time coordinates are a big mess (given as floats in years A.D.)
    best to reinitialize them from scratch
    """
    logger.info("Reinitializing broken time coordinate")
    time_raw = cube_anom.coord('time')

    n_years, n_add_mon = len(time_raw.points) // 12, len(time_raw.points) % 12
    start_year = int(time_raw.points[0])
    n_days = (n_years + n_add_mon / 12) * 365.25 + 50  # have some extra length
    climcenter = (climend - climstart) // 2

    times = iris.coords.DimCoord(
        np.arange(int(n_days), dtype=float),
        var_name='time',
        standard_name='time',
        long_name='time',
        units=cf_units.Unit('days since {}-01-01 00:00:00'.format(start_year),
                            calendar=cf_units.CALENDAR_STANDARD))

    # init a dummy cube to enable coord_categorisation
    dummycube = iris.cube.Cube(np.zeros(int(n_days), dtype=int),
                               dim_coords_and_dims=[(times, 0)])
    coord_categorisation.add_year(dummycube, 'time', name='year')
    coord_categorisation.add_month_number(dummycube, 'time', name='month')

    # build timecoord for the anomaly cube
    dummycube = dummycube.aggregated_by(['year', 'month'], iris.analysis.MEAN)
    dummycube = dummycube[:(n_years * 12 + n_add_mon)]
    timecoord_anom = dummycube.coord('time')

    # build timecoord for the climatology cube
    dummycube_clim = dummycube.extract(
        iris.Constraint(year=lambda cell: cell == climstart + climcenter))
    timecoord_clim = dummycube_clim.coord('time')

    # change to the new time coordinates
    cube_anom.remove_coord('time')
    cube_anom.add_dim_coord(timecoord_anom, 0)
    cube_clim.add_dim_coord(timecoord_clim, 0)

    # convert time units to standard
    utils.convert_timeunits(cube_anom, 1950)
    utils.convert_timeunits(cube_clim, 1950)

    return (cube_anom, cube_clim)
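
The dummy-cube trick used above (aggregating a daily axis by ['year', 'month'] purely to obtain monthly time points) works in isolation too; a minimal sketch:

import numpy as np
import cf_units
import iris
import iris.analysis
import iris.coord_categorisation as coord_categorisation

times = iris.coords.DimCoord(
    np.arange(365, dtype=float), var_name='time', standard_name='time',
    units=cf_units.Unit('days since 1950-01-01 00:00:00',
                        calendar='standard'))
dummycube = iris.cube.Cube(np.zeros(365, dtype=int),
                           dim_coords_and_dims=[(times, 0)])
coord_categorisation.add_year(dummycube, 'time', name='year')
coord_categorisation.add_month_number(dummycube, 'time', name='month')

monthly = dummycube.aggregated_by(['year', 'month'], iris.analysis.MEAN)
print(monthly.coord('time').points)  # one mid-month point per calendar month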
def hovmoller(target_dir):
    fname = os.path.join(DATA_ZOO, 'PP', 'ostia', 'ostia_sst_200604_201009_N216.pp')
    cube = iris.load_cube(fname, iris.Constraint('surface_temperature', latitude=lambda v: -5 < v < 5))
    
    iris_cat.add_month_number(cube, cube.coord('time'), 'month')
    iris_cat.add_year(cube, cube.coord('time'), 'year')
    
    monthly_mean = cube.aggregated_by(['year', 'month'], iris.analysis.MEAN)
    monthly_mean.remove_coord('month')
    monthly_mean.remove_coord('year')
    
    # make time the dimension coordinate (won't be needed once Bill has #22)
    t = monthly_mean.coord('time')
    monthly_mean.remove_coord(t)
    monthly_mean.add_dim_coord(t, 0)
    
    iris.save(monthly_mean, os.path.join(target_dir, 'ostia_monthly.nc'))
Example #7
def climatology(cube, kind='month'):
    """Calculate a climatology for a cube.  Can do monthly or yearly.

    Args:
        cube (iris.cube.Cube)
        kind (Optional[str]): 'month' or 'year'

    Returns:
        iris.cube.Cube

    """
    aux_coords = [aux_coord.name() for aux_coord in cube.aux_coords]
    if 'year' not in aux_coords:
        cat.add_year(cube, 'time')
    if 'month' not in aux_coords:
        cat.add_month(cube, 'time')
        cat.add_month_number(cube, 'time')
    out = cube.aggregated_by(kind, iris.analysis.MEAN)

    # If the data don't start in January the time coordinate will no longer
    # be monotonic. Fix this.
    if (kind == 'month') and (not out.coord('time').is_monotonic()):

        # Reorder the data so January is first.
        jan_index = np.where(out.coord('month').points == 'Jan')[0][0]
        ntim = 12
        sort_indices = (list(range(jan_index, ntim)) +
                        list(range(0, jan_index)))
        out = out[sort_indices]

        # Create a new time coordinate which is monotonic.
        startyear = int(out.coord('time').units.num2date(0).year)
        newtime_points = [
            netcdftime.datetime(startyear + (m // 12), (m % 12) + 1, 1)
            for m in out.coord('month_number').points.astype(int) - 1
        ]
        time_units = out.coord('time').units
        newtime_points = time_units.date2num(newtime_points)
        newtime = iris.coords.DimCoord(newtime_points,
                                       units=time_units,
                                       standard_name='time')
        data_dim = out.coord_dims('time')[0]
        out.remove_coord('time')
        out.add_dim_coord(newtime, data_dim)

    return out
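
Usage of climatology() is then a one-liner; for example, a monthly climatology of a synthetic two-year daily series (a sketch, assuming `cat` is `iris.coord_categorisation` as in the function body):

import numpy as np
import cf_units
import iris

time = iris.coords.DimCoord(
    np.arange(730, dtype=float), standard_name='time',
    units=cf_units.Unit('days since 2000-01-01', calendar='standard'))
cube = iris.cube.Cube(np.random.default_rng(0).random(730),
                      long_name='tas',
                      dim_coords_and_dims=[(time, 0)])

clim = climatology(cube, kind='month')
print(clim.shape)  # (12,): one mean value per calendar month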
Example #8
def add_time_names(cube, year=False, month_number=False, day_of_year=False):
    '''
    Add additional coordinate names to the time dimension
    '''
    if year:
        icc.add_year(cube, 'time', name='year')

    if month_number:
        try:
            icc.add_month_number(cube, 'time', name='month')
        except ValueError:
            # the coordinate already exists on the cube
            pass

    if day_of_year:
        try:
            icc.add_day_of_year(cube, 'time', name='day_of_year')
        except ValueError:
            pass
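
And a quick call sketch (synthetic daily cube; `icc` as above):

import numpy as np
import cf_units
import iris
import iris.coord_categorisation as icc

time = iris.coords.DimCoord(
    np.arange(60, dtype=float), standard_name='time',
    units=cf_units.Unit('days since 2000-01-01', calendar='standard'))
cube = iris.cube.Cube(np.zeros(60), long_name='dummy',
                      dim_coords_and_dims=[(time, 0)])

add_time_names(cube, year=True, month_number=True, day_of_year=True)
print(cube.coord('month').points[28:34])  # January rolling over into February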
Example #10
    def test_basic(self):
        # make a series of 'day numbers' for the time that slide across month boundaries
        day_numbers =  np.arange(0, 600, 27, dtype=np.int32)
        
        cube = iris.cube.Cube(day_numbers, long_name='test cube', units='metres')

        # use day numbers as data values too (not actually used for anything)
        cube.data = day_numbers 
        
        time_coord = iris.coords.DimCoord(
            day_numbers, standard_name='time', units=iris.unit.Unit('days since epoch', 'gregorian'))
        cube.add_dim_coord(time_coord, 0)

        # add test coordinates for the examples wanted
        ccat.add_year(cube, time_coord)
        ccat.add_day_of_month(cube, 'time')  # NB test passing coord name instead of the coord itself

        ccat.add_month(cube, time_coord)
        ccat.add_month_shortname(cube, time_coord, name='month_short')
        ccat.add_month_fullname(cube, time_coord, name='month_full')
        ccat.add_month_number(cube, time_coord, name='month_number')
        
        ccat.add_weekday(cube, time_coord)
        ccat.add_weekday_number(cube, time_coord, name='weekday_number')
        ccat.add_weekday_shortname(cube, time_coord, name='weekday_short')
        ccat.add_weekday_fullname(cube, time_coord, name='weekday_full')

        ccat.add_season(cube, time_coord)
        ccat.add_season_number(cube, time_coord, name='season_number')
        ccat.add_season_month_initials(cube, time_coord, name='season_months')
        ccat.add_season_year(cube, time_coord, name='year_ofseason')
        
        # also test the 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(cube, 'month_in_quarter', time_coord, _month_in_quarter)

        for coord_name in ['month_number', 'month_in_quarter', 'weekday_number', 'season_number', 'year_ofseason', 'year', 'day']:
            cube.coord(coord_name).points = cube.coord(coord_name).points.astype(np.int64)

        # check values
        self.assertCML(cube, ('categorisation', 'quickcheck.cml'))
Example #11
def add_future_time_info(start_cube):
    '''
    Fix calendar so that future_cube has dates starting from start_cube+1
    '''
    future_cube = start_cube.copy()

    for coord in ['year', 'month']:
        try:
            future_cube.remove_coord(coord)
        except iris.exceptions.CoordinateNotFoundError:
            pass

    print('start future time ', future_cube.coord('time').points[0:50])
    print('start future time ', future_cube.coord('time').points[-50:])
    # transform time dimension to make this go from Jan onwards
    start_period2 = start_cube.coord('year').points[-1] + 1
    start_period1 = start_cube.coord('year').points[0]
    print('start period 1,2 ', start_period1, start_period2)

    # find difference between the date wanted and the date we have
    time_delta = datetime.date(start_period2, 1, 1) - datetime.date(
        start_period1, 1, 1)
    print('time delta ', time_delta)
    time_units = start_cube.coord('time').units
    print('time_units ', time_units)
    if 'hours' in str(time_units):
        time_delta_unit = 24.
    else:
        time_delta_unit = 1.

    # add this delta on to the time coordinate
    future_cube.coord('time').points = future_cube.coord(
        'time').points + time_delta.days * time_delta_unit

    icc.add_year(future_cube, 'time', name='year')
    icc.add_month_number(future_cube, 'time', name='month')

    print('end future time ', future_cube.coord('time').points[0:50])
    print('end future year ', future_cube.coord('year').points[0:50])

    return future_cube
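
The heart of the function is just "shift the time points by a whole number of days"; that arithmetic in isolation (the years 1950 and 2016 are illustrative):

import datetime

import numpy as np

# days between 1 Jan of the year we have and 1 Jan of the year we want
time_delta = datetime.date(2016, 1, 1) - datetime.date(1950, 1, 1)
print(time_delta.days)  # 24106

points = np.arange(365, dtype=float)  # e.g. 'days since 1950-01-01'
shifted = points + time_delta.days    # the same days, now starting in 2016
# for 'hours since ...' units, multiply time_delta.days by 24 instead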
Example #14
def monthly_running_mean(infile):
    '''
    Try removing the mean seasonal cycle from all data,
    then perhaps filter the rest (or else extract the trend for each month).
    '''
    file_runningmean = infile[:-3] + '_running_monthlymean.nc'
    if os.path.exists(file_runningmean):
        return file_runningmean
    c = iris.load_cube(infile)
    icc.add_month_number(c, 'time', name='month')
    icc.add_year(c, 'time', name='year')

    #cube_anomaly = sst_future.remove_monthly_mean_time_avg(c)
    cube_mean = sst_future.monthly_mean_running_time_avg(c)

    #iris.save(cube_anomaly, infile[:-3]+'_remove_monthlymean.nc', unlimited_dimensions = ['time'], fill_value = 1.0e20)
    iris.save(cube_mean,
              file_runningmean,
              unlimited_dimensions=['time'],
              fill_value=1.0e20)

    return file_runningmean
Example #15
    def test_basic(self):
        cube = self.cube
        time_coord = self.time_coord

        ccat.add_year(cube, time_coord, "my_year")
        ccat.add_day_of_month(cube, time_coord, "my_day_of_month")
        ccat.add_day_of_year(cube, time_coord, "my_day_of_year")

        ccat.add_month(cube, time_coord, "my_month")
        ccat.add_month_fullname(cube, time_coord, "my_month_fullname")
        ccat.add_month_number(cube, time_coord, "my_month_number")

        ccat.add_weekday(cube, time_coord, "my_weekday")
        ccat.add_weekday_number(cube, time_coord, "my_weekday_number")
        ccat.add_weekday_fullname(cube, time_coord, "my_weekday_fullname")

        ccat.add_season(cube, time_coord, "my_season")
        ccat.add_season_number(cube, time_coord, "my_season_number")
        ccat.add_season_year(cube, time_coord, "my_season_year")

        # also test 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(
            cube, "my_month_in_quarter", time_coord, _month_in_quarter
        )

        # To ensure consistent results between 32-bit and 64-bit
        # platforms, ensure all the numeric categorisation coordinates
        # are always stored as int64.
        for coord in cube.coords():
            if coord.long_name is not None and coord.points.dtype.kind == "i":
                coord.points = coord.points.astype(np.int64)

        # check values
        self.assertCML(cube, ("categorisation", "quickcheck.cml"))
Example #16
def draw_for_date():
    cubes = iris.load(T_FILE_PATH)
    sst = cubes[1]

    coord_categorisation.add_month_number(sst, "time")
    coord_categorisation.add_day_of_month(sst, "time")

    sst_sel = sst.extract(iris.Constraint(month_number=7, day_of_month=1))
    sst_sel.data = np.ma.masked_where(sst_sel.data == 0, sst_sel.data)

    b, lons, lats = nemo_commons.get_basemap_and_coordinates_from_file(T_FILE_PATH)

    # Plot the data
    fig = plt.figure()

    x, y = b(lons, lats)
    img = b.pcolormesh(x, y, sst_sel.data)
    b.colorbar(img)
    b.drawcoastlines()
    fname = "sst_1july_1958.jpeg"
    if not os.path.isdir(NEMO_IMAGES_DIR):
        os.mkdir(NEMO_IMAGES_DIR)
    fig.savefig(os.path.join(NEMO_IMAGES_DIR, fname))
Example #17
    def get_seasonal_mean_sst(self, start_year=None, end_year=None, season_to_months=None):

        """

        :param start_year:
        :param end_year:
        :param season_to_months:
        :return: dict(year -> season -> field)
        """

        def group_key(c, val):
            for k, months in season_to_months.items():
                if val in months:
                    return k

        result = {}
        for the_year in range(start_year, end_year + 1):
            result[the_year] = {}
            data_path = self.year_to_path[the_year]
            cube = iris.load_cube(data_path, "Sea Surface temperature")
            print(cube)
            coord_categorisation.add_month_number(cube, "time")
            coord_categorisation.add_categorised_coord(cube, "season", "month_number", group_key)

            assert isinstance(cube, Cube)
            seas_mean = cube.aggregated_by(["season"], iris.analysis.MEAN)

            assert isinstance(seas_mean, Cube)
            assert isinstance(self.basemap, Basemap)

            for the_season in list(season_to_months.keys()):
                c = iris.Constraint(season=the_season)
                the_mean = seas_mean.extract(c)
                assert isinstance(the_mean, Cube)
                result[the_year][the_season] = the_mean.data.transpose()

        return result
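
The `group_key` categoriser above maps month numbers onto season labels via `add_categorised_coord`; the same pattern works standalone (the season mapping and cube below are illustrative only):

import numpy as np
import cf_units
import iris
import iris.analysis
from iris import coord_categorisation

season_to_months = {'winter': (1, 2, 12), 'summer': (6, 7, 8)}

def group_key(coord, val):
    # map a month number onto its season label
    for season, months in season_to_months.items():
        if val in months:
            return season
    return 'other'

time = iris.coords.DimCoord(
    np.arange(365, dtype=float), standard_name='time',
    units=cf_units.Unit('days since 2000-01-01', calendar='standard'))
cube = iris.cube.Cube(np.zeros(365), long_name='sst',
                      dim_coords_and_dims=[(time, 0)])
coord_categorisation.add_month_number(cube, 'time')
coord_categorisation.add_categorised_coord(cube, 'season', 'month_number',
                                           group_key)

seas_mean = cube.aggregated_by(['season'], iris.analysis.MEAN)
print(seas_mean.coord('season').points)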
Example #19
def main(cfg):
    """Run the diagnostic."""
    ###########################################################################
    # Read recipe data
    ###########################################################################

    # Dataset data containers
    data = e.Datasets(cfg)
    logging.debug("Found datasets in recipe:\n%s", data)

    # Variables
    var = e.Variables(cfg)
    logging.debug("Found variables in recipe:\n%s", var)

    # Check for tas and rlnst
    if not var.vars_available('pr', 'ua', 'va', 'ts'):
        raise ValueError("This diagnostic needs 'pr', 'ua', 'va', and 'ts'")

    available_exp = list(group_metadata(cfg['input_data'].values(), 'exp'))

    if 'historical' not in available_exp:
        raise ValueError("The diagnostic needs a historical experiment "
                         "and one other experiment.")

    if len(available_exp) != 2:
        raise ValueError("The diagnostic needs two model experiments: "
                         "one historical and one other.")

    available_exp.remove('historical')
    future_exp = available_exp[0]
    ###########################################################################
    # Read data
    ###########################################################################

    # Create iris cube for each dataset and save annual means
    for dataset_path in data:
        cube = iris.load(dataset_path)[0]
        cat.add_month_number(cube, 'time', name='month_number')
        # MJJAS mean (monsoon season)
        cube = cube[np.where(
            np.absolute(cube.coord('month_number').points - 7) <= 2)]
        cube = cube.collapsed('time', iris.analysis.MEAN)

        short_name = data.get_info(n.SHORT_NAME, dataset_path)
        if short_name == 'pr':
            # convert from kg m-2 s-1 to mm d-1
            # cube.convert_units('mm d-1') doesn't work.
            cube.data = cube.data * (60.0 * 60.0 * 24.0)
            cube.units = 'mm d-1'
            # Possible because all data must be interpolated to the same grid.
            if 'lats' not in locals():
                lats = cube.coord('latitude').points
                lons = cube.coord('longitude').points

        data.set_data(cube.data, dataset_path)
    ###########################################################################
    # Process data
    ###########################################################################

    data_ar = substract_li(cfg, data, lats, lons, future_exp)

    # data_ar {"datasets": datasets, "ar_diff_rain": ar_diff_rain,
    #          "ar_diff_ua": ar_diff_ua, "ar_diff_va": ar_diff_va,
    #          "ar_hist_rain": ar_hist_rain, "mism_diff_rain": mism_diff_rain,
    #          "mwp_hist_rain": mwp_hist_rain}

    plot_rain_and_wind(cfg, 'Multi-model_mean',
                       {'ar_diff_rain': data_ar["ar_diff_rain"],
                        'ar_diff_ua': data_ar["ar_diff_ua"],
                        'ar_diff_va': data_ar["ar_diff_va"],
                        'lats': lats, 'lons': lons}, future_exp)

    # Regression between mean ISM rain difference and historical rain
    reg2d = get_reg_2d_li(data_ar["mism_diff_rain"], data_ar["ar_hist_rain"],
                          lats, lons)

    plot_2dcorrelation_li(cfg, reg2d, lats, lons)

    plot_reg_li(cfg, data_ar, future_exp)

    # Regression between mean WP rain and rain difference for each location
    reg2d_wp = get_reg_2d_li(data_ar["mwp_hist_rain"], data_ar["ar_diff_rain"],
                             lats, lons)

    data_ar2 = correct_li(data_ar, lats, lons, reg2d_wp)
    # return {"datasets": data["datasets"], "ar_diff_cor": ar_diff_cor,
    #         "proj_err": proj_err, "mism_diff_cor": mism_diff_cor,
    #         "mism_hist_rain": mism_hist_rain, "mwp_hist_cor": mwp_hist_cor}

    plot_reg_li2(cfg, data_ar["datasets"], data_ar["mism_diff_rain"],
                 data_ar2["mism_diff_cor"], data_ar2["mism_hist_rain"])

    plot_rain(cfg, 'Multi-model mean rainfall change due to model error',
              np.mean(data_ar2["proj_err"], axis=2), lats, lons)
    plot_rain(cfg, 'Corrected multi-model mean rainfall change',
              np.mean(data_ar2["ar_diff_cor"], axis=2), lats, lons)
Example #20
def add_time_coord_cats(cube):
    """
    This function takes in an iris cube, and adds a range of
    numeric co-ordinate categorisations to it. Depending
    on the data, not all of the coords added will be relevant.

    args
    ----
    cube: iris cube that has a coordinate called 'time'

    Returns
    -------
    Cube: cube that has new time categorisation coords added

    Notes
    -----
    A simple example:

    >>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
    >>> cube = iris.load_cube(file)
    >>> coord_names = [coord.name() for coord in cube.coords()]
    >>> print((', '.join(coord_names)))
    time, grid_latitude, grid_longitude
    >>> ccube = add_time_coord_cats(cube)
    >>> coord_names = [coord.name() for coord in ccube.coords()]
    >>> print((', '.join(coord_names)))
    time, grid_latitude, grid_longitude, day_of_month, day_of_year, month, \
month_number, season, season_number, year
    >>> # print every 50th value of the added time cat coords
    ... for c in coord_names[3:]:
    ...     print(ccube.coord(c).long_name)
    ...     print(ccube.coord(c).points[::50])
    ...
    day_of_month
    [ 1 21 11  1 21 11  1 21]
    day_of_year
    [  1  51 101 151 201 251 301 351]
    month
    ['Jan' 'Feb' 'Apr' 'Jun' 'Jul' 'Sep' 'Nov' 'Dec']
    month_number
    [ 1  2  4  6  7  9 11 12]
    season
    ['djf' 'djf' 'mam' 'jja' 'jja' 'son' 'son' 'djf']
    season_number
    [0 0 1 2 2 3 3 0]
    year
    [2000 2000 2000 2000 2000 2000 2000 2000]

    """

    # most errors pop up when you try to add a coord that has
    # previously been added, or the cube doesn't contain the
    # necessary attribute.

    ccube = cube.copy()

    # numeric categorisations first, then the string-valued ones
    categorisations = [
        iccat.add_day_of_year,
        iccat.add_day_of_month,
        iccat.add_month_number,
        iccat.add_season_number,
        iccat.add_year,
        iccat.add_month,
        iccat.add_season,
    ]
    for categorise in categorisations:
        try:
            categorise(ccube, "time")
        except (AttributeError, ValueError) as err:
            print("add_time_coord_cats: {}, skipping . . . ".format(err))

    return ccube
def fix_sst_under_ice(dir_in, filename_sice_func_sst_month_pickle, year,
                      months, year_last_real_data, sst_fixed_year):

    fnames_sst = sorted(
        glob.glob(os.path.join(dir_in, 'tos*' + year + '0101-*.nc')))
    fnames_ice = sorted(
        glob.glob(os.path.join(dir_in, 'siconc*' + year + '0101-*.nc')))
    fout_year = sst_fixed_year

    if os.path.exists(fout_year):
        return

    year_min = int(os.path.basename(fnames_sst[0]).split('_')[-1][0:4])
    NYEARS = len(fnames_sst)
    print('NYEARS ', NYEARS)

    cube_mon = iris.cube.CubeList()

    # read calculated relationship
    fh = open(filename_sice_func_sst_month_pickle, 'rb')
    mean_freq_sice = pickle.load(fh)
    fh.close()

    mean_freq_sice_daily = sic_functions.interpolate_histogram(
        year, mean_freq_sice)

    for f_sst, f_ice in zip(fnames_sst, fnames_ice):
        year = f_sst.split('_')[-1][0:4]
        year1 = f_ice.split('_')[-1][0:4]
        iy = int(year) - year_min
        print('process ', f_sst, year, iy)

        if year != year1:
            raise Exception('Paired SST and sea-ice years are not the same ' +
                            year + ' ' + year1)
        sst = iris.load_cube(f_sst)
        ice = iris.load_cube(f_ice)
        icc.add_month_number(sst, 'time', name='month')
        icc.add_month_number(ice, 'time', name='month')

        if os.path.exists(fout_year):
            continue

        files_fixed = []
        for im in months:
            print('processing month ', im)
            month = im + 1
            month_no = iris.Constraint(
                coord_values={'month': lambda l: l == month})
            sst_mon = sst.extract(month_no)
            ice_mon = ice.extract(month_no)

            print('calc the fixed SST under siconc ')
            sst_fixed = fix_sst_based_on_siconc(sst_mon, ice_mon,
                                                mean_freq_sice_daily, im)
            fout = fout_year[:-3] + '_' + str(month).zfill(2) + '.nc'
            iris.save(sst_fixed, fout, unlimited_dimensions=['time'])
            print('saved file ', fout)
            files_fixed.append(fout)

        cmd = 'ncrcat -O ' + ' '.join(files_fixed) + ' ' + fout_year
        print('cmd ', cmd)
        os.system(cmd)
        for f in files_fixed:
            os.remove(f)
Example #22
def adjust_bias(obs_hist,
                sim_hist,
                sim_fut,
                realize_cubes=False,
                anonymous_dimension_name=None,
                halfwin_upper_bound_climatology=0,
                n_processes=1,
                **kwargs):
    """
    Adjusts biases grid cell by grid cell.

    Parameters
    ----------
    obs_hist : iris cube
        Cube of observed climate data representing the historical or training
        time period.
    sim_hist : iris cube
        Cube of simulated climate data representing the historical or training
        time period.
    sim_fut : iris cube
        Cube of simulated climate data representing the future or application
        time period.
    realize_cubes : boolean, optional
        Realize data of obs_hist, sim_hist, and sim_fut before beginning the
        bias adjustment grid cell by grid cell.
    anonymous_dimension_name : str, optional
        Used to name the first anonymous dimension of obs_hist, sim_hist, and
        sim_fut.
    halfwin_upper_bound_climatology : int, optional
        Determines the length of running windows used in the calculations of
        climatologies of upper bounds that is used to rescale all values of
        obs_hist, sim_hist, and sim_fut to values <= 1 before bias adjustment.
        The window length is set to halfwin_upper_bound_climatology * 2 + 1
        time steps. If halfwin_upper_bound_climatology == 0 then no rescaling
        is done.
    n_processes : int, optional
        Number of processes used for parallel processing.

    Returns
    -------
    sim_fut_ba : iris cube
        Result of bias adjustment.

    Other Parameters
    ----------------
    **kwargs : Passed on to adjust_bias_one_location.

    """
    # put iris cubes into dictionary
    cubes = {'obs_hist': obs_hist, 'sim_hist': sim_hist, 'sim_fut': sim_fut}

    space_shape = None
    for key, cube in cubes.items():
        # get cube shape beyond time axis
        if space_shape is None:
            space_shape = cube.shape[1:]
        else:
            assert space_shape == cube.shape[1:], 'cube shapes not compatible'
        # load iris cube data into memory
        if realize_cubes:
            d = cube.data
        # make sure the proleptic gregorian calendar is used in all input files
        uf.assert_calendar(cube, 'proleptic_gregorian')
        # make sure that time is the leading coordinate
        uf.assert_coord_axis(cube, 'time', 0)
        # name the first anonymous dimension
        uf.name_first_anonymous_dimension(cube, anonymous_dimension_name)
        # prepare scaling by upper bound climatology
        if halfwin_upper_bound_climatology:
            icc.add_day_of_year(cube, 'time')
        # prepare bias adjustment calendar month by calendar month
        icc.add_month_number(cube, 'time')
        # prepare detrending and cube concatenation
        icc.add_year(cube, 'time')

    # adjust every location individually using multiprocessing
    print('adjusting at location ...')
    abol = partial(
        adjust_bias_one_location,
        halfwin_upper_bound_climatology=halfwin_upper_bound_climatology,
        **kwargs)
    pool = mp.Pool(n_processes, maxtasksperchild=1000)
    time_series_adjusted = pool.imap(
        abol,
        zip(obs_hist.slices('time'), sim_hist.slices('time'),
            sim_fut.slices('time')))
    pool.close()

    # replace time series in sim_fut by the adjusted time series
    sim_fut_ba = sim_fut
    d = sim_fut_ba.data
    for i_location, tsa in zip(np.ndindex(space_shape), time_series_adjusted):
        d[(slice(None, None), ) + i_location] = tsa
        print(i_location)

    # remove auxiliary coordinates
    sim_fut_ba.remove_coord('year')
    sim_fut_ba.remove_coord('month_number')
    if halfwin_upper_bound_climatology:
        sim_fut_ba.remove_coord('day_of_year')

    return sim_fut_ba
Example #23
def make_trend_files_for_models(model, CMIP5_ref):
    # read the merged monthly SST for 1950-2100 (datafile)
    # extract data between 2012_2016 (sst_cube)
    #   aggregate this by year (sst_cube_year)
    #   and aggregate by month (monthly_mean)
    #   subtract this monthly_mean from each year (yearly_monthly_trend) - should be called delta
    #   save as allyrs_sst.nc
    # read the land sea mask
    # mask the yearly_monthly_trend file with this mask
    #   save as _allyrs_masked_sst.nc
    #

    global_temp_change = {}
    run = CMIP5_ref[model]
    print(run)
    runid = run + '/' + model
    print(runid)
    datafile = os.path.join(savedir, model + '_monthly_1950_2120_tos.nc')
    print(datafile)
    if not os.path.exists(savedir):
        os.makedirs(savedir)

    file_trend = os.path.join(savedir,
                              model + '_month_annual_1950_2120_delta.nc')
    if not os.path.exists(file_trend[:-3] + '_allyrs_masked_sst.nc'):

        # Read SST data and calculate annual means
        # the callback method only works well if there are full years of
        # 12 months of data
        print('start analysis')
        cube_tmp = iris.load(datafile,
                             variable1['tos'],
                             callback=cmip5_callback)
        if len(cube_tmp) == 0:
            cube_tmp = iris.load(datafile,
                                 variable2['tos'],
                                 callback=cmip5_callback)
        for c in cube_tmp:
            print(c.coords('time'))
            print()
        print(cube_tmp)

        sst_cube_full = cube_tmp.concatenate_cube()
        icc.add_year(sst_cube_full, 'time', name='year')
        icc.add_month_number(sst_cube_full, 'time', name='month')

        # convert to SST if surface temperature
        #if sst_cube_full.data[0,...].max() >= 100.: sst_cube_full -= 273.15

        # now the surface temperature over sea-ice is below -1.8, so reset it to -1.8
        #sst_cube_full.data[sst_cube_full.data < -1.8] = -1.8
        print('sst_cube_full ', sst_cube_full)

        # extract all years 10 years either side of 2015 (where we want delta to be about zero)
        year_centre = 2015
        YEARS_between = iris.Constraint(
            coord_values={
                'year': lambda l: year_centre - 10 <= l <= year_centre + 10
            })

        sst_cube = sst_cube_full.extract(YEARS_between)

        # now decompose this cube into its components: spatial time-mean, spatial linear trend,
        # mean seasonal cycle centred around year_centre
        monthly_mean = sst_cube.aggregated_by('month', iris.analysis.MEAN)
        print(monthly_mean)

        # remove mean seasonal cycle from all months
        yearly_monthly_trend = sst_cube_full.copy()
        for m in range(0, 12):
            inds = np.where(sst_cube_full.coord('month').points == m + 1)[0]
            for i in inds:
                yearly_monthly_trend.data[i] -= monthly_mean.data[m]
        iris.save(yearly_monthly_trend,
                  file_trend[:-3] + '_allyrs_sst.nc',
                  fill_value=1.0e20)

        # add the model mask
        mask_path = form_BADC_path_mask1(runid)
        print(mask_path)
        if not os.path.exists(mask_path):
            print('mask path does not exist')
            mask_path = form_BADC_path_mask2(runid)
        mask = iris.load_cube(os.path.join(mask_path, '*.nc'),
                              'sea_area_fraction')
        if 'IPSL' in run:
            # need to remove wrap rows from NEMO
            remove_nemo_wrap(os.path.join(mask_path, '*.nc'),
                             os.path.join(savedir, 'ipsl_mask.nc'))
            mask = iris.load_cube(os.path.join(savedir, 'ipsl_mask.nc'))
            land = np.where(mask.data.mask)
            mask.data[...] = 1.0
            mask.data[land[0], land[1]] = 0.0
        print(mask)
        land = np.where(mask.data == 0.0)
        mask_land = yearly_monthly_trend[0].copy()
        add_mask(mask_land)
        add_mask(yearly_monthly_trend)
        print(mask_land)
        mask_land.data[...] = 1.0
        mask_land.data[land] = 0.0
        #mask_land.data[land] = np.ma.masked
        mask_land.data.mask[land] = True
        yearly_monthly_trend *= mask_land
        yearly_monthly_trend.data.mask[:, ...] = mask_land.data.mask[...]

        cube_filled = fill_in_masked_data(yearly_monthly_trend)
        iris.save(cube_filled,
                  file_trend[:-3] + '_allyrs_masked_sst.nc',
                  fill_value=1.0e20)
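
The seasonal-cycle removal in the middle of the function is, at its core, "subtract the month-m climatology from every month-m field"; a compact NumPy sketch of that step (synthetic data, hypothetical names):

import numpy as np

rng = np.random.default_rng(0)
monthly_series = rng.random((120, 4, 4))  # 10 years of monthly fields
months = np.tile(np.arange(1, 13), 10)    # month number for each time step

clim = np.stack([monthly_series[months == m].mean(axis=0)
                 for m in range(1, 13)])  # (12, lat, lon) mean seasonal cycle
delta = monthly_series - clim[months - 1]  # anomaly w.r.t. the cycle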
Example #24
def time_filter_by_month(infile):
    '''
    Filter each month individually with a simple rolling-window Lanczos filter,
    to reduce the magnitude of the interannual variability (which is provided
    by HadISST2.2 instead).
    Note this removes the first and last n years (for a 1/n filter).
    '''
    c = iris.load_cube(infile)
    icc.add_month_number(c, 'time', name='month')
    icc.add_year(c, 'time', name='year')

    cube_list_filtered = iris.cube.CubeList()

    # window length in years
    window = 11
    # construct a low-pass filter with a 1/7 cutoff over the 11-year window
    wgts5 = low_pass_weights(window, 1. / 7.)
    fmonths = {}
    for mon in range(1, 13):
        fmonths[mon] = []
    for mon in range(1, 13):
        fout_month = infile[:-3] + '_filtered_' + str(mon).zfill(
            2) + '_7_11.nc'
        fmonths[mon].append(fout_month)
        if not os.path.exists(fout_month):
            print('filter month ', mon)
            con_mon = iris.Constraint(
                coord_values={'month': lambda l: l == mon})
            c_mon = c.extract(con_mon)
            c5 = c_mon.rolling_window('time',
                                      iris.analysis.SUM,
                                      len(wgts5),
                                      weights=wgts5)
            cube_list_filtered.append(c5)
            iris.save(c5,
                      fout_month,
                      unlimited_dimensions=['time'],
                      fill_value=1.0e20)
        else:
            c5 = iris.load_cube(fout_month)

        year_start = int(c5.coord('year').points[0])
        year_end = int(c5.coord('year').points[-1])
    print('years ', year_start, year_end)
    years = range(year_start, year_end + 1)

    fout_all_years_filtered = infile[:-3] + '_filtered_' + str(
        years[0]) + '-' + str(years[-1]) + '_7_11.nc'

    c_all_yr_l = iris.cube.CubeList()
    fnames = []
    for year in years:
        con_yr = iris.Constraint(coord_values={'year': lambda l: l == year})
        c_yr_l = iris.cube.CubeList()
        fyear = []
        for im in range(1, 13):
            c = iris.load_cube(fmonths[im])
            c.coord('year').bounds = None
            c_yr = c.extract(con_yr)
            #c_yr.remove_coord('month')
            c_yr_l.append(c_yr)
        c_this_yr = c_yr_l.merge_cube()
        c_this_yr.remove_coord('month')
        c_this_yr.remove_coord('year')
        c_yr_l.append(c_this_yr)
        fout = infile[:-3] + '_filtered_' + str(year) + '_7_11.nc'
        fnames.append(fout)
        iris.save(c_this_yr,
                  fout,
                  unlimited_dimensions=['time'],
                  fill_value=1.0e20)
    cmd = 'ncrcat ' + ' '.join(fnames) + ' ' + fout_all_years_filtered
    subprocess.call(cmd, shell=True)
    #    c_all_yr = c_yr_l.concatenate_cube()
    #    iris.save(c_all_yr, fout_all_years_filtered, unlimited_dimensions = ['time'], fill_value = 1.0e20)

    return fout_all_years_filtered
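
low_pass_weights() is referenced above but not shown in this listing; a standard Lanczos-window construction (following the well-known Iris gallery filtering example; treat this as an assumption about the helper, not the author's exact code):

import numpy as np

def low_pass_weights(window, cutoff):
    """Calculate weights for a low-pass Lanczos filter.

    window: length of the filter window;
    cutoff: cutoff frequency in inverse time steps.
    """
    order = ((window - 1) // 2) + 1
    nwts = 2 * order + 1
    weights = np.zeros([nwts])
    half = nwts // 2
    weights[half] = 2 * cutoff
    k = np.arange(1., half)
    sigma = np.sin(np.pi * k / half) * half / (np.pi * k)
    firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k)
    weights[half - 1:0:-1] = firstfactor * sigma
    weights[half + 1:-1] = firstfactor * sigma
    return weights[1:-1]

wgts5 = low_pass_weights(11, 1. / 7.)
print(len(wgts5))  # 11 weights, matching the rolling_window call above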
Example #25
def mainfunc(run):
    """Main function in stratospheric assessment code."""
    metrics = dict()

    # Set up to only run for 10 year period (eventually)
    year_cons = dict(from_dt=run['from_monthly'], to_dt=run['to_monthly'])

    # Read zonal mean U (lbproc=192) and add month number to metadata
    ucube = load_run_ss(
        run, 'monthly', 'eastward_wind', lbproc=192, **year_cons)
    # Although input data is a zonal mean, iris does not recognise it as such
    # and just reads it as having a single longitudinal coordinate. This
    # removes longitude as a dimension coordinate and makes it a scalar
    # coordinate in line with how a zonal mean would be described.
    # Is there a better way of doing this?
    ucube_cds = [cdt.standard_name for cdt in ucube.coords()]
    if 'longitude' in ucube_cds:
        ucube = ucube.collapsed('longitude', iris.analysis.MEAN)
    if not ucube.coord('latitude').has_bounds():
        ucube.coord('latitude').guess_bounds()
    # check for month_number
    aux_coord_names = [aux_coord.var_name for aux_coord in ucube.aux_coords]
    if 'month_number' not in aux_coord_names:
        icc.add_month_number(ucube, 'time', name='month_number')

    # Read zonal mean T (lbproc=192) and add clim month and season to metadata
    tcube = load_run_ss(
        run, 'monthly', 'air_temperature', lbproc=192,
        **year_cons)  # m01s30i204
    # Although input data is a zonal mean, iris does not recognise it as such
    # and just reads it as having a single longitudinal coordinate. This
    # removes longitude as a dimension coordinate and makes it a scalar
    # coordinate in line with how a zonal mean would be described.
    # Is there a better way of doing this?
    tcube_cds = [cdt.standard_name for cdt in tcube.coords()]
    if 'longitude' in tcube_cds:
        tcube = tcube.collapsed('longitude', iris.analysis.MEAN)
    if not tcube.coord('latitude').has_bounds():
        tcube.coord('latitude').guess_bounds()
    aux_coord_names = [aux_coord.var_name for aux_coord in tcube.aux_coords]
    if 'month' not in aux_coord_names:
        icc.add_month(tcube, 'time', name='month')
    if 'clim_season' not in aux_coord_names:
        icc.add_season(tcube, 'time', name='clim_season')

    # Read zonal mean q (lbproc=192) and add clim month and season to metadata
    qcube = load_run_ss(
        run, 'monthly', 'specific_humidity', lbproc=192,
        **year_cons)  # m01s30i205
    # Although input data is a zonal mean, iris does not recognise it as such
    # and just reads it as having a single longitudinal coordinate. This
    # removes longitude as a dimension coordinate and makes it a scalar
    # coordinate in line with how a zonal mean would be described.
    # Is there a better way of doing this?
    qcube_cds = [cdt.standard_name for cdt in qcube.coords()]
    if 'longitude' in qcube_cds:
        qcube = qcube.collapsed('longitude', iris.analysis.MEAN)
    if not qcube.coord('latitude').has_bounds():
        qcube.coord('latitude').guess_bounds()
    aux_coord_names = [aux_coord.var_name for aux_coord in qcube.aux_coords]
    if 'month' not in aux_coord_names:
        icc.add_month(qcube, 'time', name='month')
    if 'clim_season' not in aux_coord_names:
        icc.add_season(qcube, 'time', name='clim_season')

    # Calculate PNJ metrics
    pnj_metrics(run, ucube, metrics)

    # Calculate QBO metrics
    qbo_metrics(run, ucube, metrics)

    # Calculate polar temperature metrics
    tpole_metrics(run, tcube, metrics)

    # Calculate equatorial temperature metrics
    teq_metrics(run, tcube, metrics)

    # Calculate tropical temperature metrics
    t_metrics(run, tcube, metrics)

    # Calculate tropical water vapour metric
    q_metrics(run, qcube, metrics)

    # Summary metric
    summary_metric(metrics)

    # Make sure all metrics are of type float
    # Need at the moment to populate metrics files
    for key, value in metrics.items():
        metrics[key] = float(value)

    return metrics
def calc_sice_func_sst_relationship(dir_in,
                                    filename_sice_func_sst_month_pickle, year):

    fnames_sst = sorted(
        glob.glob(os.path.join(dir_in, 'tos*' + year + '0101-*.nc')))
    fnames_ice = sorted(
        glob.glob(os.path.join(dir_in, 'siconc*' + year + '0101-*.nc')))

    print('choose ', fnames_sst)

    NYEARS = len(fnames_sst)
    print('NYEARS ', NYEARS)

    cube_mon = iris.cube.CubeList()

    mean_freq_sice = np.ma.zeros((BIN['SICE']['NBINS'], 2, 12))

    for f_sst, f_ice in zip(fnames_sst, fnames_ice):
        year = f_sst.split('_')[-1][0:4]
        year1 = f_ice.split('_')[-1][0:4]
        print('process ', f_sst, year)
        if year != year1:
            raise Exception('Paired SST and sea-ice years are not the same ' +
                            year + ' ' + year1)
        sst = iris.load_cube(f_sst)
        ice = iris.load_cube(f_ice)
        icc.add_month_number(sst, 'time', name='month')
        icc.add_month_number(ice, 'time', name='month')

        for im in range(0, 12):
            print('processing month ', im)
            month = im + 1
            month_no = iris.Constraint(
                coord_values={'month': lambda l: l == month})
            sst_mon = sst.extract(month_no)
            ice_mon = ice.extract(month_no)

            mean_freq_sst_month_year, mean_freq_sice_month_year = sic_functions.sst_ice_relationship(
                sst_mon, ice_mon, do_freq_sst=True, do_freq_sic=True)
            mean_freq_sice[:, :, im] = mean_freq_sice_month_year

    # want to make the relationship monotonic
    # temperature decreases as siconc bin increases
    for im in range(0, 12):
        reset_nh = False
        reset_sh = False
        for ib, bin in enumerate(BIN['SICE']['XBINS'][:-1]):
            if ib > 0 and ib < BIN['SICE']['NBINS'] - 2:
                for ir in range(0, 2):
                    #if (mean_freq_sice[ib, ir, im] > np.amax(mean_freq_sice[ib:ib+5, ir, im])):
                    if (mean_freq_sice[ib, ir, im] >
                        (mean_freq_sice[ib - 1, ir, im])):
                        mean_freq_sice[ib, ir, im] = mean_freq_sice[ib - 1, ir,
                                                                    im]

    im = 0
    ir = 0
    for ib, bin in enumerate(BIN['SICE']['XBINS'][:-1]):
        print('nh, im, bin, sst ', im, bin, mean_freq_sice[ib, ir, im])

    fh = open(filename_sice_func_sst_month_pickle, 'wb')
    pickle.dump(mean_freq_sice, fh)
    fh.close()
def generate_future_siconc_from_sst(mean_freq_days, year, months, ice_mask,
                                    ice_mask_min, ice_ref):
    '''
    Use the maximum ice extent, the SST, and the PDF relationship between
    SST and sea ice to generate a future sea-ice concentration.
    '''
    #dir_in = '/group_workspaces/jasmin2/primavera1/WP6/forcing/HadISST2_submit/v1.2/'
    if year >= 2016:
        #future_sst_file = os.path.join(DATADIR, 'future', 'full_sst_1950_2100_025_daily_fixed.nc')
        future_sst_file = os.path.join(
            savedir, 'sst', 'future_sst_' + str(year) + '_025_daily_v1.nc')
    else:
        if len(months) == 12:
            future_sst_file = os.path.join(
                DATADIR, 'hadisst2_tos_daily_' + str(year) +
                '_fixed_day_v1_monthly_under_ice.nc')
        elif len(months) == 1:
            future_sst_file = os.path.join(
                DATADIR, 'hadisst2_tos_daily_' + str(year) + '_fixed_' +
                str(months[0] + 1).zfill(2) + '.nc')
        else:
            raise Exception('Must be either 12 months or 1 ' +
                            str(len(months)))

    future_siconc_file = os.path.join(savedir_ice, 'siconc',
                                      'future_siconc_{}_025_daily_v1.nc')
    print('read sst ', future_sst_file)
    full_sst = iris.load_cube(future_sst_file)

    try:
        #icc.add_month_number(full_siconc, 'time', name = 'month')
        icc.add_month_number(full_sst, 'time', name='month')
    except ValueError:
        # the month coordinate already exists
        pass

    try:
        #icc.add_year(full_siconc, 'time', name = 'year')
        icc.add_year(full_sst, 'time', name='year')
    except ValueError:
        pass

    #print 'full cube ',full_siconc.coord('year')
    new_sice_year = iris.cube.CubeList()

    print('YEAR ', year)
    fout_year = future_siconc_file.format(str(year))
    new_sice_month = iris.cube.CubeList()
    year_con = iris.Constraint(coord_values={'year': lambda l: l == int(year)})
    sst_this_year = full_sst.extract(year_con)
    full_siconc = sst_this_year.copy()
    print('cube year ', sst_this_year)

    lat_shape = sst_this_year.shape[1]
    for im in months:
        if im > 0 and im < 11:
            mm1 = im - 1
            mp1 = im + 1
        elif im == 0:
            mm1 = 11
            mp1 = im + 1
        elif im == 11:
            mm1 = im - 1
            mp1 = 0

        month = im + 1
        month_con = iris.Constraint(
            coord_values={'month': lambda l: l == int(month)})
        sst_this_month = sst_this_year.extract(month_con)

        if im == 0:
            month_con_m1 = iris.Constraint(
                coord_values={'month': lambda l: l == 1})
        else:
            month_con_m1 = iris.Constraint(
                coord_values={'month': lambda l: l == int(month - 1)})
        sst_last_month = sst_this_year.extract(month_con_m1)

        if im == 11:
            month_con_p1 = iris.Constraint(
                coord_values={'month': lambda l: l == 11})
        else:
            month_con_p1 = iris.Constraint(
                coord_values={'month': lambda l: l == int(month + 1)})
        sst_next_month = sst_this_year.extract(month_con_p1)

        siconc_template = full_siconc.extract(month_con)
        coord_names = [coord.name() for coord in siconc_template.coords()]
        print(coord_names)
        if 'day_of_year' not in coord_names:
            icc.add_day_of_year(siconc_template, 'time', name='day_of_year')
        siconc_template.data[...] = 0.0

        ndays = sst_this_month.shape[0]
        day_of_year = siconc_template.coord('day_of_year').points

        for day in range(ndays):
            day_number = day_of_year[day] - 1
            print('process ', day, month, year, day_number)
            for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
                sst_range = np.where((bin < sst_this_month.data[day, :, :]) & (
                    bin + 1 >= sst_this_month.data[day, :, :])
                                     & (ice_mask.data[im, :, :] == 1))
                nhemi = np.where(sst_range[0] > lat_shape / 2)
                shemi = np.where(sst_range[0] < lat_shape / 2)
                siconc_template.data[
                    day, sst_range[0][nhemi],
                    sst_range[1][nhemi]] = mean_freq_days[year][ib, 0,
                                                                day_number]
                siconc_template.data[
                    day, sst_range[0][shemi],
                    sst_range[1][shemi]] = mean_freq_days[year][ib, 1,
                                                                day_number]
            print('completed ', day, month, year)

        siconc_template.data.mask[:, sst_this_month[0].data.mask] = True
        siconc_template.data.mask[:, ice_ref.data.mask] = True
        new_sice_month.append(siconc_template)
    new_sice_cube = new_sice_month.concatenate_cube()
    add_metadata(new_sice_cube)
    print('write to ', fout_year)
    iris.save(new_sice_cube,
              fout_year,
              unlimited_dimensions=['time'],
              fill_value=1.0e20)

    return fout_year
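
# The inner loop above is a per-day bin look-up: every ocean point whose SST
# falls in a given bin gets that bin's climatological ice concentration for
# its hemisphere. A minimal NumPy sketch of the same idea (bin edges and
# concentrations here are illustrative, not the values used above):
import numpy as np

toy_xbins = np.arange(-2.0, 4.0)                # bin edges, 1 degC wide
toy_conc = np.array([100., 80., 40., 10., 0.])  # per-bin ice concentration
toy_sst = np.array([[-1.5, 0.2],
                    [2.7, 5.0]])                # toy SST field (degC)
toy_sic = np.zeros_like(toy_sst)
for ib, edge in enumerate(toy_xbins[:-1]):
    inbin = np.where((edge < toy_sst) & (toy_sst <= edge + 1))
    toy_sic[inbin] = toy_conc[ib]
# toy_sic -> [[100., 40.], [0., 0.]]; 5.0 degC lies outside every bin
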
def calc_sst_func_sice_relationship(dir_in,
                                    filename_sst_func_siconc_month_pickle,
                                    year):

    # The original listing does not show how the file lists were built; a
    # plausible reconstruction (assumed glob patterns; needs "import glob"):
    fnames_sst = sorted(glob.glob(os.path.join(dir_in, '*tos*.nc')))
    fnames_ice = sorted(glob.glob(os.path.join(dir_in, '*sic*.nc')))

    print('choose ', fnames_sst)

    NYEARS = len(fnames_sst)
    print('NYEARS ', NYEARS)

    cube_mon = iris.cube.CubeList()

    mean_freq_sst = np.ma.zeros((BIN['SST']['NBINS'], 2, 12))

    for f_sst, f_ice in zip(fnames_sst, fnames_ice):
        print('process ', f_sst, year)
        sst = iris.load_cube(f_sst)
        ice = iris.load_cube(f_ice)
        sst_coords = [co.name() for co in sst.aux_coords]
        ice_coords = [co.name() for co in ice.aux_coords]
        if 'month' not in sst_coords:
            icc.add_month_number(sst, 'time', name='month')
        if 'month' not in ice_coords:
            icc.add_month_number(ice, 'time', name='month')

        for im in range(0, 12):
            print('processing month ', im)
            month = im + 1
            month_no = iris.Constraint(
                coord_values={'month': lambda l: l == month})
            sst_mon = sst.extract(month_no)
            ice_mon = ice.extract(month_no)

            mean_freq_sst_month_year, mean_freq_sice_month_year = sic_functions.sst_ice_relationship(
                sst_mon, ice_mon, do_freq_sst=True, do_freq_sic=True)
            mean_freq_sst[:, :, im] = mean_freq_sst_month_year

            ir = 1  # hemisphere index 1 is the southern hemisphere
            for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
                print('sh, im, ir, bin, siconc ', im, ir, bin,
                      mean_freq_sst[ib, ir, im])

    # want to make the relationship monotonic: siconc should not increase
    # with warmer SST
    for im in range(0, 12):
        reset_nh = False
        reset_sh = False
        for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
            if 0 < ib < BIN['SST']['NBINS'] - 2:
                for ir in range(0, 2):
                    # if a nearby warmer bin is larger, or this bin exceeds
                    # the previous (colder) bin, carry the previous bin's
                    # value forward
                    if (mean_freq_sst[ib, ir, im] < np.amax(
                            mean_freq_sst[ib:ib + 5, ir, im])):
                        mean_freq_sst[ib, ir, im] = mean_freq_sst[ib - 1,
                                                                  ir, im]
                    elif (mean_freq_sst[ib, ir, im] >
                          mean_freq_sst[ib - 1, ir, im]):
                        mean_freq_sst[ib, ir, im] = mean_freq_sst[ib - 1,
                                                                  ir, im]
            # once the siconc for a given sst hits zero, make sure all the
            # warmer bins are zero too
            if mean_freq_sst[ib, 0, im] == 0:
                reset_nh = True
            if reset_nh:
                mean_freq_sst[ib:, 0, im] = 0.0
            if mean_freq_sst[ib, 1, im] == 0:
                reset_sh = True
            if reset_sh:
                mean_freq_sst[ib:, 1, im] = 0.0

    # zero out NH concentrations below a threshold (20% here), to remove the
    # wide ice edge in summer
    min_ice_conc = 20.0
    for im in range(0, 12):
        for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
            if 0 < ib < BIN['SST']['NBINS'] - 2:
                for ir in range(0, 1):  # NH only
                    if mean_freq_sst[ib, ir, im] < min_ice_conc:
                        mean_freq_sst[ib + 4:, ir, im] = 0.0

    im = 0
    ir = 1  # southern hemisphere
    for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
        print('sh, im, bin, siconc ', im, bin, mean_freq_sst[ib, ir, im])

    with open(filename_sst_func_siconc_month_pickle, 'wb') as fh:
        pickle.dump(mean_freq_sst, fh)
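
# Reading the pickled relationship back (a minimal sketch; assumes
# filename_sst_func_siconc_month_pickle is in scope): the array has shape
# (NBINS, hemisphere, month), with hemisphere index 0 = NH and 1 = SH.
import pickle

with open(filename_sst_func_siconc_month_pickle, 'rb') as fh:
    mean_freq_sst = pickle.load(fh)
print(mean_freq_sst.shape)
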
Example No. 29
def calc_sst_func_sice_relationship(dir_in, model,
                                    filename_sst_func_siconc_month_pickle,
                                    years):

    ftype = 'tos'
    sst_fixed = os.path.join(DATADIR, model + '_monthly_1950_2100_tos.nc')
    fnames_sst = os.path.join(dir_in, model + '_monthly_1950_2100_tos.nc')
    fnames_ice = os.path.join(dir_in, model + '_monthly_1950_2100_sic.nc')

    print('choose ', fnames_sst)

    cube_mon = iris.cube.CubeList()

    mean_freq_sst = np.ma.zeros((BIN['SST']['NBINS'], 2, 12))

    print('process ', fnames_sst, fnames_ice)
    sst1 = iris.load_cube(fnames_sst)
    sst1.convert_units('degC')
    ice1 = iris.load_cube(fnames_ice)
    sst_coords = [co.name() for co in sst1.aux_coords]
    ice_coords = [co.name() for co in ice1.aux_coords]
    if 'year' not in sst_coords:
        icc.add_year(sst1, 'time', name='year')
    if 'year' not in ice_coords:
        icc.add_year(ice1, 'time', name='year')

    years_con = iris.Constraint(
        coord_values={'year': lambda l: years[0] <= l <= years[-1]})
    sst = sst1.extract(years_con)
    ice = ice1.extract(years_con)

    if 'month' not in sst_coords:
        icc.add_month_number(sst, 'time', name='month')
    if 'month' not in ice_coords:
        icc.add_month_number(ice, 'time', name='month')

    for im in range(0, 12):
        print('processing month ', im)
        month = im + 1
        month_no = iris.Constraint(
            coord_values={'month': lambda l: l == month})
        sst_mon = sst.extract(month_no)
        ice_mon = ice.extract(month_no)

        mean_freq_sst_month_year = sic_functions.sst_ice_relationship(
            sst_mon, ice_mon, do_freq_sst=True)
        mean_freq_sst[:, :, im] = mean_freq_sst_month_year

        # force the coldest SST bin to be fully ice covered
        mean_freq_sst[0, :, im] = 100.0

        ir = 0  # hemisphere index 0 is the northern hemisphere
        for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
            print('nh, im, ir, bin, siconc ', im, ir, bin,
                  mean_freq_sst[ib, ir, im])

    do_mono = True

    if do_mono:
        # want to make the relationship monotonic: raise each interior bin
        # to the maximum over all warmer bins
        for im in range(0, 12):
            for ir in range(0, 2):
                for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
                    if 0 < ib < BIN['SST']['NBINS'] - 2:
                        current_val = mean_freq_sst[ib, ir, im]
                        max_rest = np.amax(mean_freq_sst[ib + 1:, ir, im])
                        if max_rest > current_val:
                            mean_freq_sst[ib, ir, im] = max_rest
            # once the siconc for a given sst hits zero, make sure all the
            # warmer bins are zero too
            reset_nh = False
            reset_sh = False
            for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
                if mean_freq_sst[ib, 0, im] == 0:
                    reset_nh = True
                if reset_nh:
                    mean_freq_sst[ib:, 0, im] = 0.0
                if mean_freq_sst[ib, 1, im] == 0:
                    reset_sh = True
                if reset_sh:
                    mean_freq_sst[ib:, 1, im] = 0.0

    # optionally zero out NH concentrations below a threshold (20% here) to
    # remove the wide ice edge in summer; disabled in this version
    # min_ice_conc = 20.0
    # for im in range(0, 12):
    #     for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
    #         if 0 < ib < BIN['SST']['NBINS'] - 2:
    #             for ir in range(0, 1):  # NH only
    #                 if mean_freq_sst[ib, ir, im] < min_ice_conc:
    #                     mean_freq_sst[ib + 4:, ir, im] = 0.0

    im = 4
    ir = 0  # northern hemisphere
    for ib, bin in enumerate(BIN['SST']['XBINS'][:-1]):
        print('after mono, nh, im, bin, siconc ', im, bin,
              mean_freq_sst[ib, ir, im])

    with open(filename_sst_func_siconc_month_pickle, 'wb') as fh:
        pickle.dump(mean_freq_sst, fh)
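
# Toy demonstration of the monotonic pass above: each interior bin is raised
# to the maximum over all warmer bins, so concentration never increases with
# SST (values illustrative only).
import numpy as np

freq = np.array([100., 95., 60., 70., 20., 0., 5.])
for ib in range(1, len(freq) - 1):
    max_rest = np.amax(freq[ib + 1:])
    if max_rest > freq[ib]:
        freq[ib] = max_rest
# freq -> [100., 95., 70., 70., 20., 5., 5.]
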
Example No. 30
        # half an hour expressed in the units of the time coordinate
        if unit.name[:4] == 'hour':
            thirty_mins = 0.5
        elif unit.name[:6] == 'second':
            thirty_mins = 60 * 30
        elif unit.name[:3] == 'day':
            thirty_mins = 0.5 / 24
        else:
            raise ValueError(f"Don't know how to deal with: '{unit.name}'")

        print('Subtracting half an hour from time coord first')
        new_points = cube.coord('time').points - thirty_mins
        cube.coord('time').points = new_points

    iccat.add_year(cube, 'time')
    if freq == 'mon':
        iccat.add_month_number(cube, 'time')
        agg_by = ['month_number', 'year']
        remove_later = 'month_number'
    elif freq == 'day':
        iccat.add_day_of_year(cube, 'time')
        agg_by = ['day_of_year', 'year']
        remove_later = 'day_of_year'
    else:
        raise ValueError('Unrecognised frequency')

    # compute averages
    print(f'Computing {freq} average')
    means = cube.aggregated_by(agg_by, iris.analysis.MEAN)

    # remove no longer needed aux coords
    means.remove_coord(remove_later)
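
# Minimal usage sketch of the aggregated_by pattern above, assuming a daily
# input cube (the file name here is hypothetical):
import iris
import iris.analysis
import iris.coord_categorisation as iccat

cube = iris.load_cube('tas_daily.nc')  # hypothetical daily file
iccat.add_year(cube, 'time')
iccat.add_month_number(cube, 'time')
monthly_means = cube.aggregated_by(['month_number', 'year'],
                                   iris.analysis.MEAN)
monthly_means.remove_coord('month_number')
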
def ice_maximum_extent(years):
    '''
    Generate 0/1 masks over the time period: one marking any point whose ice
    concentration ever exceeds 1% (maximum extent), and one marking points
    that always keep more than 80% cover (minimum extent)
    Use these to mask future ice (make sure that you don't have ice where it
    never was before)
    '''
    dir_in = '/group_workspaces/jasmin2/primavera1/WP6/forcing/HadISST2_submit/v1.2/'
    year = 2015
    ice_hist_file = os.path.join(
        dir_in,
        'siconc_input4MIPs_SSTsAndSeaIce_HighResMIP_MOHC-HadISST-2-2-0-0-0_gn_'
        + str(year) + '0101-' + str(year) + '1231.nc')
    ice_ref = iris.load_cube(ice_hist_file)[0]

    ice_extent_filename = dir_in + 'ice_max_month_' + str(
        years[0]) + '-' + str(years[-1]) + '.nc'
    ice_extent_min_filename = dir_in + 'ice_min_month_' + str(
        years[0]) + '-' + str(years[-1]) + '.nc'

    if os.path.exists(ice_extent_filename) and os.path.exists(
            ice_extent_min_filename):
        ice_max_month = iris.load_cube(ice_extent_filename)
        ice_min_month = iris.load_cube(ice_extent_min_filename)
        return ice_max_month, ice_min_month, ice_ref

    ice_hist_l = iris.cube.CubeList()
    for year in years:
        ice_hist_file = os.path.join(
            dir_in,
            'siconc_input4MIPs_SSTsAndSeaIce_HighResMIP_MOHC-HadISST-2-2-0-0-0_gn_'
            + str(year) + '0101-' + str(year) + '1231.nc')
        ice = iris.load_cube(ice_hist_file, callback=history_period_callback)
        try:
            icc.add_month_number(ice, 'time', name='month')
        except ValueError:
            # 'month' coordinate already present
            pass
        ice_hist_l.append(ice)
    ice_hist = ice_hist_l.concatenate_cube()

    cube_mask = ice_hist[:12].copy()
    cube_mask_min = ice_hist[:12].copy()
    for im in range(0, 12):
        mon = im + 1
        print('process month ', mon)
        month_constraint = iris.Constraint(month=mon)

        # calculate maximum ice conc over time period
        ice = ice_hist.extract(month_constraint)
        ice_max = ice.collapsed('time', iris.analysis.MAX)
        ice_max.remove_coord('month')
        ice_max.coord('time').bounds = None

        # calculate minimum ice conc over time period
        ice_min = ice.collapsed('time', iris.analysis.MIN)
        ice_min.remove_coord('month')
        ice_min.coord('time').bounds = None

        # calculate mask of 1 if any point ever has some ice
        cube_mask.data[im, :, :] = 0.0
        some_ice = np.where((ice_max.data > 1.0) & (ice_max.data <= 100.0))
        cube_mask.data[im, some_ice[0], some_ice[1]] = 1.0
        print('cube_mask ', cube_mask[im].coord('time'))

        # calculate mask of 1 if any point always has some ice
        cube_mask_min.data[im, :, :] = 0.0
        some_ice = np.where((ice_min.data > 80.0) & (ice_min.data <= 100.0))
        cube_mask_min.data[im, some_ice[0], some_ice[1]] = 1.0
        print('cube_mask_min ', cube_mask_min[im].coord('time'))

    iris.save(cube_mask, ice_extent_filename)
    iris.save(cube_mask_min, ice_extent_min_filename)

    return cube_mask, cube_mask_min, ice_ref
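
# Hypothetical call: build the maximum/minimum ice-extent masks over a
# chosen historic period (any years covered by the HadISST2 files would do):
ice_max_month, ice_min_month, ice_ref = ice_maximum_extent(range(1990, 2015))
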
def smooth_sst_under_ice(sst_daily, ice_daily, ice_mask, fsst_yrm1, fsst_yrp1):
    '''
    Under ice, the SST can be very discontinuous; this comes from the
    original data
    Use monthly means, interpolated through each month, to create an
    alternative daily dataset of tos under ice
    '''
    sst_monmn = sst_daily.aggregated_by('month', iris.analysis.MEAN)
    cube_var_l = iris.cube.CubeList()

    for m in range(0, 12):
        if m > 0 and m < 11:
            mm1 = m - 1
            mp1 = m + 1
        elif m == 0:
            con_mon = iris.Constraint(
                coord_values={'month': lambda l: l == 12})
            sstm1_yr = iris.load_cube(fsst_yrm1)
            try:
                icc.add_month_number(sstm1_yr, 'time', name='month')
            except ValueError:
                # 'month' coordinate already present
                pass
            sstm1_mon = sstm1_yr.extract(con_mon)
            sstm1_monmn = sstm1_mon.collapsed('time', iris.analysis.MEAN)
            mm1 = 11
            mp1 = m + 1
        elif m == 11:
            mm1 = m - 1
            con_mon = iris.Constraint(coord_values={'month': lambda l: l == 1})
            sstp1_yr = iris.load_cube(fsst_yrp1)
            try:
                icc.add_month_number(sstp1_yr, 'time', name='month')
            except ValueError:
                # 'month' coordinate already present
                pass
            sstp1_mon = sstp1_yr.extract(con_mon)
            sstp1_monmn = sstp1_mon.collapsed('time', iris.analysis.MEAN)
            mp1 = 0

        con_mon = iris.Constraint(coord_values={'month': lambda l: l == m + 1})
        cube_mon = sst_daily.extract(con_mon)  # daily data
        #cube_ice_mon = cube_ice.extract(con_yr & con_mon) # daily data
        cube_var_mon = cube_mon.copy()

        ndays = cube_mon.shape[0]

        # check units
        #print 'units ',monthly_mean.units, cube_mon.units
        #if cube_mon.units != monthly_mean.units:
        #    monthly_mean.units = cube_mon.units

        # the set of masked ice points does not change within the month
        ice_points = np.where(ice_mask.data == 1.0)

        for iday in range(0, ndays):
            weight = float(iday) / float(ndays - 1)
            w1 = (1.0 - weight) * 0.5
            w2 = 0.5
            w3 = weight * 0.5

            if m > 0 and m < 11:
                mon_day = w1 * sst_monmn.data[mm1, :, :] + w2 * sst_monmn.data[
                    m, :, :] + w3 * sst_monmn.data[mp1, :, :]
            elif m == 0:
                mon_day = w1 * sstm1_monmn.data[:, :] + w2 * sst_monmn.data[
                    m, :, :] + w3 * sst_monmn.data[mp1, :, :]
            elif m == 11:
                mon_day = w1 * sst_monmn.data[mm1, :, :] + w2 * sst_monmn.data[
                    m, :, :] + w3 * sstp1_monmn.data[:, :]

            cube_var_mon.data[iday, ice_points[0],
                              ice_points[1]] = mon_day[ice_points[0],
                                                       ice_points[1]]

        cube_var_l.append(cube_var_mon)
    cube_var = cube_var_l.concatenate_cube()

    return cube_var
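
# The weights above blend three monthly means linearly through the month: on
# day 0 the value is 0.5*previous + 0.5*current, on the last day it is
# 0.5*current + 0.5*next. A standalone sketch with made-up values:
ndays = 31
prev_mn, cur_mn, next_mn = 271.0, 272.5, 274.0  # illustrative monthly means
for iday in range(ndays):
    weight = float(iday) / float(ndays - 1)
    w1, w2, w3 = (1.0 - weight) * 0.5, 0.5, weight * 0.5  # w1+w2+w3 == 1
    blended = w1 * prev_mn + w2 * cur_mn + w3 * next_mn
    # iday=0  -> 0.5*271.0 + 0.5*272.5 = 271.75
    # iday=30 -> 0.5*272.5 + 0.5*274.0 = 273.25
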
Example No. 33
def downscale(obs_fine,
              sim_coarse,
              realize_cubes=False,
              anonymous_dimension_name=None,
              n_processes=1,
              n_iterations=20,
              randomization_seed=None,
              **kwargs):
    """
    Applies the modified MBCn algorithm for statistical downscaling calendar
    month by calendar month and coarse grid cell by coarse grid cell.

    Parameters
    ----------
    obs_fine : iris cube
        Cube of observed climate data at fine spatial resolution.
    sim_coarse : iris cube
        Cube of simulated climate data at coarse spatial resolution.
    realize_cubes : boolean, optional
        Realize data of obs_fine and sim_coarse before beginning the statistical
        downscaling coarse grid cell by coarse grid cell.
    anonymous_dimension_name : str, optional
        Used to name the first anonymous dimension of obs_fine and sim_coarse.
    n_processes : int, optional
        Number of processes used for parallel processing.
    n_iterations : int, optional
        Number of iterations used in the modified MBCn algorithm.
    randomization_seed : int, optional
        Used to seed the random number generator before generating random 
        rotation matrices for the modified MBCn algorithm.

    Returns
    -------
    sim_fine : iris cube
        Result of application of the modified MBCn algorithm.

    Other Parameters
    ----------------
    **kwargs : Passed on to downscale_one_location.

    """
    # put iris cubes into dictionary
    cubes = {
        'obs_fine': obs_fine,
        'sim_coarse': sim_coarse,
    }

    space_shapes = {}
    for key, cube in cubes.items():
        # get cube shape beyond time axis
        space_shapes[key] = cube.shape[1:]
        # load iris cube data into memory
        if realize_cubes:
            d = cube.data
        # make sure the proleptic gregorian calendar is used in all input files
        uf.assert_calendar(cube, 'proleptic_gregorian')
        # make sure that time is the leading coordinate
        uf.assert_coord_axis(cube, 'time', 0)
        # name the first anonymous dimension
        uf.name_first_anonymous_dimension(cube, anonymous_dimension_name)
        # prepare statistical downscaling calendar month by calendar month
        icc.add_month_number(cube, 'time')

    # derive downscaling factor from cube shapes beyond time axis
    downscaling_factor = uf.get_downscaling_factor(space_shapes['obs_fine'],
                                                   space_shapes['sim_coarse'])

    # get list of rotation matrices to be used for all locations and months
    n_fine_per_coarse = downscaling_factor**len(space_shapes['obs_fine'])
    if randomization_seed is not None:
        np.random.seed(randomization_seed)
    rotation_matrices = [
        uf.generateCREmatrix(n_fine_per_coarse) for i in range(n_iterations)
    ]

    # bilinearly interpolate sim_coarse to grid of obs_fine
    print('interpolating to fine grid ...')
    sim_coarse_remapbil = sim_coarse.regrid(obs_fine, iris.analysis.Linear())
    icc.add_year(sim_coarse_remapbil, 'time')
    years = list(np.unique(sim_coarse_remapbil.coord('year').points))

    # downscale every location individually using multiprocessing
    print('downscaling at coarse location ...')
    sdol = partial(downscale_one_location,
                   years=years,
                   rotation_matrices=rotation_matrices,
                   randomization_seed=randomization_seed,
                   **kwargs)
    pool = mp.Pool(n_processes, maxtasksperchild=1000)
    time_series_downscaled = pool.imap(
        sdol,
        zip(
            uf.StepSliceIterator(obs_fine, [0], downscaling_factor),
            sim_coarse.slices('time'),
            uf.StepSliceIterator(sim_coarse_remapbil, [0], downscaling_factor),
        ))
    pool.close()

    # replace time series in sim_coarse_remapbil by the downscaled time series
    sim_fine = sim_coarse_remapbil
    d = sim_fine.data
    i_locations_coarse = np.ndindex(space_shapes['sim_coarse'])
    for ilc, tsd in zip(i_locations_coarse, time_series_downscaled):
        ilf = tuple([
            slice(downscaling_factor * i, downscaling_factor * (i + 1))
            for i in ilc
        ])
        d[(slice(None, None), ) + ilf] = tsd
        print(ilc)

    # remove auxiliary coordinates
    sim_fine.remove_coord('year')
    sim_fine.remove_coord('month_number')

    return sim_fine
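
# Hypothetical driver for downscale (file names and options illustrative):
import iris

obs_fine = iris.load_cube('obs_fine.nc')      # observed, fine grid
sim_coarse = iris.load_cube('sim_coarse.nc')  # simulated, coarse grid
sim_fine = downscale(obs_fine, sim_coarse,
                     n_processes=4,
                     randomization_seed=0)
iris.save(sim_fine, 'sim_fine.nc')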