Example #1
	def add_times(cube, time):
		coord_cat.add_month(cube, time, name='month')
		coord_cat.add_season(cube, time, name='clim_season')
		coord_cat.add_year(cube, time, name='year')
		coord_cat.add_day_of_year(cube, time, name='day_number')
		coord_cat.add_season_year(cube, time, name='season_year')
		return cube
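A minimal usage sketch (the import alias and the input file name are assumptions, not part of the original example):

import iris
import iris.coord_categorisation as coord_cat

cube = iris.load_cube('tas_monthly.nc')  # hypothetical input file
cube = add_times(cube, 'time')
# The cube now carries 'month', 'clim_season', 'year', 'day_number' and
# 'season_year' auxiliary coordinates, ready for cube.aggregated_by().
print(cube.coord('clim_season').points[:4])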
Example #2
def plotrun(cube, foldername, scaleLBound, scaleUBound):

    # Delete all the image files in the directory to ensure that only those
    # created in the loop end up in the movie.
    print("Deleting all .png files in the " + foldername + " directory...")
    SpawnCommand("rm " + foldername + "/*.png")

    # Add a new coordinate containing the year.
    icat.add_year(cube, 'time')

    # Set the end index for the loop over years, and do the loop.
    tmin = 0
    tmax = cube.shape[0]

    # We want the files to be numbered sequentially, starting
    # from 000.png; this is so that the ffmpeg command can grok them.
    index = 0
    #scaleBarArray = scaleBar(lowerBound, upperBound)
    for time in range(tmin, tmax):

        fig = plt.figure(figsize=(6, 3), dpi=200)
        rect = [0, 0, 1, 1]  # axes rectangle in figure-fraction coordinates
        fig.add_axes(rect)
        geo_axes = plt.axes(projection=ccrs.PlateCarree())
        geo_axes.outline_patch.set_visible(False)
        plt.margins(0, 0)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)

        iplt.contourf(cube[time],
                      10,
                      vmin=scaleLBound,
                      vmax=scaleUBound,
                      cmap='YlOrRd')
        plt.gca().coastlines(color='b')
        # plt.figure(frameon=False)

        # Extract the year value and display it (coordinates used are
        # those of the data).
        year = cube.coord('year')[time].points[0]

        plt.text(-160,
                 0,
                 year,
                 horizontalalignment='center',
                 size='large',
                 fontdict={'family': 'monospace'},
                 color='b')
        filename = str(foldername) + '/' + "%03d.png" % index
        print('Now plotting: ', filename)
        plt.savefig(filename, dpi=200)
        plt.close()
        index += 1

    # Now make the movie from the image files by spawning the ffmpeg command.
    # The options (of which there are many) are somewhat arcane, but these ones work.
    print("Converting images to movie...")
    options = ("-r 5 -vcodec png -y -i " + foldername +
               "/%03d.png -r 5 -vcodec msmpeg4v2 -qblur 0.01 -qscale 5 ")
    SpawnCommand("ffmpeg " + options + foldername + ".mp4")
Example #3
def main():

    # Delete all the image files in the current directory to ensure that only those
    # created in the loop end up in the movie.
    print "Deleting all .png files in this directory..."
    SpawnCommand("rm *.png")

    # Read all the temperature values.
    temperatures = iris.load_cube('temperatures.pp')
    
    # Get the range of values.
    minTemp = np.amin(temperatures.data)
    maxTemp = np.amax(temperatures.data)
    print "Range of temperatures is", minTemp, "to", maxTemp

    # Add a new coordinate containing the year.
    icat.add_year(temperatures, 'time')
    years = temperatures.coord('year')
    
    # Set the limits for the loop over years.  
    minTime = 0
    maxTime = temperatures.shape[0]

    print "Making images from year", years[minTime].points[0], "to", years[maxTime-1].points[0]

    for time in range(minTime, maxTime):

       # Contour plot the temperatures and add the coastline.
       iplt.contourf(temperatures[time], 10, vmin=minTemp, vmax=maxTemp, cmap='hot')
       plt.gca().coastlines()
       
       # We need to fix the boundary of the figure (otherwise we get a black border at left & top).
       # Cartopy removes matplotlib's axes.patch (which normally defines the boundary) and
       # replaces it with outline_patch and background_patch.  It's the former which is causing
       # the black border.  Get the axis object and make its outline patch invisible.
       ax = plt.gca()
       ax.outline_patch.set_visible(False)

       # Extract the year value and display it (coordinates used in locating the text are
       # those of the data).
       year = years[time].points[0]
       plt.text(0, -60, year, horizontalalignment='center') 
       
       # Now save the plot in an image file.  The files are numbered sequentially, starting
       # from 000.png; this is so that the ffmpeg command can grok them.
       filename = "%03d.png" % time
       plt.savefig(filename, bbox_inches='tight', pad_inches=0)
       
       # Discard the figure (otherwise the text will be overwritten
       # by the next iteration).
       plt.close()

    # Now make the movie from the image files by spawning the ffmpeg command.
    # The options (of which there are many) are somewhat arcane, but these ones work.
    print "Converting images to movie..."
    options = "-r 5 -vcodec png -y -i %03d.png -r 5 -vcodec msmpeg4v2 -qblur 0.01 -qscale 5"
    SpawnCommand("ffmpeg " + options + " plotTemperatures.avi")
Example #4
def reform_data_iris_deangelis3b4(input_data):
    """Extract data from IRIS cubes and average or reformat them."""
    # Model data for 'tas', 'rsnstcs'
    cubes = {}
    for my_short_name in ['tas', 'rsnstcs']:
        # my_data: List of dictionaries
        my_data = select_metadata(input_data, short_name=my_short_name)
        # subdata: dictionary
        for subdata in my_data:
            cube = iris.load(subdata['filename'])[0]
            cat.add_year(cube, 'time', name='year')
            cube = cube.aggregated_by('year', iris.analysis.MEAN)
            experiment = subdata['exp']
            if experiment == 'abrupt-4xCO2':
                experiment = 'abrupt4xCO2'
            dataset = subdata['dataset']
            cubetuple = (dataset, my_short_name, experiment)
            if experiment == 'piControl':
                # DeAngelis uses a 21-month running mean on piControl but the
                # full extent of the 150-year abrupt4xCO2 run. I could not
                # find out how they treat the edges; currently I just skip the
                # mean for the edges. This is not exactly the same as done in
                # the paper (small differences remain in extended data
                # Fig. 1), but it is closer than other methods I tried, e.g.
                # skipping the edges. For most data sets it would also be
                # possible to extend piControl by 20 years, but then it would
                # no longer be centred means of piControl for each year of
                # abrupt4xCO2.
                # cube_new = cube.rolling_window('time',iris.analysis.MEAN, 21)
                # endm10 = len(cube.coord('time').points) - 10
                # cube.data[10:endm10] = cube_new.data
                cube.data = scisi.savgol_filter(cube.data, 21, 1, axis=0)
            cubes[cubetuple] = cube.data

    # Model data and observations for 'rsnstcsnorm', and 'prw'
    for my_short_name in ['rsnstcsnorm', 'prw']:
        # my_data: List of dictionaries
        my_data = select_metadata(input_data, short_name=my_short_name)
        # subdata: dictionary
        for subdata in my_data:
            if 'exp' in subdata.keys():
                experiment = subdata['exp']
            else:
                experiment = 'nomodel'
            dataset = subdata['dataset']
            cubetuple = (dataset, my_short_name, experiment)
            if experiment in ['piControl', 'nomodel']:
                cube = iris.load(subdata['filename'])[0]
                total_len = len(cube.coord('time').points) * \
                    len(cube.coord('latitude').points) * \
                    len(cube.coord('longitude').points)
                data_new = np.reshape(cube.data, total_len)
                cubes[cubetuple] = data_new

    return cubes
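For reference, scisi here is presumably scipy.signal. With polyorder 1 and an odd window, savgol_filter matches a centred running mean away from the edges (a least-squares line evaluated at the window centre equals the window mean), which is why it can stand in for the 21-month running mean above. A small self-contained check:

import numpy as np
from scipy.signal import savgol_filter

x = np.random.rand(150)
smoothed = savgol_filter(x, 21, 1)
running_mean = np.convolve(x, np.ones(21) / 21, mode='valid')
# Away from the 10-point edge regions the two agree to rounding error.
print(np.allclose(smoothed[10:-10], running_mean))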
Example #5
def _yrs(cube):
    try:
        yrs = cube.coord('seasonyr').points
    except iris.exceptions.CoordinateNotFoundError:
        try:
            yrs = cube.coord('year').points
        except iris.exceptions.CoordinateNotFoundError:
            ica.add_year(cube, 'time', name='year')
            yrs = cube.coord('year').points
    return yrs
Example #6
def annual_mean(cube):
    """Calculate annual mean of a cube.

    Args:
        cube (iris.cube.Cube)

    Returns:
        iris.cube.Cube

    """
    aux_coords = [aux_coord.name() for aux_coord in cube.aux_coords]
    if 'year' not in aux_coords:
        cat.add_year(cube, 'time')
    return cube.aggregated_by('year', iris.analysis.MEAN)
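A usage sketch (assuming cat is iris.coord_categorisation; the file name is hypothetical):

import iris
import iris.coord_categorisation as cat

cube = iris.load_cube('tas_monthly.nc')  # hypothetical monthly time series
annual = annual_mean(cube)
# One time step per calendar year, each holding that year's mean.
print(annual.coord('year').points)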
Example #8
def add_extra_time_coords(cube):
    """
    Add auxiliary time coordinates (year, month, month_number, day_of_month
    and hour) to the cube if they are not already present.
    """
    if not cube.coords('year'):
        icc.add_year(cube, 'time')
    if not cube.coords('month'):
        icc.add_month(cube, 'time')
    if not cube.coords('month_number'):
        icc.add_month_number(cube, 'time')
    if not cube.coords('day_of_month'):
        icc.add_day_of_month(cube, 'time')
    if not cube.coords('hour'):
        icc.add_hour(cube, 'time')
    return cube
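A usage sketch (assuming icc is iris.coord_categorisation; the file name is hypothetical):

import iris

cube = iris.load_cube('any_timeseries.nc')  # hypothetical input
cube = add_extra_time_coords(cube)
print([coord.name() for coord in cube.aux_coords])
# e.g. ['year', 'month', 'month_number', 'day_of_month', 'hour']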
Example #9
def annual_mean(mycube):
    """
    Compute the annual mean with MEAN.

    Aggregates time by calendar year and keeps only years whose time
    bounds span 365 days; returns a cube.
    """
    coord_cat.add_year(mycube, 'time')
    yr_mean = mycube.aggregated_by('year', iris.analysis.MEAN)

    def spans_year(time):
        """Check for 12 months."""
        return (time.bound[1] - time.bound[0]) == 365

    t_bound = iris.Constraint(time=spans_year)
    return yr_mean.extract(t_bound)
Example #10
    def test_basic(self):
        cube = self.cube
        time_coord = self.time_coord

        ccat.add_year(cube, time_coord, 'my_year')
        ccat.add_day_of_month(cube, time_coord, 'my_day_of_month')
        ccat.add_day_of_year(cube, time_coord, 'my_day_of_year')

        ccat.add_month(cube, time_coord, 'my_month')
        with warnings.catch_warnings(record=True):
            ccat.add_month_shortname(cube, time_coord, 'my_month_shortname')
        ccat.add_month_fullname(cube, time_coord, 'my_month_fullname')
        ccat.add_month_number(cube, time_coord, 'my_month_number')

        ccat.add_weekday(cube, time_coord, 'my_weekday')
        ccat.add_weekday_number(cube, time_coord, 'my_weekday_number')
        with warnings.catch_warnings(record=True):
            ccat.add_weekday_shortname(cube, time_coord,
                                       'my_weekday_shortname')
        ccat.add_weekday_fullname(cube, time_coord, 'my_weekday_fullname')

        ccat.add_season(cube, time_coord, 'my_season')
        ccat.add_season_number(cube, time_coord, 'my_season_number')
        with warnings.catch_warnings(record=True):
            ccat.add_season_month_initials(cube, time_coord,
                                           'my_season_month_initials')
        ccat.add_season_year(cube, time_coord, 'my_season_year')

        # also test 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(cube,
                                   'my_month_in_quarter',
                                   time_coord,
                                   _month_in_quarter)

        # To ensure consistent results between 32-bit and 64-bit
        # platforms, ensure all the numeric categorisation coordinates
        # are always stored as int64.
        for coord in cube.coords():
            if coord.long_name is not None and coord.points.dtype.kind == 'i':
                coord.points = coord.points.astype(np.int64)

        # check values
        self.assertCML(cube, ('categorisation', 'quickcheck.cml'))
Example #11
def hovmoller(target_dir):
    fname = os.path.join(DATA_ZOO, 'PP', 'ostia', 'ostia_sst_200604_201009_N216.pp')
    cube = iris.load_cube(fname, iris.Constraint('surface_temperature', latitude=lambda v: -5 < v < 5))
    
    iris_cat.add_month_number(cube, cube.coord('time'), 'month')
    iris_cat.add_year(cube, cube.coord('time'), 'year')
    
    monthly_mean = cube.aggregated_by(['year', 'month'], iris.analysis.MEAN)
    monthly_mean.remove_coord('month')
    monthly_mean.remove_coord('year')
    
    # make time the dimension coordinate (won't be needed once Bill has #22)
    t = monthly_mean.coord('time')
    monthly_mean.remove_coord(t)
    monthly_mean.add_dim_coord(t, 0)
    
    iris.save(monthly_mean, os.path.join(target_dir, 'ostia_monthly.nc'))
Example #12
def reinit_broken_time(cube_anom, cube_clim, climstart, climend):
    """The time coordinates are a big mess (given as floats in years A.D.);
    best to reinitialize them from scratch.
    """
    logger.info("Reinitializing broken time coordinate")
    time_raw = cube_anom.coord('time')

    n_years, n_add_mon = len(time_raw.points) // 12, len(time_raw.points) % 12
    start_year = int(time_raw.points[0])
    n_days = (n_years + n_add_mon / 12) * 365.25 + 50  # have some extra length
    climcenter = (climend - climstart) // 2

    times = iris.coords.DimCoord(
        np.arange(int(n_days), dtype=float),
        var_name='time',
        standard_name='time',
        long_name='time',
        units=cf_units.Unit('days since {}-01-01 00:00:00'.format(start_year),
                            calendar=cf_units.CALENDAR_STANDARD))

    # init a dummy cube to enable coord_categorisation
    dummycube = iris.cube.Cube(np.zeros(int(n_days), int),
                               dim_coords_and_dims=[(times, 0)])
    coord_categorisation.add_year(dummycube, 'time', name='year')
    coord_categorisation.add_month_number(dummycube, 'time', name='month')

    # build timecoord for the anomaly cube
    dummycube = dummycube.aggregated_by(['year', 'month'], iris.analysis.MEAN)
    dummycube = dummycube[:(n_years * 12 + n_add_mon)]
    timecoord_anom = dummycube.coord('time')

    # build timecoord for the climatology cube
    dummycube_clim = dummycube.extract(
        iris.Constraint(year=lambda cell: cell == climstart + climcenter))
    timecoord_clim = dummycube_clim.coord('time')

    # change to the new time coordinates
    cube_anom.remove_coord('time')
    cube_anom.add_dim_coord(timecoord_anom, 0)
    cube_clim.add_dim_coord(timecoord_clim, 0)

    # convert time units to standard
    utils.convert_timeunits(cube_anom, 1950)
    utils.convert_timeunits(cube_clim, 1950)

    return (cube_anom, cube_clim)
Example #13
def climatology(cube, kind='month'):
    """Calculate a climatology for a cube.  Can do monthly or yearly.

    Args:
        cube (iris.cube.Cube)
        kind (Optional[str]): 'month' or 'year'

    Returns:
        iris.cube.Cube

    """
    aux_coords = [aux_coord.name() for aux_coord in cube.aux_coords]
    if 'year' not in aux_coords:
        cat.add_year(cube, 'time')
    if 'month' not in aux_coords:
        cat.add_month(cube, 'time')
        cat.add_month_number(cube, 'time')
    out = cube.aggregated_by(kind, iris.analysis.MEAN)

    # If the data don't start in January the time coordinate will no longer
    # be monotonic. Fix this.
    if (kind == 'month') and (not out.coord('time').is_monotonic()):

        # Reorder the data so January is first.
        jan_index = np.where(out.coord('month').points == 'Jan')[0][0]
        ntim = 12
        sort_indices = list(range(jan_index, ntim)) + list(range(0, jan_index))
        out = out[sort_indices]

        # Create a new time coordinate which is monotonic.
        startyear = int(out.coord('time').units.num2date(0).year)
        newtime_points = [
            netcdftime.datetime(startyear + (m // 12), (m % 12) + 1, 1)
            for m in out.coord('month_number').points.astype(int) - 1
        ]
        time_units = out.coord('time').units
        newtime_points = time_units.date2num(newtime_points)
        newtime = iris.coords.DimCoord(newtime_points,
                                       units=time_units,
                                       standard_name='time')
        data_dim = out.coord_dims('time')[0]
        out.remove_coord('time')
        out.add_dim_coord(newtime, data_dim)

    return out
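The reordering step matters because aggregated_by keeps groups in order of first appearance: data starting in July produce the month sequence Jul..Dec, Jan..Jun, whose times are not monotonic. A sketch of the index arithmetic for that case:

jan_index = 6  # January is the seventh group when the data start in July
sort_indices = list(range(jan_index, 12)) + list(range(0, jan_index))
print(sort_indices)  # [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]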
Example #15
def add_time_names(cube, year=False, month_number=False, day_of_year=False):
    '''
    Add additional coordinate names to the time dimension
    '''
    if year:
        icc.add_year(cube, 'time', name='year')

    if month_number:
        try:
            icc.add_month_number(cube, 'time', name='month')
        except Exception:
            # most likely the coordinate already exists
            pass

    if day_of_year:
        try:
            icc.add_day_of_year(cube, 'time', name='day_of_year')
        except Exception:
            # most likely the coordinate already exists
            pass
Example #16
    def test_basic(self):
        # make a series of 'day numbers' for the time, that slide across month boundaries
        day_numbers = np.arange(0, 600, 27, dtype=np.int32)

        cube = iris.cube.Cube(day_numbers, long_name='test cube', units='metres')

        # use day numbers as data values also (don't actually use this for anything)
        cube.data = day_numbers
        
        time_coord = iris.coords.DimCoord(
            day_numbers, standard_name='time', units=iris.unit.Unit('days since epoch', 'gregorian'))
        cube.add_dim_coord(time_coord, 0)

        # add test coordinates for examples wanted
        ccat.add_year(cube, time_coord)
        ccat.add_day_of_month(cube, 'time')    # NB test passing coord-name instead of coord itself

        ccat.add_month(cube, time_coord)
        ccat.add_month_shortname(cube, time_coord, name='month_short')
        ccat.add_month_fullname(cube, time_coord, name='month_full')
        ccat.add_month_number(cube, time_coord, name='month_number')
        
        ccat.add_weekday(cube, time_coord)
        ccat.add_weekday_number(cube, time_coord, name='weekday_number')
        ccat.add_weekday_shortname(cube, time_coord, name='weekday_short')
        ccat.add_weekday_fullname(cube, time_coord, name='weekday_full')

        ccat.add_season(cube, time_coord)
        ccat.add_season_number(cube, time_coord, name='season_number')
        ccat.add_season_month_initials(cube, time_coord, name='season_months')
        ccat.add_season_year(cube, time_coord, name='year_ofseason')
        
        # also test 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(cube, 'month_in_quarter', time_coord, _month_in_quarter)

        for coord_name in ['month_number', 'month_in_quarter', 'weekday_number', 'season_number', 'year_ofseason', 'year', 'day']:
            cube.coord(coord_name).points = cube.coord(coord_name).points.astype(np.int64)

        # check values
        self.assertCML(cube, ('categorisation', 'quickcheck.cml'))
Example #19
def add_future_time_info(start_cube):
    '''
    Fix calendar so that future_cube has dates starting from start_cube+1
    '''
    future_cube = start_cube.copy()

    for coord in ['year', 'month']:
        try:
            future_cube.remove_coord(coord)
        except iris.exceptions.CoordinateNotFoundError:
            pass

    print('start future year ', future_cube.coord('time').points[0:50])
    print('start future year ', future_cube.coord('time').points[-50:])
    # transform time dimension to make this go from Jan onwards
    start_period2 = start_cube.coord('year').points[-1] + 1
    start_period1 = start_cube.coord('year').points[0]
    print('start period 1,2 ', start_period1, start_period2)

    # find difference between date wanted and date have
    time_delta = datetime.date(start_period2, 1, 1) - datetime.date(
        start_period1, 1, 1)
    print('time delta ', time_delta)
    time_units = start_cube.coord('time').units
    print('time_units ', time_units)
    if 'hours' in str(time_units):
        time_delta_unit = 24.
    else:
        time_delta_unit = 1.

    # add this delta on to time coordinate
    future_cube.coord('time').points = future_cube.coord(
        'time').points + time_delta.days * time_delta_unit

    icc.add_year(future_cube, 'time', name='year')
    icc.add_month_number(future_cube, 'time', name='month')

    print('end future year ', future_cube.coord('time').points[0:50])
    print('end future year ', future_cube.coord('year').points[0:50])

    return future_cube
Example #20
def monthly_running_mean(infile):
    '''
    Try removing the mean seasonal cycle from all data,
    then perhaps filtering the rest (or else extracting the trend for each month).
    '''
    file_runningmean = infile[:-3] + '_running_monthlymean.nc'
    if os.path.exists(file_runningmean):
        return file_runningmean
    c = iris.load_cube(infile)
    icc.add_month_number(c, 'time', name='month')
    icc.add_year(c, 'time', name='year')

    #cube_anomaly = sst_future.remove_monthly_mean_time_avg(c)
    cube_mean = sst_future.monthly_mean_running_time_avg(c)

    #iris.save(cube_anomaly, infile[:-3]+'_remove_monthlymean.nc', unlimited_dimensions = ['time'], fill_value = 1.0e20)
    iris.save(cube_mean,
              file_runningmean,
              unlimited_dimensions=['time'],
              fill_value=1.0e20)

    return file_runningmean
Example #21
    def test_basic(self):
        cube = self.cube
        time_coord = self.time_coord

        ccat.add_year(cube, time_coord, "my_year")
        ccat.add_day_of_month(cube, time_coord, "my_day_of_month")
        ccat.add_day_of_year(cube, time_coord, "my_day_of_year")

        ccat.add_month(cube, time_coord, "my_month")
        ccat.add_month_fullname(cube, time_coord, "my_month_fullname")
        ccat.add_month_number(cube, time_coord, "my_month_number")

        ccat.add_weekday(cube, time_coord, "my_weekday")
        ccat.add_weekday_number(cube, time_coord, "my_weekday_number")
        ccat.add_weekday_fullname(cube, time_coord, "my_weekday_fullname")

        ccat.add_season(cube, time_coord, "my_season")
        ccat.add_season_number(cube, time_coord, "my_season_number")
        ccat.add_season_year(cube, time_coord, "my_season_year")

        # also test 'generic' categorisation interface
        def _month_in_quarter(coord, pt_value):
            date = coord.units.num2date(pt_value)
            return (date.month - 1) % 3

        ccat.add_categorised_coord(
            cube, "my_month_in_quarter", time_coord, _month_in_quarter
        )

        # To ensure consistent results between 32-bit and 64-bit
        # platforms, ensure all the numeric categorisation coordinates
        # are always stored as int64.
        for coord in cube.coords():
            if coord.long_name is not None and coord.points.dtype.kind == "i":
                coord.points = coord.points.astype(np.int64)

        # check values
        self.assertCML(cube, ("categorisation", "quickcheck.cml"))
Example #22
def remap_trends_to_common_grid(model, CMIP5_ref, do_plot=False):
    # remap each model trend field onto a 1x1 grid, so that they can be averaged together
    # make a simple lonlat grid for cdo to remap to
    global_temp_change = {}
    files_remapped = []
    #pickle_file = os.path.join(savedir, 'trends_1x1_allmodels.pkl')

    file_trend = os.path.join(savedir,
                              model + '_month_annual_1950_2120_delta.nc')
    file_masked = file_trend[:-3] + '_allyrs_masked_sst.nc'
    file_masked_121 = file_trend[:-3] + '_allyrs_masked_sst_121.nc'

    if not os.path.exists(file_masked_121):
        c = iris.load_cube(file_masked)
        c_remove121 = gridpoint_zonal_filter(c, niter=3)
        iris.save(c_remove121, file_masked_121, fill_value=1.0e20)

    #file_1x1 = file_masked[:-3]+'_1x1.nc'
    file_1x1 = file_masked_121[:-3] + '_1x1.nc'
    files_remapped.append(file_1x1)

    if not os.path.exists(file_1x1):
        #remap_with_cdo(file_masked, file_1x1)
        remap_with_cdo(file_masked_121,
                       file_1x1,
                       togrid='1',
                       rtype='bil',
                       smooth=False)

    if do_plot:
        cube_ts_list = iris.load(file_1x1)
        trend_cube = cube_ts_list.concatenate_cube()
        guess_areas(trend_cube)
        icc.add_year(trend_cube, 'time', name='year')
        cube_ts = calculate_global_temp_change(trend_cube)
        global_temp_change[model] = cube_ts

    return files_remapped
Example #23
def adjust_monthly_mean_edge(cube_monthly_mean_recent,
                             fname,
                             adjust_dec=False,
                             adjust_jan=False):
    '''
    Adjust the January value of monthly_mean_recent so that it evolves
    smoothly from the December value in fname
    '''
    cube = iris.load_cube(fname)
    cube.convert_units('degC')

    if adjust_dec:
        # need to change the Dec value to be that from the previous period
        monthly_mean = aggregate_mean(cube, coord='month')
        cube_previous = monthly_mean
        icc.add_year(cube_previous, 'time', name='year')
        return cube_previous

    if adjust_jan:
        # need to change the jan value to be that from the next period
        monthly_mean = aggregate_mean(cube, coord='month')
        cube_next = monthly_mean
        icc.add_year(cube_next, 'time', name='year')
        return cube_next
Example #24
def main():

    # Delete all the image files in the current directory to ensure that only those
    # created in the loop end up in the movie.
    print("\nDeleting all .png files in this directory...")
    SpawnCommand("rm -f *.png")
    print("Deleting all .mp4 files in this directory...")
    SpawnCommand("rm -f *.mp4")

    # Read all the temperature values and create a single cube containing this data
    print("Loading the data...")
    cubes = iris.cube.CubeList([])
    months = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec'
    ]
    for i in range(1850, 2015):
        for month in months:
            tempfile = 'tas_1850-2014/bc179a.p5' + str(i) + month + '.nc'
            cubes.append(iris.load_cube(tempfile))
    temperatures = cubes.merge_cube()
    print("Data downloaded! Now Processing...")

    # Get the range of values.
    minTemp = np.amin(temperatures.data)
    maxTemp = np.amax(temperatures.data)
    print("Range of temperatures is ", minTemp, "ºK to ", maxTemp, "ºK.")

    # Add a new coordinate containing the year.
    icat.add_year(temperatures, 'time')
    years = temperatures.coord('year')

    # Set the limits for the loop over years.
    minTime = 0
    maxTime = temperatures.shape[0]

    print("Making images from year", years[minTime].points[0], "to",
          years[maxTime - 1].points[0], "...")

    for time in range(minTime, maxTime):

        # Contour plot the temperatures and add the coastline.
        iplt.contourf(temperatures[time],
                      10,
                      vmin=minTemp,
                      vmax=maxTemp,
                      cmap='RdBu_r')
        plt.gca().coastlines()

        # We need to fix the boundary of the figure (otherwise we get a black border at left & top).
        # Cartopy removes matplotlib's axes.patch (which normally defines the boundary) and
        # replaces it with outline_patch and background_patch.  It's the former which is causing
        # the black border.  Get the axis object and make its outline patch invisible.
        ax = plt.gca()
        ax.outline_patch.set_visible(False)

        # Extract the year value and display it (coordinates used in locating the text are
        # those of the data).
        year = years[time].points[0]
        plt.text(0, -60, year, horizontalalignment='center')

        # Now save the plot in an image file.  The files are numbered sequentially, starting
        # from 000.png; this is so that the ffmpeg command can grok them.
        filename = "image-%04d.png" % time
        plt.savefig(filename, bbox_inches='tight', pad_inches=0)

        # Discard the figure (otherwise the text will be overwritten
        # by the next iteration).
        plt.close()
    print("images made! Now converting to .mp4...")

    SpawnCommand("ffmpeg -i image-%04d.png TemperatureVideo1.mp4")
    SpawnCommand(
        'ffmpeg -i TemperatureVideo1.mp4 -filter:v "setpts=2.0*PTS" TemperatureVideo.mp4'
    )
    print("Deleting the unneeded images...")
    SpawnCommand("rm -f *.png")
    SpawnCommand("rm -f TemperatureVideo1.mp4")
    print("Opening video...")
    myTime.sleep(5)
    SpawnCommand("open TemperatureVideo.mp4")
Example #25
def consecutive_dry_days(cube, period='year', length=6, threshold=1.):
    """
    calculate consecutive dry days within an iris.cube.Cube

    Args:

    * cube (iris.cube.Cube):
        An iris.cube.Cube holding precipitation amount in mm/day
    * period (string):
        Period over which the CDD will be calculated. Can be 'year', 'season'
        or 'month'. If period is 'season' or 'month' the CDD will be averaged
        over the years

    Kwargs:

    * length (int):
        The number of days without rainfall that define a dry period

    * threshold (float):
        The upper limit of daily rainfall in mm that indicates 'no precipitation'

    Returns:

        An iris.cube.CubeList that holds two iris.cube.Cubes with the longest
        period of dry days in the given period and the mean of the number of
        dry periods with respect to the given length
    """
    def _cdd_index(array, axis, threshold):
        """
        Calculate the consecutive dry days index.

        This function is used as an iris.analysis.Aggregator

        Args:

        * array (numpy.array or numpy.ma.array):
            array that holds the precipitation data
        
        * axis (int):
            the number of the time-axis

        * threshold (float):
            the threshold that indicates a precipitation-less day

        Returns:
            the aggregation result, collapsing the 'axis' dimension of the 'data' argument
        """
        from pycat.analysis.utils import _get_max_true_block_length, _get_true_block_lengths

        up_down = _get_true_block_lengths(array < threshold, axis)
        return _get_max_true_block_length(up_down)

    def _cdd_periods(array, axis, threshold, length):
        """
        Calculate the number of consecutive dry days periods.

        This function is used as an iris.analysis.Aggregator

        Args:

        * array (numpy.array or numpy.ma.array):
            array that holds the precipitation data
        
        * axis (int):
            the number of the time-axis

        * threshold (float):
            the threshold that indicates a precipitation-less day

        * length (int):
            number of days that a dry period must last

        Returns:
            the aggregation result, collapsing the 'axis' dimension of the 'data' argument
        """
        from pycat.analysis.utils import _get_len_true_block_length, _get_true_block_lengths

        up_down = _get_true_block_lengths(array < threshold, axis)
        return _get_len_true_block_length(up_down, length)

    # build the iris.analysis.Aggregators
    cdd_index = Aggregator('cdd_index', _cdd_index)
    cdd_periods = Aggregator('cdd_periods', _cdd_periods)

    # check if the cube already has the needed auxiliary coordinates
    if period == 'season':
        # add the season_year auxiliary coordinate
        try:
            years = np.unique(cube.coord('season_year').points)
        except CoordinateNotFoundError:
            ccat.add_season_year(cube, 'time')
            years = np.unique(cube.coord('season_year').points)
        constraint_year_key = 'season_year'
    else:
        # add calendar years
        try:
            years = np.unique(cube.coord('year').points)
        except CoordinateNotFoundError:
            ccat.add_year(cube, 'time')
            years = np.unique(cube.coord('year').points)
        constraint_year_key = 'year'
        
    if period in ['season', 'month']:
        try:
            index_period = np.unique(cube.coord('%s_number' % period).points)
        except CoordinateNotFoundError:
            cat = getattr(ccat, 'add_%s_number' % period)
            cat(cube, 'time')
            index_period = np.unique(cube.coord('%s_number' % period).points)

    # create time-axis of resulting cubes
    time_dimension = _make_time_dimension(
        cube.coord('time').units.num2date(cube.coord('time').points[0]),
        cube.coord('time').units.num2date(cube.coord('time').points[-1]),
        period=period)
    # create the empty resulting cubes
    dim_coords_and_dims = []
    slices = []
    for coord in cube.dim_coords:
        if coord.units.is_time_reference():
            dim_coords_and_dims.append((time_dimension, cube.coord_dims(coord)))
            slices.append(0)
            time_axis = cube.coord_dims(coord)[0]
        else:
            dim_coords_and_dims.append((coord, cube.coord_dims(coord)))
            slices.append(slice(None, None, None))

    cdd_index_cube = _create_cube(
        long_name='Consecutive dry days is the greatest number of consecutive days per time period with daily precipitation amount below %s mm.' % threshold,
        var_name='consecutive_dry_days_index_per_time_period',
        units=iris.unit.Unit('1'),
        dim_coords_and_dims=dim_coords_and_dims)

    cdd_periods_cube = _create_cube(
        long_name='Number of cdd periods in given time period with more than %d days.' % length,
        var_name='number_of_cdd_periods_with_more_than_%ddays_per_time_period' % length,
        units=iris.unit.Unit('1'),
        dim_coords_and_dims=dim_coords_and_dims)

    # differentiate between the considered period
    if period == 'year':
        # just run the aggregation over all given years resulting in
        # the maximum cdd length and the number of cdd periods for each year
        for year in years:
            tmp_cube = cube.extract(iris.Constraint(year=year))
            slices[time_axis] = year - years[0]
            cdd_index_data = tmp_cube.collapsed(
                'time', cdd_index, threshold=threshold).data
            cdd_periods_data = tmp_cube.collapsed(
                'time', cdd_periods, threshold=threshold, length=length).data

            cdd_index_cube.data[tuple(slices)] = cdd_index_data
            cdd_periods_cube.data[tuple(slices)] = cdd_periods_data

        return iris.cube.CubeList(
            (cdd_index_cube, cdd_periods_cube)
        )

    else:
        # run the aggregation over all seasons/months of all years
        # afterwards aggregate the seasons/month by MAX Aggregator
        # for the cdd_index and the MEAN Aggregator for cdd_periods
        for year in years:
            for p in index_period:
                constraint_dict = {'%s_number' % period: p,
                                   constraint_year_key: year}
                tmp_cube = cube.extract(iris.Constraint(**constraint_dict))
                if tmp_cube:
                    # the extraction can lead to empty cubes for seasons
                    # in the last year
                    time_index = (year - years[0]) * len(index_period) + p
                    # months numbers start at 1
                    if period == 'month':
                        time_index -= 1
                    slices[time_axis] = time_index
                    cdd_index_data = tmp_cube.collapsed(
                        'time', cdd_index, threshold=threshold).data
                    cdd_periods_data = tmp_cube.collapsed(
                        'time', cdd_periods, threshold=threshold, length=length).data

                    cdd_index_cube.data[tuple(slices)] = cdd_index_data
                    cdd_periods_cube.data[tuple(slices)] = cdd_periods_data
                    
        # aggregate over seasons/months
        cat = getattr(ccat, 'add_%s' % period)
        cat(cdd_index_cube, 'time')
        cat(cdd_periods_cube, 'time')
        
        cdd_index_mean = cdd_index_cube.aggregated_by(period, iris.analysis.MEAN)
        cdd_periods_mean = cdd_periods_cube.aggregated_by(period, iris.analysis.MEAN)

        cdd_index_mean.remove_coord('time')
        cdd_periods_mean.remove_coord('time')
        return iris.cube.CubeList(
            (cdd_index_mean, cdd_periods_mean)
        )
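A usage sketch (the file name is hypothetical; the cube must hold daily precipitation in mm/day with a 'time' dimension):

import iris

pr = iris.load_cube('pr_day.nc')  # hypothetical daily precipitation
cdd_index_cube, cdd_periods_cube = consecutive_dry_days(
    pr, period='year', length=6, threshold=1.)
print(cdd_index_cube.long_name)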
Example #26
def read_pr_sm_topo(filedir, years):
    """ 
    ;; Arguments
    ;;    filedir: dir
    ;;          directory with input data
    ;;    years: list of int
    ;;          list of years for the analysis
    ;;
    ;; Return 
    ;;    pr: iris cube [time, lat, lon]
    ;;          precipitation time series
    ;;    sm: iris cube [time, lat, lon]
    ;;          soil moisture time series
    ;;    topo: array [lat, lon]
    ;;          topography
    ;;    lon: array [lon]
    ;;          longitude
    ;;    lat: array [lat]
    ;;          latitude
    ;;    time: iris cube coords
    ;;          time info of cube
    ;;    
    ;;
    ;; Description
    ;;    Read cmip5 input data for computing the diagnostic
    ;;
    """

    #-------------------------
    # Input data directories
    #-------------------------

    Patt = filedir + 'pr_3hr_inmcm4_amip_r1i1p1_{}010101-{}123122.nc'
    pr_files = [Patt.format(y, y) for y in years]

    Patt = filedir + 'mrsos_3hr_inmcm4_amip_r1i1p1_{}010100-{}123121.nc'
    sm_files = [Patt.format(y, y) for y in years]

    #----------------------
    # Read in precipitation
    #----------------------

    pr_list = []

    for pr_file in pr_files:

        print('Reading precipitation from ' + pr_file)

        pr = iris.load(pr_file)[0]

        for at_k in list(pr.attributes.keys()):
            pr.attributes.pop(at_k)

        pr_list.append(pr)

    pr = iris.cube.CubeList(pr_list)
    pr = pr.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    pr = coord_change([pr])[0]

    # Add metadata: day, month, year

    add_month(pr, 'time')
    add_day_of_month(pr, 'time', name='dom')
    add_year(pr, 'time')

    # Convert units to kg m-2 hr-1

    pr.convert_units('kg m-2 hr-1')

    #-----------------------
    # Read in soil moisture
    #-----------------------

    sm_list = []

    for sm_file in sm_files:

        print('Reading soil moisture from ' + sm_file)

        sm = iris.load(sm_file)[0]

        for at_k in list(sm.attributes.keys()):
            sm.attributes.pop(at_k)

        sm_list.append(sm)

    sm = iris.cube.CubeList(sm_list)
    sm = sm.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    sm = coord_change([sm])[0]

    # Add metadata: day, month, year

    add_month(sm, 'time')
    add_day_of_month(sm, 'time', name='dom')
    add_year(sm, 'time')

    #----------------------------------------------
    # Constrain pr and sm data to latitude 60S_60N
    #----------------------------------------------

    latconstraint = iris.Constraint(
        latitude=lambda cell: -59.0 <= cell <= 59.0)

    pr = pr.extract(latconstraint)
    sm = sm.extract(latconstraint)

    #---------------------------------------------------
    # Read in grid info: latitude, longitude, timestamp
    #---------------------------------------------------

    lon = sm.coords('longitude')[0].points
    lat = sm.coords('latitude')[0].points
    time = sm.coords('time')

    # --------------------------------------
    # Convert missing data (if any) to -999.
    # --------------------------------------

    try:
        sm.data.set_fill_value(-999)
        sm.data.data[sm.data.mask] = -999.

    except AttributeError:
        # the data are not a masked array
        print('no missing data conversion')

    #----------------------
    # Read in topography
    #----------------------

    # Topography map specs:
    # latitude 60S_60N
    # longitude 180W_180E
    # model resolution

    ftopo = filedir + 'topo_var_5x5_inmcm4.gra'
    dt = '>f4'
    topo = (np.fromfile(ftopo, dtype=dt)).reshape(len(lat), len(lon))

    #-----------------------------------------------
    # Return input data to compute sm_pr diagnostic
    #-----------------------------------------------
    return pr, sm, topo, lon, lat, time
Example #27
    def load_modify_cubes(self, base_run=None, period=None, instance=None):
        """
        Method to load data from UM output files for a job into an IRIS cube according to
        the constraints defined in the metric class and the period defined in the user
        configuration file.

        Then modify the cube by changing its name and units, adding a 'year' coordinate,
        and updating its attributes.

        Parameters
        ----------
        base_run : boolean
            This is used as an identifier for the metric; when flagged as True it tells the
            program that the metric is defined to be a base run. The default is None.
            
        period : string
            String input from the user indicating the types of files, e.g. 'ann' for annual
            
        instance : integer
            This is used as an iterator when looping over a number of future joblists so that the program 
            can extract information from 'cimt_settings.py' correctly

        Returns
        -------
        metric.cubes
            A list of cubes loaded for a specific metric.
        
        Example
        -------
        Load and modify cubes: 
            BaseMetric.load_modify_cubes( base_run = True )
        
        Print list of cubes:
            BaseMetric.cubes     
        
        """
        self.base_run = base_run
        self.period = period
        self.instance = instance

        # Set the self parameters
        if self.base_run is True:  # Case for the base metric
            self.jobs_dict = cimt_settings.base_jobs_dict
            self.job_description = cimt_settings.base_description
            self.start_year = int(cimt_settings.base_start)
            self.end_year = int(cimt_settings.base_end)
            self.year_difference = self.end_year - self.start_year  # Not used at the moment
            print('Preparing to load cubes for Base Metric: ' + self.job_description)

        elif self.base_run is False:  # Case for a future metric
            self.jobs_dict = cimt_settings.future_jobs_dict[instance]
            self.job_description = cimt_settings.future_description[instance]
            self.start_year = int(cimt_settings.future_start[instance])
            self.end_year = int(cimt_settings.future_end[instance])
            self.year_difference = self.end_year - self.start_year  # Not used at the moment
            print('Preparing to load cubes for Future Metric: ' + self.job_description)

        # Rename the class instance according to input start and end year
        self.name = self.__class__.__name__ + '_(' + str(
            self.start_year) + '-' + str(self.end_year) + ')_'

        # Initialise some lists for storing loaded cubes, desired output cubes and jobnames (for internal naming)
        self.cubes = []
        self.cubes_to_output = []
        self.list_jobnames = []

        # Get files for loading cubes, sort into numeric order with OrderedDict
        self.job_files_dict = OrderedDict(
            sorted(self.__get_files(self.jobs_dict, period).items(),
                   key=lambda x: x[1]))

        # Loop over number of jobs in the joblist
        for job, path in self.job_files_dict.items():

            # Check for mis-match between period and location of files
            if len(self.job_files_dict[job]) == 0:
                raise RuntimeError(
                    "There is a problem loading the files requested, check that the period type matches the type of files requested in the string DATADIR"
                )

            # Calls the abstract method load_cube() which is DIFFERENT depending on metric
            cube = self.load_cube(job)

            print('Loading Cube: ' + self.name + str(
                self.jobs_dict[job]) + '_' + self.period)

            self.list_jobnames.append(str(self.jobs_dict[job]))

            # Apply this to all cubes no matter the metric type
            cat.add_year(
                cube, 'time', name='year'
            )  # NB- Specific to annual, update attributes for cubes with other period types

            cube = cube * self.unit_factor
            cube.units = self.units
            cube.rename(self.name + str(self.jobs_dict[job]) + '_' +
                        self.period)

            # Update cube.attributes for all cubes
            cube.attributes.update({
                'Source': 'Data from Met Office Unified Model',
                'Created by': 'Climate Impacts Metrics Tool',
                'Stash Number': self.stash
            })

            # Extract all yearly files within given range
            if self.start_year is not None:
                cube = cube.extract(
                    iris.Constraint(year=lambda cell: self.start_year <= cell
                                    <= self.end_year))

            # NB- Should use self.year_difference to check for requested files between those years

            # Append this cube to cubes
            self.cubes.append(cube)

        return self.cubes
Example #28
def calculate_mean_delta(CMIP5_ref):
    # calculate the mean of all the remapped trend files
    # should simply mean averaging over all cubes, since should have same dimensions at this point
    remapped_files = {}
    model_list = []
    model_trend_ending = '_month_annual_1950_2120_delta'
    for im, model in enumerate(CMIP5_ref):
        file_trend = os.path.join(savedir, model + model_trend_ending + '.nc')
        file_masked = file_trend[:-3] + '_allyrs_masked_sst_121.nc'
        file_1x1 = file_masked[:-3] + '_1x1.nc'
        remapped_files[model] = file_1x1
        if im == 0:
            cube_ref1 = iris.load_cube(file_1x1, callback=cmip5_callback)

        model_list.append(model.split('_')[0])

    model_subset = '_'.join(model_list)
    print(remapped_files)

    modelmean_file_out = os.path.join(
        savedir, 'cmip5_modelmean' + model_trend_ending + '_on_hadisst2_' +
        model_subset + '.nc')
    cube_list_new = iris.cube.CubeList()
    hadisst2_mask = iris.load_cube(
        '/gws/nopw/j04/hrcm/cache/malcolm/HadISST2.2.2.0/hadisst_0to360_alldays_sst_1963.nc',
        'sea_surface_temperature')[0]

    if not os.path.exists(modelmean_file_out):
        fnames = []
        cube_list_new = iris.cube.CubeList()
        for im, model in enumerate(CMIP5_ref):
            fname = os.path.join(
                savedir, 'cmip5_modelmean' + model_trend_ending +
                '_on_hadisst2_' + str(model) + '.nc')
            if not os.path.exists(fname):

                cube_model = iris.load_cube(remapped_files[model],
                                            callback=cmip5_callback)
                icc.add_year(cube_model, 'time', name='year')
                print('cube_model ', cube_model)
                cube_model = cube_model.extract(YEARS_between_1950_2100)
                cube_model.remove_coord('year')
                # this assumes all the files have the same period, and just a different calendar
                #fix_time_coord(cube_model, cube_ref1)
                model_coord = iris.coords.AuxCoord(im,
                                                   long_name='Model',
                                                   units='1')
                cube_model.add_aux_coord(model_coord)
                iris.save(cube_model, fname)
                fix_fill_value_with_nco(fname)
            fnames.append(fname)

            print('fnames to read into list ', fname)
            c = iris.load_cube(fname, callback=cmip5_callback)
            c.coord('time').bounds = None
            if im == 0:
                cube_ref1 = c.copy()
                c1 = c
            else:
                c1 = fix_time_coord(c, cube_ref1)
                iris.util.describe_diff(c, cube_ref1)
                print(c1.coord('time'))
            c1.coord('Model').points = im

            cube_list_new.append(c1)
            #print 'cube_list_new ',cube_list_new

        cube = cube_list_new.merge_cube()
        if not os.path.exists(modelmean_file_out):
            cm = cube.collapsed('Model', iris.analysis.MEAN)
            iris.save(cm,
                      modelmean_file_out,
                      unlimited_dimensions=['time'],
                      fill_value=1.0e20)

        del cube

    return modelmean_file_out
Example #29
def time_filter_by_month(infile):
    '''
    Filter each month individually with a simple rolling window filter
    To reduce the interannual variability magnitude, since this is being provided by HadISST2.2
    Lanczos filter
    Note this removes the first and last n years (1/n filter)
    '''
    c = iris.load_cube(infile)
    icc.add_month_number(c, 'time', name='month')
    icc.add_year(c, 'time', name='year')

    cube_list_filtered = iris.cube.CubeList()

    # window length in years
    window = 11
    # Construct 5-year low pass filter
    wgts5 = low_pass_weights(window, 1. / 7.)
    fmonths = {}
    for mon in range(1, 13):
        fmonths[mon] = []
    for mon in range(1, 13):
        fout_month = infile[:-3] + '_filtered_' + str(mon).zfill(
            2) + '_7_11.nc'
        fmonths[mon].append(fout_month)
        if not os.path.exists(fout_month):
            print('filter month ', mon)
            con_mon = iris.Constraint(
                coord_values={'month': lambda l: l == mon})
            c_mon = c.extract(con_mon)
            c5 = c_mon.rolling_window('time',
                                      iris.analysis.SUM,
                                      len(wgts5),
                                      weights=wgts5)
            cube_list_filtered.append(c5)
            iris.save(c5,
                      fout_month,
                      unlimited_dimensions=['time'],
                      fill_value=1.0e20)
        else:
            c5 = iris.load_cube(fout_month)

        year_start = int(c5.coord('year').points[0])
        year_end = int(c5.coord('year').points[-1])
    print('years ', year_start, year_end)
    years = range(year_start, year_end + 1)

    fout_all_years_filtered = infile[:-3] + '_filtered_' + str(
        years[0]) + '-' + str(years[-1]) + '_7_11.nc'

    c_all_yr_l = iris.cube.CubeList()
    fnames = []
    for year in years:
        con_yr = iris.Constraint(coord_values={'year': lambda l: l == year})
        c_yr_l = iris.cube.CubeList()
        fyear = []
        for im in range(1, 13):
            c = iris.load_cube(fmonths[im])
            c.coord('year').bounds = None
            c_yr = c.extract(con_yr)
            #c_yr.remove_coord('month')
            c_yr_l.append(c_yr)
        c_this_yr = c_yr_l.merge_cube()
        c_this_yr.remove_coord('month')
        c_this_yr.remove_coord('year')
        c_yr_l.append(c_this_yr)
        fout = infile[:-3] + '_filtered_' + str(year) + '_7_11.nc'
        fnames.append(fout)
        iris.save(c_this_yr,
                  fout,
                  unlimited_dimensions=['time'],
                  fill_value=1.0e20)
    cmd = 'ncrcat ' + ' '.join(fnames) + ' ' + fout_all_years_filtered
    subprocess.call(cmd, shell=True)
    #    c_all_yr = c_yr_l.concatenate_cube()
    #    iris.save(c_all_yr, fout_all_years_filtered, unlimited_dimensions = ['time'], fill_value = 1.0e20)

    return fout_all_years_filtered
Example #30
def find_and_merge_historical_rcp85(CMIP5_ref, var='tos'):
    for model in CMIP5_ref:
        run = CMIP5_ref[model]
        print(run)
        runid = run + '/' + model
        datapath_hist = form_BADC_path_hist(runid, var=var)
        datapath_rcp85 = form_BADC_path_rcp85(runid, var=var)
        print(runid, datapath_hist, datapath_rcp85)
        output_file = os.path.join(
            savedir,
            model + '_monthly_1950_' + YEAR_LAST_RCP85 + '_' + var + '.nc')
        if os.path.exists(output_file):
            continue
        datafile = glob.glob(
            os.path.join(datapath_hist,
                         var + '_*mon_' + model + '_historical_r1i1p1_*'))
        print('datafile ', datafile)
        # read all files into cube,
        cube_tmp = iris.load(datafile,
                             variable1[var],
                             callback=cmip5_callback_delall)
        if len(cube_tmp) == 0:
            cube_tmp = iris.load(datafile,
                                 variable2[var],
                                 callback=cmip5_callback_delall)
        sst_cube_full = cube_tmp.concatenate_cube()
        print('sst_cube_full ', sst_cube_full)
        tmp_file = os.path.join(savedir,
                                model + '_monthly_hist_tmp_' + var + '.nc')
        iris.save(sst_cube_full,
                  tmp_file,
                  unlimited_dimensions=['time'],
                  fill_value=1.0e20)

        # add year coordinate
        icc.add_year(sst_cube_full, 'time', name='year')
        end_year = sst_cube_full.coord('year').points.max()
        print('end_year ', end_year)

        YEARS_from_1950 = iris.Constraint(
            coord_values={'year': lambda l: l >= 1950})
        # extract years from 1950-last year, noting last year included
        sst_cube_from1950 = sst_cube_full.extract(YEARS_from_1950)
        hist_file = os.path.join(savedir,
                                 model + '_monthly_hist_' + var + '.nc')
        iris.save(sst_cube_from1950,
                  hist_file,
                  unlimited_dimensions=['time'],
                  fill_value=1.0e20)

        # then read the rcp85 directory,
        datafile = glob.glob(
            os.path.join(datapath_rcp85,
                         var + '_*mon_' + model + '_rcp85_r1i1p1_*'))
        print('datafile for rcp85 ', datafile)
        # limit the files to those not past YEAR_LAST_RCP85 (2120)
        datafile_in_period = []
        for fname in datafile:
            name = os.path.basename(fname)
            years = name.split('_')[-1]
            if int(years[0:4]) <= int(YEAR_LAST_RCP85):
                datafile_in_period.append(fname)
        datafile_in_period = sorted(datafile_in_period)
        print('datafile_in_period ', datafile_in_period)
        # concatenate files, need to try ncrcat instead
        rcp85_file = os.path.join(savedir,
                                  model + '_monthly_rcp85_' + var + '.nc')
        if 'HadGEM' not in model:  # need to do HadGEM by hand due to duplicates
            use_ncrcat_to_merge(datafile_in_period, rcp85_file)

        sst_cube_tmp = iris.load(rcp85_file,
                                 variable1[var],
                                 callback=cmip5_callback_delall)
        if len(sst_cube_tmp) == 0:
            sst_cube_tmp = iris.load(rcp85_file,
                                     variable2[var],
                                     callback=cmip5_callback_delall)
        sst_cube_full = sst_cube_tmp.concatenate_cube()
        # add year coordinate
        icc.add_year(sst_cube_full, 'time', name='year')
        # find first year of this data
        start_year = sst_cube_full.coord('year').points.min()

        # make constraint from the year after the hist cube to YEAR_LAST_RCP85 (2120)
        YEARS_between_histend_2120 = iris.Constraint(coord_values={
            'year':
            lambda l: int(end_year) + 1 <= l <= int(YEAR_LAST_RCP85)
        })
        sst_cube_to2120 = sst_cube_full.extract(YEARS_between_histend_2120)

        # remove the year coordinate (prevents cube merge)
        for c in [sst_cube_from1950, sst_cube_to2120]:
            try:
                c.remove_coord('year')
            except iris.exceptions.CoordinateNotFoundError:
                pass
        #print sst_cube_from1950
        #print sst_cube_to2120

        # make a cube list, and then merge these 2 cubes together
        # to obtain a cube 1950-2100
        full_cube_list = iris.cube.CubeList()
        full_cube_list.append(sst_cube_from1950)
        full_cube_list.append(sst_cube_to2120)
        iris.util.unify_time_units(full_cube_list)
        #print 'full_cube_list ',full_cube_list

        cube_files = []
        if not os.path.exists(savedir): os.makedirs(savedir)
        for ic, cubef in enumerate(full_cube_list):
            #print cubef.coord('time')
            fout = os.path.join(
                savedir,
                model + '_monthly_1950_' + str(ic) + '_' + var + '.nc')
            if 'HadGEM3' in model and ic == 1:
                print('skip the duplicate December')
                cubef = cubef[1:]
            iris.save(cubef,
                      fout,
                      unlimited_dimensions=['time'],
                      fill_value=1.0e20)
            cube_files.append(fout)
        use_ncrcat_to_merge(cube_files, output_file)
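
use_ncrcat_to_merge is assumed to shell out to the NCO record concatenator, much like the explicit ncrcat call in the previous example; a minimal sketch:

import subprocess

def use_ncrcat_to_merge(infiles, outfile):
    # Concatenate NetCDF files along the record (time) dimension with ncrcat.
    cmd = 'ncrcat ' + ' '.join(infiles) + ' ' + outfile
    subprocess.call(cmd, shell=True)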
Exemple #31
0
def make_trend_files_for_models(model, CMIP5_ref):
    # read the merged monthly SST for 1950-2100 (datafile)
    # extract data between 2012_2016 (sst_cube)
    #   aggregate this by year (sst_cube_year)
    #   and aggregate by month (monthly_mean)
    #   subtract this monthly_mean from each year (yearly_monthly_trend) - should be called delta
    #   save as allyrs_sst.nc
    # read the land sea mask
    # mask the yearly_monthly_trend file with this mask
    #   save as _allyrs_masked_sst.nc
    #

    global_temp_change = {}
    run = CMIP5_ref[model]
    print(run)
    runid = run + '/' + model
    print(runid)
    datafile = os.path.join(savedir, model + '_monthly_1950_2120_tos.nc')
    print(datafile)
    if not os.path.exists(savedir): os.makedirs(savedir)

    file_trend = os.path.join(savedir,
                              model + '_month_annual_1950_2120_delta.nc')
    if not os.path.exists(file_trend[:-3] + '_allyrs_masked_sst.nc'):

        # Read SST data and calculate annual means
        # the callback method only works well if there are full years of 12 months of data
        print('start analysis')
        cube_tmp = iris.load(datafile,
                             variable1['tos'],
                             callback=cmip5_callback)
        if len(cube_tmp) == 0:
            cube_tmp = iris.load(datafile,
                                 variable2['tos'],
                                 callback=cmip5_callback)
        for c in cube_tmp:
            print(c.coords('time'))
            print()
        print(cube_tmp)

        sst_cube_full = cube_tmp.concatenate_cube()
        icc.add_year(sst_cube_full, 'time', name='year')
        icc.add_month_number(sst_cube_full, 'time', name='month')

        # convert to SST if surface temperature
        #if sst_cube_full.data[0,...].max() >= 100.: sst_cube_full -= 273.15

        # now the surface temperature over sea-ice is below -1.8, so reset it to -1.8
        #sst_cube_full.data[sst_cube_full.data < -1.8] = -1.8
        print('sst_cube_full ', sst_cube_full)

        # extract all years 10 years either side of 2015 (where we want delta to be about zero)
        year_centre = 2015
        YEARS_between = iris.Constraint(
            coord_values={
                'year': lambda l: year_centre - 10 <= l <= year_centre + 10
            })

        sst_cube = sst_cube_full.extract(YEARS_between)

        # now decompose this cube into its components: spatial time-mean, spatial linear trend,
        # mean seasonal cycle centred around year_centre
        monthly_mean = sst_cube.aggregated_by('month', iris.analysis.MEAN)
        print(monthly_mean)

        # remove mean seasonal cycle from all months
        yearly_monthly_trend = sst_cube_full.copy()
        for m in range(0, 12):
            inds = np.where(sst_cube_full.coord('month').points == m + 1)[0]
            for i in inds:
                yearly_monthly_trend.data[i] -= monthly_mean.data[m]
        iris.save(yearly_monthly_trend,
                  file_trend[:-3] + '_allyrs_sst.nc',
                  fill_value=1.0e20)

        # add the model mask
        mask_path = form_BADC_path_mask1(runid)
        print(mask_path)
        if not os.path.exists(mask_path):
            print('mask path does not exist')
            mask_path = form_BADC_path_mask2(runid)
        mask = iris.load_cube(os.path.join(mask_path, '*.nc'),
                              'sea_area_fraction')
        if 'IPSL' in run:
            # need to remove wrap rows from NEMO
            remove_nemo_wrap(os.path.join(mask_path, '*.nc'),
                             os.path.join(savedir, 'ipsl_mask.nc'))
            mask = iris.load_cube(os.path.join(savedir, 'ipsl_mask.nc'))
            land = np.where(mask.data.mask == True)
            mask.data[...] = 1.0
            mask.data[land[0], land[1]] = 0.0
        print(mask)
        land = np.where(mask.data == 0.0)
        mask_land = yearly_monthly_trend[0].copy()
        add_mask(mask_land)
        add_mask(yearly_monthly_trend)
        print(mask_land)
        mask_land.data[...] = 1.0
        mask_land.data[land] = 0.0
        #mask_land.data[land] = np.ma.masked
        mask_land.data.mask[land] = True
        yearly_monthly_trend *= mask_land
        yearly_monthly_trend.data.mask[:, ...] = mask_land.data.mask[...]

        cube_filled = fill_in_masked_data(yearly_monthly_trend)
        iris.save(cube_filled,
                  file_trend[:-3] + '_allyrs_masked_sst.nc',
                  fill_value=1.0e20)
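
add_mask is assumed to ensure a cube carries a proper masked-array payload, so that .data.mask can be indexed and assigned as above; a minimal sketch:

import numpy as np

def add_mask(cube):
    # Promote the cube's data to a masked array with an explicit mask.
    if not np.ma.isMaskedArray(cube.data):
        cube.data = np.ma.masked_array(cube.data)
    if cube.data.mask is np.ma.nomask:
        cube.data.mask = np.zeros(cube.shape, dtype=bool)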
Exemple #32
0
def main():

    # Delete all the image files in the current directory to ensure that only those
    # created in the loop end up in the movie.
    print("\nDeleting all .png files in this directory...")
    SpawnCommand("rm -f *.png")
    
    print("Loading the data...")

    # Read all the temperature values and create a single cube containing this data
    cubeList = iris.cube.CubeList([])
    cubeList.extend(myload(1960, 2014, 'tas_1850-2014/bc179a.p5'))
    cubeList.extend(myload(2015, 2100, 'tas_2015-2100-ssp585/be653a.p5'))

    equalise_attributes(cubeList)
    temperatures = cubeList.merge_cube()
    baseYears = temperatures[:29, :, :]
    baseYearsMean = baseYears.collapsed('time', iris.analysis.MEAN)
    # Calculate the difference in annual mean temperature from the mean baseline (returns a cube)
    anomaly = temperatures - baseYearsMean
    print("Data loaded! Now processing...")

    # Add a new coordinate containing the year.
    icat.add_year(anomaly, 'time')
    years = anomaly.coord('year')
    
    # Set the limits for the loop over years.  
    minTime = 0
    maxTime = temperatures.shape[0]

    print("Making images from year", years[minTime].points[0], "to", years[maxTime-1].points[0], "...")

    for time in range(minTime, maxTime):

        # Contour plot the temperatures and add the coastline.
        
        iplt.contourf(anomaly[time],
                      levels=(-6, -3, 0, 4, 8, 12, 16, 20, 25),
                      colors=('darkblue', 'blue', 'cyan', 'lightyellow',
                              'yellow', 'orange', 'darkorange', 'red'))
        #-6.4358826, 27.94899
        plt.gca().coastlines()
        #plt.colorbar(boundaries = (-6, -3, 0, 4, 8, 12, 16, 20, 25), values = (-6, -3, 0, 4, 8, 12, 16, 20))
        # We need to fix the boundary of the figure (otherwise we get a black border at left & top).
        # Cartopy removes matplotlib's axes.patch (which normally defines the boundary) and
        # replaces it with outline_patch and background_patch.  It's the former which is causing
        # the black border.  Get the axis object and make its outline patch invisible.
        ax = plt.gca()
        ax.outline_patch.set_visible(False)

        # Extract the year value and display it (coordinates used in locating the text are
        # those of the data).
        year = years[time].points[0]
        plt.text(0, -60, year, horizontalalignment='center') 
        
       
        # Now save the plot in an image file.  The files are numbered sequentially, starting
        # from 000.png; this is so that the ffmpeg command can grok them.
        filename = "image-%04d.png" % time
        plt.savefig(filename, bbox_inches='tight', pad_inches=0)
        
        # Discard the figure (otherwise the text will be overwritten
        # by the next iteration).
        plt.close()
        print('boundaries for colour = -6, -3, 0, 4, 8, 12, 16, 20, 25')
    print("images made! Now converting to .mp4...")
    create_video()
    print("Opening video...")
    myTime.sleep(5)
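
SpawnCommand, create_video and myload are helpers assumed by this example but not shown (myTime above is presumably 'import time as myTime'); a minimal sketch under those assumptions - the ffmpeg options and the file naming used by myload are guesses:

import subprocess
import iris

def SpawnCommand(command):
    # Run a shell command and wait for it to finish.
    return subprocess.call(command, shell=True)

def create_video():
    # Stitch the numbered PNGs into an MP4 with ffmpeg.
    SpawnCommand("ffmpeg -r 5 -i image-%04d.png -y movie.mp4")

def myload(year_start, year_end, stem):
    # Load one cube per year from files sharing the given stem; the exact
    # naming convention is hypothetical.
    return [iris.load_cube(stem + '.' + str(year) + '.nc')
            for year in range(year_start, year_end + 1)]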
Exemple #33
0
def calc_future_sst(cube, observed_trend_file, running_mean_file, datafiles):
    '''
    Currently each period (1950-2015, 2016-2081, 2082-) has a join at the end
    when the start of the repeat period + CMIP trend is used.
    Need to figure out how to smooth this join, taking into account e.g. peak
    ENSO in Dec/Jan needs to be reduced slowly

    Also note that we hope to get extra 2016-2017 data before we finalise
    this, so we probably don't want anything very specific (e.g. picking ENSO
    years for the end and start) - the end of 2015 is a big El Nino.
    Could try 2015 - 1958 as a join instead.
    Also could use 2050- to add as the last period, since the rate of warming
    varies over time, or perhaps adjust the time in the CMIP trend to use for 2080-.

       Inputs:
       cube: cube of SST data from the start to the end of the historic period
       observed_trend_file: file name of the observed SST trend (currently unused below)
       running_mean_file: file name containing the running mean of seasonal variability
       datafiles: dict mapping each year to its daily SST data file
    '''
    print('start calc_future_sst')
    hadisst2_mask = iris.load_cube(
        '/gws/nopw/j04/hrcm/cache/malcolm/HadISST2.2.2.0/hadisst_0to360_alldays_sst_1963.nc',
        'sea_surface_temperature')[0]
    mask = np.where(hadisst2_mask.data.mask == True)

    cube.coord('time').bounds = None
    year_data_start = cube.coord('year').points[0]  # 1952
    year_data_end = cube.coord('year').points[-1]  # 2015

    future_sst = add_future_time_info(cube)
    #future2_sst = add_future_time_info(future_sst)
    #del cube
    #observed_trend = iris.load_cube(observed_trend_file)

    # load externally produced trend from CMIP5
    cmip5_trend = iris.load_cube(CMIP5_trend_file)
    try:
        icc.add_year(cmip5_trend, 'time', name='year')
    except ValueError:
        # the 'year' coordinate may already be present
        pass

    for future_c in [future_sst]:
        year1 = future_c.coord('year').points[0]  # 2016
        year2 = future_c.coord('year').points[-1]  # 2080

        for yr in range(year1, year2 + 1):
            adjust_jan = False  # edge of year period
            print('processing for future year ', yr)
            future_file_output = os.path.join(
                SAVEDIR, 'sst', 'future_sst_' + str(yr) + '_025_daily_v1.nc')
            if os.path.exists(future_file_output):
                continue

            monthly_mean_hist_file = ''
            years_from_start = float(yr - year_data_start)
            year_hist = yr - year1 + year_data_start
            year_constraint_hist = iris.Constraint(
                coord_values={'year': lambda l: l == year_hist})

            print('extract year hist from running mean ', yr, year_hist,
                  running_mean_file)
            running_mean_hist = iris.load_cube(running_mean_file)
            #running_mean_hist = monthly_mean_hist.extract(year_constraint_hist)
            future_yr_l = iris.cube.CubeList()

            # have months in the trend
            print('years into future ', yr, years_from_start, year_hist)

            year_constraint = iris.Constraint(
                coord_values={'year': lambda l: l == yr})
            cmip5_yr, cmip5_yrm1, cmip5_yrp1 = get_cmip5_this_year(
                yr, year_hist, cmip5_trend, year_constraint,
                year_constraint_hist)

            # calculate the variability here for hist year
            # calculate sst variability by removing monthly mean and observed trend
            # i.e. baseline variability (to add into future SST segment)
            print('calculate variability, year, year_hist ', yr, year_hist)
            sst_variability_mon, sst_variability = calc_variability(
                cube, running_mean_hist, year_hist, years_from_start)

            # find the most recent decadal mean for this year
            # if we are at the edge of a period, we need to adjust the value to get a smooth Dec-Jan transition
            year_constraint_end = iris.Constraint(
                coord_values={'year': lambda l: l == year_data_end - 4})
            year_end_decade = year_data_end - 4
            print('extract year from running mean file for recent ',
                  year_data_end, running_mean_file)
            running_mean_recent = iris.load_cube(running_mean_file)
            #running_mean_recent = running_mean_recent.extract(year_constraint_end)
            print('use running mean ', running_mean_file, year_data_end)

            # if we are the first year of invented data, need to adjust the Jan value of monthly_mean_recent
            # we also need to get the last day of variability from the previous year, to make the variability consistent across the boundary
            if yr == year1:
                print('yr, year1, calc local means, anoms ', yr, year1)
                monthly_mean_previous = adjust_monthly_mean_edge(
                    running_mean_recent,
                    datafiles[int(year_data_end)],
                    adjust_dec=True)
                monthly_mean_next = running_mean_recent
                cube_2015 = iris.load_cube(datafiles[int(year_data_end)])
                cube_2015.convert_units('degC')
                icc.add_year(cube_2015, 'time')
                print('cube 2015 ', cube_2015)
                sst_variability_mon_yrm1, sst_variability_yrm1 = calc_variability(
                    cube_2015,
                    monthly_mean_previous,
                    int(year_data_end),
                    years_from_start,
                    save=False,
                    weighted=False)
                adjust_jan = True
            else:
                monthly_mean_previous = running_mean_recent
                monthly_mean_next = running_mean_recent
                sst_variability_yrm1 = []
                sst_variability_mon_yrm1 = []

            for m in range(0, 12):
                future_yrmon = get_daily_interpolated_data(
                    m,
                    sst_variability,
                    sst_variability_mon,
                    future_c,
                    running_mean_recent,
                    cmip5_yr,
                    cmip5_yrm1,
                    cmip5_yrp1,
                    year_constraint,
                    year_constraint_hist,
                    monthly_mean_previous,
                    monthly_mean_next,
                    year_end_decade,
                    sst_variability_yrm1=sst_variability_yrm1,
                    sst_variability_mon_yrm1=sst_variability_mon_yrm1,
                    adjust_jan=adjust_jan)
                future_yrmon.data.mask[:, mask[0], mask[1]] = True

                future_yr_l.append(future_yrmon)

            future_yr = future_yr_l.concatenate_cube()
            add_metadata(future_yr)
            savefile = future_file_output
            iris.save(future_yr,
                      savefile,
                      unlimited_dimensions=['time'],
                      fill_value=1.0e20)

    for c in [cube, future_sst]:
        try:
            c.remove_coord('year')
            c.remove_coord('month')
        except iris.exceptions.CoordinateNotFoundError:
            pass
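
add_future_time_info is not shown; it is assumed to return a copy of the cube with its time coordinate shifted forward by the span of the historic record, so historic variability can be replayed as a future segment. A rough sketch, assuming time points in days on a 360-day calendar:

import iris.coord_categorisation as icc

def add_future_time_info(cube):
    # Shift the copy one full record length (plus a year) into the future.
    future = cube.copy()
    time = future.coord('time')
    offset = (time.points[-1] - time.points[0]) + 360.0
    time.points = time.points + offset
    time.bounds = None
    # Re-derive the 'year' coordinate from the shifted times.
    future.remove_coord('year')
    icc.add_year(future, 'time', name='year')
    return future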
Exemple #34
0
        # check the time units are what we expect
        unit = cube.coord('time').units
        if unit.name[:4] == 'hour':
            thirty_mins = 0.5
        elif unit.name[:6] == 'second':
            thirty_mins = 60 * 30
        elif unit.name[:3] == 'day':
            thirty_mins = 0.5 / 24
        else:
            raise ValueError(f"Don't know how to deal with: '{unit.name}'")

        print('Subtracting half an hour from time coord first')
        new_points = cube.coord('time').points - thirty_mins
        cube.coord('time').points = new_points

    iccat.add_year(cube, 'time')
    if freq == 'mon':
        iccat.add_month_number(cube, 'time')
        agg_by = ['month_number', 'year']
        remove_later = 'month_number'
    elif freq == 'day':
        iccat.add_day_of_year(cube, 'time')
        agg_by = ['day_of_year', 'year']
        remove_later = 'day_of_year'
    else:
        raise ValueError('Unrecognised frequency')

    # compute averages
    print(f'Computing {freq} average')
    means = cube.aggregated_by(agg_by, iris.analysis.MEAN)
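    # The unused 'remove_later' above suggests the helper coordinate is
    # dropped from the result after aggregation; presumably something like
    # (a guess - the rest of this function is not shown):
    # means.remove_coord(remove_later)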
Exemple #35
0
def main():
    if len(sys.argv) != 3:
        sys.exit("program needs two arguments")
    valid_scenarios = ('ssp119', 'ssp585', 'ssp534OS')
    if sys.argv[1] not in valid_scenarios or sys.argv[2] not in valid_scenarios:
        sys.exit("arguments must be ssp119, ssp585 or ssp534OS")

    # Delete all the image files in the current directory to ensure that only those
    # created in the loop end up in the movie.
    print("\nDeleting all .png files in this directory...")
    SpawnCommand("rm -f *.png")

    print("Loading the data...")

    # Read all the temperature values and create a single cube containing this data

    for i in range(1, 3):
        cubeList = iris.cube.CubeList([])
        if sys.argv[i] == 'ssp585':
            cubeList.extend(myload(1960, 2014, 'tas_1850-2014/bc179a.p5'))
            cubeList.extend(
                myload(2015, 2100, 'tas_2015-2100-ssp585/be653a.p5'))
        elif sys.argv[i] == 'ssp119':
            cubeList.extend(myload(1960, 2014, 'tas_1850-2014/bc179a.p5'))
            cubeList.extend(
                myload(2015, 2100, 'tas_2015-2100-ssp119/bh409a.p5'))
        elif sys.argv[i] == 'ssp534OS':
            cubeList.extend(myload(1960, 2014, 'tas_1850-2014/bc179a.p5'))
            cubeList.extend(myload(2015, 2039, 'tas_2015-2100/be653a.p5'))
            cubeList.extend(
                myload(2040, 2100, 'tas_2015-2100-ssp534OS/bh409a.p5'))

        equalise_attributes(cubeList)
        temperatures = cubeList.merge_cube()
        if i == 1:
            leftCube = temperatures.intersection(longitude=(-181, 0),
                                                 ignore_bounds=True)
        elif i == 2:
            rightCube = temperatures.intersection(longitude=(0, 180))

    cubeList = iris.cube.CubeList([leftCube, rightCube])
    temperatures = cubeList.concatenate_cube()

    baseYears = temperatures[:29, :, :]
    baseYearsMean = baseYears.collapsed('time', iris.analysis.MEAN)
    # Calculate the difference in annual mean temperature from the mean baseline (returns a cube)
    anomaly = temperatures - baseYearsMean
    print("Data loaded! Now processing...")

    # Add a new coordinate containing the year.
    icat.add_year(anomaly, 'time')
    years = anomaly.coord('year')

    # Set the limits for the loop over years.
    minTime = 0
    maxTime = temperatures.shape[0]

    print("Making images from year", years[minTime].points[0], "to",
          years[maxTime - 1].points[0], "...")

    for time in range(minTime, maxTime):

        # Set up for larger image.
        figSize = [12, 6]
        fig = plt.figure(figsize=figSize, dpi=200)
        rect = 0, 0, 200 * figSize[0], 200 * figSize[1]
        fig.add_axes(rect)
        geo_axes = plt.axes(projection=ccrs.PlateCarree())

        # We need to fix the boundary of the figure (otherwise we get a black border at left & top).
        # Cartopy removes matplotlib's axes.patch (which normally defines the boundary) and
        # replaces it with outline_patch and background_patch.  It's the former which is causing
        # the black border.  Get the axis object and make its outline patch invisible.
        geo_axes.outline_patch.set_visible(False)
        plt.margins(0, 0)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)

        # Contour plot the temperatures and add the coastline.

        iplt.contourf(anomaly[time],
                      levels=(-6, -3, 0, 4, 8, 12, 17, 22, 28),
                      colors=('darkblue', 'blue', 'cyan', 'lightyellow',
                              'yellow', 'orange', 'darkorange', 'red'))
        #-6.4358826, 27.94899
        plt.gca().coastlines()
        #plt.colorbar(boundaries = (-6, -3, 0, 4, 8, 12, 16, 20, 25), values = (-6, -3, 0, 4, 8, 12, 16, 20))

        # Extract the year value and display it (coordinates used in locating the text are
        # those of the data).
        year = years[time].points[0]

        # Display year on both sides of the display.
        plt.text(-110,
                 0,
                 year,
                 horizontalalignment='center',
                 verticalalignment='top',
                 size='large',
                 fontdict={'family': 'monospace'})
        plt.text(70,
                 0,
                 year,
                 horizontalalignment='center',
                 verticalalignment='top',
                 size='large',
                 fontdict={'family': 'monospace'})

        # Add labels to halves of display.
        plt.text(-110,
                 -60,
                 str(sys.argv[1]),
                 horizontalalignment='center',
                 size='small',
                 fontdict={'family': 'monospace'})
        plt.text(70,
                 -60,
                 str(sys.argv[2]),
                 horizontalalignment='center',
                 size='small',
                 fontdict={'family': 'monospace'})

        # Draw a line along the division between the two halves.
        plt.plot([0, 0], [-90, 90], color='gray', linewidth=3)
        plt.plot([-179.8, -179.8], [-90, 90], color='gray', linewidth=3)

        # Now save the plot in an image file.  The files are numbered sequentially, starting
        # from 000.png; this is so that the ffmpeg command can grok them.
        filename = "split/image-%04d.png" % time
        plt.savefig(filename, dpi=200)

        # Discard the figure (otherwise the text will be overwritten
        # by the next iteration).
        plt.close()
        print('boundaries for colour = -6, -3, 0, 4, 8, 12, 17, 22, 28')
    print("images made! Now converting to .mp4...")
    create_video()
    print("Opening video...")
    myTime.sleep(5)
Exemple #36
0
def read_pr_sm_topo(project_info, model):

    """
    ;; Arguments
    ;;    project_info: dictionary
    ;;          all info from namelist
    ;;
    ;; Return
    ;;    pr: iris cube [time, lat, lon]
    ;;          precipitation time series
    ;;    sm: iris cube [time, lat, lon]
    ;;          soil moisture time series
    ;;    topo: array [lat, lon]
    ;;          topography
    ;;    lon: array [lon]
    ;;          longitude
    ;;    lat: array [lat]
    ;;          latitude
    ;;    time: iris cube coords
    ;;          time info of cube
    ;;    time_bnds_1: float
    ;;          first time_bnd of time series
    ;;
    ;;
    ;; Description
    ;;    Read cmip5 input data for computing the diagnostic
    ;;
    """
    
    import projects
    E = ESMValProject(project_info)
    verbosity = E.get_verbosity()
    #-------------------------
    # Read model info
    #-------------------------

    currProject = getattr(vars()['projects'], model.split_entries()[0])()

    model_info = model.split_entries()

    mip = currProject.get_model_mip(model)
    exp = currProject.get_model_exp(model)
    start_year = currProject.get_model_start_year(model)
    end_year = currProject.get_model_end_year(model)

    years = range(int(start_year), int(end_year) + 1)
    
    '''
    #-------------------------
    # Read model info
    #-------------------------

    model_name = model_info[1]
    time_step = model_info[2]
    exp_fam = model_info[3]
    model_run = model_info[4]
    year_start = model_info[5]
    year_end = model_info[6]
    filedir = model_info[7]

    years = range(int(year_start), int(year_end)+1)
    '''

    
    #-------------------------
    # Input data directories
    #-------------------------
    currDiag = project_info['RUNTIME']['currDiag']

    pr_index = currDiag.get_variables().index('pr')
    pr_field = currDiag.get_field_types()[pr_index]

    sm_index = currDiag.get_variables().index('mrsos')
    sm_field = currDiag.get_field_types()[sm_index]

    indir = currProject.get_cf_outpath(project_info, model)
    in_file = currProject.get_cf_outfile(project_info, model, pr_field, 'pr', mip, exp)
    pr_files = [os.path.join(indir, in_file)]

    in_file = currProject.get_cf_outfile(project_info, model, sm_field, 'mrsos', mip, exp)
    sm_files = [os.path.join(indir, in_file)]
    
    '''
    #-------------------------
    # Input data directories
    #-------------------------
    pr_files = []
    sm_files = []

    for yy in years:

        Patt = filedir+'pr_'+time_step+'_'+model_name+'_'+exp_fam+'_'+\
               model_run+'_'+str(yy)+'*.nc'
        pr_files.append(glob.glob(Patt))

        Patt = filedir+'mrsos_'+time_step+'_'+model_name+'_'+exp_fam+'_'+\
                model_run+'_'+str(yy)+'*.nc'
        sm_files.append(glob.glob(Patt))

    pr_files = [l[0] for l in pr_files if len(l)>0]
    pr_files = sorted(pr_files)

    sm_files = [l[0] for l in sm_files if len(l)>0]
    sm_files = sorted(sm_files)
    '''

    #----------------------
    # Read in precipitation
    #----------------------

    pr_list = []

    for pr_file in pr_files:

        info('Reading precipitation from ' + pr_file, verbosity, required_verbosity=1)

        pr = iris.load(pr_file)[0]

        # Drop all attributes so the cubes concatenate cleanly (iterating and
        # popping the dict at the same time would fail under Python 3).
        pr.attributes.clear()

        pr_list.append(pr)

    pr = iris.cube.CubeList(pr_list)
    pr = pr.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    pr = coord_change([pr])[0]

    # Add metadata: day, month, year

    add_month(pr, 'time')
    add_day_of_month(pr, 'time', name='dom')
    add_year(pr, 'time')

    # Convert units to kg m-2 hr-1

    pr.convert_units('kg m-2 hr-1')

    #-----------------------
    # Read in soil moisture
    #-----------------------

    sm_list = []

    for sm_file in sm_files:

        info('Reading soil moisture from ' + sm_file, verbosity, required_verbosity=1)

        sm = iris.load(sm_file)[0]

        # Drop all attributes so the cubes concatenate cleanly.
        sm.attributes.clear()

        sm_list.append(sm)

    sm = iris.cube.CubeList(sm_list)
    sm = sm.concatenate()[0]

    # Convert longitude from 0_360 to -180_180

    sm = coord_change([sm])[0]

    # Add metadata: day, month, year

    add_month(sm, 'time')
    add_day_of_month(sm, 'time', name='dom')
    add_year(sm, 'time')

    #----------------------------------------------
    # Constrain pr and sm data to latitude 60S_60N
    #----------------------------------------------

    latconstraint = iris.Constraint(latitude=lambda cell: -59.0 <= cell <= 59.0)

    pr = pr.extract(latconstraint)
    sm = sm.extract(latconstraint)

    #---------------------------------------------------
    # Read in grid info: latitude, longitude, timestamp
    #---------------------------------------------------

    lon = sm.coords('longitude')[0].points
    lat = sm.coords('latitude')[0].points
    time = sm.coords('time')

    # --------------------------------------
    # Convert missing data (if any) to -999.
    # --------------------------------------

    try:
        sm.data.set_fill_value(-999)
        sm.data.data[sm.data.mask] = -999.

    except AttributeError:
        info('no missing data conversion', verbosity, required_verbosity=1)

    #----------------------
    # Read in topography
    #----------------------

    # Topography map specs:
    # latitude 60S_60N
    # longitude 180W_180E
    # model resolution

    #ftopo = currProject.get_cf_fx_file(project_info, model)

    #dt = '>f4'
    #topo = (np.fromfile(ftopo, dtype=dt)).reshape(len(lat), len(lon))

    topo = get_topo(project_info, lon, lat, model)

    #----------------------
    # Read in time bounds
    #----------------------

    indir, infiles = currProject.get_cf_infile(project_info, model, pr_field, 'pr', mip, exp)
    Patt = os.path.join(indir, infiles)
    pr_files = sorted(glob.glob(Patt))

    ncf = nc4.Dataset(pr_files[0])
    time_bnds_1 = ncf.variables['time_bnds'][0][0]
    time_bnds_1 = time_bnds_1 - int(time_bnds_1)
    ncf.close()

    #-----------------------------------------------
    # Return input data to compute sm_pr diagnostic
    #-----------------------------------------------
    return pr, sm, topo, lon, lat, time, time_bnds_1
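
coord_change is assumed to rotate cubes from a 0..360 to a -180..180 longitude convention; a minimal sketch using iris's intersection:

def coord_change(cubes):
    # Return the cubes re-expressed on a -180..180 longitude range.
    return [c.intersection(longitude=(-180, 180)) for c in cubes]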