Example #1
 def test_xaxis_labels_with_axes(self):
     import matplotlib.pyplot as plt
     fig = plt.figure()
     ax = fig.add_subplot(111)
     iplt.plot(self.cube.coord('str_coord'), self.cube, axes=ax)
     plt.close(fig)
     self.assertPointsTickLabels('xaxis', ax)
Example #2
def line(filename, var=None, xvar='time', event='Run',  # IMPORTANT INPUTS
         fsummary=None, fout=None, **kwargs):

    '''
    line(filename, var=None, xvar='time', event='Run', [fout=None, fsummary=None])

    Plot a simple line plot of 'var' against xvar
    A label with var+event is added to each line, so plt.legend() can be called after multiple calls to line

    xvar = 'time', 'latitude' or 'longitude'.
    event = list of strings giving runs/profiles/sondes to be plotted. 
    Will plot from the start of the first one to the end of the last.
    Defaults to 'Run' (no number, so plot all runs).
    fout = filename to save figure to. Won't save the figure by default.
    **kwargs are passed on to the matplotlib plotting function.
    '''

    # Read data
    cube = bae.read.core(filename, var=var, event=event, fsummary=fsummary)

    if xvar=='time': # use iris plot as time is the dimcoord (and gives better labelling)
        iplt.plot(cube, label=var+' : '+event, **kwargs)
    else:
        plt.plot(cube.coord(xvar).points, cube.data, label=var+' : '+event, **kwargs)

    plt.xlabel(xvar)
    plt.ylabel(cube.name()+' / '+cube.units.symbol)

    # suppress scientific notation on y axis
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)

    if fout:
        plt.savefig(fout)
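
The docstring notes that each call adds a labelled line, so plt.legend() can be drawn once after several calls. A minimal usage sketch under that assumption; the filename, variable names and event string below are hypothetical placeholders, not taken from the original code:

import matplotlib.pyplot as plt  # assumed to be imported by the module that defines line()

# Hypothetical usage: overlay two variables from one flight file, then draw
# a single legend covering both curves.
line('b123_flight_data.nc', var='air_temperature', event='Run 1')
line('b123_flight_data.nc', var='dew_point_temperature', event='Run 1')
plt.legend()
plt.show()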
Example #3
 def test_plot_longitude(self):
     import matplotlib.pyplot as plt
     fig = plt.figure()
     ax = fig.add_subplot(111)
     iplt.plot(self.lat_lon_cube.coord('longitude'),
               self.lat_lon_cube, axes=ax)
     plt.close(fig)
Example #4
def main():
    # Enable a future option, to ensure that the netcdf load works the same way
    # as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load the gridded temperature and salinity data.
    fname = iris.sample_data_path('atlantic_profiles.nc')
    cubes = iris.load(fname)
    theta, = cubes.extract('sea_water_potential_temperature')
    salinity, = cubes.extract('sea_water_practical_salinity')

    # Extract profiles of temperature and salinity from a particular point in
    # the southern portion of the domain, and limit the depth of the profile
    # to 1000m.
    lon_cons = iris.Constraint(longitude=330.5)
    lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
    depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
    theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
    salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)

    # Plot these profiles on the same set of axes. In each case we call plot
    # with two arguments, the cube followed by the depth coordinate. Putting
    # them in this order places the depth coordinate on the y-axis.
    # The first plot is in the default axes. We'll use the same color for the
    # curve and its axes/tick labels.
    plt.figure(figsize=(5, 6))
    temperature_color = (.3, .4, .5)
    ax1 = plt.gca()
    iplt.plot(theta_1000m, theta_1000m.coord('depth'), linewidth=2,
              color=temperature_color, alpha=.75)
    ax1.set_xlabel('Potential Temperature / K', color=temperature_color)
    ax1.set_ylabel('Depth / m')
    for ticklabel in ax1.get_xticklabels():
        ticklabel.set_color(temperature_color)
    # To plot salinity in the same axes we use twiny(). We'll use a different
    # color to identify salinity.
    salinity_color = (.6, .1, .15)
    ax2 = plt.gca().twiny()
    iplt.plot(salinity_1000m, salinity_1000m.coord('depth'), linewidth=2,
              color=salinity_color, alpha=.75)
    ax2.set_xlabel('Salinity / PSU', color=salinity_color)
    for ticklabel in ax2.get_xticklabels():
        ticklabel.set_color(salinity_color)
    plt.tight_layout()
    iplt.show()

    # Now plot a T-S diagram using scatter. We'll use all the profiles here,
    # and each point will be coloured according to its depth.
    plt.figure(figsize=(6, 6))
    depth_values = theta.coord('depth').points
    for s, t in iris.iterate.izip(salinity, theta, coords='depth'):
        iplt.scatter(s, t, c=depth_values, marker='+', cmap='RdYlBu_r')
    ax = plt.gca()
    ax.set_xlabel('Salinity / PSU')
    ax.set_ylabel('Potential Temperature / K')
    cb = plt.colorbar(orientation='horizontal')
    cb.set_label('Depth / m')
    plt.tight_layout()
    iplt.show()
Example #5
def plot_profile(c):
    coord = c.coord('sea_surface_height_above_reference_ellipsoid')
    lon = c.coord(axis='X').points.squeeze()
    lat = c.coord(axis='Y').points.squeeze()
    depth = coord.points.min()
    
    fig, ax = plt.subplots(figsize=(5, 6))
    kw = dict(linewidth=2,  color=(.3, .4, .5),
              alpha=0.75, marker='o', label='iris')
    iplt.plot(c, coord, **kw)
    ax.grid()
    ax.set_ylabel('{} ({})'.format(coord.standard_name, coord.units))
    ax.set_xlabel('{} ({})'.format(c.name(), c.units))
    ax.set_title('lon: %s\nlat: %s\nMax depth = %s' % (lon, lat, depth))
    return fig, ax
Example #6
def rolling_window(lat, lon, period, aggregator):
    src_data = iris.load_cube(os.path.join(DATA_DIR, DATA_FILE))
    location = [('latitude', lat), ('longitude', lon)]
    time_series = iris.analysis.interpolate.linear(src_data, location)

    # Get rid of the time bounds to suppress the warning from
    #`rolling_window()`.
    time_series.coord('time').bounds = None

    time_series_windowed = time_series.rolling_window('time', aggregator,
                                                      period)

    plt.figure(figsize=(3, 2))
    iplt.plot(time_series, color='0.7', label='no filter')
    iplt.plot(time_series_windowed, color='b',
              label='{}-year filter'.format(period))
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(50))
Example #7
    def test_simple(self):
        lon, lat = self.lon_lat_coords([359, 1], [0, 0])
        expected_path = Path([[-1, 0],
                              [1, 0]],
                             [Path.MOVETO, Path.LINETO])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines, plt.gca())
Example #8
    def test_long(self):
        lon, lat = self.lon_lat_coords([271, 89], [0, 0])
        expected_path = Path([[-89, 0],
                              [89, 0]],
                             [Path.MOVETO, Path.LINETO])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines, plt.gca())
Example #9
    def test_180(self):
        lon, lat = self.lon_lat_coords([179, -179], [0, 0])
        expected_path = Path([[179, 0],
                              [181, 0]],
                             [Path.MOVETO, Path.LINETO])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines, plt.gca())
Example #10
 def test_360_day_calendar(self):
     n = 360
     calendar = '360_day'
     time_unit = Unit('days since 1970-01-01 00:00', calendar=calendar)
     time_coord = AuxCoord(np.arange(n), 'time', units=time_unit)
     expected_ydata = np.array([CalendarDateTime(time_unit.num2date(point),
                                                 calendar)
                                for point in time_coord.points])
     line1, = iplt.plot(time_coord)
     result_ydata = line1.get_ydata()
     self.assertArrayEqual(expected_ydata, result_ydata)
Example #11
def plot(cube, *args, **kwargs):
    """
    Draws a labelled line plot based on the given Cube.
    
    See :func:`iris.plot.plot` for details of valid keyword arguments.
    
    """
    coords = kwargs.get('coords')
    result = iplt.plot(cube, *args, **kwargs)
    _label_with_points(cube, ndims=1, coords=coords)
    return result
Example #12
    def test_shifted_projection_180(self):
        lon, lat = self.lon_lat_coords([179, -179], [0, 0])
        expected_path = Path([[179, 0],
                              [181, 0]],
                             [Path.MOVETO, Path.LINETO])

        shifted_plate_carree = ccrs.PlateCarree(180)

        plt.axes(projection=shifted_plate_carree)
        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines, plt.gca())
Example #13
def plot(*args, **kwargs):
    """
    Draws a labelled line plot based on the given cube(s) or
    coordinate(s).

    See :func:`iris.plot.plot` for details of valid arguments and
    keyword arguments.

    """
    result = iplt.plot(*args, **kwargs)
    _label_1d_plot(*args)
    return result
Example #14
def main():
    fname = iris.sample_data_path('/nfs/a266/data/CMIP5_AFRICA/BC_0.5x0.5/IPSL-CM5A-LR/historical/tasmax_WFDEI_1979-2013_0.5x0.5_day_IPSL-CM5A-LR_africa_historical_r1i1p1_full.nc')
    soi = iris.load_cube(fname)

    # Window length for filters.
    window = 121

    # Construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly.
    wgts24 = low_pass_weights(window, 1. / 24.)
    wgts84 = low_pass_weights(window, 1. / 84.)
    soi24 = soi.rolling_window('time',
                               iris.analysis.SUM,
                               len(wgts24),
                               weights=wgts24)
    soi84 =  soi.rolling_window('time',
                                iris.analysis.SUM,
                                len(wgts84),
                                weights=wgts84)

    # Plot the SOI time series and both filtered versions.
    plt.figure(figsize=(9, 4))
    iplt.plot(soi, color='0.7', linewidth=1., linestyle='-', alpha=1., label='no filter')
    iplt.plot(soi24, color='b', linewidth=2., linestyle='-', alpha=.7, label='2-year filter')
    iplt.plot(soi84, color='r', linewidth=2., linestyle='-', alpha=.7, label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('West Africa')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    iplt.show()
Example #15
def plot_data(ax,
              sh_cube,
              nh_cube,
              experiment,
              previous_experiments,
              basin,
              crit=False,
              plot_type='comparison'):
    """Plot the various wind stress metrics."""

    plt.sca(ax)

    if plot_type == 'comparison':
        data = nh_cube / sh_cube
        iplt.plot(data, color=experiment_colors[experiment], label=experiment)
        ylabel = 'NH / SH'
    else:
        iplt.plot(nh_cube,
                  color=experiment_colors[experiment],
                  label=experiment + ', NH')
        iplt.plot(sh_cube,
                  color=experiment_colors[experiment],
                  label=experiment + ', SH',
                  linestyle='--')
        ylabel = str(nh_cube.units)

    if experiment not in previous_experiments:
        plt.legend(ncol=2)
    plt.ylabel(ylabel)
    plt.xlabel('Year')

    if crit:
        plt.title('Critical latitude (15N/S), %s' % (basin))
    else:
        plt.title('Tropics (0-30N/S), %s' % (basin))
Example #16
    def test_multi(self):
        lon, lat = self.lon_lat_coords([1, 359, 2, 358], [0, 0, 0, 0])
        expected_path = Path([[1, 0],
                              [-1, 0],
                              [2, 0],
                              [-2, 0]],
                             [Path.MOVETO, Path.LINETO, Path.LINETO,
                              Path.LINETO])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines,
                         plt.gca())
Example #17
def climatology_plot(cube_dict, gs, plotnum, area_scaled=False):
    """Plot the climatology """

    ax = plt.subplot(gs[plotnum])
    plt.sca(ax)

    for var in plot_order:
        if cube_dict[var]:
            climatology_cube = cube_dict[var].collapsed(
                'time', iris.analysis.MEAN)
            label, color, style = line_characteristics[var]
            iplt.plot(climatology_cube,
                      label=label,
                      color=color,
                      linestyle=style)
    ax.axhline(y=0, color='0.5', linestyle='--', linewidth=0.5)
    ax.legend()
    ax.set_title('climatology')
    if area_scaled:
        ax.set_ylabel(r'$mm \: day^{-1} \: m^2$')
    else:
        ax.set_ylabel(r'$mm \: day^{-1}$')
Example #18
def plotter(eof, pc, var, num):
    # Plot the leading EOF expressed as correlation in the Pacific domain.
    plt.figure()
    clevs = np.linspace(-1, 1, 11)
    ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=190))
    fill = iplt.contourf(eof, clevs, cmap=plt.cm.RdBu_r)
    ax.add_feature(cfeature.LAND, zorder=100, edgecolor='k')
    cb = plt.colorbar(fill, orientation='horizontal')
    cb.set_label('correlation coefficient', fontsize=12)
    variance = str("%.2f" % var)
    ax.set_title('EOF'+str(num)+' ('+variance+'%) expressed as correlation', fontsize=16)
    plt.savefig('EOF'+str(num)+'.eps')
    # Plot the leading PC time series.
    plt.figure()
    iplt.plot(pc, color='b', linewidth=2)
    ax = plt.gca()
    ax.axhline(0, color='k')
    ax.set_ylim(-20, 20)
    ax.set_xlabel('Year')
    ax.set_ylabel('Normalized Units')
    ax.set_title('PC'+str(num)+' Time Series', fontsize=16)
    plt.savefig('PC'+str(num)+'.eps')
Example #19
    def test_many_wraps(self):
        lon, lat = self.lon_lat_coords([350, 10, 180, 350, 10, 180, 10, 350],
                                       [0, 0, 0, 0, 0, 0, 0, 0])
        expected_path = Path(
            [[350, 0], [370, 0], [540, 0], [710, 0], [730, 0], [900, 0],
             [730, 0], [710, 0]], [
                 Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                 Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO
             ])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines, plt.gca())
Example #20
def main(inargs):
    """Run the program."""

    fig = plt.figure(figsize=[10, 10])
    for infile in inargs.infiles:
        cube = iris.load_cube(infile)
        experiment, model, region, mip = get_file_info(infile)

        color = experiment_colors[experiment]
        style = region_styles[region]
        iplt.plot(cube,
                  color=color,
                  linestyle=style,
                  label=experiment + ', ' + region)
        plt.title(model + ', ' + mip[0:2])

    plt.legend(loc=2)
    plt.ylabel('Aerosol optical depth at 550nm')
    plt.xlabel('Year')
    plt.savefig(inargs.outfile, bbox_inches='tight')
    gio.write_metadata(inargs.outfile,
                       file_info={infile: cube.attributes['history']})
Example #21
def plot(*args, **kwargs):
    """
    Draws a labelled line plot based on the given cube(s) or
    coordinate(s).

    See :func:`iris.plot.plot` for details of valid arguments and
    keyword arguments.

    """
    axes = kwargs.get('axes')
    result = iplt.plot(*args, **kwargs)
    _label_1d_plot(*args, axes=axes)
    return result
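
This wrapper follows the labelled-plot pattern of iris.quickplot: it delegates the drawing to iplt.plot and then fills in axis labels from the cube metadata. A short usage sketch, assuming the standard air_temp.pp sample file from Example #40 is available:

import matplotlib.pyplot as plt
import iris
import iris.quickplot as qplt

# Load the sample cube and plot a 1-D slice; the quickplot wrapper adds
# axis labels and a title from the cube metadata, unlike bare iplt.plot.
temperature = iris.load_cube(iris.sample_data_path('air_temp.pp'))
qplt.plot(temperature[5, :])
plt.show()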
Example #22
 def test_360_day_calendar(self):
     n = 360
     calendar = '360_day'
     time_unit = Unit('days since 1970-01-01 00:00', calendar=calendar)
     time_coord = AuxCoord(np.arange(n), 'time', units=time_unit)
     times = [time_unit.num2date(point) for point in time_coord.points]
     times = [netcdftime.datetime(atime.year, atime.month, atime.day,
                                  atime.hour, atime.minute, atime.second)
              for atime in times]
     expected_ydata = np.array([CalendarDateTime(time, calendar)
                                for time in times])
     line1, = iplt.plot(time_coord)
     result_ydata = line1.get_ydata()
     self.assertArrayEqual(expected_ydata, result_ydata)
Example #23
def main():
    # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
    fname = iris.sample_data_path("SOI_Darwin.nc")
    soi = iris.load_cube(fname)

    # Window length for filters.
    window = 121

    # Construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly.
    wgts24 = low_pass_weights(window, 1.0 / 24.0)
    wgts84 = low_pass_weights(window, 1.0 / 84.0)

    # Apply each filter using the rolling_window method used with the weights
    # keyword argument. A weighted sum is required because the magnitude of
    # the weights are just as important as their relative sizes.
    soi24 = soi.rolling_window("time",
                               iris.analysis.SUM,
                               len(wgts24),
                               weights=wgts24)
    soi84 = soi.rolling_window("time",
                               iris.analysis.SUM,
                               len(wgts84),
                               weights=wgts84)

    # Plot the SOI time series and both filtered versions.
    plt.figure(figsize=(9, 4))
    iplt.plot(
        soi,
        color="0.7",
        linewidth=1.0,
        linestyle="-",
        alpha=1.0,
        label="no filter",
    )
    iplt.plot(
        soi24,
        color="b",
        linewidth=2.0,
        linestyle="-",
        alpha=0.7,
        label="2-year filter",
    )
    iplt.plot(
        soi84,
        color="r",
        linewidth=2.0,
        linestyle="-",
        alpha=0.7,
        label="7-year filter",
    )
    plt.ylim([-4, 4])
    plt.title("Southern Oscillation Index (Darwin Only)")
    plt.xlabel("Time")
    plt.ylabel("SOI")
    plt.legend(fontsize=10)
    iplt.show()
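
Several of the SOI examples call a low_pass_weights(window, cutoff) helper that is not shown here. Below is a sketch of a Lanczos-windowed sinc filter in the style of the Iris gallery example these scripts appear to follow; treat the exact weighting formula as an assumption rather than the original definition:

import numpy as np

def low_pass_weights(window, cutoff):
    """Calculate weights for a low pass Lanczos filter.

    window: length of the filter window.
    cutoff: cutoff frequency in inverse (non-dimensional) time steps.
    """
    order = ((window - 1) // 2) + 1
    nwts = 2 * order + 1
    w = np.zeros([nwts])
    n = nwts // 2
    w[n] = 2 * cutoff
    k = np.arange(1.0, n)
    sigma = np.sin(np.pi * k / n) * n / (np.pi * k)
    firstfactor = np.sin(2.0 * np.pi * cutoff * k) / (np.pi * k)
    w[n - 1:0:-1] = firstfactor * sigma
    w[n + 1:-1] = firstfactor * sigma
    return w[1:-1]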
Example #24
def main(cubes):
    """
    """
    # Calculate N^2
    theta = convert.calc('air_potential_temperature', cubes)
    nsq = variable.N_sq(theta)

    # Find the tropopause
    ztrop, fold_t, fold_b = tropopause.height(cubes)

    # Mask ridges and troughs
    ridges, troughs = tropopause.ridges_troughs(cubes)

    # Create profile of N_sq vs tropopause
    for name, mask in [('troughs', ridges), ('ridges', troughs)]:
        cube = diagnostics.profile(nsq, ztrop, dz, mask=mask)[0]
        iplt.plot(cube, cube.coords()[0], label=name)

    plt.axhline(color='k')
    plt.xlabel(r'$N^2$ $s^{-1}$')
    plt.ylabel('Distance from the tropopause')
    plt.legend(loc='best')
    plt.title('Tropopause PV = %.1f' % pvtrop)
    plt.show()
Example #25
def plot_zon_mean(field, title, level=-1):
    orig_f = '../output/exp_year64bits/yyyymmddhh.nc'
    new_f = '../output/exp_year16bits/yyyymmddhh.nc'

    orig = i.load(orig_f, field)[0]
    new = i.load(new_f, field)[0]

    # First month (744 hours) is discarded as spin-up
    if level == -1:
        orig_zon = orig.extract(i.Constraint(time=lambda t: t > 744))\
            .collapsed(['longitude', 'time'], i.analysis.MEAN)
        new_zon  = new.extract(i.Constraint(time=lambda t: t > 744))\
            .collapsed(['longitude', 'time'], i.analysis.MEAN)
    else:
        orig_zon = orig.extract(i.Constraint(generic=level))\
            .extract(i.Constraint(time=lambda t: t > 744))\
            .collapsed(['longitude', 'time'], i.analysis.MEAN)
        new_zon  = new.extract(i.Constraint(generic=level))\
            .extract(i.Constraint(time=lambda t: t > 744))\
            .collapsed(['longitude', 'time'], i.analysis.MEAN)

    fig = plt.figure()

    orig_h, = iplt.plot(orig_zon)
    new_h, = iplt.plot(new_zon)

    leg = plt.legend([orig_h, new_h], ['64 bits', '16 bits'], frameon=True)
    rect = leg.get_frame()
    rect.set_linewidth(0.0)
    rect.set_alpha(0.7)

    plt.xlabel('Latitude')
    plt.xlim([-90, 90])

    plt.title(field)
    plt.savefig('../figs/' + title + '.pdf')
Example #26
def main():
    # Enable a future option, to ensure that the netcdf load works the same way
    # as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
    fname = iris.sample_data_path('SOI_Darwin.nc')
    soi = iris.load_cube(fname)

    # Window length for filters.
    window = 121

    # Construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly.
    wgts24 = low_pass_weights(window, 1. / 24.)
    wgts84 = low_pass_weights(window, 1. / 84.)

    # Apply each filter using the rolling_window method used with the weights
    # keyword argument. A weighted sum is required because the magnitude of
    # the weights are just as important as their relative sizes.
    soi24 = soi.rolling_window('time',
                               iris.analysis.SUM,
                               len(wgts24),
                               weights=wgts24)
    soi84 = soi.rolling_window('time',
                               iris.analysis.SUM,
                               len(wgts84),
                               weights=wgts84)

    # Plot the SOI time series and both filtered versions.
    plt.figure(figsize=(9, 4))
    iplt.plot(soi,
              color='0.7',
              linewidth=1.,
              linestyle='-',
              alpha=1.,
              label='no filter')
    iplt.plot(soi24,
              color='b',
              linewidth=2.,
              linestyle='-',
              alpha=.7,
              label='2-year filter')
    iplt.plot(soi84,
              color='r',
              linewidth=2.,
              linestyle='-',
              alpha=.7,
              label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('Southern Oscillation Index (Darwin Only)')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    iplt.show()
Example #27
def main():

    # load the monthly-valued Southern Oscillation Index (SOI) time-series
    fname = iris.sample_data_path('SOI_Darwin.nc')
    soi = iris.load_cube(fname)

    # window length for filters
    window = 121

    # construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly
    wgts24 = low_pass_weights(window, 1. / 24.)  #
    wgts84 = low_pass_weights(window, 1. / 84.)

    # apply the filters using the rolling_window method with the weights
    # keyword argument
    soi24 = soi.rolling_window('time',
                               iris.analysis.MEAN,
                               len(wgts24),
                               weights=wgts24)
    soi84 = soi.rolling_window('time',
                               iris.analysis.MEAN,
                               len(wgts84),
                               weights=wgts84)

    # plot the SOI time series and both filtered versions
    fig = plt.figure(figsize=(9, 4))
    iplt.plot(soi,
              coords=['time'],
              color='0.7',
              linewidth=1.,
              linestyle='-',
              alpha=1.,
              label='no filter')
    iplt.plot(soi24,
              coords=['time'],
              color='b',
              linewidth=2.,
              linestyle='-',
              alpha=.7,
              label='2-year filter')
    iplt.plot(soi84,
              coords=['time'],
              color='r',
              linewidth=2.,
              linestyle='-',
              alpha=.7,
              label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('Southern Oscillation Index (Darwin Only)')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    plt.show()
Example #28
    def test_many_wraps(self):
        lon, lat = self.lon_lat_coords([350, 10, 180, 350, 10, 180, 10, 350],
                                       [0, 0, 0, 0, 0, 0, 0, 0])
        expected_path = Path([[350, 0],
                              [370, 0],
                              [540, 0],
                              [710, 0],
                              [730, 0],
                              [900, 0],
                              [730, 0],
                              [710, 0]],
                             [Path.MOVETO, Path.LINETO, Path.LINETO,
                              Path.LINETO, Path.LINETO, Path.LINETO,
                              Path.LINETO, Path.LINETO])

        lines = iplt.plot(lon, lat)

        self.check_paths(expected_path, self.plate_carree, lines,
                         plt.gca())
Example #29
    def test_360_day_calendar(self):
        n = 360
        calendar = "360_day"
        time_unit = Unit("days since 1970-01-01 00:00", calendar=calendar)
        time_coord = AuxCoord(np.arange(n), "time", units=time_unit)
        times = [time_unit.num2date(point) for point in time_coord.points]
        times = [
            cftime.datetime(
                atime.year,
                atime.month,
                atime.day,
                atime.hour,
                atime.minute,
                atime.second,
                calendar=calendar,
            ) for atime in times
        ]

        expected_ydata = times
        (line1, ) = iplt.plot(time_coord)
        result_ydata = line1.get_ydata()
        self.assertArrayEqual(expected_ydata, result_ydata)
Example #30
    def _test_rotated(self, grid_north_pole_latitude=90,
                      grid_north_pole_longitude=0,
                      north_pole_grid_longitude=0):
        cs = ics.RotatedGeogCS(grid_north_pole_latitude,
                               grid_north_pole_longitude,
                               north_pole_grid_longitude)
        glon = coords.AuxCoord([359, 1], 'grid_longitude', units='degrees',
                               coord_system=cs)
        glat = coords.AuxCoord([0, 0], 'grid_latitude', units='degrees',
                               coord_system=cs)
        expected_path = Path([[-1, 0],
                              [1, 0]],
                             [Path.MOVETO, Path.LINETO])

        plt.figure()
        lines = iplt.plot(glon, glat)
        # Matplotlib won't immediately set up the correct transform to allow us
        # to compare paths. Calling set_global(), which calls set_xlim() and
        # set_ylim(), will trigger Matplotlib to set up the transform.
        ax = plt.gca()
        ax.set_global()

        crs = cs.as_cartopy_crs()
        self.check_paths(expected_path, crs, lines, ax)
Example #31
def main():

    # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
    fname = iris.sample_data_path('SOI_Darwin.nc')
    soi = iris.load_cube(fname)

    # Window length for filters.
    window = 121

    # Construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly.
    wgts24 = low_pass_weights(window, 1. / 24.)  # 
    wgts84 = low_pass_weights(window, 1. / 84.)

    # Apply each filter using the rolling_window method used with the weights
    # keyword argument. Note that the application of this type of filter really
    # requires a weighted sum. Currently iris lacks a weighted sum operator, so
    # we use the weighted mean and multiply the result by the sum of the filter
    # weights as a work-around. In this example the sum of the weights is
    # approximately 1, but in other filtering examples this may not be true.
    soi24 = soi.rolling_window('time',
                               iris.analysis.MEAN,
                               len(wgts24),
                               weights=wgts24) * wgts24.sum()
    soi84 =  soi.rolling_window('time',
                                iris.analysis.MEAN,
                                len(wgts84),
                                weights=wgts84) * wgts84.sum()

    # Plot the SOI time series and both filtered versions.
    plt.figure(figsize=(9, 4))
    iplt.plot(soi, color='0.7', linewidth=1., linestyle='-',
              alpha=1., label='no filter')
    iplt.plot(soi24, color='b', linewidth=2., linestyle='-',
              alpha=.7, label='2-year filter')
    iplt.plot(soi84, color='r', linewidth=2., linestyle='-',
              alpha=.7, label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('Southern Oscillation Index (Darwin Only)')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    plt.show()
Example #32
def main():
    # Enable a future option, to ensure that the netcdf load works the same way
    # as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
    fname = iris.sample_data_path('SOI_Darwin.nc')
    soi = iris.load_cube(fname)

    # Window length for filters.
    window = 121

    # Construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly.
    wgts24 = low_pass_weights(window, 1. / 24.)
    wgts84 = low_pass_weights(window, 1. / 84.)

    # Apply each filter using the rolling_window method used with the weights
    # keyword argument. A weighted sum is required because the magnitude of
    # the weights are just as important as their relative sizes.
    soi24 = soi.rolling_window('time',
                               iris.analysis.SUM,
                               len(wgts24),
                               weights=wgts24)
    soi84 = soi.rolling_window('time',
                               iris.analysis.SUM,
                               len(wgts84),
                               weights=wgts84)

    # Plot the SOI time series and both filtered versions.
    plt.figure(figsize=(9, 4))
    iplt.plot(soi, color='0.7', linewidth=1., linestyle='-',
              alpha=1., label='no filter')
    iplt.plot(soi24, color='b', linewidth=2., linestyle='-',
              alpha=.7, label='2-year filter')
    iplt.plot(soi84, color='r', linewidth=2., linestyle='-',
              alpha=.7, label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('Southern Oscillation Index (Darwin Only)')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    iplt.show()
Example #33
def main():

    # load the monthly-valued Southern Oscillation Index (SOI) time-series
    fname = iris.sample_data_path('SOI_Darwin.nc')
    soi = iris.load_cube(fname)

    # window length for filters
    window = 121

    # construct 2-year (24-month) and 7-year (84-month) low pass filters
    # for the SOI data which is monthly
    wgts24 = low_pass_weights(window, 1. / 24.)  # 
    wgts84 = low_pass_weights(window, 1. / 84.)

    # apply the filters using the rolling_window method with the weights
    # keyword argument
    soi24 = soi.rolling_window('time',
                               iris.analysis.MEAN,
                               len(wgts24),
                               weights=wgts24)
    soi84 =  soi.rolling_window('time',
                                iris.analysis.MEAN,
                                len(wgts84),
                                weights=wgts84)

    # plot the SOI time series and both filtered versions
    fig = plt.figure(figsize=(9, 4))
    iplt.plot(soi, coords=['time'], color='0.7', linewidth=1., linestyle='-',
              alpha=1., label='no filter')
    iplt.plot(soi24, coords=['time'], color='b', linewidth=2., linestyle='-',
              alpha=.7, label='2-year filter')
    iplt.plot(soi84, coords=['time'], color='r', linewidth=2., linestyle='-',
              alpha=.7, label='7-year filter')
    plt.ylim([-4, 4])
    plt.title('Southern Oscillation Index (Darwin Only)')
    plt.xlabel('Time')
    plt.ylabel('SOI')
    plt.legend(fontsize=10)
    plt.show()
Example #34
def multiline(cubelist,
              xcoord=None,
              ycoord=None,
              legend=True,
              with_units=False,
              **kwargs):
    """Plot multiple cubes on the same axis

    Args:
        cubelist (iris.cube.CubeList): A list of 1-dimensional cubes to plot

        xcoord:

        ycoord:

        legend:

        with_units:
    """
    for cube in cubelist:
        # Add the cube label to the arguments for each plot
        kwargs['label'] = qplt._title(cube, with_units)

        # Determine which way to plot coord vs cube
        if xcoord is None:
            if ycoord is None:
                iplt.plot(cube, **kwargs)
            else:
                iplt.plot(cube, ycoord, **kwargs)
        else:
            iplt.plot(cube, xcoord, **kwargs)

    # Create a second figure containing the legend
    ax = plt.gca()
    if legend is True:
        legend = plt.figure()
        plt.figlegend(*ax.get_legend_handles_labels(), loc='upper left')

    return ax, legend
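
A short usage sketch for multiline with two small synthetic cubes (purely illustrative data, not from the original):

import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.coords
import iris.cube

# Build two tiny 1-D cubes on a shared coordinate, purely for illustration.
x = iris.coords.DimCoord(np.arange(10.0), long_name='x', units='1')
cube_a = iris.cube.Cube(np.sin(np.arange(10.0)), long_name='series_a',
                        units='K', dim_coords_and_dims=[(x, 0)])
cube_b = iris.cube.Cube(np.cos(np.arange(10.0)), long_name='series_b',
                        units='K', dim_coords_and_dims=[(x.copy(), 0)])

# Overlay both cubes on one axis; the legend is returned as a separate figure.
ax, legend_fig = multiline(iris.cube.CubeList([cube_a, cube_b]))
plt.show()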
Example #35
def plot_inferred_data(primary_data, secondary_data, y_axis_name, y_var_name,
                       y_standard_name, y_long_name, quantity):
    """Plot the inferred data.

    inferred = primary + secondary (for quantity = OHC)
    OR
    inferred = primary - secondary (for HTC and SFL)

    """

    primary_data.coord(y_axis_name).var_name = y_var_name
    primary_data.coord(y_axis_name).standard_name = y_standard_name
    primary_data.coord(y_axis_name).long_name = y_long_name
    primary_data.units = ''
    regridded_secondary_data = grids.regrid_1D(secondary_data,
                                               primary_data,
                                               y_axis_name,
                                               clear_units=True)

    if 'OHC' in quantity:
        label = 'inferred ' + quantity + ' (= HTC + SFL)'
        iplt.plot(primary_data + regridded_secondary_data,
                  label=label,
                  color='black',
                  linestyle='--')
    elif 'HTC' in quantity:
        label = 'inferred ' + quantity + ' (= OHC - SFL)'
        iplt.plot(primary_data - regridded_secondary_data,
                  label=label,
                  color='green',
                  linestyle='--')
    elif 'SFL' in quantity:
        label = 'inferred ' + quantity + ' (= OHC - HTC)'
        iplt.plot(primary_data - regridded_secondary_data,
                  label=label,
                  color='orange',
                  linestyle='--')
Example #36
            colours[1].append(grn)
    
    plt.figure(figsize=(10, 8)) # JB impose size to make time labels readable
    
    for i in range(levs):
        # three theta levels per case, also subject to change
        theta = tls[case] + 5*i
        # theta levels are spaced by 5
        cubes = iris.load(save_dir + folder + '/circulation/circulations_' + str(theta) + 'K_' + strapp + '.nc')
    
        n = 1
        m = 0
        plt.subplot(2, 2, n) # JB for this specific case with 4 plots
        for cube in cubes:
            if 'circulation' in cube.name(): # the '_' before can be added to remove the line integral
                iplt.plot(cube, label=cube.name(), color = colours[m][i], linewidth = 2.5)
                m += 1

        pg = plt.gca()
        fmt = DateFormatter('\n%m/%d')                          # <--- change this bit to get different formatting of dates/times
        fmt2 = DateFormatter('%H')
        majorLocator = DayLocator(interval=1)
        minorLocator = HourLocator(range(0, 24, 6))
        pg.xaxis.set_major_formatter(fmt)
        pg.xaxis.set_minor_formatter(fmt2)
        pg.xaxis.set_minor_locator(minorLocator)
        pg.xaxis.set_major_locator(majorLocator)

        plt.legend(ncol=2, loc=3, bbox_to_anchor=(0., 1.07, .6, .102))
        # plt.savefig(plotdir + 'circulation_' + theta + 'K.png')
        #plt.title(folder + '_' + strapp)
Example #37
def humidity_gradients():
    # Initialise the plot
    fig = plt.figure(figsize=(18, 15))

    # Columns are Ridges and troughs
    for n, variable in enumerate(variables):
        row = n // ncols
        col = n - row * ncols
        print(row, col)
        ax = plt.subplot2grid((nrows, ncols), (row, col))
        for subdomain, linestyle in [('ridges', '-'), ('troughs', '--')]:
            cubes = second_analysis.get_data(coord, subdomain)

            cube = convert.calc(variable, cubes)
            cube.coord(coord).convert_units('km')
            mean, std_err = second_analysis.extract_statistics(
                cube, 'forecast_index')

            if variable == 'vertical_vorticity':
                mean.data = mean.data + 1e-4
            else:
                mean.data = mean.data * 1e3
                std_err.data = std_err.data * 1e3

            iplt.plot(
                mean[0],
                mean.coord(coord),  # xerr=std_err[0],
                linestyle=linestyle,
                label=subdomain.capitalize(),
                color='k',
                marker='x',
                ms=5)

            ax.set_ylabel('')
            ax.set_ylim(-2, 2)
            if col > 0:
                ax.get_yaxis().set_ticklabels([])

        if variable == 'specific_humidity':
            ax.set_title('Specific Humidity')
            ax.set_xlabel(r'Mass Fraction (g kg$^{-1}$)')
        elif variable == 'vertical_vorticity':
            ax.set_title('Vertical Vorticity')
            ax.set_xlabel(r'Vorticity (s$^{-1}$)')
        elif variable == 'mass_fraction_of_cloud_liquid_water_in_air':
            ax.set_title('Cloud Liquid')
            ax.set_xlabel(r'Mass Fraction (g kg$^{-1}$)')
        elif variable == 'mass_fraction_of_cloud_ice_in_air':
            ax.set_title('Cloud Ice')
            ax.set_xlabel(r'Mass Fraction (g kg$^{-1}$)')

        plt.axhline(color='k')
        multilabel(ax, n)

    legend(ax=fig.axes[0], loc='best')
    fig.text(0.075,
             0.5,
             'Vertical distance from tropopause (km)',
             va='center',
             rotation='vertical')

    plt.savefig(plotdir + 'analysis_profiles.pdf')
    plt.show()
Example #38
 def test_xaxis_labels(self):
     iplt.plot(self.cube.coord('str_coord'), self.cube)
     self.assertPointsTickLabels('xaxis')
Example #39
def main():
    # Load the gridded temperature and salinity data.
    fname = iris.sample_data_path("atlantic_profiles.nc")
    cubes = iris.load(fname)
    (theta, ) = cubes.extract("sea_water_potential_temperature")
    (salinity, ) = cubes.extract("sea_water_practical_salinity")

    # Extract profiles of temperature and salinity from a particular point in
    # the southern portion of the domain, and limit the depth of the profile
    # to 1000m.
    lon_cons = iris.Constraint(longitude=330.5)
    lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
    depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
    theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
    salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)

    # Plot these profiles on the same set of axes. Depth is automatically
    # recognised as a vertical coordinate and placed on the y-axis.
    # The first plot is in the default axes. We'll use the same color for the
    # curve and its axes/tick labels.
    plt.figure(figsize=(5, 6))
    temperature_color = (0.3, 0.4, 0.5)
    ax1 = plt.gca()
    iplt.plot(
        theta_1000m,
        linewidth=2,
        color=temperature_color,
        alpha=0.75,
    )
    ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
    ax1.set_ylabel("Depth / m")
    for ticklabel in ax1.get_xticklabels():
        ticklabel.set_color(temperature_color)

    # To plot salinity in the same axes we use twiny(). We'll use a different
    # color to identify salinity.
    salinity_color = (0.6, 0.1, 0.15)
    ax2 = plt.gca().twiny()
    iplt.plot(
        salinity_1000m,
        linewidth=2,
        color=salinity_color,
        alpha=0.75,
    )
    ax2.set_xlabel("Salinity / PSU", color=salinity_color)
    for ticklabel in ax2.get_xticklabels():
        ticklabel.set_color(salinity_color)
    plt.tight_layout()
    iplt.show()

    # Now plot a T-S diagram using scatter. We'll use all the profiles here,
    # and each point will be coloured according to its depth.
    plt.figure(figsize=(6, 6))
    depth_values = theta.coord("depth").points
    for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
        iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
    ax = plt.gca()
    ax.set_xlabel("Salinity / PSU")
    ax.set_ylabel("Potential Temperature / K")
    cb = plt.colorbar(orientation="horizontal")
    cb.set_label("Depth / m")
    plt.tight_layout()
    iplt.show()
Example #40
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

import matplotlib.pyplot as plt

import iris
import iris.plot as iplt


fname = iris.sample_data_path('air_temp.pp')
temperature = iris.load_cube(fname)

# Take a 1d slice using array style indexing.
temperature_1d = temperature[5, :]

iplt.plot(temperature_1d)
plt.show()
Example #41
trendcube = copy.deepcopy(data_time_period)
trendcube.rename('Trend')
trendcube.data = line(XDATA, Y_INTERCEPTION, slope)
'''
trendcube_upper = copy.deepcopy(data_time_period)
trendcube_upper.rename('Upper Trend')
trendcube_upper.data=line(XDATA, Y_INTERCEPTION, slope_upper_uncrty)

trendcube_lower = copy.deepcopy(data_time_period)
trendcube_lower.rename('Lower Trend')
trendcube_lower.data=line(XDATA, Y_INTERCEPTION, slope_lower_uncrty)       
'''

plt.close()
fig = plt.figure(figsize=(10, 8))
iplt.plot(data_time_period)
plt.grid()
plt.title(indexname + ' HadEX', size=22)
#plt.ylabel(UNITS_DICT[INAME], size=20)
plt.xlabel('years', size=20)

iplt.plot(trendcube,
          label='trend: ' + str(round(slope * 365 * 10., 2)) + ' per decade')
#iplt.plot(trendcube_lower, label='lower trend: '+str(round(slope*365*10.,2))+' '+UNITS_DICT[INAME]+' per decade')
#iplt.plot(trendcube_upper, label='upper trend: '+str(round(slope*365*10.,2))+' '+UNITS_DICT[INAME]+' per decade')

plt.legend(fontsize=16)
plt.tight_layout()
plt.tick_params(axis='both', which='major', labelsize=16)

#plt.xlim( 728294. - 5*365, 735599.)
Example #42
def plot_timeseries(workdir, timeseriesfile='Time_series_grid1.txt'):
    '''
    Plotting routine for VAAC resuspended ash runs
    Plot air concentration time series at Icelandic monitoring stations    
    '''

    # Name of input file including full path
    filename = workdir + '/' + timeseriesfile

    # Make font size of axis ticks smaller
    plt.rc('xtick', labelsize=6)
    plt.rc('ytick', labelsize=8)

    # set size of whole figure
    fig = plt.figure(figsize=(8, 10))

    # Add Met Office logo
    fig.figimage(im, 0, 920)
    figtext(0.35, 0.02, r'Met Office Crown Copyright')

    # Load data into iris
    name = iris.load(filename)

    # List of titles for the plots
    Titles = [
        'Hvolsvollur', 'Heimaland', 'Vik', 'Hvaleyrarholt', 'Reykjavik City',
        'Reykjavik', 'Keflavik'
    ]

    #	  #	#     #     #	  #	#     #     #	  #
    # Read through fields and take the ones we want to plot
    #	  #	#     #     #	  #	#     #     #	  #

    length = len(name)
    List = np.arange(length)
    timeseries = [None] * length

    # 0 = Hvolsvollur
    # 1 = Heimaland
    # 2 = Vik
    # 3 = Hvaleyrarholt
    # 4 = ReykjavikCity
    # 5 = Reykjavik -- separate figure
    # 6 = Keflavik  -- separate figure

    for line in List:
        field = name[line].attributes['Location']
        if field == 'Hvolsvollur':
            timeseries[0] = name[line]
        elif field == 'Heimaland':
            timeseries[1] = name[line]
        elif field == 'Vik':
            timeseries[2] = name[line]
        elif field == 'Hvaleyrarholt':
            timeseries[3] = name[line]
        elif field == 'ReykjavikCity':
            timeseries[4] = name[line]
        elif field == 'Reykjavik':
            timeseries[5] = name[line]
        elif field == 'Keflavik':
            timeseries[6] = name[line]

    List = np.arange(5)  # first 5 stations, i.e. not airports

    for line in List:

        plt.subplot(3, 2, len(List) - line)
        ax1 = plt.gca()

        iplt.plot(timeseries[line])  # plot data

        # set logarithmic yscale.
        ax1.set_yscale('symlog', linthreshy=1e-20)

        #set fixed yscale limits
        ax1.set_ylim(1e-08, 1e-04)

        # Add 5% of white space at the top and bottom of the graph
        # (so plotted lines aren't obscured by the axis)
        ax1.margins(0, 0.05)

        # format time axis
        adl = md.AutoDateLocator()
        formatter = md.AutoDateFormatter(adl)
        formatter.scaled[1. / (24. * 60)] = '%H:%M'
        formatter.scaled[1. / 24.] = '%HZ\n%d/%m'
        formatter.scaled[1.] = '%d/%m/%y'
        ax1.xaxis.set_major_locator(adl)
        ax1.xaxis.set_major_formatter(formatter)

        # set minor ticks every 3 hours
        hours = md.HourLocator(interval=3)  # every 3 hour
        ax1.xaxis.set_minor_locator(hours)

        plt.title(Titles[line], fontsize=10)

        # Ylabel for the subplot on the left hand side
        if (line == 0) or (line == 2) or (line == 4):
            plt.ylabel('Relative air concentrations', fontsize=8)

    # Main title
    plt.suptitle(
        'Time series of relative air concentrations at monitoring stations')

    # Add space above and below plots
    plt.subplots_adjust(hspace=0.7)

    # Or Save
    plt.savefig(workdir + '/' + 'RESUSPENDED_ASH_time_series.png')
    print('Saved figure:', workdir + '/' + 'RESUSPENDED_ASH_time_series.png')

    # close figure
    plt.close()

    #---------------------------------------------------------
    # Second figure with time series at airports
    # -------------------------------------------------------------

    # Make font size of axis ticks smaller
    plt.rc('xtick', labelsize=8)
    plt.rc('ytick', labelsize=10)

    # set size of whole figure
    fig = plt.figure(figsize=(8, 10))

    # Add Met Office logo
    fig.figimage(im, 0, 920)
    figtext(0.35, 0.02, r'Met Office Crown Copyright')

    #List = np.arange(2) # last 2 stations (airports)
    List = [5, 6]  # last 2 stations (airports)

    sp = 0
    for line in List:
        sp += 1

        plt.subplot(2, 1, sp)
        ax1 = plt.gca()

        iplt.plot(timeseries[line])  # plot data

        # set logarithmic yscale.
        ax1.set_yscale('symlog', linthreshy=1e-20)

        #set fixed yscale limits
        ax1.set_ylim(1e-08, 1e-04)

        # Add 5% of white space at the top and bottom of the graph
        # (so plotted lines aren't obscured by the axis)
        ax1.margins(0, 0.05)

        # format time axis
        adl = md.AutoDateLocator()
        formatter = md.AutoDateFormatter(adl)
        formatter.scaled[1. / (24. * 60)] = '%H:%M'
        formatter.scaled[1. / 24.] = '%HZ\n%d/%m'
        formatter.scaled[1.] = '%d/%m/%y'
        ax1.xaxis.set_major_locator(adl)
        ax1.xaxis.set_major_formatter(formatter)

        # set minor ticks every 3 hours
        hours = md.HourLocator(interval=3)  # every 3 hour
        ax1.xaxis.set_minor_locator(hours)

        plt.title(Titles[line], fontsize=12)
        plt.ylabel('Relative air concentrations', fontsize=10)

    # Main title
    plt.suptitle('Time series of relative air concentrations at Airports')

    # Add space above and below plots
    plt.subplots_adjust(hspace=0.7)

    # Or Save
    plt.savefig(workdir + '/' + 'RESUSPENDED_ASH_time_series_airport.png')
    print('Saved figure:', workdir + '/' + 'RESUSPENDED_ASH_time_series_airport.png')

    # close figure
    plt.close()
Example #43
stdv = pc1.collapsed('time', iris.analysis.STD_DEV)
npc1 = iris.analysis.maths.divide(pc1, stdv)

# plot
clev = np.linspace(-50, 50, 11)
fg = plt.figure(figsize=(8, 8))
ax = plt.axes(projection=ccrs.Orthographic(central_longitude=180, central_latitude=90))
cs = iplt.contourf(eof1[0,0], levels=clev, cmap=plt.cm.RdBu_r)
ax.coastlines()
ax.set_global()
ax.set_title('EOF1 ({0:0.2f}%)'.format(100*vfrc.data[0]), fontsize=16)
cb = plt.colorbar(cs, orientation='horizontal')
cb.set_label('covariance')
plt.savefig(fdir+'eof1.pdf', bbox_inches='tight')
fg = plt.figure(figsize=(8,4))
ax = fg.add_subplot(111)
iplt.plot(npc1, color='#e41a1c', linewidth=1)
ax.axhline(0, color='k')
ax.set_title('PC1 (normalized)')
plt.savefig(fdir+'pc1.pdf', bbox_inches='tight')

#====================================================================
# 8.1(c)

project = iris.util.squeeze(slvr.projectField(zanom, neofs=1))
AOIndex = iris.analysis.maths.divide(project, stdv)
AOIndex.long_name = 'Arctic Oscillation index'
AOIndex *= -1

# save AO index
iris.save(AOIndex, ddir+'jra55nl_AOindex_monthly_1958-2015.nc')
Example #44
def plot_time_series_with_trend(cube, infos, units_dict):
    ''' Plot a time series with trends for the whole time period, the first period
        from 1991-2004 and the second period from 2005-2015 using the MedianPairwiseSlopes function
        cube should be a cube of the annual values along the time axis
        infos is a list of different infos which can be used for plotting the title or the filename
        units_dict: the unit of the index.'''

    #infos = [TITLE_TIME, INAME, REGION, TIMERANGE, OUTPATH, instrument]

    #extract some information for the title or the filename.
    title_time = infos[0]
    iname = infos[1]
    region = infos[2]
    timerange = infos[3]
    outpath = infos[4]
    instrument = infos[5]
    time_factor = infos[6]

    #The 2006 value is biased due to Dec 14th 2006 where extremely high LST values
    #were measured. This excludes the year 2006 from the plot. But actually it shouldn't be extracted.
    exclude_2006_constraint = iris.Constraint(
        time=lambda c: c.point.year != 2006)
    cube = cube.extract(exclude_2006_constraint)

    #First period where MVIRI was measuring was until end of 2004
    time_constraint1 = iris.Constraint(time=lambda c: c.point.year < 2005)
    cube1 = cube.extract(time_constraint1)

    #Second period where SEVIRI measured began in 2005
    time_constraint2 = iris.Constraint(time=lambda c: c.point.year > 2004)
    cube2 = cube.extract(time_constraint2)

    #compute the trend for the whole time period
    ydata = cube.data
    xdata = cube.coord('time').points
    mdi = ydata.mask
    trendanalysis = MedianPairwiseSlopes(xdata, ydata)
    slope = trendanalysis[0]
    y_interception = trendanalysis[3]
    trendcube = copy.deepcopy(cube)
    trendcube.rename('Trend')
    trendcube.data = line(xdata, y_interception, slope)

    #compute the trend for the first time period
    ydata1 = cube1.data
    xdata1 = cube1.coord('time').points
    mdi1 = ydata1.mask
    trendanalysis1 = MedianPairwiseSlopes(xdata1, ydata1)
    slope1 = trendanalysis1[0]
    y_interception1 = trendanalysis1[3]
    trendcube1 = copy.deepcopy(cube1)
    trendcube1.rename('Trend')
    trendcube1.data = line(xdata1, y_interception1, slope1)

    #compute the trend for the second time period
    ydata2 = cube2.data
    xdata2 = cube2.coord('time').points
    mdi2 = ydata2.mask
    trendanalysis2 = MedianPairwiseSlopes(xdata2, ydata2)
    slope2 = trendanalysis2[0]
    y_interception2 = trendanalysis2[3]
    trendcube2 = copy.deepcopy(cube2)
    trendcube2.rename('Trend')
    trendcube2.data = line(xdata2, y_interception2, slope2)

    #Begin plot#
    plt.close()
    fig = plt.figure(figsize=(10, 8))
    iplt.plot(cube)
    plt.grid()
    plt.title('Time series of ' + title_time + ' ' + instrument + ' ' + iname +
              ' in ' + region,
              size=22)
    plt.ylabel(units_dict[iname], size=20)
    plt.xlabel('years', size=20)

    iplt.plot(trendcube,
              label='trend: ' + str(round(slope * time_factor, 2)) + ' ' +
              units_dict[iname] + ' per decade' + ' (1991-2015)')
    iplt.plot(trendcube1,
              label='trend: ' + str(round(slope1 * time_factor, 2)) + ' ' +
              units_dict[iname] + ' per decade' + ' (1991-2004)')
    iplt.plot(trendcube2,
              label='trend: ' + str(round(slope2 * time_factor, 2)) + ' ' +
              units_dict[iname] + ' per decade' + ' (2005-2015)')

    plt.legend(fontsize=16, loc='best')
    plt.tight_layout()
    plt.tick_params(axis='both', which='major', labelsize=16)
    plt.savefig(outpath + iname + '_with_trend_' + timerange + '_' + region +
                '.png')

    return [
        str(round(slope * time_factor, 2)),
        str(round(slope1 * time_factor, 2)),
        str(round(slope2 * time_factor, 2))
    ]
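
The trend plots above (this example and the earlier trendcube fragment) rely on line(...) and MedianPairwiseSlopes(...) helpers that are not shown, and which are unrelated to the line() plotting function in Example #2. A hedged sketch of what such helpers might look like, assuming only the interface visible above (element 0 of the result is the slope, element 3 an intercept, and line(x, intercept, slope) evaluates the trend line):

import itertools
import numpy as np

def line(x, intercept, slope):
    # Evaluate the trend line y = intercept + slope * x.
    return intercept + slope * np.asarray(x)

def MedianPairwiseSlopes(xdata, ydata):
    # Theil-Sen style estimate: the median of all pairwise slopes.  Masked
    # values are not handled in this sketch, and the second and third return
    # values merely stand in for the uncertainty bounds referenced elsewhere.
    x = np.asarray(xdata, dtype=float)
    y = np.asarray(ydata, dtype=float)
    slopes = [(y2 - y1) / (x2 - x1)
              for (x1, y1), (x2, y2) in itertools.combinations(zip(x, y), 2)
              if x2 != x1]
    slope = np.median(slopes)
    lower, upper = np.percentile(slopes, [5, 95])
    intercept = np.median(y - slope * x)
    return slope, lower, upper, intercept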
Example #45
    def plot(self):
        """
        Produce trajectory plot.

        Returns fig object for further plotting if needed.
        """

        if not self.lines:
            raise ValueError("TrajectoryPlot: no lines have been added")

        if self.fig is None:
            if self.rsmc:
                self.fig = plt.figure(figsize=[12, 6])
                ax = plt.subplot2grid(
                    (3, 3), (0, 0),
                    rowspan=2,
                    colspan=2,
                    projection=ccrs.PlateCarree(central_longitude=self.clon))
            elif self.annote:
                self.fig = plt.figure(figsize=[12, 6])
                ax = plt.subplot2grid(
                    (3, 3), (0, 0),
                    rowspan=2,
                    colspan=2,
                    projection=ccrs.PlateCarree(central_longitude=self.clon))
            else:
                self.fig = plt.figure(figsize=[7, 9])
                ax = plt.subplot2grid(
                    (3, 1), (0, 0),
                    rowspan=2,
                    projection=ccrs.PlateCarree(central_longitude=self.clon))
        ax = plt.gca()

        for line in self.lines:

            add_settings = {}
            if 'add_settings' in line:
                add_settings = line['add_settings']

            style = {
                'label': line['label'],
                'color': line['colour'],
                'linestyle': line['linestyle'],
                'linewidth': line['linewidth'],
                'marker': line['marker']
            }
            style2 = style.copy()
            style2.update(add_settings)

            iplt.plot(line['x'], line['y'], **style2)

            # Add a black square at the trajectory start point
            iplt.scatter(line['x'][0], line['y'][0], color='k', marker='s')

        #Add title and axis labels
        if self.title is not None:
            ax.set_title(self.title)

        # Set the extent
        # Bug in Cartopy Dec'17 - Global extent will not be plotted with
        # extent[0] = 0, extent[1] = 360 So the longitudinal extents are
        # deliberately taken in by 0.1
        if abs(self.extent[1] - self.extent[0]) > 330:
            self.extent[0] = -179.9
            self.extent[1] = 179.9
        ax.set_extent(self.extent, crs=ccrs.PlateCarree())

        # Determine extent of plotting region and use this to
        # select an appropriate mapping zoom
        if abs(self.extent[1] - self.extent[0]) < 15.0:
            res = '10m'
        elif abs(self.extent[1] - self.extent[0]) < 50.0:
            res = '50m'
        else:
            res = '110m'

        if self.mapping == 'countries' or self.mapping == 'states':
            countries = cfeature.NaturalEarthFeature(
                category='cultural',
                name='admin_0_countries_lakes',
                scale=res,
                facecolor='none')

        if self.mapping == 'states':
            states = cfeature.NaturalEarthFeature(
                category='cultural',
                name='admin_1_states_provinces_shp',
                scale=res,
                facecolor='none')

        if self.mapping == 'coastlines':

            ax.coastlines(res)

        elif self.mapping == 'countries':

            ax.coastlines(res, zorder=3)
            ax.add_feature(countries,
                           edgecolor='gray',
                           zorder=2,
                           linewidth=0.5)

        elif self.mapping == 'states':

            ax.coastlines(res, zorder=3)
            ax.add_feature(countries, edgecolor='gray', zorder=2, linewidth=1)
            ax.add_feature(states,
                           edgecolor='lightgray',
                           zorder=2,
                           linewidth=0.5)

        elif self.mapping == 'wms':
            # NOTE WMS mapping does not appear to work for extents
            # greater than 130 degrees in either direction for a
            # typical 6x6 sized map. For smaller maps, the useable
            # WMS extents are smaller.
            # It should also be noted that if the WMS map
            # crosses 180E/W, if the northern or southern
            # edge of the map is on the equator, this will
            # result in the size of page and the placing of
            # the map on the page being altered.
            num_layers = np.linspace(0, 40, 41)
            layers = ['{:.0f}'.format(x) for x in num_layers]
            ax.add_wms(wms='http://exxdmmsprd01:6080/arcgis/services/DMMS/' +
                       'Global_NE_HC_Hybrid_Greyscale/MapServer/WMSServer',
                       layers=layers)

        #Add gridlines
        if self.gridlines:
            try:
                if self.extent[0] < 180 and self.extent[1] > 180:
                    xlocs, xlocs_extend = compute_grid_line_locs(self.extent)
                    ax.gridlines(xlocs=xlocs_extend)
                    gl = ax.gridlines(draw_labels=True,
                                      xlocs=xlocs,
                                      linewidth=0.001)
                else:
                    gl = ax.gridlines(draw_labels=True,
                                      linewidth=0.8,
                                      alpha=0.9,
                                      zorder=9)

                gl.xlabels_top = False
                gl.ylabels_right = False
                gl.xformatter = LONGITUDE_FORMATTER
                gl.yformatter = LATITUDE_FORMATTER
            except Exception:
                # Fall back to plain, unlabelled gridlines if labelled
                # gridlines are not supported for this projection/extent.
                gl = ax.gridlines()

        # Add information about the release location and time if provided
        if self.release_info is not None:
            release_text = 'Release location: {}, {},\n'.format(
                self.release_info[0], self.release_info[1])
            release_text += 'Release time: {}'.format(self.release_info[2])
            ax.annotate(release_text,
                        xy=(0.5, 0.34),
                        xycoords=('axes fraction', 'figure fraction'),
                        xytext=(0, 10),
                        textcoords='offset points',
                        size=12,
                        ha='center',
                        va='bottom')

        # Apply branding
        if self.mobrand:
            insert_logo()

        return self.fig
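
The gridline branch above calls a compute_grid_line_locs helper whose definition is not included in this excerpt. As a rough, hypothetical sketch (the real helper may differ), such a function could return the unwrapped longitude locations used to draw gridlines across an extent crossing 180E/W, together with the same locations wrapped into [-180, 180) for labelling:

import numpy as np

def compute_grid_line_locs(extent, spacing=10):
    # Sketch only: the actual helper used above is not shown here.
    # ``extent`` is assumed to be [lon_min, lon_max, lat_min, lat_max]
    # with lon_max > 180, i.e. a map crossing the dateline.
    start = np.floor(extent[0] / spacing) * spacing
    # Unwrapped longitudes used to draw the gridlines themselves.
    xlocs_extend = np.arange(start, extent[1] + spacing, spacing)
    # The same locations wrapped into [-180, 180) so the labelled (but
    # effectively invisible, linewidth=0.001) gridlines get sensible labels.
    xlocs = ((xlocs_extend + 180.0) % 360.0) - 180.0
    return sorted(set(xlocs)), list(xlocs_extend)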
Beispiel #46
0
def main():
    # Create a constraint to extract surface temperature cubes which have a
    # "realization" coordinate.
    constraint = iris.Constraint("surface_temperature",
                                 realization=lambda value: True)
    # Use this to load our ensemble.  The callback ensures all our members
    # have the "realization" coordinate and therefore they will all be loaded.
    surface_temp = iris.load_cube(
        iris.sample_data_path("GloSea4", "ensemble_???.pp"),
        constraint,
        callback=realization_metadata,
    )

    # -------------------------------------------------------------------------
    # Plot #1: Ensemble postage stamps
    # -------------------------------------------------------------------------

    # For the purposes of this example, take the last time element of the cube.
    # First get hold of the last time by slicing the coordinate.
    last_time_coord = surface_temp.coord("time")[-1]
    last_timestep = surface_temp.subset(last_time_coord)

    # Find the maximum and minimum across the dataset.
    data_min = np.min(last_timestep.data)
    data_max = np.max(last_timestep.data)

    # Create a wider than normal figure to support our many plots.
    plt.figure(figsize=(12, 6), dpi=100)

    # Also manually adjust the spacings which are used when creating subplots.
    plt.gcf().subplots_adjust(
        hspace=0.05,
        wspace=0.05,
        top=0.95,
        bottom=0.05,
        left=0.075,
        right=0.925,
    )

    # Iterate over all possible latitude longitude slices.
    for cube in last_timestep.slices(["latitude", "longitude"]):

        # Get the ensemble member number from the ensemble coordinate.
        ens_member = cube.coord("realization").points[0]

        # Plot the data in a 4x4 grid, with each plot's position in the grid
        # being determined by ensemble member number.  The special case for the
        # 13th ensemble member is to have the plot at the bottom right.
        if ens_member == 13:
            plt.subplot(4, 4, 16)
        else:
            plt.subplot(4, 4, ens_member + 1)

        # Plot with 50 evenly spaced contour levels (49 intervals).
        cf = iplt.contourf(cube, 49, vmin=data_min, vmax=data_max)

        # Add coastlines.
        plt.gca().coastlines()

    # Make an axes to put the shared colorbar in.
    colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
    colorbar = plt.colorbar(cf, colorbar_axes, orientation="horizontal")
    colorbar.set_label(last_timestep.units)

    # Limit the colorbar to 8 tick marks.
    colorbar.locator = matplotlib.ticker.MaxNLocator(8)
    colorbar.update_ticks()

    # Get the time for the entire plot.
    time = last_time_coord.units.num2date(last_time_coord.bounds[0, 0])

    # Set a global title for the postage stamps with the date formatted as
    # "monthname year".
    time_string = time.strftime("%B %Y")
    plt.suptitle(f"Surface temperature ensemble forecasts for {time_string}")

    iplt.show()

    # -------------------------------------------------------------------------
    # Plot #2: ENSO plumes
    # -------------------------------------------------------------------------

    # Nino 3.4 lies between: 170W and 120W, 5N and 5S, so use the intersection
    # method to restrict to this region.
    nino_cube = surface_temp.intersection(latitude=[-5, 5],
                                          longitude=[-170, -120])

    # Calculate the horizontal mean for the nino region.
    mean = nino_cube.collapsed(["latitude", "longitude"], iris.analysis.MEAN)

    # Calculate the ensemble mean of the horizontal mean.
    ensemble_mean = mean.collapsed("realization", iris.analysis.MEAN)

    # Take the ensemble mean from each ensemble member.
    mean -= ensemble_mean

    plt.figure()

    for ensemble_member in mean.slices(["time"]):
        # Draw each ensemble member as a dashed line in black.
        iplt.plot(ensemble_member, "--k")

    plt.title("Mean temperature anomaly for ENSO 3.4 region")
    plt.xlabel("Time")
    plt.ylabel("Temperature anomaly / K")

    iplt.show()
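
Both GloSea4 ensemble examples pass a realization_metadata callback to iris.load_cube, but its definition is not part of this listing. Based on the comment that the callback ensures every member carries the "realization" coordinate, a plausible, hypothetical sketch would derive the member number from the "ensemble_???.pp" filename and attach it as an auxiliary coordinate:

import numpy as np

import iris.coords


def realization_metadata(cube, field, fname):
    # Sketch of a load callback; the original is not shown in this listing.
    # Assumes the ensemble member number is the "???" part of the
    # "ensemble_???.pp" filename.
    if not cube.coords("realization"):
        realization_number = int(fname[-6:-3])
        realization_coord = iris.coords.AuxCoord(
            np.int32(realization_number), standard_name="realization",
            units="1")
        cube.add_aux_coord(realization_coord)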
Beispiel #47
0
 def test_yaxis_labels(self):
     iplt.plot(self.cube, self.cube.coord('str_coord'))
     self.assertBoundsTickLabels('yaxis')
Beispiel #48
0
from __future__ import (absolute_import, division, print_function)

import matplotlib.pyplot as plt

import iris
import iris.plot as iplt

fname = iris.sample_data_path('air_temp.pp')
temperature = iris.load_cube(fname)

# Take a 1d slice using array style indexing.
temperature_1d = temperature[5, :]

iplt.plot(temperature_1d)
plt.show()
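
iplt.plot also accepts an axes keyword, so the same one-dimensional slice can be drawn onto an explicitly created Axes instead of the current one. A minimal sketch:

import matplotlib.pyplot as plt

import iris
import iris.plot as iplt

fname = iris.sample_data_path('air_temp.pp')
temperature_1d = iris.load_cube(fname)[5, :]

fig, ax = plt.subplots()
iplt.plot(temperature_1d, axes=ax)
ax.set_title(temperature_1d.name())
plt.show()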
Beispiel #49
0
import matplotlib.pyplot as plt

import iris
import iris.plot as iplt

fname = iris.sample_data_path('air_temp.pp')

# Load exactly one cube from the given file
temperature = iris.load_cube(fname)

# We are only interested in a small number of latitudes (the 4 after and
# including the 5th element), so index them out
temperature = temperature[5:9, :]

for cube in temperature.slices('longitude'):

    # Create a string label to identify this cube (i.e. latitude: value)
    cube_label = 'latitude: %s' % cube.coord('latitude').points[0]

    # Plot the cube, and associate it with a label
    iplt.plot(cube, label=cube_label)

# Match the longitude range to global
max_lon = temperature.coord('longitude').points.max()
min_lon = temperature.coord('longitude').points.min()
plt.xlim(min_lon, max_lon)

# Add the legend with 2 columns
plt.legend(ncol=2)

# Put a grid on the plot
plt.grid(True)

# Provide some axis labels
plt.ylabel('Temperature / kelvin')
plt.xlabel('Longitude / degrees')
Beispiel #50
0
def main():
    # extract surface temperature cubes which have an ensemble member
    # coordinate, adding appropriate lagged ensemble metadata
    surface_temp = iris.load_cube(
        iris.sample_data_path("GloSea4", "ensemble_???.pp"),
        iris.Constraint("surface_temperature", realization=lambda value: True),
        callback=realization_metadata,
    )

    # -------------------------------------------------------------------------
    # Plot #1: Ensemble postage stamps
    # -------------------------------------------------------------------------

    # for the purposes of this example, take the last time element of the cube
    last_timestep = surface_temp[:, -1, :, :]

    # Make 50 evenly spaced levels which span the dataset
    contour_levels = np.linspace(
        np.min(last_timestep.data), np.max(last_timestep.data), 50
    )

    # Create a wider than normal figure to support our many plots
    plt.figure(figsize=(12, 6), dpi=100)

    # Also manually adjust the spacings which are used when creating subplots
    plt.gcf().subplots_adjust(
        hspace=0.05,
        wspace=0.05,
        top=0.95,
        bottom=0.05,
        left=0.075,
        right=0.925,
    )

    # iterate over all possible latitude longitude slices
    for cube in last_timestep.slices(["latitude", "longitude"]):

        # get the ensemble member number from the ensemble coordinate
        ens_member = cube.coord("realization").points[0]

        # plot the data in a 4x4 grid, with each plot's position in the grid
        # being determined by ensemble member number; the special case for the
        # 13th ensemble member is to have the plot at the bottom right
        if ens_member == 13:
            plt.subplot(4, 4, 16)
        else:
            plt.subplot(4, 4, ens_member + 1)

        cf = iplt.contourf(cube, contour_levels)

        # add coastlines
        plt.gca().coastlines()

    # make an axes to put the shared colorbar in
    colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
    colorbar = plt.colorbar(cf, colorbar_axes, orientation="horizontal")
    colorbar.set_label("%s" % last_timestep.units)

    # limit the colorbar to 8 tick marks
    import matplotlib.ticker

    colorbar.locator = matplotlib.ticker.MaxNLocator(8)
    colorbar.update_ticks()

    # get the time for the entire plot
    time_coord = last_timestep.coord("time")
    time = time_coord.units.num2date(time_coord.bounds[0, 0])

    # set a global title for the postage stamps with the date formatted as
    # "monthname year"
    plt.suptitle(
        "Surface temperature ensemble forecasts for %s"
        % (time.strftime("%B %Y"),)
    )

    iplt.show()

    # -------------------------------------------------------------------------
    # Plot #2: ENSO plumes
    # -------------------------------------------------------------------------

    # Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint
    # which matches this
    nino_3_4_constraint = iris.Constraint(
        longitude=lambda v: -170 + 360 <= v <= -120 + 360,
        latitude=lambda v: -5 <= v <= 5,
    )

    nino_cube = surface_temp.extract(nino_3_4_constraint)

    # Subsetting a circular longitude coordinate always results in a circular
    # coordinate, so set the coordinate to be non-circular
    nino_cube.coord("longitude").circular = False

    # Calculate the horizontal mean for the nino region
    mean = nino_cube.collapsed(["latitude", "longitude"], iris.analysis.MEAN)

    # Calculate the ensemble mean of the horizontal mean. To do this, remove
    # the "forecast_period" and "forecast_reference_time" coordinates which
    # span both "realization" and "time".
    mean.remove_coord("forecast_reference_time")
    mean.remove_coord("forecast_period")
    ensemble_mean = mean.collapsed("realization", iris.analysis.MEAN)

    # take the ensemble mean from each ensemble member
    mean -= ensemble_mean.data

    plt.figure()

    for ensemble_member in mean.slices(["time"]):
        # draw each ensemble member as a dashed line in black
        iplt.plot(ensemble_member, "--k")

    plt.title("Mean temperature anomaly for ENSO 3.4 region")
    plt.xlabel("Time")
    plt.ylabel("Temperature anomaly / K")

    iplt.show()
Beispiel #51
0
 def test_xaxis_labels(self):
     iplt.plot(self.cube.coord("str_coord"), self.cube)
     self.assertBoundsTickLabels("xaxis")
Beispiel #52
0
# The SST anomaly cube 'sst' is assumed to have been loaded beforehand.
import cartopy.crs as ccrs
import cartopy.feature
import matplotlib.pyplot as plt
import numpy as np

import iris.plot as iplt
from eofs.iris import Eof

# Create an EOF solver to do the EOF analysis. Square-root of cosine of
# latitude weights are applied before the computation of EOFs.
solver = Eof(sst, weights='coslat')

# Retrieve the leading EOF, expressed as the correlation between the leading
# PC time series and the input SST anomalies at each grid point, and the
# leading PC time series itself.
eof1 = solver.eofsAsCorrelation(neofs=1)
pc1 = solver.pcs(npcs=1, pcscaling=1)

# Plot the leading EOF expressed as correlation in the Pacific domain.
clevs = np.linspace(-1, 1, 11)
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=190))
fill = iplt.contourf(eof1[0], clevs, cmap=plt.cm.RdBu_r)
ax.add_feature(cartopy.feature.LAND, facecolor='w', edgecolor='k')
cb = plt.colorbar(fill, orientation='horizontal')
cb.set_label('correlation coefficient', fontsize=12)
ax.set_title('EOF1 expressed as correlation', fontsize=16)

# Plot the leading PC time series.
plt.figure()
iplt.plot(pc1[:, 0], color='b', linewidth=2)
ax = plt.gca()
ax.axhline(0, color='k')
ax.set_ylim(-3, 3)
ax.set_xlabel('Year')
ax.set_ylabel('Normalized Units')
ax.set_title('PC1 Time Series', fontsize=16)

plt.show()
Beispiel #53
0
def main():
    # extract surface temperature cubes which have an ensemble member coordinate, adding appropriate lagged ensemble metadata
    surface_temp = iris.load_cube(iris.sample_data_path('GloSea4', 'ensemble_???.pp'),
                  iris.Constraint('surface_temperature', realization=lambda value: True),
                  callback=realization_metadata,
                  )

    # ----------------------------------------------------------------------------------------------------------------
    # Plot #1: Ensemble postage stamps
    # ----------------------------------------------------------------------------------------------------------------

    # for the purposes of this example, take the last time element of the cube
    last_timestep = surface_temp[:, -1, :, :]
   
    # Make 50 evenly spaced levels which span the dataset
    contour_levels = np.linspace(np.min(last_timestep.data), np.max(last_timestep.data), 50)
    
    # Create a wider than normal figure to support our many plots
    plt.figure(figsize=(12, 6), dpi=100)
    
    # Also manually adjust the spacings which are used when creating subplots
    plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05, left=0.075, right=0.925)
    
    # iterate over all possible latitude longitude slices
    for cube in last_timestep.slices(['latitude', 'longitude']):
        
        # get the ensemble member number from the ensemble coordinate
        ens_member = cube.coord('realization').points[0]
    
        # plot the data in a 4x4 grid, with each plot's position in the grid being determined by ensemble member number
        # the special case for the 13th ensemble member is to have the plot at the bottom right
        if ens_member == 13:
            plt.subplot(4, 4, 16)
        else:
            plt.subplot(4, 4, ens_member+1)
    
        cf = iplt.contourf(cube, contour_levels)
    
        # add coastlines
        plt.gca().coastlines()
    
    # make an axes to put the shared colorbar in
    colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
    colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
    colorbar.set_label('%s' % last_timestep.units)
    
    # limit the colorbar to 8 tick marks
    import matplotlib.ticker
    colorbar.locator = matplotlib.ticker.MaxNLocator(8)
    colorbar.update_ticks()
    
    # get the time for the entire plot
    time_coord = last_timestep.coord('time')
    time = time_coord.units.num2date(time_coord.points[0])
    
    # set a global title for the postage stamps with the date formatted as "monthname year"
    plt.suptitle('Surface temperature ensemble forecasts for %s' % time.strftime('%B %Y'))
    
    iplt.show()


    # ---------------------------------------------------------------------------------------------------------------- 
    # Plot #2: ENSO plumes
    # ----------------------------------------------------------------------------------------------------------------
    
    # Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint which matches this
    nino_3_4_constraint = iris.Constraint(longitude=lambda v: -170+360 <= v <= -120+360, latitude=lambda v: -5 <= v <= 5)
    
    nino_cube = surface_temp.extract(nino_3_4_constraint)

    # Subsetting a circular longitude coordinate always results in a circular coordinate, so set the coordinate to be non-circular
    nino_cube.coord('longitude').circular = False
    
    # Calculate the horizontal mean for the nino region
    mean = nino_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)
    
    # Calculate the ensemble mean of the horizontal mean. To do this, remove the "forecast_period" and
    # "forecast_reference_time" coordinates which span both "realization" and "time".
    mean.remove_coord("forecast_reference_time")
    mean.remove_coord("forecast_period")
    ensemble_mean = mean.collapsed('realization', iris.analysis.MEAN)
    
    # take the ensemble mean from each ensemble member
    mean -= ensemble_mean.data

    plt.figure()
    
    for ensemble_member in mean.slices(['time']):
        # draw each ensemble member as a dashed line in black
        iplt.plot(ensemble_member, '--k')
    
    plt.title('Mean temperature anomaly for ENSO 3.4 region')
    plt.xlabel('Time')
    plt.ylabel('Temperature anomaly / K')
    
    plt.show()