def main():
    # Load some test data.
    fname = iris.sample_data_path("A1B_north_america.nc")
    cube = iris.load_cube(fname)

    # Extract a single time series at a latitude and longitude point.
    location = next(cube.slices(["time"]))

    # Calculate a polynomial fit to the data at this time series.
    x_points = location.coord("time").points
    y_points = location.data
    degree = 2

    p = np.polyfit(x_points, y_points, degree)
    y_fitted = np.polyval(p, x_points)

    # Add the polynomial fit values to the time series to take
    # full advantage of Iris plotting functionality.
    long_name = "degree_{}_polynomial_fit_of_{}".format(degree, cube.name())
    fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
                               units=location.units)
    location.add_aux_coord(fit, 0)

    qplt.plot(location.coord("time"), location, label="data")
    qplt.plot(
        location.coord("time"),
        location.coord(long_name),
        "g-",
        label="polynomial fit",
    )
    plt.legend(loc="best")
    plt.title("Trend of US air temperature over time")

    qplt.show()

def error_vs_tendency(rp, fp, bins):
    """Is the error proportional to the tendency (like SPPT)?"""
    # Average of the absolute error in the tendency binned by the double
    # precision tendency
    error = rp - fp
    error.data = np.abs(error.data)
    mask = np.logical_and(fp.data == 0, rp.data == 0)
    y = averaged_over(error, bins, fp, weights=None, mask=mask)

    fig, axes = plt.subplots(2, 1, sharex=True)
    qplt.plot(y[0], 'kx', axes=axes[0])
    axes[0].set_xlabel('')
    axes[0].set_ylabel('Error in Tendency')
    axes[0].set_title('')

    x = y[1].dim_coords[0].points
    dx = x[1] - x[0]
    axes[1].bar(x, y[1].data, width=dx)
    axes[1].set_xlabel('Double-Precision Tendency')
    axes[1].set_ylabel('Number of Gridpoints')
    axes[1].set_title('')

    return

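# The averaged_over helper used above is not defined in this snippet. As a
# rough illustration only (an assumption about its behaviour, not the
# author's code), a plain-NumPy binned average over raw arrays might look
# like this hypothetical helper:
import numpy as np

def binned_average(values, keys, bins, mask=None):
    """Average `values` within bins defined over `keys` (hypothetical)."""
    values = np.asarray(values, dtype=float).ravel()
    keys = np.asarray(keys, dtype=float).ravel()
    if mask is not None:
        keep = ~np.asarray(mask, dtype=bool).ravel()
        values, keys = values[keep], keys[keep]
    # Assign each key to a bin; indices outside [0, len(bins) - 2] fall
    # outside the requested bin edges and are discarded.
    idx = np.digitize(keys, bins) - 1
    valid = (idx >= 0) & (idx < len(bins) - 1)
    counts = np.bincount(idx[valid], minlength=len(bins) - 1)
    sums = np.bincount(idx[valid], weights=values[valid],
                       minlength=len(bins) - 1)
    with np.errstate(invalid='ignore', divide='ignore'):
        means = sums / counts  # NaN where a bin is empty
    return means, counts
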
def main():
    # Parameters
    path = datadir + 'stochastic/ensembles/'
    variable = 'Geopotential Height'

    # List of ensembles to compare
    experiments = [
        'convection_8b', 'surface_fluxes_8b', 'vertical_diffusion_8b',
        'physics_8b', 'physics_10b', 'physics_23b', 'physics_52b',
        'physics_52b_v2', 'physics_52b_v3'
    ]

    # Load the ensembles
    cs = iris.Constraint(variable)

    # Reference ensemble
    fp = iris.load_cube(path + 'rp_physics_52b.nc', cs)
    fp_mean = fp.collapsed('ensemble_member', MEAN)
    fp_spread = fp.collapsed('ensemble_member', STD_DEV, ddof=1)

    # Compare the ensembles
    for exp in experiments:
        rp = iris.load_cube(path + 'rp_{}.nc'.format(exp), cs)
        proportion = proportion_different(rp, fp_mean, fp_spread)

        # Plot the differences
        qplt.plot(proportion, label=exp)

    plt.legend()
    plt.show()

    return

def main(cubes):
    theta = convert.calc('equivalent_potential_temperature', cubes)
    P = convert.calc('air_pressure', cubes)
    P.convert_units('hPa')
    mass = convert.calc('mass', cubes)

    lon, lat = grid.true_coords(theta)
    lonmask = np.logical_or(lon < -15, lon > 5)
    latmask = np.logical_or(lat < 47.5, lat > 62.5)
    areamask = np.logical_or(lonmask, latmask)

    masks = [
        np.logical_or(theta.data < theta_front, areamask),
        np.logical_or(theta.data > theta_front, areamask)
    ]

    # overview(cubes, areamask)
    # plt.savefig(plotdir + 'composite_iop8_24h_overview_750hpa.pdf')
    # plt.show()

    z_cold, z_warm = bl_heights(cubes, theta, areamask)

    # Initialise the plot
    fig = plt.figure(figsize=(18, 12))

    diags = convert.calc(names, cubes)
    masses = []
    # Rows are different masks
    for n, mask in enumerate(masks):
        means = diagnostics.averaged_over(diags, levels, P, mass, mask)
        masses.append(convert.calc('mass', means))
        # Columns are for different mappings
        for m, mapping in enumerate(mappings):
            ax = plt.subplot2grid((2, ncol), (n, m))
            composite(means, ax, mapping, mass, P)
            add_trimmings(ax, n, m)
            if m == 0:
                ax.set_xlim(0.1, 1.3)
            elif m == 1:
                ax.set_xlim(-0.6, 0.6)
            else:
                ax.set_xlim(-1, 1)

            multilabel(ax, 3 * n + m, yreversed=True, fontsize=25)

        if n == 0:
            plt.axhline(z_cold, color='k', linestyle='--')
        elif n == 1:
            plt.axhline(z_warm, color='k', linestyle='--')

    add_figlabels(fig)
    fig.savefig(plotdir + 'composite_iop5_24h.pdf')

    plt.figure()
    for mass in masses:
        qplt.plot(mass, mass.coord('air_pressure'))
    ax.set_ylim(950, 500)
    plt.show()

    return

def main():
    fname = iris.sample_data_path('air_temp.pp')

    # Load exactly one cube from the given file.
    temperature = iris.load_cube(fname)

    # We only want a small number of latitudes, so filter some out
    # using "extract".
    temperature = temperature.extract(
        iris.Constraint(latitude=lambda cell: 68 <= cell < 78))

    for cube in temperature.slices('longitude'):
        # Create a string label to identify this cube (i.e. latitude: value).
        cube_label = 'latitude: %s' % cube.coord('latitude').points[0]

        # Plot the cube, and associate it with a label.
        qplt.plot(cube, label=cube_label)

    # Add the legend with 2 columns.
    plt.legend(ncol=2)

    # Put a grid on the plot.
    plt.grid(True)

    # Tell matplotlib not to extend the plot axes range to nicely
    # rounded numbers.
    plt.axis('tight')

    # Finally, show it.
    plt.show()

def main():
    # Enable a future option, to ensure that the netcdf load works the
    # same way as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load some test data.
    fname = iris.sample_data_path('A1B_north_america.nc')
    cube = iris.load_cube(fname)

    # Extract a single time series at a latitude and longitude point.
    location = next(cube.slices(['time']))

    # Calculate a polynomial fit to the data at this time series.
    x_points = location.coord('time').points
    y_points = location.data
    degree = 2

    p = np.polyfit(x_points, y_points, degree)
    y_fitted = np.polyval(p, x_points)

    # Add the polynomial fit values to the time series to take
    # full advantage of Iris plotting functionality.
    long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name())
    fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
                               units=location.units)
    location.add_aux_coord(fit, 0)

    qplt.plot(location.coord('time'), location, label='data')
    qplt.plot(location.coord('time'), location.coord(long_name), 'g-',
              label='polynomial fit')
    plt.legend(loc='best')
    plt.title('Trend of US air temperature over time')

    qplt.show()

def main():
    # Parameters
    path = datadir + 'stochastic/ensembles/'
    variable = 'Temperature'
    # pressure = [925, 850, 700, 500, 300, 200, 100, 30]
    pressure = 500

    # Load the ensembles
    cs = iris.Constraint(variable, pressure=pressure)
    fp = iris.load_cube(path + 'rp_physics_52b_v3.nc', cs)
    fp = fp.collapsed('ensemble_member', MEAN)

    # Compare the ensembles
    for exp in [
            'convection_8b', 'surface_fluxes_8b', 'vertical_diffusion_8b',
            'physics_8b', 'physics_10b', 'physics_23b', 'physics_52b',
            'physics_52b_v2'
    ]:
        rp = iris.load_cube(path + 'rp_{}.nc'.format(exp), cs)
        rp = rp.collapsed('ensemble_member', MEAN)
        error = rms_diff(rp, fp)

        # Plot the differences
        qplt.plot(error, label=exp)

    plt.legend()
    plt.show()

    return

def testPlot1DwithoutGridlines(self):
    cube = iris.load_cube(iris.sample_data_path('SOI_Darwin.nc'))
    plt.subplot(1, 2, 1)
    cc.plot1D(cube, False)
    plt.subplot(1, 2, 2)
    qplt.plot(cube)
    plt.show()

def annual(cube):
    first_30 = cube.extract(
        iris.Constraint(year=lambda t: 1870 < t.point < 1901)).collapsed(
            'time', iris.analysis.MEAN)
    year_mean = cube.aggregated_by('year', iris.analysis.MEAN)
    print(year_mean)
    anom_1871_1902 = year_mean - first_30
    anom_trend = anom_1871_1902.collapsed(['longitude', 'latitude'],
                                          iris.analysis.MEAN)
    qplt.plot(anom_trend, label='year anomaly')
    plt.title('annual anomaly with respect to the 1871-1900 period mean')

def test_xy_cube(self):
    c = simple_1d()
    qplt.plot(c)
    ax = qplt.plt.gca()
    x = ax.xaxis.get_label().get_text()
    self.assertEqual(x, "Foo")
    y = ax.yaxis.get_label().get_text()
    self.assertEqual(y, "Thingness")

def plot1D(self, results, data, name):
    plt.figure(figsize=(8, 6))
    for result in results:
        dataset = data[result][0]['dataset']
        qplt.plot(results[result], label=dataset)
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(
        '/esarchive/scratch/jcos/esmvaltool/output/figures/', name))

def plot_errors(cubes, variable, *args):
    # 500 hPa height
    z500 = convert.calc(variable, cubes)
    z500 = z500.extract(
        iris.Constraint(air_pressure=20000, forecast_lead_time=24))
    qplt.plot(z500, *args)

    return

def test_yx_cube(self):
    c = simple_1d()
    c.transpose()
    # Marking the cube's coordinate as a vertical coordinate should
    # change the default orientation of the plot.
    c.coord("foo").attributes["positive"] = "up"
    qplt.plot(c)
    ax = qplt.plt.gca()
    x = ax.xaxis.get_label().get_text()
    self.assertEqual(x, "Thingness")
    y = ax.yaxis.get_label().get_text()
    self.assertEqual(y, "Foo")

def simulations_result(self):
    """Handle the output of the primavera-viewer tool, either a plot or
    a '.nc' file.
    """
    result_cubes = self.simulations_statistics()
    if len(self.location) == 2:
        plot_title = (result_cubes[0].long_name + '\nat Lat: '
                      + str(self.location[0]) + 'N Lon: '
                      + str(self.location[1]) + 'E')
    if len(self.location) == 4:
        plot_title = (result_cubes[0].long_name + '\n over Lat range: '
                      + str(self.location[0]) + 'N to '
                      + str(self.location[1]) + 'N Longitude range: '
                      + str(self.location[2]) + 'E to '
                      + str(self.location[3]) + 'E')
    # Optional .nc file output
    if self.output in ['netCDF', 'both']:
        # output save file to directory
        for cube in result_cubes:
            cube.attributes['plot_title'] = plot_title
        iris.save(result_cubes, self.filename + '.nc',
                  netcdf_format="NETCDF3_CLASSIC")
    # Optional plot output
    if self.output in ['plot', 'both']:
        fig = plt.figure()
        # Plot the primavera comparison results
        colours = [
            'r', 'b', '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', 'c', 'm'
        ]
        for i, cube in enumerate(result_cubes):
            cube_label = cube.coord('simulation_label').points[0]
            if cube_label == 'Simulations Mean':
                qplt.plot(cube, label=cube_label,
                          color=self.lighten_color('k', 1.0),
                          linewidth=1.0)
            else:
                qplt.plot(cube, label=cube_label,
                          color=self.lighten_color(colours[i], 1.0),
                          linewidth=1.0)
        # Change final plot details
        plt.legend()
        plt.title(plot_title)
        plt.grid(True)
        fig.savefig(self.filename + '.png')

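# lighten_color above is a method on the class and is not shown in this
# snippet. A common implementation of the idea (an assumed sketch, not
# necessarily the author's version) scales a colour's lightness in HLS
# space; amount=1.0 returns the colour unchanged, smaller values lighten it.
import colorsys
import matplotlib.colors as mc

def lighten_color(color, amount=1.0):
    # Accept named colours, hex strings or RGB tuples.
    rgb = mc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(*rgb)
    return colorsys.hls_to_rgb(h, 1 - amount * (1 - l), s)
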
def test_cross_section(self):
    # Slice to get a cross section.
    # Constant latitude
    src = self.realistic_cube[0, :, 10, :]
    lon = _resampled_coord(src.coord('grid_longitude'), 0.6)
    shape = list(src.shape)
    shape[1] = len(lon.points)
    data = np.zeros(shape)
    dest = iris.cube.Cube(data)
    dest.add_dim_coord(lon, 1)
    dest.add_aux_coord(src.coord('grid_latitude').copy(), None)
    res = regrid_area_weighted(src, dest)
    self.assertCMLApproxData(res,
                             RESULT_DIR + ('const_lat_cross_section.cml',))
    # Plot a single slice.
    qplt.plot(res[0])
    qplt.plot(src[0], 'r')
    self.check_graphic()

    # Constant longitude
    src = self.realistic_cube[0, :, :, 10]
    lat = _resampled_coord(src.coord('grid_latitude'), 0.6)
    shape = list(src.shape)
    shape[1] = len(lat.points)
    data = np.zeros(shape)
    dest = iris.cube.Cube(data)
    dest.add_dim_coord(lat, 1)
    dest.add_aux_coord(src.coord('grid_longitude').copy(), None)
    res = regrid_area_weighted(src, dest)
    self.assertCMLApproxData(res,
                             RESULT_DIR + ('const_lon_cross_section.cml',))
    # Plot a single slice.
    qplt.plot(res[0])
    qplt.plot(src[0], 'r')
    self.check_graphic()

def sanity_check(cubelist_dictionary):
    """Plot pressure, temperature, RHi and potential temperature to make
    sure things don't differ too wildly.
    """
    plt.figure(figsize=(12, 8))
    for key in cubelist_dictionary:
        cubelist = cubelist_dictionary[key]
        pressure = cubelist.extract(iris.Constraint(name='air_pressure'))[0]
        temperature = cubelist.extract(
            iris.Constraint(name='air_temperature'))[0]
        RHi = cubelist.extract(
            iris.Constraint(name='relative_humidity_ice'))[0]
        theta = cubelist.extract(
            iris.Constraint(name='air_potential_temperature'))[0]
        altitude = cubelist.extract(iris.Constraint(name='altitude'))[0]

        plt.subplot(2, 2, 1)
        qplt.plot(altitude, pressure)
        plt.subplot(2, 2, 2)
        qplt.plot(altitude, temperature)
        plt.subplot(2, 2, 3)
        qplt.plot(altitude, RHi, label=key)
        plt.subplot(2, 2, 4)
        qplt.plot(altitude, theta)

    plt.subplot(2, 2, 3)
    plt.legend()

def timeseries(incube, plotpath, modelID):
    # Plot a timeseries for a smaller domain
    print('plotting a timeseries')
    spi_cotedivoire = incube.intersection(latitude=(4, 12),
                                          longitude=(-2, 9))
    spi_ts = spi_cotedivoire.collapsed(['latitude', 'longitude'],
                                       iris.analysis.MEAN)
    qplt.plot(spi_ts, label='spi', color='black', lw=1.5)
    plt.xlabel('Month')
    plt.ylabel('spi')
    plt.suptitle('Monthly Standardized Precipitation Index over CI',
                 fontsize=16)
    # plt.show()
    plt.savefig(plotpath + 'ts_' + modelID + '.png')

def main():
    try:
        c_monthly = iris.load_cube('mean.nc')
    except IOError:
        path = '/nfs/a266/data/CMIP5_AFRICA/BC_0.5x0.5/HadGEM2-ES/historical/pr_WFDEI_1979-2013_0.5x0.5_day_HadGEM2-ES_africa_historical_r1i1p1_full.nc'
        my_var = iris.load_cube(path)
        reg_cube = my_var.intersection(longitude=(-15.0, 25.0),
                                       latitude=(4.0, 31.0))
        reg_cube.coord('latitude').guess_bounds()
        reg_cube.coord('longitude').guess_bounds()
        # std = reg_cube.collapsed('longitude', iris.analysis.STD_DEV)
        # levels = np.arange(1, 100)
        # mean = reg_cube.collapsed('longitude', iris.analysis.MEAN)
        iris.coord_categorisation.add_month_number(reg_cube, 'time',
                                                   name='month')
        iris.coord_categorisation.add_year(reg_cube, 'time', name='year')
        c_monthly = reg_cube.aggregated_by(['month', 'year'],
                                           iris.analysis.MEAN)
        iris.save(c_monthly, 'mean.nc')

    c_monthly.convert_units('kg m-2 day-1')

    # iris.coord_categorisation.add_season(c_monthly, 'time', name='season',
    #                                      seasons=('djf', 'mam', 'jja', 'son'))
    # iris.coord_categorisation.add_season_year(c_monthly, 'time',
    #                                           name='season_year',
    #                                           seasons=('djf', 'mam', 'jja', 'son'))
    # std = c_monthly.aggregated_by('season', iris.analysis.STD_DEV)
    # mean = c_monthly.aggregated_by('season', iris.analysis.MEAN)

    # This is the monthly climatology (mean and std deviation).
    # Shape of these cubes: (12, 54, 80)
    std = c_monthly.aggregated_by('month', iris.analysis.STD_DEV)
    mean = c_monthly.aggregated_by('month', iris.analysis.MEAN)

    # We need to change the shape of the monthly climatologies to match the
    # shape of the timeseries (in the cube c_monthly).
    # Shape of c_monthly: (672, 54, 80)
    # Use integer division so np.tile receives an int repeat count.
    clim_mean_data = np.tile(mean.data,
                             (c_monthly.shape[0] // mean.shape[0], 1, 1))
    clim_std_data = np.tile(std.data,
                            (c_monthly.shape[0] // std.shape[0], 1, 1))

    clim_mean_cube = c_monthly.copy(clim_mean_data)
    clim_std_cube = c_monthly.copy(clim_std_data)

    y_spi = (c_monthly - clim_mean_cube) / clim_std_cube

    spi_cotedivoire = y_spi.intersection(latitude=(4, 12), longitude=(-2, 9))
    spi_ts = spi_cotedivoire.collapsed(['latitude', 'longitude'],
                                       iris.analysis.MEAN)
    qplt.plot(spi_ts)

def plot_timeseries(cubes, theta):
    for cube in cubes:
        if 'circulation' in cube.name():
            iplt.plot(cube, label=cube.name())
    plt.legend(ncol=2, loc='best')
    plt.savefig(plotdir + 'circulation_' + theta + 'K.png')

    for cube in cubes:
        if 'circulation' not in cube.name():
            plt.figure()
            qplt.plot(cube)
            plt.savefig(plotdir + cube.name() + '_' + theta + 'K.png')
    plt.show()

    return

def main():
    # Load the data
    with iris.FUTURE.context(netcdf_promote=True):
        cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))

    # Slice into cube to retrieve data for the inset map showing the
    # data region
    region = cube1[-1, :, :]
    # Average over latitude to reduce cube to 1 dimension
    plot_line = region.collapsed('latitude', iris.analysis.MEAN)

    # Open a window for plotting
    fig = plt.figure()
    # Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
    ax_main = fig.add_subplot(1, 1, 1)
    # Produce a quick plot of the 1D cube
    qplt.plot(plot_line)
    # Set x limits to match the data
    ax_main.set_xlim(0, plot_line.coord('longitude').points.max())
    # Adjust the y limits so that the inset map won't clash with main plot
    ax_main.set_ylim(294, 310)
    ax_main.set_title('Meridional Mean Temperature')
    # Add grid lines
    ax_main.grid()

    # Add a second set of axes specifying the fractional coordinates within
    # the figure with bottom left corner at x=0.55, y=0.58 with width
    # 0.3 and height 0.25.
    # Also specify the projection
    ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25],
                          projection=ccrs.Mollweide(central_longitude=180))

    # Use iris.plot (iplt) here so colour bar properties can be specified
    # Also use a sequential colour scheme to reduce confusion for those with
    # colour-blindness
    iplt.pcolormesh(region, cmap='Blues')
    # Manually set the orientation and tick marks on your colour bar
    ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
    plt.colorbar(orientation='horizontal', ticks=ticklist)
    ax_sub.set_title('Data Region')
    # Add coastlines
    ax_sub.coastlines()
    # Request to show entire map, using the colour mesh on the data region
    # only
    ax_sub.set_global()

    qplt.show()

def main():
    cube1 = iris.load_cube(iris.sample_data_path("ostia_monthly.nc"))

    # Slice into cube to retrieve data for the inset map showing the
    # data region
    region = cube1[-1, :, :]
    # Average over latitude to reduce cube to 1 dimension
    plot_line = region.collapsed("latitude", iris.analysis.MEAN)

    # Open a window for plotting
    fig = plt.figure()
    # Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
    ax_main = fig.add_subplot(1, 1, 1)
    # Produce a quick plot of the 1D cube
    qplt.plot(plot_line)
    # Set x limits to match the data
    ax_main.set_xlim(0, plot_line.coord("longitude").points.max())
    # Adjust the y limits so that the inset map won't clash with main plot
    ax_main.set_ylim(294, 310)
    ax_main.set_title("Meridional Mean Temperature")
    # Add grid lines
    ax_main.grid()

    # Add a second set of axes specifying the fractional coordinates within
    # the figure with bottom left corner at x=0.55, y=0.58 with width
    # 0.3 and height 0.25.
    # Also specify the projection
    ax_sub = fig.add_axes(
        [0.55, 0.58, 0.3, 0.25],
        projection=ccrs.Mollweide(central_longitude=180),
    )

    # Use iris.plot (iplt) here so colour bar properties can be specified
    # Also use a sequential colour scheme to reduce confusion for those with
    # colour-blindness
    iplt.pcolormesh(region, cmap="Blues")
    # Manually set the orientation and tick marks on your colour bar
    ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
    plt.colorbar(orientation="horizontal", ticks=ticklist)
    ax_sub.set_title("Data Region")
    # Add coastlines
    ax_sub.coastlines()
    # Request to show entire map, using the colour mesh on the data region
    # only
    ax_sub.set_global()

    qplt.show()

def main():
    cube = iris.load(
        '~/code/develop/vipc/datasets/NOAA20CRv2_t2m.mon.mean_2.5x2.5.nc',
        'air_temperature')[0]

    iris.coord_categorisation.add_season(cube, 'time', name='clim_season')
    iris.coord_categorisation.add_season_year(cube, 'time',
                                              name='year_season')
    iris.coord_categorisation.add_year(cube, 'time', name='year')
    print(cube.coord('year'))

    mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN)

    window = 10 * 12  # 5*12 #24
    span = 24
    flt_mean = timeseries_filter(mean, window, span,
                                 filter_type='lowpass',
                                 filter_stats='mean')
    qplt.plot(mean)
    qplt.plot(flt_mean)
    plt.show()

def plot_1D(self, results, data):
    # Plot the results for each dataset in the same plot
    for result in results:
        dataset = data[result][0]['dataset']
        qplt.plot(results[result], label=dataset)

    plot_name = 'Timeseries_difference_between_tas_and_tos.{out}'.format(
        out=self.cfg[n.OUTPUT_FILE_TYPE])
    # Get the path to the plot directory
    plot_dir = self.cfg[n.PLOT_DIR]

    # Some tweaks using matplotlib
    plt.legend()
    plt.tick_params(axis='x', labelsize=8)

    # Save the plot
    # plt.savefig(os.path.join(plot_dir, plot_name))
    plt.savefig(
        os.path.join('/home/Earth/jcos/es-esmvaltool/output/', plot_name))
    plt.close()

def compute(self):
    # -------------------------------------------------------------------
    # Every dataset in the recipe is associated with an alias. We are
    # going to use the alias and the group_metadata shared function to
    # loop over the datasets.
    # -------------------------------------------------------------------
    data = group_metadata(self.cfg['input_data'].values(), 'alias')
    total = {}
    # Loop over the datasets.
    for alias in data:
        # ---------------------------------------------------------------
        # Use the group_metadata function again so that, for each dataset,
        # the metadata dictionary is organised by the variables'
        # short name.
        # ---------------------------------------------------------------
        variables = group_metadata(data[alias], 'short_name')
        # Returns the path to the preprocessed files.
        tas_file = variables['tas'][0]['filename']
        # tos_file = variables['tos'][0]['filename']
        # ---------------------------------------------------------------
        # Now it is up to you to load the data and work with it with your
        # preferred libraries and methods. We are going to continue the
        # example using Iris and ESMValTool functions.
        # Note that all preprocessor routines can be called inside the
        # diag_script by adding the corresponding imports.
        # ---------------------------------------------------------------
        tas = iris.load(tas_file)[0]
        tas.convert_units('degC')
        with open("/home/Earth/jcos/es-esmvaltool/common-tools/sample.txt",
                  "a") as f:
            f.write(str(tas))
        qplt.plot(tas)
        plot_name = 'tas20002010.png'
        plt.savefig(
            os.path.join('/home/Earth/jcos/es-esmvaltool/output/',
                         plot_name))

def plot_timeseries(cube_dict, user_regions, title, tex_units,
                    ref_region=None):
    """Create the timeseries plot."""
    region_dict = {
        'globe': ('globe', 'black', '--'),
        'globe60': ('globe (60S - 60N)', 'black', '-'),
        'tropics': ('tropics (20S to 20N)', 'purple', '-'),
        'ne': ('northern extratropics (north of 20N)', 'red', '--'),
        'ne60': ('northern extratropics (20N - 60N)', 'red', '-'),
        'nh60': ('northern hemisphere (to 60N)', 'red', '-.'),
        'se': ('southern extratropics (south of 20S)', 'blue', '--'),
        'se60': ('southern extratropics (60S - 20S)', 'blue', '-'),
        'sh60': ('southern hemisphere (to 60S)', 'blue', '-.'),
        'ose': ('outside southern extratropics (north of 20S)',
                '#cc0066', '-.'),
        'ose60': ('outside southern extratropics (20S - 60N)',
                  '#cc0066', '--')
    }

    for region in user_regions:
        name, color, style = region_dict[region]
        cube = cube_dict[name]
        qplt.plot(cube.coord('time'), cube,
                  label=name, color=color, linestyle=style)

    plt.legend(loc='best')
    plt.title(title)
    if ref_region:
        ylabel = '%s equivalent ocean heat content (%s)' % (
            region_dict[ref_region][0], tex_units)
    else:
        ylabel = 'ocean heat content (%s)' % (tex_units)
    plt.ylabel(ylabel)
    plt.xlabel('year')

def main():
    # Load the three files of sample NEMO data.
    fname = iris.sample_data_path("NEMO/nemo_1m_*.nc")
    cubes = iris.load(fname)

    # Some attributes are unique to each file and must be blanked
    # to allow concatenation.
    differing_attrs = ["file_name", "name", "timeStamp", "TimeStamp"]
    for cube in cubes:
        for attribute in differing_attrs:
            cube.attributes[attribute] = ""

    # The cubes still cannot be concatenated because their time dimension
    # is time_counter rather than time. time needs to be promoted to allow
    # concatenation.
    for cube in cubes:
        promote_aux_coord_to_dim_coord(cube, "time")

    # The cubes can now be concatenated into a single time series.
    cube = cubes.concatenate_cube()

    # Generate a time series plot of a single point
    plt.figure()
    y_point_index = 100
    x_point_index = 100
    qplt.plot(cube[:, y_point_index, x_point_index], "o-")

    # Include the point's position in the plot's title
    lat_point = cube.coord("latitude").points[y_point_index, x_point_index]
    lat_string = "{:.3f}\u00B0 {}".format(abs(lat_point),
                                          "N" if lat_point > 0.0 else "S")
    lon_point = cube.coord("longitude").points[y_point_index, x_point_index]
    lon_string = "{:.3f}\u00B0 {}".format(abs(lon_point),
                                          "E" if lon_point > 0.0 else "W")
    plt.title("{} at {} {}".format(cube.long_name.capitalize(),
                                   lat_string, lon_string))

    iplt.show()

def climatology(cube):
    """Summary plot of the time mean and standard deviation of the field
    in the netcdf file.
    """
    import iris
    import iris.quickplot as qplt
    import iris.plot as iplt
    import matplotlib.pyplot as plt
    from math import sqrt

    mu = cube.collapsed(('time',), iris.analysis.MEAN)
    mmu = mu.collapsed(('time', 'longitude'), iris.analysis.MEAN)
    std = cube.collapsed(('time',), iris.analysis.STD_DEV)

    qplt.plot(mu)
    iris.plot.plot(mu + std, 'r--')
    iris.plot.plot(mu - std, 'r--')
    ax = plt.gca().twinx()
    iris.plot.plot(std, 'k-')
    title = qplt._title(cube, False)
    plt.gca().set_title(title)
    plt.gca().axis('tight')

def main():
    # Load the three files of sample NEMO data.
    fname = iris.sample_data_path('NEMO/nemo_1m_*.nc')
    cubes = iris.load(fname)

    # Some attributes are unique to each file and must be blanked
    # to allow concatenation.
    differing_attrs = ['file_name', 'name', 'timeStamp', 'TimeStamp']
    for cube in cubes:
        for attribute in differing_attrs:
            cube.attributes[attribute] = ''

    # The cubes still cannot be concatenated because their time dimension
    # is time_counter rather than time. time needs to be promoted to allow
    # concatenation.
    for cube in cubes:
        promote_aux_coord_to_dim_coord(cube, 'time')

    # The cubes can now be concatenated into a single time series.
    cube = cubes.concatenate_cube()

    # Generate a time series plot of a single point
    plt.figure()
    y_point_index = 100
    x_point_index = 100
    qplt.plot(cube[:, y_point_index, x_point_index], 'o-')

    # Include the point's position in the plot's title
    lat_point = cube.coord('latitude').points[y_point_index, x_point_index]
    lat_string = '{:.3f}\u00B0 {}'.format(abs(lat_point),
                                          'N' if lat_point > 0. else 'S')
    lon_point = cube.coord('longitude').points[y_point_index, x_point_index]
    lon_string = '{:.3f}\u00B0 {}'.format(abs(lon_point),
                                          'E' if lon_point > 0. else 'W')
    plt.title('{} at {} {}'.format(cube.long_name.capitalize(),
                                   lat_string, lon_string))

    iplt.show()

def plot_1d(cube, plot_method, gridlines):
    """
    Produces a plot object for 1D cubes using the quickplot.plot() method
    or the matplotlib.pyplot.plot() method.

    Args:

    * cube
        The 1D cube to be plotted.

    * plot_method
        String holding the user's choice of plotting using either
        quickplot or simply plotting from the data array.

    * gridlines
        Boolean holding whether or not gridlines are desired.

    """
    if plot_method == "from data array":
        plt.plot(cube.data)
    else:
        qplt.plot(cube)
    plt.gca().grid(gridlines)

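# A minimal usage sketch for plot_1d (not part of the original source). It
# assumes Iris's bundled 1D sample data is available, and that any
# plot_method other than "from data array" selects the quickplot branch.
import iris
import iris.quickplot as qplt
import matplotlib.pyplot as plt

cube = iris.load_cube(iris.sample_data_path('SOI_Darwin.nc'))
plot_1d(cube, plot_method='quickplot', gridlines=True)
plt.show()
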
def nino3_plot(cube):
    """
    Plots the NINO3 timeseries for cube; surface temp would make sense.
    Input: Iris cube (sfc temp)
    Output: plot of NINO3 timeseries, and cube of same thing
    """
    try:
        cube.coord('t').standard_name = 'time'
    except:
        pass
    else:
        print("t coord changed to time")
    cube_rsc = remove_seascyc(cube)
    # NINO3 region (for data from 0 - 360 degrees)
    loni = 210
    lonf = 270
    lati = -5
    latf = 5
    # latlon = [-5, 5, 210, 270]
    nino3, nino3_mean = regmean(cube_rsc, loni, lonf, lati, latf)
    plt.ion()
    plt.clf()
    qplt.plot(nino3_mean[:, 0])
    plt.title('NINO3 timeseries')
    return nino3, nino3_mean

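# remove_seascyc and regmean are external helpers not defined here. For
# regmean, a minimal Iris sketch (a hypothetical stand-in; the real helper
# evidently returns a 2-D mean, given the nino3_mean[:, 0] indexing above)
# could extract the lat/lon box and area-average it:
def regmean_sketch(cube, loni, lonf, lati, latf):
    box = cube.intersection(longitude=(loni, lonf), latitude=(lati, latf))
    for name in ('latitude', 'longitude'):
        if not box.coord(name).has_bounds():
            box.coord(name).guess_bounds()
    weights = iris.analysis.cartography.area_weights(box)
    mean = box.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                         weights=weights)
    return box, mean
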
def simulations_output(self):
    result_cubes = self.simulations_statistics()
    # Optional .nc file output
    if self.output == 'net_cdf':
        # output save file to directory
        iris.save(result_cubes, 'primavera_comparison.nc',
                  netcdf_format="NETCDF3_CLASSIC")
        return result_cubes
    # Optional plot output
    if self.output == 'plot':
        # Plot the primavera comparison results
        colours = ['r', 'b', '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                   'c', 'm']
        for i, cube in enumerate(result_cubes):
            cube_label = cube.coord('simulation_label').points[0]
            cube_name = cube.long_name
            if cube_label == 'Simulations Mean':
                qplt.plot(cube, color=self.lighten_color('k', 1.0),
                          linewidth=1.0)
            else:
                qplt.plot(cube, color=self.lighten_color(colours[i], 1.0),
                          linewidth=1.0)
        # Change final plot details
        plt.legend()
        if len(self.location) == 2:
            plt.title(cube_name + '\n'
                      'at Lat: ' + str(self.location[0]) + 'N '
                      'Lon: ' + str(self.location[1]) + 'E')
        if len(self.location) == 4:
            plt.title(cube_name + '\n'
                      'over Lat range: ' + str(self.location[0]) + 'N '
                      'to ' + str(self.location[1]) + 'N '
                      'Longitude range: ' + str(self.location[2]) + 'E '
                      'to ' + str(self.location[3]) + 'E')
        plt.grid(True)
        return plt.show()

def main():
    # Load data into three Cubes, one for each set of PP files
    e1 = iris.load_strict(iris.sample_data_path('E1_north_america.nc'))
    a1b = iris.load_strict(iris.sample_data_path('A1B_north_america.nc'))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(
        longitude=lambda v: 225 <= v <= 315,
        latitude=lambda v: 15 <= v <= 60,
    )
    pre_industrial = iris.load_strict(
        iris.sample_data_path('pre-industrial.pp'), north_america)

    pre_industrial_mean = pre_industrial.collapsed(
        ['latitude', 'longitude'], iris.analysis.MEAN)
    e1_mean = e1.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)
    a1b_mean = a1b.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)

    # Show ticks 30 years apart
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))

    # Label the ticks with year data
    plt.gca().format_xdata = mdates.DateFormatter('%Y')

    # Plot the datasets
    qplt.plot(e1_mean, coords=['time'], label='E1 scenario',
              lw=1.5, color='blue')
    qplt.plot(a1b_mean, coords=['time'], label='A1B-Image scenario',
              lw=1.5, color='red')

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color='gray',
                linestyle='dashed', label='pre-industrial', lw=1.5)

    # Establish where r and t have the same data, i.e. the observations
    common = numpy.where(a1b_mean.data == e1_mean.data)[0]
    observed = a1b_mean[common]

    # Plot the observed data
    qplt.plot(observed, coords=['time'], label='observed',
              color='black', lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title('North American mean air temperature', fontsize=18)
    plt.xlabel('Time / year')
    plt.grid()

    iplt.show()

def main():
    # Load data into three Cubes, one for each set of PP files
    e1 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))
    a1b = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
                                    latitude=lambda v: 15 <= v <= 60)
    pre_industrial = iris.load_cube(
        iris.sample_data_path("pre-industrial.pp"), north_america)

    pre_industrial_mean = pre_industrial.collapsed(
        ["latitude", "longitude"], iris.analysis.MEAN)
    e1_mean = e1.collapsed(["latitude", "longitude"], iris.analysis.MEAN)
    a1b_mean = a1b.collapsed(["latitude", "longitude"], iris.analysis.MEAN)

    # Show ticks 30 years apart
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))

    # Label the ticks with year data
    plt.gca().format_xdata = mdates.DateFormatter("%Y")

    # Plot the datasets
    qplt.plot(e1_mean, coords=["time"], label="E1 scenario",
              lw=1.5, color="blue")
    qplt.plot(a1b_mean, coords=["time"], label="A1B-Image scenario",
              lw=1.5, color="red")

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color="gray",
                linestyle="dashed", label="pre-industrial", lw=1.5)

    # Establish where r and t have the same data, i.e. the observations
    common = np.where(a1b_mean.data == e1_mean.data)[0]
    observed = a1b_mean[common]

    # Plot the observed data
    qplt.plot(observed, coords=["time"], label="observed",
              color="black", lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title("North American mean air temperature", fontsize=18)
    plt.xlabel("Time / year")
    plt.grid()

    iplt.show()

future_temperature_cube = load_cube(model_future_temperature_file)

# Just do this to make sure that the cube has the right metadata.
if not future_temperature_cube.coord('latitude').has_bounds():
    future_temperature_cube.coord('latitude').guess_bounds()
if not future_temperature_cube.coord('longitude').has_bounds():
    future_temperature_cube.coord('longitude').guess_bounds()

grid_areas = iris.analysis.cartography.area_weights(future_temperature_cube)

# We'll start by having a quick look at how the heat in the ocean changes
# into the future in this model, to get a feel for what is going on.
future_temperature_mean = future_temperature_cube.collapsed(
    ['depth', 'latitude', 'longitude'], MEAN, weights=grid_areas)

figure()
qplt.plot(future_temperature_mean)
savefig('/home/ph290/Documents/figures/canesm2_ocean_heat.png')
show()

# It's increasing, so perhaps we would want to try and compare the first
# bit with observations - we will not do that here; what we will do is
# look at where that heat is going.
# Again we'll focus in on the Atlantic here.
future_temperature_atlantic = future_temperature_cube.extract(
    atlantic_region2)
atlantic_grid_areas = iris.analysis.cartography.area_weights(
    future_temperature_atlantic)

# Let's just start by thinking about how the heat propagates into the
# Atlantic, by averaging all of the latitudes and longitudes together -
# this way we only have depth and time, which is what we want.
future_temperature_atlantic_with_depth = future_temperature_atlantic.collapsed(
    ['longitude', 'latitude'], MEAN, weights=atlantic_grid_areas)

figure()
qplt.contourf(future_temperature_atlantic_with_depth, 50)
savefig('/home/ph290/Documents/figures/canesm2_atl_heat_through_time.png')

gphtNH_min = WNH[min_i:min_f, ::].collapsed('time', iris.analysis.MEAN)

gphtSH = gpht.extract(
    iris.Constraint(latitude=lambda v: SHlati <= v <= SHlatf))
WSH = gphtSH.collapsed(
    ['latitude'], iris.analysis.MEAN,
    weights=iris.analysis.cartography.area_weights(gphtSH))
lsmSH = lsmask.extract(
    iris.Constraint(latitude=lambda v: SHlati <= v <= SHlatf))
LSMSH = lsmSH.collapsed(
    ['latitude'], iris.analysis.MEAN,
    weights=iris.analysis.cartography.area_weights(lsmSH))

gphtSH_max = WSH[max_i:max_f, ::].collapsed('time', iris.analysis.MEAN)
gphtSH_min = WSH[min_i:min_f, ::].collapsed('time', iris.analysis.MEAN)

gmm = 50
plt.clf()
qplt.contourf(gphtNH_max, levels=np.linspace(-gmm, gmm, 51),
              cmap=plt.cm.seismic, extend='both')
qplt.plot(-20 * LSMNH + 1000, linewidth=2, color='k')
latstr = str(gphtNH_max.coord('latitude').points[0])
plt.title(gphtNH_max.standard_name + ' ' + latstr + ' max forcing')
plt.xticks(range(0, 420, 60))
plt.savefig('./figures/gpht_lat' + latstr + '.png')

plt.clf()
qplt.contourf(gphtSH_max, levels=np.linspace(-gmm, gmm, 51),
              cmap=plt.cm.seismic, extend='both')
latstr = str(gphtSH_max.coord('latitude').points[0])
qplt.plot(-20 * LSMSH + 1000, linewidth=2, color='k')
plt.title(gphtSH_max.standard_name + ', lat:' + latstr + ', max forcing')
plt.xticks(range(0, 420, 60))
plt.savefig('./figures/gpht_lat' + latstr + '.png')

gmm = 50
plt.clf()

def main():
    # Load data into three Cubes, one for each set of NetCDF files.
    e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
    a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
                                    latitude=lambda v: 15 <= v <= 60)
    pre_industrial = iris.load_cube(
        iris.sample_data_path('pre-industrial.pp'), north_america)

    # Generate area-weights array. As e1 and a1b are on the same grid we
    # can do this just once and re-use. This method requires bounds on
    # lat/lon coords, so let's add some in sensible locations using the
    # "guess_bounds" method.
    e1.coord('latitude').guess_bounds()
    e1.coord('longitude').guess_bounds()
    e1_grid_areas = iris.analysis.cartography.area_weights(e1)
    pre_industrial.coord('latitude').guess_bounds()
    pre_industrial.coord('longitude').guess_bounds()
    pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)

    # Perform the area-weighted mean for each of the datasets using the
    # computed grid-box areas.
    pre_industrial_mean = pre_industrial.collapsed(
        ['latitude', 'longitude'], iris.analysis.MEAN,
        weights=pre_grid_areas)
    e1_mean = e1.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                           weights=e1_grid_areas)
    a1b_mean = a1b.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                             weights=e1_grid_areas)

    # Show ticks 30 years apart
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))

    # Plot the datasets
    qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
    qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color='gray',
                linestyle='dashed', label='pre-industrial', lw=1.5)

    # Establish where r and t have the same data, i.e. the observations:
    # keep everything up to the first index at which the two series differ.
    observed = a1b_mean[:np.argmin(np.isclose(a1b_mean.data, e1_mean.data))]

    # Plot the observed data
    qplt.plot(observed, label='observed', color='black', lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title('North American mean air temperature', fontsize=18)
    plt.xlabel('Time / year')
    plt.grid()

    iplt.show()

# This loop calculates the mean temperature over all space;
# it "collapses" the cube.
print("Calculating spatial means...")
collapsed_cubes = []
for i in range(len(month_cubes)):
    print("Calculating spatial mean for {}...".format(month_name[i + 1]))
    # We get the area weights of the cells composing the region:
    grid_areas = area_weights(month_cubes[i])
    # We "collapse" our 2D+Time cube into a 0D+Time cube by averaging
    # with the MEAN aggregator:
    collapsed_cubes.append(month_cubes[i].collapsed(
        ['longitude', 'latitude'], MEAN, weights=grid_areas))
print("...done calculating spatial means.")

# Finally, we cast our analysis into a plot
import matplotlib.pyplot as plt
import iris.quickplot as iplt

print("Plotting...")
for i in range(len(collapsed_cubes)):
    # Plot...
    iplt.plot(collapsed_cubes[i], linewidth=10, label=month_name[i + 1])
plt.legend(loc=4)
# iplt.plot(c_interp, 'b.')
print("...done with the plot.")
figname = 'asdf.png'
plt.savefig(figname)
print('Figure saved to {}'.format(figname))

def test_xaxis_labels(self):
    qplt.plot(self.cube.coord('str_coord'), self.cube)
    self.assertPointsTickLabels('xaxis')

for year in [1983, ]:
    print('################## {0!s}\n'.format(year))
    time1 = datetime.datetime(year=year, month=1, day=1,
                              hour=0, minute=0, second=0)
    time2 = datetime.datetime(year=year, month=12, day=31,
                              hour=23, minute=59, second=59)
    descriptor['times'] = (time1, time2)
    descriptor['fileout1'] = (descriptor['basedir'] +
                              descriptor['source'] + '/anom_std/' +
                              descriptor['var_name'] + '_' +
                              str(descriptor['level']) + '_' +
                              str(year) + '_' +
                              descriptor['filter'] + '.nc')
    aa = da.TimeFilter(descriptor, verbose=VERBOSE)
    aa.time_filter()

    if PLOT:
        print('# Plot')
        time_constraint = iris.Constraint(
            time=lambda cell: time1 <= cell <= time2)
        tol = 0.1
        lon0 = 0.0
        lon_constraint = iris.Constraint(
            longitude=lambda cell: lon0 - tol <= cell <= lon0 + tol)
        lat0 = 55.0
        lat_constraint = iris.Constraint(
            latitude=lambda cell: lat0 - tol <= cell <= lat0 + tol)
        with iris.FUTURE.context(cell_datetime_objects=True):
            x1 = aa.data_in.extract(time_constraint & lon_constraint &
                                    lat_constraint)
            x2 = aa.data_out.extract(lon_constraint & lat_constraint)
        qplt.plot(x1, label='in')
        qplt.plot(x2, label='out')
        plt.legend()
        plt.axis('tight')
        qplt.show()

grid_areas = iris.analysis.cartography.area_weights(cube2)

plt.figure()
qplt.contourf(cube2.collapsed('TMNTH', iris.analysis.MEAN,
                              weights=grid_areas),
              np.linspace(-100, 100))
plt.gca().coastlines()
plt.show(block=False)

plt.figure()
qplt.contourf(cube.collapsed('TMNTH', iris.analysis.MEAN,
                             weights=grid_areas),
              np.linspace(300, 500))
plt.gca().coastlines()
plt.show(block=False)

ts = cube2.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                     weights=grid_areas)
plt.figure()
qplt.plot(ts)
plt.show()

plt.figure()
qplt.pcolormesh(cube2[-20])
plt.gca().coastlines()
plt.show(block=False)

plt.figure()
qplt.pcolormesh(cube2[-1])
plt.gca().coastlines()
plt.show(block=False)

west = -70
east = -10
south = 50

cube = iris.load_cube(file)
timeseries1 = cube.collapsed(['latitude', 'longitude'],
                             iris.analysis.MEAN)

'''
Filtering out everything happening on timescales shorter than X years
(where X is called lower_limit_years)
'''
lower_limit_years = 10.0
output_cube = cube.copy()
output_cube.data = low_pass_filter(cube.data, lower_limit_years)
timeseries2 = output_cube.collapsed(['latitude', 'longitude'],
                                    iris.analysis.MEAN)

plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data), 'r',
          alpha=0.5, linewidth=2)
qplt.plot(timeseries2 - np.mean(timeseries2.data), 'g',
          alpha=0.5, linewidth=2)
plt.show(block=True)

'''
Filtering out everything happening on timescales longer than X years
(where X is called upper_limit_years)
'''
upper_limit_years = 5.0
output_cube = cube.copy()
output_cube.data = high_pass_filter(cube.data, upper_limit_years)
timeseries3 = output_cube.collapsed(['latitude', 'longitude'],
                                    iris.analysis.MEAN)

plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data), 'r',
          alpha=0.5, linewidth=2)

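# low_pass_filter and high_pass_filter are not defined in this snippet. A
# minimal FFT-based sketch of the low-pass case (an assumption: evenly
# spaced, e.g. monthly, samples along axis 0; not necessarily the author's
# implementation) zeros every frequency component with a period shorter
# than the cut-off:
def low_pass_filter_sketch(data, lower_limit_years, samples_per_year=12):
    import numpy as np
    # Frequencies in cycles per year for the time axis.
    freqs = np.fft.rfftfreq(data.shape[0], d=1.0 / samples_per_year)
    spectrum = np.fft.rfft(data, axis=0)
    # Drop components faster than the cut-off period.
    spectrum[freqs > 1.0 / lower_limit_years] = 0.0
    return np.fft.irfft(spectrum, n=data.shape[0], axis=0)
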
def test_1d_positive_down(self):
    path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
    cube = iris.load_cube(path)
    qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
    self.check_graphic()

def test_reference_time_units(self):
    # Units should not be displayed for a reference time
    qplt.plot(self.cube.coord('time'), self.cube)
    plt.gcf().autofmt_xdate()
    self.check_graphic()

cs = ax.pcolormesh(lon, lat, c.data, cmap=cm.rscolmap, alpha=0.5)
ax.plot(obs['lon'], obs['lat'], 'k*', label='observation')
# Note that to avoid dealing with the different indices formats
# from the different models (FVCOM, ROMS, ESTOFS etc.)
# I recommend using the coords from the series output.
ax.plot(series.coord(axis='X').points,
        series.coord(axis='Y').points,
        'ro', label='model', alpha=0.5)
# ax.set_title(c.attributes['title'])
ax.add_feature(states, edgecolor='gray')
leg = ax.legend(numpoints=1, loc='upper left')

import iris.quickplot as qplt

# I hate when I have to "mask manually."
series.data = ma.masked_greater(series.data, 999)

fig, ax = plt.subplots(figsize=(9, 2.75))
l, = qplt.plot(series)

def test_yaxis_labels(self):
    qplt.plot(self.cube, self.cube.coord('str_coord'))
    self.assertBoundsTickLabels('yaxis')

# a high number of interpolation points, in this case 1000 of them.
altitude_points = [('altitude', np.linspace(400, 1250, 1000))]
scheme = iris.analysis.Linear(extrapolation_mode='mask')
linear_column = column.interpolate(altitude_points, scheme)

# Now interpolate the data onto 10 evenly spaced altitude levels,
# as we did in the example.
altitude_points = [('altitude', np.linspace(400, 1250, 10))]
scheme = iris.analysis.Linear()
new_column = column.interpolate(altitude_points, scheme)

plt.figure(figsize=(5, 4), dpi=100)

# Plot the black markers for the original data.
qplt.plot(column, column.coord('altitude'),
          marker='o', color='black', linestyle='', markersize=3,
          label='Original values', zorder=2)

# Plot the gray line to display the linear interpolation.
qplt.plot(linear_column, linear_column.coord('altitude'),
          color='gray', label='Linear interpolation', zorder=0)

# Plot the red markers for the new data.
qplt.plot(new_column, new_column.coord('altitude'),
          marker='D', color='red', linestyle='',
          label='Interpolated values', zorder=1)

ax = plt.gca()

# Space the plot such that the labels appear correctly.
plt.subplots_adjust(left=0.17, bottom=0.14)

wNH_min = WNH[min_i:min_f, ::].collapsed('time', iris.analysis.MEAN)

wSH = w.extract(iris.Constraint(
    latitude=lambda v: SHlati <= v <= SHlatf,
    atmosphere_hybrid_height_coordinate=lambda h: h <= 20000))
WSH = wSH.collapsed(
    ['latitude'], iris.analysis.MEAN,
    weights=iris.analysis.cartography.area_weights(wSH))
lsmSH = lsmask.extract(
    iris.Constraint(latitude=lambda v: SHlati <= v <= SHlatf))
LSMSH = lsmSH.collapsed(
    ['latitude'], iris.analysis.MEAN,
    weights=iris.analysis.cartography.area_weights(lsmSH))

wSH_max = WSH[max_i:max_f, ::].collapsed('time', iris.analysis.MEAN)
wSH_min = WSH[min_i:min_f, ::].collapsed('time', iris.analysis.MEAN)

plt.clf()
qplt.contourf(wNH_max, levels=np.linspace(-0.0025, 0.0025, 51),
              cmap=plt.cm.seismic, extend='both')
qplt.plot(500 * LSMNH, linewidth=2, color='k')
latstr = str(wNH_max.coord('latitude').points[0])
plt.title(wNH_max.standard_name + ' ' + latstr + ' max forcing')
plt.savefig('./figures/w_lat' + latstr + '.pdf')

plt.clf()
qplt.contourf(wSH_max, levels=np.linspace(-0.0025, 0.0025, 51),
              cmap=plt.cm.seismic, extend='both')
latstr = str(wSH_max.coord('latitude').points[0])
qplt.plot(500 * LSMSH, linewidth=2, color='k')
plt.title(wSH_max.standard_name + ', lat:' + latstr + ', max forcing')
plt.savefig('./figures/w_lat' + latstr + '.pdf')

# hold[0, i, :, 0] = WNH[max_i:max_f, ::].collapsed('time', iris.analysis.MEAN).data
# hold[0, i, :, 1] = TO_forc_meanSH[max_i:max_f, ::].collapsed('time', iris.analysis.MEAN).data
# hold[0, i, :, 1] = TO_forc_meanNH[min_i:min_f, ::].collapsed('time', iris.analysis.MEAN).data

def main():
    # Load data into three Cubes, one for each set of NetCDF files.
    e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
    a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
                                    latitude=lambda v: 15 <= v <= 60)
    pre_industrial = iris.load_cube(
        iris.sample_data_path('pre-industrial.pp'), north_america)

    # Generate area-weights array. As e1 and a1b are on the same grid we
    # can do this just once and re-use. This method requires bounds on
    # lat/lon coords, so let's add some in sensible locations using the
    # "guess_bounds" method.
    e1.coord('latitude').guess_bounds()
    e1.coord('longitude').guess_bounds()
    e1_grid_areas = iris.analysis.cartography.area_weights(e1)
    pre_industrial.coord('latitude').guess_bounds()
    pre_industrial.coord('longitude').guess_bounds()
    pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)

    # Perform the area-weighted mean for each of the datasets using the
    # computed grid-box areas.
    pre_industrial_mean = pre_industrial.collapsed(
        ['latitude', 'longitude'], iris.analysis.MEAN,
        weights=pre_grid_areas)
    e1_mean = e1.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                           weights=e1_grid_areas)
    a1b_mean = a1b.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                             weights=e1_grid_areas)

    # Plot the datasets
    qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
    qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color='gray',
                linestyle='dashed', label='pre-industrial', lw=1.5)

    # Constrain the period 1860-1999 and extract the observed data from a1b
    constraint = iris.Constraint(
        time=lambda cell: 1860 <= cell.point.year <= 1999)
    observed = a1b_mean.extract(constraint)

    # Assert that this data set is the same as the e1 scenario:
    # they share data up to the 1999 cut off.
    assert np.all(np.isclose(observed.data,
                             e1_mean.extract(constraint).data))

    # Plot the observed data
    qplt.plot(observed, label='observed', color='black', lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title('North American mean air temperature', fontsize=18)
    plt.xlabel('Time / year')
    plt.grid()

    iplt.show()

def main():
    # Load data into three Cubes, one for each set of NetCDF files.
    e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
    a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
                                    latitude=lambda v: 15 <= v <= 60)
    pre_industrial = iris.load_cube(
        iris.sample_data_path('pre-industrial.pp'), north_america)

    # Generate area-weights array. As e1 and a1b are on the same grid we
    # can do this just once and re-use. This method requires bounds on
    # lat/lon coords, so let's add some in sensible locations using the
    # "guess_bounds" method.
    e1.coord('latitude').guess_bounds()
    e1.coord('longitude').guess_bounds()
    e1_grid_areas = iris.analysis.cartography.area_weights(e1)
    pre_industrial.coord('latitude').guess_bounds()
    pre_industrial.coord('longitude').guess_bounds()
    pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)

    # Perform the area-weighted mean for each of the datasets using the
    # computed grid-box areas.
    pre_industrial_mean = pre_industrial.collapsed(
        ['latitude', 'longitude'], iris.analysis.MEAN,
        weights=pre_grid_areas)
    e1_mean = e1.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                           weights=e1_grid_areas)
    a1b_mean = a1b.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                             weights=e1_grid_areas)

    # Show ticks 30 years apart
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))

    # Plot the datasets
    qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
    qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color='gray',
                linestyle='dashed', label='pre-industrial', lw=1.5)

    # Constrain the period 1860-1999 and extract the observed data from a1b
    constraint = iris.Constraint(
        time=lambda cell: 1860 <= cell.point.year <= 1999)
    with iris.FUTURE.context(cell_datetime_objects=True):
        observed = a1b_mean.extract(constraint)

    # Assert that this data set is the same as the e1 scenario:
    # they share data up to the 1999 cut off.
    assert np.all(
        np.isclose(observed.data, e1_mean.extract(constraint).data))

    # Plot the observed data
    qplt.plot(observed, label='observed', color='black', lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title('North American mean air temperature', fontsize=18)
    plt.xlabel('Time / year')
    plt.grid()

    iplt.show()

from __future__ import (absolute_import, division, print_function)

import matplotlib.pyplot as plt

import iris
import iris.quickplot as qplt

fname = iris.sample_data_path('air_temp.pp')
temperature = iris.load_cube(fname)

# Take a 1d slice using array style indexing.
temperature_1d = temperature[5, :]

qplt.plot(temperature_1d)
plt.show()

def test_not_reference_time_units(self):
    # Units should be displayed for other time coordinates
    qplt.plot(self.cube.coord('forecast_period'), self.cube)
    self.check_graphic()

def main():
    # Load data into three Cubes, one for each set of PP files
    e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
    a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))

    # Load in the global pre-industrial mean temperature, and limit the
    # domain to the same North American region that e1 and a1b are at.
    north_america = iris.Constraint(
        longitude=lambda v: 225 <= v <= 315,
        latitude=lambda v: 15 <= v <= 60,
    )
    pre_industrial = iris.load_cube(
        iris.sample_data_path('pre-industrial.pp'), north_america)

    # Generate area-weights array. As e1 and a1b are on the same grid we
    # can do this just once and re-use.
    # This method requires bounds on lat/lon coords, so first we must
    # guess these.
    e1.coord('latitude').guess_bounds()
    e1.coord('longitude').guess_bounds()
    e1_grid_areas = iris.analysis.cartography.area_weights(e1)
    pre_industrial.coord('latitude').guess_bounds()
    pre_industrial.coord('longitude').guess_bounds()
    pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)

    # Now perform an area-weighted collapse for each dataset:
    pre_industrial_mean = pre_industrial.collapsed(
        ['latitude', 'longitude'], iris.analysis.MEAN,
        weights=pre_grid_areas)
    e1_mean = e1.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                           weights=e1_grid_areas)
    a1b_mean = a1b.collapsed(['latitude', 'longitude'], iris.analysis.MEAN,
                             weights=e1_grid_areas)

    # Show ticks 30 years apart
    plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))

    # Label the ticks with year data
    plt.gca().format_xdata = mdates.DateFormatter('%Y')

    # Plot the datasets
    qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
    qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')

    # Draw a horizontal line showing the pre-industrial mean
    plt.axhline(y=pre_industrial_mean.data, color='gray',
                linestyle='dashed', label='pre-industrial', lw=1.5)

    # Establish where r and t have the same data, i.e. the observations
    common = np.where(a1b_mean.data == e1_mean.data)[0]
    observed = a1b_mean[common]

    # Plot the observed data
    qplt.plot(observed, label='observed', color='black', lw=1.5)

    # Add a legend and title
    plt.legend(loc="upper left")
    plt.title('North American mean air temperature', fontsize=18)
    plt.xlabel('Time / year')
    plt.grid()

    iplt.show()

"-", "--", "-", "--", "-", "--", "-", "--", "-", "--", "-", "--", "-", "--", ] for i, model in enumerate(loaded_models): line = qplt.plot(monthly_to_yearly(global_mean[i])) plt.setp(line, linestyle=linestyles[i], linewidth=2) plt.show() for j, model in enumerate(loaded_models): line = plt.plot([0, 1], [j + 1, j + 1]) plt.text(1.2, j + 0.8, model, fontsize=12) plt.setp(line, linestyle=linestyles[j], linewidth=2) plt.xlim([0, 2]) plt.ylim([0, 18]) plt.show()
url = 'http://comt.sura.org/thredds/dodsC/comt_1_archive_full/inundation_tropical/observations/tropical/netcdf/Ike/NOAA/8729108_Panama_City_Ike_WL.nc'

cl = iris.load(url)
print(cl)

fig, ax = plt.subplots(figsize=(12, 3.5))
qplt.plot(cl[2], label=cl[2].name())
plt.grid()

# You can also convert an Iris cube object to a Pandas Series object.
from iris.pandas import as_cube, as_series, as_data_frame

df = as_series(cl[2])
df.head()

df.plot(figsize=(12, 3.5));
