Example #1
def save_by_filename(filename1, filename2, cube, saver_fn, iosaver=None):
    """ Saves a cube to two different filenames using iris.save and the save method of the object representing the file type directly"""
    # Save from object direct
    saver_fn(cube, filename1)

    # Call save on iris
    iris.save(cube, filename2, iosaver) # Optional iris.io.find_saver passed in from test
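A possible way to call the function above (a sketch, not part of the original test; the file names are placeholders): saver_fn can be a format's save function such as iris.fileformats.netcdf.save, and iosaver the saver that iris.io.find_saver resolves from a filename.

# Hypothetical usage sketch for save_by_filename (file names are placeholders).
import iris
import iris.io
import iris.fileformats.netcdf

cube = iris.load_cube('input.nc')
nc_saver = iris.io.find_saver('output_b.nc')   # resolves the netCDF saver from the extension
save_by_filename('output_a.nc', 'output_b.nc', cube,
                 iris.fileformats.netcdf.save, nc_saver)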
Example #2
def generate_cube(tmin=0, tmax=11*8760, tsamp=100,
                  xmin=-179, xmax=179, xsamp=358,
                  ymin=-89, ymax=89, ysamp=178,
                  zmin=0, zmax=7000, zsamp=7):

    #Create coordinates:
    tco = iris.coords.DimCoord(np.linspace(tmin,tmax,tsamp), standard_name='time', units='hours since 2000-01-01 00:00')
    tco.guess_bounds()

    xco = iris.coords.DimCoord(np.linspace(xmin,xmax,xsamp), standard_name='longitude', units='degree')
    xco.guess_bounds()

    yco = iris.coords.DimCoord(np.linspace(ymin,ymax,ysamp), standard_name='latitude', units='degree')
    yco.guess_bounds()

    zco = iris.coords.DimCoord(np.linspace(zmin,zmax,zsamp), standard_name='height', units='metres')
    zco.guess_bounds()

    #Create cube:
    c1 = iris.cube.Cube(fill_array(tco,xco,yco,zco), standard_name='air_temperature', units='celsius')
    c1.add_dim_coord(tco, 0)
    c1.add_dim_coord(xco, 1)
    c1.add_dim_coord(yco, 2)
    c1.add_dim_coord(zco, 3)

    cubefile = 'cube_generated.nc'
    iris.save(c1,cubefile)
    print("Cube saved in {}".format(cubefile))
Example #3
 def test_conflicting_attributes(self):
     # Should be data variable attributes.
     self.cube.attributes['foo'] = 'bar'
     self.cube2.attributes['foo'] = 'orange'
     with self.temp_filename(suffix='.nc') as filename:
         iris.save([self.cube, self.cube2], filename)
         self.assertCDL(filename, ('netcdf', 'netcdf_save_confl_attr.cdl'))
Example #4
 def test_uint32_data_netcdf3(self):
     self.cube.data = self.cube.data.astype(np.uint32)
     with self.temp_filename(suffix='.nc') as filename:
         iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
         reloaded = iris.load_cube(filename)
         self.assertCML(reloaded, ('netcdf',
                                   'uint32_data_netcdf3.cml'))
Example #5
 def test_no_global_attributes(self):
     # Should all be data variable attributes.
     # Different keys.
     self.cube.attributes['a'] = 'a'
     self.cube2.attributes['b'] = 'a'
     self.cube3.attributes['c'] = 'a'
     self.cube4.attributes['d'] = 'a'
     self.cube5.attributes['e'] = 'a'
     self.cube6.attributes['f'] = 'a'
     # Different values.
     self.cube.attributes['g'] = 'p'
     self.cube2.attributes['g'] = 'q'
     self.cube3.attributes['g'] = 'r'
     self.cube4.attributes['g'] = 's'
     self.cube5.attributes['g'] = 't'
     self.cube6.attributes['g'] = 'u'
     # One different value.
     self.cube.attributes['h'] = 'v'
     self.cube2.attributes['h'] = 'v'
     self.cube3.attributes['h'] = 'v'
     self.cube4.attributes['h'] = 'w'
     self.cube5.attributes['h'] = 'v'
     self.cube6.attributes['h'] = 'v'
     cubes = [self.cube, self.cube2, self.cube3,
              self.cube4, self.cube5, self.cube6]
     with self.temp_filename(suffix='.nc') as filename:
         iris.save(cubes, filename)
         self.assertCDL(filename, ('netcdf',
                                   'netcdf_save_no_global_attr.cdl'))
Example #6
    def test_scalar_cube(self):
        cube = stock.realistic_4d()[0, 0, 0, 0]

        with self.temp_filename(suffix='.nc') as filename:
            iris.save(cube, filename, netcdf_format='NETCDF3_CLASSIC')
            self.assertCDL(filename, ('netcdf',
                                      'netcdf_save_realistic_0d.cdl'))
def gen_or_load_2D(filename, data_functions, names, params={}, units='1', **kwargs):
    '''Load data from filename if it exists, otherwise generate the data and save it.

    Arguments:

     * filename:
         name of file to load from or write to

     * data_functions:
         list of functions to use to create 2D cubes

     * names:
         list of names to give each cube

    '''
    import os
    if not os.path.exists(filename):
        cubes = iris.cube.CubeList()
        for data_function, name in zip(data_functions, names):
            cube = gen_2D_cube_for_testing(data_function, **kwargs)
            cube.long_name = name
            cube.units = units
            cube.attributes.update(params)
            cubes.append(cube)
        iris.save(cubes, filename)    
    else:
        cubes = iris.load_cubes(filename, names)
        for cube in cubes: 
            assert params == register_params(cube.attributes)
            #assert cube.shape == CUBE_SHAPE
        
    return cubes
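A hypothetical call (the data functions and file name below are assumptions, and the signature each function is expected to have depends on gen_2D_cube_for_testing, which is not shown):

# Hypothetical usage: two synthetic 2D fields, regenerated only if the file is missing.
import numpy as np

def constant_field(x, y):          # assumed signature expected by gen_2D_cube_for_testing
    return np.ones_like(x)

def gradient_field(x, y):
    return x + y

cubes = gen_or_load_2D('test_fields.nc',
                       [constant_field, gradient_field],
                       ['constant', 'gradient'],
                       params={'source': 'synthetic'})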
Example #8
    def test_perturbation(self):
        path = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                    'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = load_cube(path)
        # trim to 1 time and regular lats
        cube = cube[0, 12:144, :]
        crs = iris.coord_systems.GeogCS(6371229)
        cube.coord('latitude').coord_system = crs
        cube.coord('longitude').coord_system = crs
        # add a realization coordinate
        cube.add_aux_coord(iris.coords.DimCoord(points=1,
                                                standard_name='realization',
                                                units='1'))
        with self.temp_filename('testPDT11.GRIB2') as temp_file_path:
            iris.save(cube, temp_file_path)
            # Get a grib_dump of the output file.
            dump_text = check_output(('grib_dump -O -wcount=1 ' +
                                      temp_file_path),
                                     shell=True).decode()

            # Check that various aspects of the saved file are as expected.
            expect_strings = (
                'editionNumber = 2',
                'gridDefinitionTemplateNumber = 0',
                'productDefinitionTemplateNumber = 11',
                'perturbationNumber = 1',
                'typeOfStatisticalProcessing = 0',
                'numberOfForecastsInEnsemble = 255')
            for expect in expect_strings:
                self.assertIn(expect, dump_text)
Example #9
 def test_uint64_data_netcdf3(self):
     # Data that cannot be safely cast to int32.
     self.cube.data = self.cube.data.astype(np.uint64)
     self.cube.data[0, 1] = 18446744073709551615
     with self.temp_filename(suffix='.nc') as filename:
         with self.assertRaises(ValueError):
             iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
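NETCDF3_CLASSIC has no 64-bit unsigned integer type, so the save above is expected to raise ValueError. As a sketch (not part of the original test), the same cube could be written by targeting a NETCDF4 format, which does support uint64, or by casting the data to a supported type first:

# Sketch: write the uint64 cube to a format that supports it.
iris.save(self.cube, filename, netcdf_format='NETCDF4')
# ...or cast to a NETCDF3-compatible type before saving (may lose precision):
# self.cube.data = self.cube.data.astype(np.float64)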
def aggregate_cubes(cubelistToAggregate, variable_dict, mass_for_mmr, diag_list, out_base, mmflag):
    # give it the TOMCAT name if it cannot be matched to a name in the UKCA dictionary
    if mmflag==1:
        print str(cubelistToAggregate[0].long_name)[:12]
        if str(cubelistToAggregate[0].long_name)[:12]=='Monthly mean':
            getname = str(cubelistToAggregate[0].long_name)
        else:
            getname = (str(cubelistToAggregate[0].long_name))[:-13]
    else:
        getname = (str(cubelistToAggregate[0].long_name))
    ukca_name = variable_dict.get(getname, diag_list.get(getname,getname))
    print str(ukca_name)
    newlist = cubelistToAggregate.concatenate_cube()
    iris.coord_categorisation.add_month(newlist,'time',name='month')
    monthmean1 = newlist.aggregated_by(['month'],iris.analysis.MEAN)
    sf=1
    if monthmean1.units=='ppbv':
        sf=1e-9
    if monthmean1.units=='ppmv':
        sf=1e-6
    if monthmean1.units=='pptv':
        sf=1e-12
    mass_of_molecule = mass_for_mmr.get(getname, 1)
    print str(monthmean1.long_name), mass_of_molecule
    if mass_of_molecule==1:
        monthmean=monthmean1
    else:
        monthmean=monthmean1*(sf*mass_of_molecule/mass_of_air)
        monthmean.units='kg/kg'
        
    monthmean.rename(str(ukca_name))
    iris.save(monthmean,path_out+out_base+str(ukca_name)+'.nc')
Example #11
 def test_time_mean(self):
     # This test for time-mean fields also tests negative forecast time.
     source_grib = tests.get_data_path(("GRIB", "time_processed",
                                        "time_bound.grib2"))
     cubes = iris.load(source_grib)
     expect_diffs = {'totalLength': (21232, 21227),
                     'productionStatusOfProcessedData': (0, 255),
                     'scaleFactorOfRadiusOfSphericalEarth': (MDI,
                                                             0),
                     'shapeOfTheEarth': (0, 1),
                     'scaledValueOfRadiusOfSphericalEarth': (MDI,
                                                             6367470),
                     'longitudeOfLastGridPoint': (356249908, 356249809),
                     'latitudeOfLastGridPoint': (-89999938, -89999944),
                     'typeOfGeneratingProcess': (0, 255),
                     'generatingProcessIdentifier': (128, 255),
                     'typeOfTimeIncrement': (2, 255)
                     }
     self.skip_keys.append('stepType')
     self.skip_keys.append('stepTypeInternal')
     with self.temp_filename(suffix='.grib2') as temp_file_path:
         iris.save(cubes, temp_file_path)
         self.assertGribMessageDifference(source_grib, temp_file_path,
                                          expect_diffs, self.skip_keys,
                                          skip_sections=[2])
Example #12
    def test_perturbation(self):
        path = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                    'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = load_cube(path)
        # trim to 1 time and regular lats
        cube = cube[0, 12:144, :]
        crs = iris.coord_systems.GeogCS(6371229)
        cube.coord('latitude').coord_system = crs
        cube.coord('longitude').coord_system = crs
        # add a realization coordinate
        cube.add_aux_coord(iris.coords.DimCoord(points=1,
                                                standard_name='realization',
                                                units='1'))
        with self.temp_filename('testPDT11.GRIB2') as temp_file_path:
            iris.save(cube, temp_file_path)

            # Check that various aspects of the saved file are as expected.
            expect_values = (
                (0, 'editionNumber',  2),
                (3, 'gridDefinitionTemplateNumber', 0),
                (4, 'productDefinitionTemplateNumber', 11),
                (4, 'perturbationNumber', 1),
                (4, 'typeOfStatisticalProcessing', 0),
                (4, 'numberOfForecastsInEnsemble', 255))
            self.assertGribMessageContents(temp_file_path, expect_values)
Example #13
    def test_name2_field(self):
        filepath = tests.get_data_path(('NAME', 'NAMEII_field.txt'))
        name_cubes = iris.load(filepath)
        # Check gribapi version, because we currently have a known load/save
        # problem with gribapi 1v14 (at least).
        gribapi_ver = gribapi.grib_get_api_version()
        gribapi_fully_supported_version = \
            (StrictVersion(gribapi.grib_get_api_version()) <
             StrictVersion('1.13'))
        for i, name_cube in enumerate(name_cubes):
            if not gribapi_fully_supported_version:
                data = name_cube.data
                if np.min(data) == np.max(data):
                    msg = ('NAMEII cube #{}, "{}" has empty data : '
                           'SKIPPING test for this cube, as save/load will '
                           'not currently work with gribapi > 1v12.')
                    warnings.warn(msg.format(i, name_cube.name()))
                    continue

            with self.temp_filename('.grib2') as temp_filename:
                iris.save(name_cube, temp_filename)
                grib_cube = iris.load_cube(temp_filename, callback=name_cb)
                self.check_common(name_cube, grib_cube)
                self.assertCML(
                    grib_cube, tests.get_result_path(
                        ('integration', 'name_grib', 'NAMEII',
                         '{}_{}.cml'.format(i, name_cube.name()))))
Example #14
 def test_scalar_int32_pressure(self):
     # Make sure we can save a scalar int32 coordinate with unit conversion.
     cube = self._load_basic()
     cube.coord("pressure").points = np.array([200], dtype=np.int32)
     cube.coord("pressure").units = "hPa"
     with self.temp_filename(".grib2") as testfile:
         iris.save(cube, testfile)
Example #15
    def test_pp_save_rules(self):
        # Test single process flags
        for _, process_desc in iris.fileformats.pp.LBPROC_PAIRS[1:]:
            # Get basic cube and set process flag manually
            ll_cube = stock.lat_lon_cube()
            ll_cube.attributes["ukmo__process_flags"] = (process_desc,)
    
            # Save cube to pp
            temp_filename = iris.util.create_temp_filename(".pp")
            iris.save(ll_cube, temp_filename)
     
            # Check the lbproc is what we expect
            self.assertEqual(self.lbproc_from_pp(temp_filename),
                             iris.fileformats.pp.lbproc_map[process_desc])

            os.remove(temp_filename)

        # Test multiple process flags
        multiple_bit_values = ((128, 64), (4096, 1024), (8192, 1024))
        
        # Maps lbproc value to the process flags that should be created
        multiple_map = {sum(bits): [iris.fileformats.pp.lbproc_map[bit] for bit in bits]
                        for bits in multiple_bit_values}

        for lbproc, descriptions in six.iteritems(multiple_map):
            ll_cube = stock.lat_lon_cube()
            ll_cube.attributes["ukmo__process_flags"] = descriptions
            
            # Save cube to pp
            temp_filename = iris.util.create_temp_filename(".pp")
            iris.save(ll_cube, temp_filename)
            
            # Check the lbproc is what we expect
            self.assertEqual(self.lbproc_from_pp(temp_filename), lbproc)

            os.remove(temp_filename)
Example #16
 def test_round_trip(self):
     cube, = iris.load(self.fname)
     with self.temp_filename(suffix='.nc') as filename:
         iris.save(cube, filename, unlimited_dimensions=[])
         round_cube, = iris.load_raw(filename)
         self.assertEqual(len(round_cube.cell_measures()), 1)
         self.assertEqual(round_cube.cell_measures()[0].measure, 'area')
Example #17
    def system_test_supported_filetypes(self):
        nx, ny = 60, 60
        dataarray = np.arange(nx * ny, dtype='>f4').reshape(nx, ny)

        laty = np.linspace(0, 59, ny)
        lonx = np.linspace(30, 89, nx)

        horiz_cs = lambda : iris.coord_systems.LatLonCS(
                        iris.coord_systems.SpheroidDatum("spherical", 6371229.0, flattening=0.0, units=iris.unit.Unit('m')),
                        iris.coord_systems.PrimeMeridian(label="Greenwich", value=0.0),
                        iris.coord_systems.GeoPosition(90.0, 0.0), 0.0)

        cm = iris.cube.Cube(data=dataarray, long_name="System test data", units='m s-1')
        cm.add_dim_coord(
            iris.coords.DimCoord(laty, 'latitude', units='degrees',
                                 coord_system=horiz_cs()),
            0)
        cm.add_dim_coord(
            iris.coords.DimCoord(lonx, 'longitude', units='degrees',
                coord_system=horiz_cs()),
            1)
        cm.add_aux_coord(iris.coords.AuxCoord(9, 'forecast_period', units='hours'))
        hours_since_epoch = iris.unit.Unit('hours since epoch', iris.unit.CALENDAR_GREGORIAN)
        cm.add_aux_coord(iris.coords.AuxCoord(3, 'time', units=hours_since_epoch))
        cm.add_aux_coord(iris.coords.AuxCoord(99, long_name='pressure', units='Pa'))
 
        cm.assert_valid()

        for filetype in ('.nc', '.pp' , '.grib2'):
            saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
            iris.save(cm, saved_tmpfile)

            new_cube = iris.load_strict(saved_tmpfile)
        
            self.assertCML(new_cube, ('system', 'supported_filetype_%s.cml' % filetype))
Example #18
 def test_default_coord_system(self):
     GeogCS = iris.coord_systems.GeogCS
     cube = iris.tests.stock.lat_lon_cube()
     reference_txt_path = tests.get_result_path(('cube_to_pp',
                                                 'default_coord_system.txt'))
     # Remove all coordinate systems.
     for coord in cube.coords():
         coord.coord_system = None
     # Ensure no coordinate systems available.
     self.assertIsNone(cube.coord_system(GeogCS))
     self.assertIsNone(cube.coord_system(None))
     with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
             temp_pp_path:
         # Save cube to PP with no coordinate system.
         iris.save(cube, temp_pp_path)
         pp_cube = iris.load_cube(temp_pp_path)
         # Ensure saved cube has the default coordinate system.
         self.assertIsInstance(pp_cube.coord_system(GeogCS),
                               iris.coord_systems.GeogCS)
         self.assertIsNotNone(pp_cube.coord_system(None))
         self.assertIsInstance(pp_cube.coord_system(None),
                               iris.coord_systems.GeogCS)
         self.assertIsNotNone(pp_cube.coord_system())
         self.assertIsInstance(pp_cube.coord_system(),
                               iris.coord_systems.GeogCS)
def load_all_steps(pp_name):
    cubes=iris.load(pp_name)
    
    #########################################
    log.info("pp_name="+str(step_file))
    #########################################

    for cube in cubes:
        #capturing stash code from pp file
        stash_code=ukl.get_stash(cube)
        #print stash_code
        stashcodes.append(stash_code)
        #print stashcodes
        if stash_code in vd.variable_reference_stash:
            if not isinstance(cube.long_name,str):
                cube.long_name=vd.variable_reference_stash[stash_code].long_name
                # print 'added long_name',cube.long_name, 'to', stash_code
                if not isinstance(cube._var_name,str):
                    if not vd.variable_reference_stash[stash_code].short_name=='':
                        cube._var_name=vd.variable_reference_stash[stash_code].short_name
                        # print 'added short_name as cube._var_name',cube._var_name, 'to', stash_code
        #########################################
        log.info("cube.long_name= "+str(cube.long_name))
        #########################################
        
        folder_NETCDF=output_files_directory+'All_time_steps/'
        if cube._standard_name:
            saving_name=folder_NETCDF+'All_time_steps_'+stash_code+'_'+cube._standard_name+'.nc'
        elif isinstance(cube.long_name,str):
            saving_name=folder_NETCDF+'All_time_steps_'+stash_code+'_'+cube.long_name+'.nc'
        else:
            saving_name=folder_NETCDF+'All_time_steps_'+stash_code+'.nc'

        iris.save(cube,saving_name, netcdf_format="NETCDF4")
    def f_time_mean(self):
        """Calculate time mean over time domain and save to netcdf.

        Calculate this by a weighted (cube_event_ntimes) mean of the
        cube_event_means.  Hence, each individual time (e.g., day) in the
        original data has equal weighting.

        Create attribute time_mean"""
        # Contribution from first event mean
        ntime_total=0
        ntime=self.cube_event_ntimes[0]
        x1=self.cube_event_means[0]*float(ntime)
        ntime_total+=ntime
        # Contribution from remaining events
        if self.tdomain.nevents>1:
            for ievent in range(1,self.tdomain.nevents):
                ntime=self.cube_event_ntimes[ievent]
                x1+=self.cube_event_means[ievent]*float(ntime)
                ntime_total+=ntime
        # Calculate mean
        time_mean=x1/float(ntime_total)
        time_mean.standard_name=self.name
        self.time_mean=time_mean
        with iris.FUTURE.context(netcdf_no_unlimited=True):
            iris.save(self.time_mean,self.fileout1)
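The weighting above means each original time step contributes equally: the result is sum(event_mean * n_times) / sum(n_times). A minimal numeric check of that arithmetic (illustrative values only):

# Minimal numeric sketch of the weighted mean used in f_time_mean.
event_means = [2.0, 4.0]    # example event means
event_ntimes = [3, 1]       # number of times in each event
time_mean = sum(m * n for m, n in zip(event_means, event_ntimes)) / sum(event_ntimes)
assert time_mean == 2.5     # (2.0*3 + 4.0*1) / 4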
def save_cube(cube):
    """
    Saves cube as a netCDF file.
    """
    saving_name=saving_folder_l1+'L1_'+cube._var_name+'_'+cube.long_name+'.nc'
    iris.save(cube,saving_name, netcdf_format="NETCDF4")
    print 'saved:',cube.long_name
Example #22
def main(infile):

    thiscube = iris.load_cube(infile)

    xmin_rp = min(thiscube.coord('grid_longitude').points) - (0.036/2)
    xmax_rp = max(thiscube.coord('grid_longitude').points) + (0.036/2)
    ymin_rp = min(thiscube.coord('grid_latitude').points) - (0.036/2)
    ymax_rp = max(thiscube.coord('grid_latitude').points) + (0.036/2)

    pole_lat = thiscube.coord("grid_latitude").coord_system.grid_north_pole_latitude
    pole_lon = thiscube.coord("grid_latitude").coord_system.grid_north_pole_longitude

    bbox_ll = carto.unrotate_pole(np.array([xmin_rp,xmax_rp]), np.array([ymin_rp, ymax_rp]), pole_lon, pole_lat)

    latpts = np.arange(bbox_ll[1][0], bbox_ll[1][1], float(0.036))
    lonpts = np.arange(bbox_ll[0][0], bbox_ll[0][1], float(0.036))

    latitude = iris.coords.DimCoord(latpts, standard_name='latitude', units='degrees')
    longitude = iris.coords.DimCoord(lonpts, standard_name='longitude', units='degrees')

    ll = cs.GeogCS(semi_major_axis=6378137, semi_minor_axis=6356752.314245)
    llcube = iris.cube.Cube(np.zeros((latpts.size, lonpts.size), np.float32), dim_coords_and_dims=[(latitude, 0), (longitude, 1)])
    llcube.coord_system = ll

    llcube.coord(axis='x').coord_system = ll
    llcube.coord(axis='y').coord_system = ll

    thiscube_ll = iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid(thiscube, llcube)

    outfile = infile.replace('.pp','_ll.nc')

    iris.save(thiscube_ll, outfile)

    return(thiscube_ll)
Example #23
    def test_netcdf_hybrid_height(self):
        # Test saving a CF-netCDF file which contains a hybrid height
        # (i.e. dimensionless vertical) coordinate.
        # Read PP input file.
        names = ['air_potential_temperature', 'surface_altitude']
        file_in = tests.get_data_path(
            ('PP', 'COLPEX', 'small_colpex_theta_p_alt.pp'))
        cube = iris.load_cube(file_in, names[0])

        # Write Cube to netCDF file.
        with self.temp_filename(suffix='.nc') as file_out:
            iris.save(cube, file_out)

            # Check the netCDF file against CDL expected output.
            self.assertCDL(file_out,
                           ('netcdf', 'netcdf_save_hybrid_height.cdl'))

            # Read netCDF file.
            cubes = iris.load(file_out)
            cubes_names = [c.name() for c in cubes]
            self.assertEqual(cubes_names, names)

            # Check the PP read, netCDF write, netCDF read mechanism.
            self.assertCML(cubes.extract(names[0])[0],
                           ('netcdf', 'netcdf_save_load_hybrid_height.cml'))
Example #24
    def system_test_supported_filetypes(self):
        nx, ny = 60, 60
        dataarray = np.arange(nx * ny, dtype=">f4").reshape(nx, ny)

        laty = np.linspace(0, 59, ny).astype("f8")
        lonx = np.linspace(30, 89, nx).astype("f8")

        horiz_cs = lambda: iris.coord_systems.GeogCS(6371229)

        cm = iris.cube.Cube(data=dataarray, long_name="System test data", units="m s-1")
        cm.add_dim_coord(iris.coords.DimCoord(laty, "latitude", units="degrees", coord_system=horiz_cs()), 0)
        cm.add_dim_coord(iris.coords.DimCoord(lonx, "longitude", units="degrees", coord_system=horiz_cs()), 1)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([9], "i8"), "forecast_period", units="hours"))
        hours_since_epoch = cf_units.Unit("hours since epoch", cf_units.CALENDAR_GREGORIAN)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([3], "i8"), "time", units=hours_since_epoch))
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([99], "i8"), long_name="pressure", units="Pa"))

        cm.assert_valid()

        filetypes = (".nc", ".pp")
        if tests.GRIB_AVAILABLE:
            filetypes += (".grib2",)
        for filetype in filetypes:
            saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
            iris.save(cm, saved_tmpfile)

            new_cube = iris.load_cube(saved_tmpfile)

            self.assertCML(new_cube, ("system", "supported_filetype_%s.cml" % filetype))
Example #25
 def save_data(self, output_file):
     """
     Save this data object to a given output file
     :param output_file: Output file to save to.
     """
     logging.info('Saving data to %s' % output_file)
     iris.save(self, output_file, local_keys=self._local_attributes)
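local_keys names cube attributes that the netCDF saver should keep on each data variable instead of promoting to global attributes. A minimal sketch (the attribute names here are placeholders):

# Sketch: keep these attributes on the data variable rather than as global attributes.
iris.save(cube, 'output.nc', local_keys=['history', 'source'])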
Example #26
 def save_data(self, output_file):
     """
     Save data to a given output file
     :param output_file: File to save to
     """
     logging.info('Saving data to %s' % output_file)
     iris.save(self, output_file)
def loadCube(data_file, topog_file, **kwargs):
    """
    Loads cube and reorders axes into appropriate structure

    The Iris altitude conversion only works on pp files
    at load time, so we need to pull the nc file from
    OpenDAP, save a local temporary pp file and then
    load in with the topography.

    """
    opendapcube = iris.load_cube(data_file, **kwargs)
    tempfilep = os.path.join(tempfile.gettempdir(), "temporary.pp")
    iris.save(opendapcube, tempfilep)
    data, topography = iris.load([tempfilep, topog_file])

    if "altitude" not in [_.name() for _ in data.derived_coords]:
        # raise IOError("Derived altitude coord not present - problem with topography?")
        print "Derived altitude coord not present - problem with topography?"

    xdim, = data.coord_dims(data.coords(dim_coords=True, axis="X")[0])
    ydim, = data.coord_dims(data.coords(dim_coords=True, axis="Y")[0])
    zdim, = data.coord_dims(data.coords(dim_coords=True, axis="Z")[0])
    try:
        tdim, = data.coord_dims(data.coords(dim_coords=True, axis="T")[0])
        data.transpose([tdim, xdim, ydim, zdim])
    except IndexError:
        data.transpose([xdim, ydim, zdim])

    return data
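A hypothetical call (the URL, file name, and constraint are placeholders); extra keyword arguments such as a constraint are passed straight through to iris.load_cube:

# Hypothetical usage of loadCube with an assumed OpenDAP URL and topography file.
import iris

data = loadCube('http://example.com/dap/model_output.nc',
                'orography.nc',
                constraint=iris.Constraint('air_potential_temperature'))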
Example #28
def composite_m48(cube_name, ncfile_path='/home/nicholat/project/pacemaker/ncfiles/',notanom=False):
    """
    Create a 48 month long cube, for use with
    pacemaker 4 yr oscillating runs
    Input: cube name (string), ncfilepath 

    """
    cube = iris.load_cube(ncfile_path+cube_name)
    try:
        cube.coord('t').standard_name='time'
    except:
        pass
    else:
        print "t coord changed to time"

    if notanom:
        cube_rsc = cube
        iris.coord_categorisation.add_month_number(cube_rsc, 'time', 'month_number')
    else:
        cube_rsc = remove_seascyc(cube) 
    cube_m48 = enscyc_ag(cube_rsc)
    cube_m48.long_name = cube.long_name
    if notanom:
        new_name = cube_name[:-2]+'m48.abs.nc'
    else:
        new_name = cube_name[:-2]+'m48.nc'
    iris.save(cube_m48,ncfile_path+new_name)
    return cube_m48, cube_rsc, cube
def getJobs(db):
    try:
        q = tinydb.Query()
        rows = db.search((q.saved == None) & (q.success == False))  # assumed query: jobs not yet saved and not successful


        info = manifest.runnames[file.split("_")[-2]]
        newfiles = []
        variables = info["variables"]

        for variable in info["variables"]:
            print "Ingesting " + variable
            try:
                thisdata = iris.load_cube(file, variable)
                stem, fname = os.path.split(file)
                newname = file.split("_")[-2] + "_" + variable + "_" + fname.split("_")[0] + "_piotr_" + fname.split("_")[-1].replace("grib2", "nc")
                iris.save(thisdata, os.path.join(stem, newname))
                #postJob(newname)
            except iris.exceptions.ConstraintMismatchError:
                print variable + " not found in file " + file + ". continuing"
                cubes = iris.load(file)
                print cubes

    except:
        raise
    finally:
        print "Removing file", file
Example #30
    def test_netcdf_multi_nocoord(self):
        # Testing the saving of a cubelist with no coords.
        cubes = iris.cube.CubeList([self.cube, self.cube2, self.cube3])
        with self.temp_filename(suffix='.nc') as file_out:
            iris.save(cubes, file_out)

            # Check the netCDF file against CDL expected output.
            self.assertCDL(file_out, ('netcdf', 'netcdf_save_nocoord.cdl'))
s00010 = ['m01s00i010'] # specific humidity
s00024 = ['m01s00i024'] # T
s00025 = ['m01s00i025'] # BL
s00409 = ['m01s00i409'] # P

filename = ['/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/apa.pp/av256a.pa201406*.pp']

cube = iris.load(filename)

cube00002 = cube.extract(iris.AttributeConstraint(STASH=s00002[0]))
cube00003 = cube.extract(iris.AttributeConstraint(STASH=s00003[0]))
cube00010 = cube.extract(iris.AttributeConstraint(STASH=s00010[0]))
cube00024 = cube.extract(iris.AttributeConstraint(STASH=s00024[0]))
cube00025 = cube.extract(iris.AttributeConstraint(STASH=s00025[0]))
cube00409 = cube.extract(iris.AttributeConstraint(STASH=s00409[0]))

#surface34001 = cube34001.extract(iris.Constraint(model_level_number=1))
#surface34996 = cube34996.extract(iris.Constraint(model_level_number=1))

iris.save(cube00002, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/u.pp')
iris.save(cube00003, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/v.pp')
iris.save(cube00010, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/specific_humidty.pp')
iris.save(cube00024, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/T.pp')
iris.save(cube00025, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/BL.pp')
iris.save(cube00409, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/Met/P.pp')

print 'done'



Example #32
    20 + 15 * 0.5,
)

# Convert the field to an in=1, out=0 mask
lats = rGrid.coord(axis='Y').points
lons = rGrid.coord(axis='X').points
[lon2d, lat2d] = numpy.meshgrid(lons, lats)
lon2 = lon2d.reshape(-1)  # to 1d for iteration
lat2 = lat2d.reshape(-1)
mask = []
for lat, lon in zip(lat2, lon2):
    this_point = geopandas.GeoSeries([Point(lon, lat)],
                                     crs=yangtze.crs,
                                     index=yangtze.index)
    res = yangtze.geometry.contains(this_point)
    mask.append(res.values[0])

mask = numpy.array(mask).reshape(lon2d.shape)
mask = mask * 1
rGrid.data = mask

rGrid2 = rGrid.copy()
for latI in range(len(lats) - 1):
    for lonI in range(len(lons) - 1):
        rGrid2.data[latI, lonI] = 0
        if rGrid.data[latI, lonI] != rGrid.data[latI, lonI + 1]:
            rGrid2.data[latI, lonI] = 1
        if rGrid.data[latI, lonI] != rGrid.data[latI + 1, lonI]:
            rGrid2.data[latI, lonI] = 1
iris.save(rGrid2, "mask.boundary.nc")
Example #33
#iris.save(cube38490, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38490.pp')
#iris.save(cube38491, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38491.pp')
#iris.save(cube38492, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38492.pp')
#iris.save(cube38493, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38493.pp')
#iris.save(cube38494, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38494.pp')
#iris.save(cube38495, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38495.pp')
#iris.save(cube38496, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38496.pp')
#iris.save(cube38497, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38497.pp')
#iris.save(cube38498, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38498.pp')
#iris.save(cube38499, '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/diurnal_cycle/apc pm (kg m-3)/s38499.pp')

# =============================================================================
# apk (kg/kg)
# =============================================================================
iris.save(
    cube34102,
    '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av257/dumped_pp_files/June/s34102.pp'
)
iris.save(
    cube34104,
    '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av257/dumped_pp_files/June/s34104.pp'
)
iris.save(
    cube34105,
    '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av257/dumped_pp_files/June/s34105.pp'
)
iris.save(
    cube34106,
    '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av257/dumped_pp_files/June/s34106.pp'
)
iris.save(
    cube34108,
args = parser.parse_args()

root_dir = "%s/EUSTACE/1.0/" % os.getenv('SCRATCH')

op_dir = "%s/monthly/climatology_%04d_%04d" % (root_dir, args.startyear,
                                               args.endyear)
if not os.path.isdir(op_dir):
    os.makedirs(op_dir)

accum = None
for cyr in range(args.startyear, args.endyear + 1):
    inst = iris.load_cube(
        "%s/monthly/%04d/%02d.nc" % (root_dir, cyr, args.month),
        'air_temperature')
    if accum is None:
        accum = inst
        count = inst.copy()
        count.data.mask = False
        count.data *= 0
        count.data[accum.data.mask == False] += 1
    else:
        accum.data.mask[inst.data.mask == False] = False
        accum.data[accum.data < 0] *= 0
        accum.data = accum.data + inst.data
        count.data[inst.data.mask == False] += 1

accum.data[count.data >= 20] /= count.data[count.data >= 20]
accum.data.mask[count.data < 20] = True

iris.save(accum, "%s/%02d.nc" % (op_dir, args.month), fill_value=-32768)
Example #35
    cmip_file = os.path.join(dir_in, 'cmip5_mean_trend_filtered_modelmean_masked_time.nc')
    datadir = '/home/users/mjrobert/hrcm/cache/malcolm/HadISST2/1x1/processing_2018'    
    datafile = os.path.join(datadir, 'hadisst2_monthly_1948-2015_tos_01-12_1x1.nc')

    hadisst2_dir = '/home/users/mjrobert/hrcm/cache/malcolm/HadISST2/1x1/'
    cmip_1x1 = os.path.join(hadisst2_dir, 'cmip5_trend_1x1.nc')

    if not os.path.exists(cmip_1x1):
        trend_025 = iris.load_cube(cmip_file)
        trend_025.coord('longitude').circular = True
        c_ref = iris.load(datafile)[0]
        for coord in ['latitude','longitude']:
            c_ref.coord(coord).guess_bounds()
            trend_025.coord(coord).guess_bounds()
        trend_1x1 = trend_025.regrid(c_ref, iris.analysis.AreaWeighted())
        iris.save(trend_1x1, cmip_1x1)

    trend = iris.load_cube(cmip_1x1)
    icc.add_day_of_year(trend, 'time')
    icc.add_year(trend, 'time')

    hadisst2_files = glob.glob(os.path.join(hadisst2_dir, 'HadISST2_1x1_regrid_sst*'))
    

    '''
    for each year, read in the HadISST2 daily data
    read in the monthly data, dec year-1 to jan year +1
    do time interpolation from one time to the other
    '''
    hadisst_data = iris.load_cube(os.path.join(hadisst2_dir, 'HadISST2_1x1_regrid_sst_2002.nc'))
    icc.add_day_of_year(hadisst_data, 'time')
Example #36
 def _save_cube(self, cube):
     descriptor, temp_file = tempfile.mkstemp('.nc')
     os.close(descriptor)
     iris.save(cube, temp_file)
     self.temp_files.append(temp_file)
     return temp_file
Example #37
            series = as_series(series)
            status = "Water"

        log.info('[{}] {}'.format(status, obs.name))

    if raw_series:  # Save cube.
        for station, cube in raw_series.items():
            cube = standardize_fill_value(cube)
            cube = add_station(cube, station)
        try:
            cube = iris.cube.CubeList(raw_series.values()).merge_cube()
        except MergeError as e:
            log.warning(e)

        ensure_timeseries(cube)
        iris.save(cube, fname)
        del cube

    log.info('Finished processing [{}]: {}'.format(mod_name, url))

# <markdowncell>

# ### Add extra stations.

# <codecell>

include = dict({
    'Scituate, MA': dict(lon=-70.7166, lat=42.9259),
    'Wells, ME': dict(lon=-70.583883, lat=43.272411)
})
Example #38
    for t, time_cube in enumerate(time_coord_cube[0].slices(
        ['pressure', 'grid_latitude', 'grid_longitude'])):

        time_cube_loop_points.append(time_cube.coord('time').points[0])

    time_coord_argsort = np.argsort(np.array(time_cube_loop_points))

    fu = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s%s.pp' % (expmin1, experiment_id,
                                                        experiment_id, diag)

    cube = iris.load_cube(fu)

    cube.remove_coord('time')

    #pdb.set_trace()

    cube.add_dim_coord(
        DimCoord(points=np.array(time_cube_loop_points),
                 long_name='time',
                 standard_name='time',
                 units=time_coord_cube[0].coord('time').units), 0)

    print experiment_id
    fu_2 = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s%s_updated.pp' % (
        expmin1, experiment_id, experiment_id, diag)
    iris.save(cube,
              '%s' % fu_2,
              field_coords=('grid_latitude', 'grid_longitude'))

    #pdb.set_trace
Example #39
landmask = ~(ma.make_mask(lsmask.data.copy()) +
             np.zeros(temp_mean.shape)).astype(bool)  # mask sea, show land
linreg_mask = ma.array(linreg_map, mask=landmask)
linreg_mask_nw = ma.array(linreg_map_nw, mask=landmask)
linreg_mask_mw = ma.array(linreg_map_mw, mask=landmask)

#Copy the soil moisture cube then change everything, clean it up, save phi as a netCDF
reg_cube = temp_mean.copy()
reg_cube.data[:] = linreg_mask
reg_cube.long_name = 'Lin Regression'
reg_cube.units = 'no_unit'
reg_cube.attributes['title'] = 'Lin Regression'
reg_cube.attributes['name'] = 'reg'
reg_cube.remove_coord('surface')
reg_cube.remove_coord('time')
iris.save(reg_cube, ncfile_path + 'lreg.4ysl.tsfc.ttropo.nc')

reg_cube_nw = temp_mean.copy()
reg_cube_nw.data[:] = linreg_mask_nw
reg_cube_nw.long_name = 'Lin Regression'
reg_cube_nw.units = 'no_unit'
reg_cube_nw.attributes['title'] = 'Lin Regression'
reg_cube_nw.attributes['name'] = 'reg'
reg_cube_nw.remove_coord('surface')
reg_cube_nw.remove_coord('time')

reg_cube_mw = temp_mean.copy()
reg_cube_mw.data[:] = linreg_mask_mw
reg_cube_mw.long_name = 'Lin Regression'
reg_cube_mw.units = 'no_unit'
reg_cube_mw.attributes['title'] = 'Lin Regression'

        mean_list.extend(iris.cube.CubeList([pc_time_merge]))

    #pdb.set_trace()

    mean = iris.cube.CubeList(mean_list).merge_cube()

    #mean.coord('grid_latitude').guess_bounds()
    #mean.coord('grid_longitude').guess_bounds()
    #mean.coords('time')[0].guess_bounds()
    #mean.add_dim_coord(DimCoord(points=mean.coords('time')[0].bounds[:,0].flatten(), long_name='time', standard_name='time',
    #                            units=mean.coords('time')[0].units), 0)

    iris.save(mean, '%s%s/%s/%s%s_mean_by_hour.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag),
              field_coords=('grid_latitude', 'grid_longitude'))

    try:
        iris.load_cube('%s%s/%s/%s%s_mean_by_hour.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag))
    except iris.exceptions.ConstraintMismatchError:

        #pdb.set_trace()

        save_as_cube = iris.cube.Cube(mean.data)
        save_as_cube.add_dim_coord(mean.coord('pressure'), 0)
        save_as_cube.add_dim_coord(mean.coord('grid_latitude'), 2)
        save_as_cube.add_dim_coord(mean.coord('grid_longitude'), 3)
        save_as_cube.add_dim_coord(mean.coords('time')[0], 1)

        iris.save(save_as_cube, '%s%s/%s/%s%s_mean_by_hour.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag),
                  field_coords=('grid_latitude', 'grid_longitude'))
Example #41
def cell_statistics(input_cubes,
                    track,
                    mask,
                    aggregators,
                    cell,
                    output_path='./',
                    output_name='Profiles',
                    width=10000,
                    z_coord='model_level_number',
                    dimensions=['x', 'y'],
                    **kwargs):
    from iris.cube import Cube, CubeList
    from iris.coords import AuxCoord
    from iris import Constraint, save

    # If input is single cube, turn into cubelist
    if type(input_cubes) is Cube:
        input_cubes = CubeList([input_cubes])

    logging.debug('Start calculating profiles for cell ' + str(cell))
    track_i = track[track['cell'] == cell]

    cubes_profile = {}
    for aggregator in aggregators:
        cubes_profile[aggregator.name()] = CubeList()

    for time_i in track_i['time'].values:
        constraint_time = Constraint(time=time_i)

        mask_i = mask.extract(constraint_time)
        mask_cell_i = mask_cell(mask_i, cell, track_i, masked=False)
        mask_cell_surface_i = mask_cell_surface(mask_i,
                                                cell,
                                                track_i,
                                                masked=False,
                                                z_coord=z_coord)

        x_dim = mask_cell_surface_i.coord_dims('projection_x_coordinate')[0]
        y_dim = mask_cell_surface_i.coord_dims('projection_y_coordinate')[0]
        x_coord = mask_cell_surface_i.coord('projection_x_coordinate')
        y_coord = mask_cell_surface_i.coord('projection_y_coordinate')

        if (mask_cell_surface_i.core_data() > 0).any():
            box_mask_i = get_bounding_box(mask_cell_surface_i.core_data(),
                                          buffer=1)

            box_mask = [[
                x_coord.points[box_mask_i[x_dim][0]],
                x_coord.points[box_mask_i[x_dim][1]]
            ],
                        [
                            y_coord.points[box_mask_i[y_dim][0]],
                            y_coord.points[box_mask_i[y_dim][1]]
                        ]]
        else:
            box_mask = [[np.nan, np.nan], [np.nan, np.nan]]

        x = track_i[track_i['time'].values ==
                    time_i]['projection_x_coordinate'].values[0]
        y = track_i[track_i['time'].values ==
                    time_i]['projection_y_coordinate'].values[0]

        box_slice = [[x - width, x + width], [y - width, y + width]]

        x_min = np.nanmin([box_mask[0][0], box_slice[0][0]])
        x_max = np.nanmax([box_mask[0][1], box_slice[0][1]])
        y_min = np.nanmin([box_mask[1][0], box_slice[1][0]])
        y_max = np.nanmax([box_mask[1][1], box_slice[1][1]])

        constraint_x = Constraint(projection_x_coordinate=lambda cell: int(
            x_min) < cell < int(x_max))
        constraint_y = Constraint(projection_y_coordinate=lambda cell: int(
            y_min) < cell < int(y_max))

        constraint = constraint_time & constraint_x & constraint_y
        #       Mask_cell_surface_i=mask_cell_surface(Mask_w_i,cell,masked=False,z_coord='model_level_number')
        mask_cell_i = mask_cell_i.extract(constraint)
        mask_cell_surface_i = mask_cell_surface_i.extract(constraint)

        input_cubes_i = input_cubes.extract(constraint)
        for cube in input_cubes_i:
            cube_masked = mask_cube_cell(cube, mask_cell_i, cell, track_i)
            for aggregator in aggregators:
                cubes_profile[aggregator.name()].append(
                    cube_masked.collapsed(dimensions, aggregator, **kwargs))

    minutes = (track_i['time_cell'] / pd.Timedelta(minutes=1)).values
    latitude = track_i['latitude'].values
    longitude = track_i['longitude'].values
    minutes_coord = AuxCoord(minutes, long_name='cell_time', units='min')
    latitude_coord = AuxCoord(latitude, long_name='latitude', units='degrees')
    longitude_coord = AuxCoord(longitude,
                               long_name='longitude',
                               units='degrees')

    for aggregator in aggregators:

        cubes_profile[aggregator.name()] = cubes_profile[
            aggregator.name()].merge()
        for cube in cubes_profile[aggregator.name()]:
            for coord in cube.coords():
                if (coord.ndim > 1
                        and (cube.coord_dims(dimensions[0])[0]
                             in cube.coord_dims(coord) or cube.coord_dims(
                                 dimensions[1])[0] in cube.coord_dims(coord))):
                    cube.remove_coord(coord.name())

            cube.add_aux_coord(minutes_coord,
                               data_dims=cube.coord_dims('time'))
            cube.add_aux_coord(latitude_coord,
                               data_dims=cube.coord_dims('time'))
            cube.add_aux_coord(longitude_coord,
                               data_dims=cube.coord_dims('time'))

        os.makedirs(os.path.join(output_path, output_name, aggregator.name()),
                    exist_ok=True)
        savefile = os.path.join(
            output_path, output_name, aggregator.name(), output_name + '_' +
            aggregator.name() + '_' + str(int(cell)) + '.nc')
        save(cubes_profile[aggregator.name()], savefile)
def main(cfg):
    # The config object is a dict of all the metadata from the pre-processor

    # get variable processed
    var = list(extract_variables(cfg).keys())
    assert len(var) == 1
    var = var[0]

    if var == "pr":
        rel_change = True
    else:
        rel_change = False

    # first group datasets by project..
    # this creates a dict of datasets keyed by project (CMIP5, CMIP6 etc.)
    projects = group_metadata(cfg["input_data"].values(), "project")
    # how to uniquely define a dataset varies by project, for CMIP it's simple, just dataset...
    # for CORDEX, combo of dataset and driver (and possibly also domain if we start adding those)
    # also gets more complex if we start adding in different ensembles..

    # This section of the code loads and organises the data to be ready for plotting
    logger.info("Loading data")
    # empty dict to store results
    projections = {}
    model_lists = {}
    cordex_drivers = []
    # loop over projects
    for proj in projects:
        # we now have a list of all the data entries..
        # for CMIPs we can just group metadata again by dataset then work with that..
        models = group_metadata(projects[proj], "dataset")

        # empty dict for results
        if proj == 'non-cordex-rcm':
            proj = 'CORDEX'

        if proj == 'non-cmip5-gcm':
            proj = 'CMIP5'

        if proj not in projections.keys():
            projections[proj] = {}

        proj_key = proj
        # loop over the models
        for m in models:
            if proj == "CORDEX":
                # then we need to go one deeper in the dictionary to deal with driving models
                drivers = group_metadata(models[m], "driver")
                projections[proj][m] = dict.fromkeys(drivers.keys())
                for d in drivers:
                    logging.info(f"Calculating anomalies for {proj} {m} {d}")
                    anoms = get_anomalies(drivers[d], rel_change)
                    if anoms is None:
                        continue
                    projections[proj][m][d] = anoms
                    if proj not in model_lists:
                        model_lists[proj] = []
                    model_lists[proj].append(f"{m} {d}")

                    # fix shorthand driver names
                    if d == 'HadGEM':
                        d = 'MOHC-HadGEM2-ES'
                    elif d == 'MPI':
                        d = 'MPI-M-MPI-ESM-LR'

                    if proj == "CORDEX":
                        cordex_drivers.append(d)
            elif proj == "UKCP18":
                # go deeper to deal with ensembles and datasets
                # split UKCP into separate GCM and RCM
                proj_key = f"UKCP18 {m}"
                ensembles = group_metadata(models[m], "ensemble")
                projections[proj_key] = dict.fromkeys(ensembles.keys())
                for ens in ensembles:
                    logging.info(f"Calculating anomalies for {proj_key} {ens}")
                    anoms = get_anomalies(ensembles[ens], rel_change)
                    if anoms is None:
                        continue
                    projections[proj_key][ens] = anoms
                    if proj_key not in model_lists:
                        model_lists[proj_key] = []
                    model_lists[proj_key].append(f"{proj_key} {ens}")
            elif "cordex-cpm" in proj:
                # in this case need to split by domain as same model spec
                # is used in multiple domains in some cases
                domains = group_metadata(models[m], "domain")
                proj_key = "cordex-cpm"
                projections[proj_key][m] = dict.fromkeys(domains.keys())
                for dom in domains:
                    logging.info(
                        f"Calculating anomalies for {proj_key} {dom} {m}")
                    anoms = get_anomalies(domains[dom], rel_change)
                    projections[proj_key][m][dom] = anoms
                if proj_key not in model_lists:
                    model_lists[proj_key] = []
                model_lists[proj_key].append(f"{dom} {m}")
            else:
                logging.info(f"Calculating anomalies for {proj} {m}")
                anoms = get_anomalies(models[m], rel_change)
                if anoms is None:
                    continue
                projections[proj][m] = anoms
                if proj not in model_lists:
                    model_lists[proj] = []
                model_lists[proj].append(f"{m}")
        # remove any empty categories (i.e. UKCP18 which has been split into rcm and gcm)
        if projections[proj] == {}:
            del projections[proj]

    cordex_drivers = set(cordex_drivers)

    # create two extra subsets containing CORDEX drivers, and CPM drivers
    projections['CORDEX_drivers'] = {}
    cmip5_driving_models = []
    for m in cordex_drivers:
        cmip5_driving_models.append(remove_institute_from_driver(m))

    for m in projections['CMIP5']:
        if m in cmip5_driving_models:
            projections['CORDEX_drivers'][m] = projections['CMIP5'][m]

    projections['CPM_drivers'] = {}
    for rcm in projections['CORDEX']:
        for d in projections['CORDEX'][rcm]:
            if f'{rcm} {d}' in list(CPM_DRIVERS.values()):
                projections['CPM_drivers'][f'{rcm} {d}'] = projections[
                    'CORDEX'][rcm][d]

    # compute multi model means
    for p in projections:
        mm_mean = compute_multi_model_stats(
            list(NestedDictValues(projections[p])), iris.analysis.MEAN)
        projections[p]['mean'] = mm_mean

    # compute regridded versions for CORDEX and CPMs
    for p in projections:
        grid = None
        if p == 'CORDEX':
            grid = projections['CORDEX_drivers']['mean']
            scheme = 'area_weighted'
        elif p == 'cordex-cpm':
            grid = projections['CPM_drivers']['mean']
            scheme = 'area_weighted'

        if grid:
            src = projections[p]['mean']
            regrid_mean = regrid(src, grid, scheme)
            projections[p]['mean_rg'] = regrid_mean

    # compute regrid diffs
    for p in projections:
        if p == 'CORDEX':
            diff = projections[p]['mean_rg'] - projections['CORDEX_drivers'][
                'mean']
            projections[p]['diff_rg'] = diff
        elif p == 'cordex-cpm':
            diff = projections[p]['mean_rg'] - projections['CPM_drivers'][
                'mean']
            projections[p]['diff_rg'] = diff

    # this section of the code does the plotting..
    # we now have all the projections in the projections dictionary

    # now lets plot them
    # first we need to process the dictionary, and move the data into a list of vectors
    # the projections object is the key one that contains all our data..
    seasons = {0: "DJF", 1: "MAM", 2: "JJA", 3: "SON"}
    logger.info("Plotting")
    extent = (
        cfg["domain"]["start_longitude"] - 2,
        cfg["domain"]["end_longitude"] + 2,
        cfg["domain"]["start_latitude"] - 2,
        cfg["domain"]["end_latitude"] + 2,
    )
    for s in seasons.keys():
        # make directory
        try:
            os.mkdir(f"{cfg['plot_dir']}/{seasons[s]}")
        except FileExistsError:
            pass
        for p in projections:
            pdata = process_projections_dict(projections[p], s)

            for m in pdata:
                # don't plot driving model data twice.
                if '_drivers' in p:
                    if m != 'mean':
                        continue

                title = f"{p} {m} {seasons[s]} {var} change"
                plt.figure(figsize=(12.8, 9.6))
                ax = plt.axes(projection=ccrs.PlateCarree())
                plot_map(pdata[m], extent, var, ax, True)
                plt.title(title)
                logging.info(f'Saving plot for {p} {m} {s}')
                plt.savefig(
                    f"{cfg['plot_dir']}/{seasons[s]}/{p}_{m}_map_{seasons[s]}.png"
                )
                plt.close()

                # save calculated anomaly data, in case we want to work with it later
                # make directory
                try:
                    os.mkdir(f"{cfg['work_dir']}/{seasons[s]}")
                except FileExistsError:
                    pass

                iris.save(
                    pdata[m],
                    f"{cfg['work_dir']}/{seasons[s]}/{p}_{m}_anom_{seasons[s]}.nc"
                )

        # now make panel plots for the mean data
        # only if we have CPM data though
        if 'cordex-cpm' in projections:
            scon = iris.Constraint(season_number=s)
            logging.info(f'Making {seasons[s]} panel plot')
            plt.figure(figsize=(12.8, 9.6))
            # plots should include: all CMIP5, CORDEX drivers, CORDEX, CPM drivers, CPM.
            ax = plt.subplot(331, projection=ccrs.PlateCarree())
            cmesh = plot_map(projections['CMIP5']['mean'].extract(scon),
                             extent, var, ax)
            plt.title('CMIP5')

            ax = plt.subplot(334, projection=ccrs.PlateCarree())
            plot_map(projections['CORDEX_drivers']['mean'].extract(scon),
                     extent, var, ax)
            plt.title('CORDEX driving models')

            ax = plt.subplot(335, projection=ccrs.PlateCarree())
            plot_map(projections['CORDEX']['mean'].extract(scon), extent, var,
                     ax)
            plt.title('CORDEX')

            # plot diff of CORDEX to CMIP
            ax = plt.subplot(336, projection=ccrs.PlateCarree())
            cmesh_diff = plot_map(
                projections['CORDEX']['diff_rg'].extract(scon), extent,
                f'{var}_diff', ax)
            plt.title('CORDEX - CMIP5 diff')

            ax = plt.subplot(337, projection=ccrs.PlateCarree())
            plot_map(projections['CPM_drivers']['mean'].extract(scon), extent,
                     var, ax)
            plt.title('CPM driving models')

            ax = plt.subplot(338, projection=ccrs.PlateCarree())
            plot_map(projections['cordex-cpm']['mean'].extract(scon), extent,
                     var, ax)
            plt.title('CPM')

            # plot diff of CPM to CORDEX
            ax = plt.subplot(339, projection=ccrs.PlateCarree())
            plot_map(projections['cordex-cpm']['diff_rg'].extract(scon),
                     extent, f'{var}_diff', ax)
            plt.title('CPM - CORDEX diff')

            # add legends
            ax = plt.subplot(332)
            ax.axis("off")
            plt.colorbar(cmesh, orientation="horizontal")

            ax = plt.subplot(333)
            ax.axis("off")
            plt.colorbar(cmesh_diff, orientation="horizontal")

            plt.suptitle(f'{seasons[s]} {var} change')
            plt.savefig(
                f"{cfg['plot_dir']}/{seasons[s]}/all_means_map_{seasons[s]}.png"
            )

    # print all datasets used
    print("Input models for plots:")
    for p in model_lists.keys():
        print(f"{p}: {len(model_lists[p])} models")
        print(model_lists[p])
        print("")
Example #43
def main(inargs):
    """Run the program."""

    agg_functions = {'mean': iris.analysis.MEAN, 'sum': iris.analysis.SUM}
    metadata_dict = {}

    basin_cube = iris.load_cube(inargs.basin_file, 'region')
    assert basin_cube.data.min() == 11
    assert basin_cube.data.max() == 17
    basin_numbers = numpy.array([11, 12, 13, 14, 15, 16, 17, 18])
    metadata_dict[inargs.basin_file] = basin_cube.attributes['history']

    flag_values = basin_cube.attributes['flag_values'] + ' 18'
    flag_meanings = basin_cube.attributes['flag_meanings'] + ' globe'
    basin_coord = iris.coords.DimCoord(basin_numbers,
                                       standard_name=basin_cube.standard_name,
                                       long_name=basin_cube.long_name,
                                       var_name=basin_cube.var_name,
                                       units=basin_cube.units,
                                       attributes={
                                           'flag_values': flag_values,
                                           'flag_meanings': flag_meanings
                                       })

    if inargs.weights:
        weights_cube = gio.get_ocean_weights(inargs.weights)
        metadata_dict[inargs.weights] = weights_cube.attributes['history']

    output_cubelist = iris.cube.CubeList([])
    for infile in inargs.infiles:
        print(infile)
        if inargs.var == 'ocean_volume':
            cube = gio.get_ocean_weights(infile)
            history = [cube.attributes['history']]
        else:
            cube, history = gio.combine_files(infile, inargs.var, checks=True)
        assert cube.ndim in [3, 4]
        coord_names = [coord.name() for coord in cube.dim_coords]
        if inargs.annual:
            cube = timeseries.convert_to_annual(cube, chunk=inargs.chunk)

        assert basin_cube.shape == cube.shape[-2:]
        basin_array = uconv.broadcast_array(basin_cube.data,
                                            [cube.ndim - 2, cube.ndim - 1],
                                            cube.shape)
        if inargs.weights:
            assert weights_cube.data.shape == cube.shape[-3:]
            if cube.ndim == 4:
                weights_array = uconv.broadcast_array(weights_cube.data,
                                                      [1, 3], cube.shape)
            else:
                weights_array = weights_cube.data
        else:
            weights_array = None

        if cube.ndim == 3:
            outdata = numpy.ma.zeros([cube.shape[0], len(basin_numbers)])
        else:
            outdata = numpy.ma.zeros(
                [cube.shape[0], cube.shape[1],
                 len(basin_numbers)])

        for basin_index, basin_number in enumerate(basin_numbers):
            temp_cube = cube.copy()
            if basin_number == 18:
                temp_cube.data = numpy.ma.masked_where(basin_array == 17,
                                                       temp_cube.data)
            else:
                temp_cube.data = numpy.ma.masked_where(
                    basin_array != basin_number, temp_cube.data)
            if len(coord_names) == cube.ndim:
                horiz_agg = temp_cube.collapsed(coord_names[-2:],
                                                agg_functions[inargs.agg],
                                                weights=weights_array).data
            elif inargs.agg == 'mean':
                horiz_agg = numpy.ma.average(temp_cube.data,
                                             axis=(-2, -1),
                                             weights=weights_array)
            elif inargs.agg == 'sum':
                horiz_agg = numpy.ma.sum(temp_cube.data, axis=(-2, -1))
            if outdata.ndim == 2:
                outdata[:, basin_index] = horiz_agg
            else:
                outdata[:, :, basin_index] = horiz_agg

        coord_list = [(cube.dim_coords[0], 0)]
        if cube.ndim == 4:
            coord_list.append((cube.dim_coords[1], 1))
            coord_list.append((basin_coord, 2))
        else:
            coord_list.append((basin_coord, 1))
        outcube = iris.cube.Cube(outdata,
                                 standard_name=cube.standard_name,
                                 long_name=cube.long_name,
                                 var_name=cube.var_name,
                                 units=cube.units,
                                 attributes=cube.attributes,
                                 dim_coords_and_dims=coord_list)
        output_cubelist.append(outcube)

    equalise_attributes(output_cubelist)
    iris.util.unify_time_units(output_cubelist)
    outcube = output_cubelist.concatenate_cube()
    if history:
        metadata_dict[inargs.infiles[-1]] = history[0]
    outcube.attributes['history'] = cmdprov.new_log(
        infile_history=metadata_dict, git_repo=repo_dir)
    iris.save(outcube, inargs.outfile)
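
# Hedged usage sketch (not in the original script): once the basin-aggregated
# file has been written, a single basin can be pulled back out via the basin
# coordinate built above. The coordinate takes its name from the input 'region'
# variable; the filename argument here is just a placeholder.
def load_basin_series(filename, basin_number):
    """Extract one basin's aggregated series from the saved output file."""
    import iris
    cube = iris.load_cube(filename)
    return cube.extract(iris.Constraint(region=basin_number))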
Example #44
0
    def cube_save_test(self,
                       reference_txt_path,
                       reference_cubes=None,
                       reference_pp_path=None,
                       **kwargs):
        """
        A context manager for testing the saving of Cubes to PP files.

        Args:

        * reference_txt_path:
            The path of the file containing the textual PP reference data.

        Kwargs:

        * reference_cubes:
            The cube(s) from which the textual PP reference can be re-built if necessary.
        * reference_pp_path:
            The location of a PP file from which the textual PP reference can be re-built if necessary.
            NB. The "reference_cubes" argument takes precedence over this argument.

        The return value from the context manager is the name of a temporary file
        into which the PP data to be tested should be saved.

        Example::
            with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
                iris.save(cubes, temp_pp_path)

        """
        # Watch out for a missing reference text file
        if not os.path.isfile(reference_txt_path):
            tests.logger.warning('Creating result file: %s',
                                 reference_txt_path)
            if reference_cubes:
                temp_pp_path = iris.util.create_temp_filename(".pp")
                try:
                    iris.save(reference_cubes, temp_pp_path, **kwargs)
                    self._create_reference_txt(reference_txt_path,
                                               temp_pp_path)
                finally:
                    os.remove(temp_pp_path)
            elif reference_pp_path:
                self._create_reference_txt(reference_txt_path,
                                           reference_pp_path)
            else:
                raise ValueError(
                    'Missing all of reference txt file, cubes, and PP path.')

        temp_pp_path = iris.util.create_temp_filename(".pp")
        try:
            # This value is returned to the target of the "with" statement's "as" clause.
            yield temp_pp_path

            # Load deferred data for all of the fields (but don't do anything with it)
            pp_fields = list(iris.fileformats.pp.load(temp_pp_path))
            for pp_field in pp_fields:
                pp_field.data
            with open(reference_txt_path, 'r') as reference_fh:
                reference = ''.join(reference_fh)
            self._assert_str_same(reference + '\n',
                                  str(pp_fields) + '\n',
                                  reference_txt_path,
                                  type_comparison_name='PP files')
        finally:
            os.remove(temp_pp_path)
Example #45
0
def ws_units_func(u_cube, v_cube):
    if u_cube.units != getattr(v_cube, 'units', u_cube.units):
        raise ValueError("units do not match")
    return u_cube.units
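
# Hedged sketch (not shown in the original script): ws_data_func is referenced by
# the IFunc construction below but its definition was not included here. A typical
# wind-speed data function simply combines the u and v component data arrays.
import numpy as np  # assumption: numpy is available


def ws_data_func(u_data, v_data):
    """Return wind speed sqrt(u**2 + v**2) from the u and v data arrays."""
    return np.sqrt(u_data ** 2 + v_data ** 2)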


# test if the windspeed file exists; if not, produce it
exists = os.path.isfile(directory_containing_files_to_process +
                        'wind_speed.nc')
if not exists:
    u_cube = iris.load_cube(directory_containing_files_to_process + 'uas.nc')
    v_cube = iris.load_cube(directory_containing_files_to_process + 'vas.nc')
    ws_ifunc = iris.analysis.maths.IFunc(ws_data_func, ws_units_func)
    ws_cube = ws_ifunc(u_cube, v_cube, new_name='wind speed')
    iris.save(ws_cube, directory_containing_files_to_process + 'wind_speed.nc')

for year in range(start_year, end_year + 1):
    print('processing year', year)
    cubes = []
    cube_data = []
    for k in range(len(input_variables)):
        single_input_variable = input_variables[k]
        print('loading data for ' + single_input_variable)
        cube = iris.load_cube(directory_containing_files_to_process +
                              single_input_variable + '.nc')
        try:
            cube = cube.collapsed('air_pressure', iris.analysis.MEAN)
        except iris.exceptions.CoordinateNotFoundError:
            # no air_pressure coordinate on this variable, so nothing to collapse
            pass
        # flip latitudes so they are monotonically increasing for regridding
Example #46
0
    # This loops over the years and months of interest.
    # The wet season in Zambia runs Oct to Mar (months 1, 2, 3, 10, 11 and 12);
    # the loop below covers all twelve months.

    for yr in (1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007):
        # for yr in (2003, 2004):
        for mn in ('01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'):
            cp4_data = '/data/users/sburgan/CP4A/precip/u-' + stash + '/' + stash + 'a.pa' + str(yr) + str(mn) + '*.pp'
            cp4_data_check = '/data/users/sburgan/CP4A/precip/u-' + stash + '/' + stash + 'a.pa' + str(yr) + str(
                mn) + '01_00.pp'
            if path.exists(cp4_data_check):
                cp4_cubelist = load_CP4_cubelist(cp4_data, 'longitude', 'latitude')
                # mask cube with shapefile
                # add together rain and snow (probably not necessary for Zambia but just in case)
                cp4_cube = convec_2_precipflux(cp4_cubelist)[0]
                # this narrows down even further to the Lusaka area
                # some experimentation needed to find the right area for other analyses
                # cp4_small = cp4_cube[:,34:64,17:64]                                               #Scott - Blanked out for now as already narrowed down

                # calculate daily means
                # print statement can be removed after running once to check cube metadata looks right
                cp4_cube_daily = cp4_cube.aggregated_by('day_of_month', iris.analysis.MEAN)
                print(cp4_cube_daily)

                # save out the daily files
                iris.save(cp4_cube_daily, data_dir + region + '_CP4A_daily_precip_' + str(yr) + str(mn) + '.pp')
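
# Hedged aside (not part of the original script): aggregated_by('day_of_month', ...)
# above needs a 'day_of_month' auxiliary coordinate on the cube. If
# load_CP4_cubelist does not already add one, it can be attached from the time
# coordinate as sketched here.
import iris.coord_categorisation


def ensure_day_of_month(cube):
    """Add a 'day_of_month' categorisation coordinate if the cube lacks one."""
    if not cube.coords('day_of_month'):
        iris.coord_categorisation.add_day_of_month(cube, 'time', name='day_of_month')
    return cube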



Example #47
0
mslp = iris.load_cube(files, 'air_pressure_at_sea_level')  ## MSLP
mslp_mn = means(mslp,
                name='MSLP',
                start_time=start_time,
                end_time=end_time,
                verbose=verbose)

## remove the global mean from all the values -- first we need to find it...
gm = [cube for cube in mslp_mn if cube.name() == 'MSLP_GLOBAL']
mslp_delta = copy.deepcopy(
    mslp_mn)  # deep copy the mean mslp as we modify delta
for i in range(len(mslp_delta)):  # iterate over indices here
    mslp_delta[i].data = mslp_mn[i].data - gm[0].data
    mslp_delta[i].rename(mslp_mn[i].name() + "_DGM")  # rename new version

## then keep only the NHX and TROPICS data
mslp_short = [
    cube for cube in mslp_delta
    if cube.name() in ('MSLP_NHX_DGM', 'MSLP_TROPICS_DGM')
]
output_data.extend(mslp_short)  # add it to the list to be appended

if verbose:  # print out the summary data for all created values
    for cube in output_data:
        print(cube.name(), "=", cube.data)
        if verbose > 1:
            print(cube)
            print("============================================================")

## now to write the data
iris.save(output_data, output_file)
Example #48
0
def calc_CDD(incube, outfile):
    iris.coord_categorisation.add_day_of_month(incube,
                                               'time',
                                               name='day_of_month')
    iris.coord_categorisation.add_year(incube, 'time', name='year')
    incube.convert_units('kg m-2 day-1')
    #year0 = incube.coord('year').points[0]
    # creates a cube for putting data into
    outcube = incube.aggregated_by('year', iris.analysis.MEAN)
    #the rainfall threshold
    total = 1.0
    # first bring in the data
    # this code will need to know the starting day of the data
    for yr in incube.coord('year').points:
        #pdb.set_trace()
        incube_yr = incube.extract(iris.Constraint(year=yr))

        strt_day = incube_yr.coord('day_of_month').points[0]
        yeardata = incube_yr.data
        dtes = []
        duration = []
        pltdtes = np.zeros((yeardata.shape[1], yeardata.shape[2]), dtype=int)
        pltdur = np.zeros((yeardata.shape[1], yeardata.shape[2]), dtype=int)
        for x in range(0, yeardata.shape[2]):
            for y in range(0, yeardata.shape[1]):
                current_max = 0
                current_dte = 0
                dte = 0
                duratn = 0
                for t in range(0, yeardata.shape[0]):
                    if yeardata[t, y, x] <= float(total):
                        dte = t
                        print(dte, yeardata[t, y, x])
                        for t1 in range(t, yeardata.shape[0]):
                            if yeardata[t1, y, x] <= float(total):
                                continue
                            else:
                                duratn = t1 - t
                                if duratn > current_max:
                                    current_dte = dte + strt_day
                                    current_max = duratn
                            break
                dtes.extend([current_dte])
                duration.extend([current_max])
                pltdtes[y, x] = current_dte
                pltdur[y, x] = current_max

        #                else:
        #                  continue
        #                break
        #print np.max(duration[:])
        #print np.max(dtes[:])
        #   plt.clf()
        plt.scatter(duration[:], dtes[:])
        plt.show()
        ax1 = plt.figure(figsize=(12, 12))
        plt.subplot(2, 1, 1)
        plt.contourf(pltdtes[:])
        #cb2.set_label('Date of longest dry spell (after onset)')

        plt.subplot(2, 1, 2)
        plt.contourf(pltdur[:])
        plt2_ax = ax1.gca()
        #cb2.set_label('Duration of first dry spell')
        plt.subplots_adjust(hspace=0.5)
        # plt.savefig('TRMM_CDD_'+str(yyyy)+'_limit'+str(total)+'mm_per_day.png')
        plt.show()
        #outcube[year,:].data = dtes[:]
        outcube[yr, :, :].data = duration

        #   np.savetxt('CDD_date_'+str(yyyy)+'.csv', pltdtes[:], delimiter = ',')
        #   np.savetxt('CDD_duration_'+str(yyyy)+'.csv', pltdur[:], delimiter = ',')
        return (pltdtes, pltdur)

    iris.save(outcube, outfile)
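
# Hedged aside (not from the original function): for a single grid point the
# longest dry spell can be computed without the explicit nested scanning above.
# The helper below is an illustrative alternative, with the 1 mm/day rainfall
# threshold exposed through the 'threshold' argument.
import numpy as np


def longest_dry_spell(precip_1d, threshold=1.0):
    """Length of the longest run of days with precipitation <= threshold."""
    best = run = 0
    for value in np.asarray(precip_1d):
        run = run + 1 if value <= threshold else 0
        best = max(best, run)
    return best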
 def setUp(self):
     """setting up test dir and test files"""
     self.test_dir = './anciltest/'
     self.stage = self.test_dir + 'stage.nc'
     os.mkdir(self.test_dir)
     save(_make_test_cube('stage test'), self.stage)
def fix_sst_under_ice(dir_in, filename_sice_func_sst_month_pickle, year,
                      months, year_last_real_data, sst_fixed_year):

    fnames_sst = sorted(
        glob.glob(os.path.join(dir_in, 'tos*' + year + '0101-*.nc')))
    fnames_ice = sorted(
        glob.glob(os.path.join(dir_in, 'siconc*' + year + '0101-*.nc')))
    fout_year = sst_fixed_year

    if os.path.exists(fout_year):
        return

    year_min = int(os.path.basename(fnames_sst[0]).split('_')[-1][0:4])
    NYEARS = len(fnames_sst)
    print('NYEARS', NYEARS)

    cube_mon = iris.cube.CubeList()

    # read calculated relationship
    fh = open(filename_sice_func_sst_month_pickle, 'rb')  # binary mode for pickle
    mean_freq_sice = pickle.load(fh)
    fh.close()

    mean_freq_sice_daily = sic_functions.interpolate_histogram(
        year, mean_freq_sice)

    for f_sst, f_ice in zip(fnames_sst, fnames_ice):
        year = f_sst.split('_')[-1][0:4]
        year1 = f_ice.split('_')[-1][0:4]
        iy = int(year) - year_min
        print('process', f_sst, year, iy)

        if year != year1:
            raise Exception('Paired SST and sea-ice years not same ' + year +
                            ' ' + year1)
        sst = iris.load_cube(f_sst)
        ice = iris.load_cube(f_ice)
        icc.add_month_number(sst, 'time', name='month')
        icc.add_month_number(ice, 'time', name='month')

        if os.path.exists(fout_year):
            continue

        files_fixed = []
        for im in months:
            print('processing month', im)
            month = im + 1
            month_no = iris.Constraint(
                coord_values={'month': lambda l: l == month})
            sst_mon = sst.extract(month_no)
            ice_mon = ice.extract(month_no)

            print('calc the fixed SST under siconc ')
            sst_fixed = fix_sst_based_on_siconc(sst_mon, ice_mon,
                                                mean_freq_sice_daily, im)
            fout = fout_year[:-3] + '_' + str(month).zfill(2) + '.nc'
            iris.save(sst_fixed, fout, unlimited_dimensions=['time'])
            print('saved file ', fout)
            files_fixed.append(fout)

        cmd = 'ncrcat -O ' + ' '.join(files_fixed) + ' ' + fout_year
        print('cmd', cmd)
        os.system(cmd)
        for f in files_fixed:
            os.remove(f)
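
# Hedged aside (not from the original function): the ncrcat concatenation above
# can also be issued without building a shell command string by hand.
import subprocess


def concatenate_with_ncrcat(files_fixed, fout_year):
    """Concatenate the per-month fixed SST files into one yearly file with ncrcat."""
    subprocess.run(['ncrcat', '-O'] + list(files_fixed) + [fout_year], check=True)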
            print('/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s_on_p_levs.pp' % (expmin1, experiment_id, experiment_id, dm))
            print(e)
            pass


        with h5py.File(fname, 'r') as f:

            for t in time_coord_argsort:

                save_as_cube = iris.cube.Cube(f['%s' % ds][t])
                save_as_cube.add_dim_coord(info_cube2.coord('grid_latitude'), 0)
                save_as_cube.add_dim_coord(info_cube2.coord('grid_longitude'), 1)
                save_as_cube.add_dim_coord(
                    DimCoord(points=p_levels, long_name='pressure', units='hPa'), 2)
                save_as_cube.add_aux_coord(
                    DimCoord(points=time_cube_loop_points[t], long_name='time',
                             standard_name='time',
                             units=info_cube.coord('time').units))

                if dm == 'temp':
                    save_as_cube.rename('potential_temperature')
                    save_as_cube.units = iris.unit.Unit('K')
                if dm == 'sp_hum':
                    save_as_cube.rename('specific_humidity')
                    save_as_cube.units = iris.unit.Unit('kg kg-1')
                if dm == '408':
                    save_as_cube.rename('height')
                    save_as_cube.units = iris.unit.Unit('m')

                iris.save(save_as_cube,
                          '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s_on_p_levs.pp'
                          % (expmin1, experiment_id, experiment_id, dm),
                          field_coords=('grid_latitude', 'grid_longitude'),
                          append=True)

 except Exception:
  PrintException()     
    # get list of model runs
    temp_list_trim = [x for x in temp_list if x is not None]
    print(len(temp_list_trim))

    # if length only 1 save as new netcdf
    if len(temp_list_trim) == 1:
        ensemble_mean = temp_list_trim[0]
        ensemble_mean.standard_name = temp_list_trim[0].standard_name
        ensemble_mean.attributes = temp_list_trim[0].attributes
        dates = get_dates(ensemble_mean, verbose=False)
        outpath = (fpath + '/test_ensemble_mean_historical_' + model + '_' +
                   var + '_' + str(dates[0].year) +
                   str(dates[0].month).zfill(2) + '_' + str(dates[-1].year) +
                   str(dates[-1].month).zfill(2) + '.nc')
        print(outpath)
        iris.save(ensemble_mean, outpath)
        continue

    else:
        # if multiple runs calculate mean of runs
        n = len(temp_list_trim)
        print(n)
        equalise_attributes(temp_list_trim)
        unify_time_units(temp_list_trim)
        if n == 2:
            ensemble_mean = (temp_list_trim[0] + temp_list_trim[1]) / n

        if n == 3:
            ensemble_mean = (temp_list_trim[0] + temp_list_trim[1] +
                             temp_list_trim[2]) / n
        if n == 4:
Example #53
0
 def test_save(self):
     with self.temp_filename(suffix=".nc") as filename:
         iris.save(self.cube, filename)
         self.assertCDL(filename)
Example #54
0
)  # this reads in all timesteps of EXTINCTION AT 550!!!
extinction = extinction.concatenate_cube()  # this is dodge (sorry)

#site1 = extinction[:,0:85] # site 1 is all time steps but the first 85 of the second dimension.
#site1.remove_coord('model_level_number')

dir_out = '/nfs/see-fs-01_users/gy11s2s/Python/Layers_over_time_analysis/model_extinction_sites/'  # set this

n = 0  # initialize n

for i in range(0, 16):
    # loop through 16 times

    site = extinction[:, n:n + 85]

    site.remove_coord(
        'model_level_number')  # get rid of old metadata in case it's dodge
    site.remove_coord('level_height')
    site.remove_coord('sigma')

    site.add_dim_coord(
        level_number_coord, 1
    )  # add new model level number coordinate to the second dimension in your cube

    site.rename('Extinction_550nm_site_' +
                str(i))  # rename cube with correct site number

    iris.save(site, dir_out + site.name() + '.nc')  # save it as a netcdf file

    n += 85  # add 85 to n counter
Example #55
0
 def save_fieldcubes(self, cubes, basename=''):
     # Save cubes to a temporary file, and return its filepath.
     file_path = self._temp_filepath(user_name=basename, suffix='.pp')
     iris.save(cubes, file_path)
     return file_path
Example #56
0
 def test_scalar_cube_save_load(self):
     cube = iris.cube.Cube(1, long_name="scalar_cube")
     with self.temp_filename(suffix=".nc") as fout:
         iris.save(cube, fout)
         scalar_cube = iris.load_cube(fout)
         self.assertEqual(scalar_cube.name(), "scalar_cube")
Example #57
0
    results = []
    for level, stats in data_levels.items():
        print('for level %s we have %s slices' % (level, stats['count']))
        results.append(stats['cube'] / stats['count'])
        
    return results


# In[8]:

# Run this thing....
start_time = 
files = [os.path.join(dir_path, f) for f in os.listdir(dir_path)]
print("%d files to process in dir %s" % (len(files), dir_path))

avgs = average_for_month_and_region(month,
                                    regions[region_name],
                                    'm01s16i203',
                                    files[:10])

for c in avgs:
    name = "ava_tem_plevel_%d_%s.nc" % (c.coord('pressure').points[0], region_name)
    iris.save(c, name)
    print("Saved %s" % name)


# In[ ]:



Example #58
0
    def _test_file(self, name):
        """This is the main test routine that is called for each of the files listed below."""
        pp_path = self._src_pp_path(name)

        # 1) Load the PP and check the Cube
        callback_name = 'callback_' + name.replace('.', '_')
        callback = globals().get(callback_name)
        cubes = iris.load(pp_path, callback=callback)

        if name.endswith('.pp'):
            fname_name = name[:-3]
        else:
            fname_name = name

        self.assertCML(cubes, self._ref_dir + ('from_pp', fname_name + '.cml',))

        # 2) Save the Cube and check the netCDF
        nc_filenames = []

        for index, cube in enumerate(cubes):
            # Write Cube to netCDF file - must be NETCDF3_CLASSIC format for the cfchecker.
            file_nc = os.path.join(os.path.sep, 'var', 'tmp', '%s_%d.nc' % (fname_name, index))
            #file_nc = tests.get_result_path(self._ref_dir + ('to_netcdf', '%s_%d.nc' % (fname_name, index)))
            iris.save(cube, file_nc, netcdf_format='NETCDF3_CLASSIC')

            # Check the netCDF file against CDL expected output.
            self.assertCDL(file_nc, self._ref_dir + ('to_netcdf', '%s_%d.cdl' % (fname_name, index)))
            nc_filenames.append(file_nc)

            # Perform CF-netCDF conformance checking.
            with open('/dev/null', 'w') as dev_null:
                try:
                    # Check for the availability of the "cfchecker" application
                    subprocess.check_call(['which', 'cfchecker'], stderr=dev_null, stdout=dev_null)
                except subprocess.CalledProcessError:
                    warnings.warn('CF-netCDF "cfchecker" application not available. Skipping CF-netCDF compliance checking.')
                else:
                    file_checker = os.path.join(os.path.dirname(file_nc), '%s_%d.txt' % (fname_name, index))

                    with open(file_checker, 'w') as report:
                        # Generate cfchecker text report on the file.
                        # Don't use check_call() here, as cfchecker returns a non-zero status code
                        # for any non-compliant file, causing check_call() to raise an exception.
                        subprocess.call(['cfchecker', file_nc], stderr=report, stdout=report)

                    if not os.path.isfile(file_checker):
                        os.remove(file_nc)
                        self.fail('Failed to process %r with cfchecker' % file_nc)

                    with open(file_checker, 'r') as report:
                        # Get the cfchecker report and purge unwanted lines.
                        checker_report = ''.join([line for line in report.readlines() if not line.startswith('Using')])

                    os.remove(file_checker)
                    self.assertString(checker_report, self._ref_dir + ('to_netcdf', 'cf_checker', '%s_%d.txt' % (fname_name, index)))

        # 3) Load the netCDF and check the Cube
        for index, nc_filename in enumerate(nc_filenames):
            # Read netCDF to Cube.
            cube = iris.load_cube(nc_filename)
            self.assertCML(cube, self._ref_dir + ('from_netcdf', '%s_%d.cml' % (fname_name, index)))
            os.remove(nc_filename)

        # 4) Save the Cube and check the PP
        # Only the first four files pass their tests at the moment.

        if name in self.files_to_check[:4]:
            self._test_pp_save(cubes, name)
Example #59
0
 def _test_pp_save(self, cubes, name):
     # If there's no existing reference file then make it from the *source* data
     reference_txt_path = tests.get_result_path(self._ref_dir + ('to_pp', name + '.txt'))
     reference_pp_path = self._src_pp_path(name)
     with self.cube_save_test(reference_txt_path, reference_pp_path=reference_pp_path) as temp_pp_path:
         iris.save(cubes, temp_pp_path)
Example #60
0
        iris.Constraint(pseudo_level=3)
        & iris.AttributeConstraint(STASH='m01s02i244') & tconstr,
        iris.Constraint(pseudo_level=3)
        & iris.AttributeConstraint(STASH='m01s02i245') & tconstr
    ])

# make cube to store total AOD
aodsum = aod[0].copy()
# add-up components
aodsum.data = (aod[0].data + aod[1].data + aod[2].data + aod[3].data +
               aod[4].data + aod[5].data + aod[6].data)

# make cube to store total AAOD
aaodsum = aaod[0].copy()
# add-up components
aaodsum.data = (aaod[0].data + aaod[1].data + aaod[2].data + aaod[3].data +
                aaod[4].data + aaod[5].data + aaod[6].data)

# calculate single-scattering albedo
ssa = aodsum.copy()
ssa.data = 1.0 - (aaodsum.data / aodsum.data)

# rename
ssa.rename('single_scattering_albedo_in_air_due_to_ambient_aerosol_particles')

# remove unlimited dimension when writing to netCDF
iris.FUTURE.netcdf_no_unlimited = True

# output to netCDF
iris.save(ssa, 'Task103_SSA.nc', netcdf_format='NETCDF3_CLASSIC')
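
# Hedged aside (not from the original script): the element-wise component sums
# above can be written once for any number of AOD/AAOD cubes; this helper is
# illustrative and assumes all component cubes share the same grid.
def sum_components(cubes):
    """Return a copy of the first cube holding the element-wise sum of all cubes."""
    total = cubes[0].copy()
    for component in cubes[1:]:
        total.data = total.data + component.data
    return total

# e.g. aodsum = sum_components(aod); aaodsum = sum_components(aaod)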