Example #1
def load_from_dir(directory, filetype, constraint=None):
    """
    Loads a set of cubes from a given directory. Single cubes are loaded
    and returned in a CubeList.

    Args:
        directory: the directory to operate on. It MUST start and end
        with a forward slash.

        filetype: a string specifying the expected extension of the
        files in the dataset.

        constraint (optional): a constraint you wish to load the
        dataset with.

    Returns:
        A tuple of (loaded_cubes, cube_files): the loaded cubes and the
        corresponding file paths, each sorted by earliest date.
    """
    loaded_cubes = []
    cube_files = []
    directory = _parse_directory(directory)
    cube_paths = glob.glob(directory + '*' + filetype)
    # One code path covers both cases: iris.load_cube and iris.load_raw
    # treat constraint=None as "no constraint".
    if constraint is not None and not _constraint_compatible(
            constraint, iris.load_cube(cube_paths[0])):
        constraint = _fix_partial_datetime(constraint)
    for path in cube_paths:
        try:
            loaded_cubes.append(iris.load_cube(path, constraint))
            cube_files.append(path)
        except (MergeError, ConstraintMismatchError):
            for cube in iris.load_raw(path, constraint):
                if isinstance(cube.standard_name, str):
                    loaded_cubes.append(cube)
                    cube_files.append(path)
    loaded_cubes.sort(key=sort_by_earliest_date)
    cube_files.sort(key=file_sort_by_earliest_date)
    return loaded_cubes, cube_files
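A minimal usage sketch for the function above (hedged: the directory path is
illustrative only, and the helpers `_parse_directory`, `sort_by_earliest_date`
and `file_sort_by_earliest_date` are assumed to exist in the same module):

import iris

# Hypothetical directory; note the required leading and trailing slashes.
cubes, files = load_from_dir('/data/model_output/', '.nc')

# The same call, constrained to a single year.
year_2000 = iris.Constraint(time=lambda cell: cell.point.year == 2000)
cubes, files = load_from_dir('/data/model_output/', '.nc',
                             constraint=year_2000)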
Example #2
 def test_concatenate_cell_measure_match(self):
     cube1, = iris.load_raw(self.fname)
     cube1 = cube1[:, :, 0, 0]
     cm_and_dims = cube1._cell_measures_and_dims
     cube2, = iris.load_raw(self.fname)
     cube2 = cube2[:, :, 0, 0]
     cube2.coord('time').points = cube2.coord('time').points + 1
     cubes = CubeList([cube1, cube2]).concatenate()
     self.assertEqual(cubes[0]._cell_measures_and_dims, cm_and_dims)
     self.assertEqual(len(cubes), 1)
Example #4
 def test_concatenate_cell_measure_aware(self):
     (cube1, ) = iris.load_raw(self.fname)
     cube1 = cube1[:, :, 0, 0]
     cm_and_dims = cube1._cell_measures_and_dims
     (cube2, ) = iris.load_raw(self.fname)
     cube2 = cube2[:, :, 0, 0]
     cube2._cell_measures_and_dims[0][0].var_name = "not_areat"
     cube2.coord("time").points = cube2.coord("time").points + 1
     cubes = CubeList([cube1, cube2]).concatenate()
     self.assertEqual(cubes[0]._cell_measures_and_dims, cm_and_dims)
     self.assertEqual(len(cubes), 2)
Example #5
def load_files(date, forecast_time, src_path=None):
    '''
    Load the files with the chosen forecast time as an iris cube and select the desired
    wavelength. The files are merged along time when they are loaded.
    
    Parameters:
    date: (str) The date of the data to load in format 'YYYYMMDD'.
    forecast_time: (int) The number of hours ahead for the forecast.
        Possible choices: 0, 3, 6, 9, 12, 15, 18, 21, 24.
    src_path: (Optional) (str) The file path containing the extracted forecast files.
        Default: '/scratch/{USER}/aeroct/global-nwp/'
    '''
    if src_path is None:
        src_path = scratch_path

    if np.isin(forecast_time, np.arange(0, 166, 3)):
        forecast_time_str = str(forecast_time + 3).zfill(3)
    else:
        raise ValueError('Invalid forecast_time. It must be a multiple '
                         'of 3 between 0 and 165.')

    # Get the dates of the two files containing data during 'date'
    days_before = int((forecast_time - 6) / 24)
    date1 = datetime.strptime(date,
                              '%Y%m%d') - timedelta(days=(days_before + 1))
    date2 = datetime.strptime(date, '%Y%m%d') - timedelta(days=days_before)
    date1 = date1.strftime('%Y%m%d')
    date2 = date2.strftime('%Y%m%d')

    # This loads files from src_path with filenames matching '*YYYYMMDD*_###.*',
    # where ### is the forecast time plus 3, i.e. 003 is the analysis time.
    aod_cube1 = iris.load_raw('{0}*{1}*_{2}.*'.format(src_path, date1,
                                                      forecast_time_str))
    aod_cube2 = iris.load_raw('{0}*{1}*_{2}.*'.format(src_path, date2,
                                                      forecast_time_str))

    # Equalise attributes to solve issues with merging (eg. mismatching UM version)
    equalise_attributes(aod_cube1)
    equalise_attributes(aod_cube2)
    aod_cube1 = aod_cube1.merge_cube()
    aod_cube2 = aod_cube2.merge_cube()

    # Concatenate the two days into a single cube
    cube_list = iris.cube.CubeList([aod_cube1, aod_cube2])
    for cube in cube_list:
        cube.remove_coord('forecast_period')
        forecast_coord = iris.coords.DimCoord(forecast_time,
                                              units='hours',
                                              standard_name='forecast_period')
        cube.add_aux_coord(forecast_coord)
    equalise_attributes(cube_list)
    aod_cube = cube_list.concatenate_cube()
    return aod_cube
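A hedged usage sketch for load_files (the date and lead time are illustrative,
and the extracted forecast files are assumed to sit under the default scratch
path):

# Hypothetical call: the 12-hour forecast AOD cube valid on 23 June 2018.
aod_cube = load_files('20180623', 12)
# The forecast_period auxiliary coordinate holds the requested lead time.
print(aod_cube.coord('forecast_period').points)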
Example #6
def load_from_filelist(paths, filetype, constraint=None):
    """
    Loads the specified files. Individual files are loaded and
    returned in a CubeList.

    Args:
        paths: a list of file paths to operate on.

        filetype: a string specifying the expected extension of the
        files in the dataset.

        constraint (optional): a string, iterable of strings or an
        iris.Constraint specifying any constraints you wish to load
        the dataset with.

    Returns:
        A tuple of (loaded_cubes, cube_files): the loaded cubes and
        the corresponding file paths, each sorted by earliest date.
    """
    loaded_cubes = []
    cube_files = []
    # Build a filtered list rather than calling paths.remove() while
    # iterating over paths, which would skip elements.
    paths = [filename for filename in paths if filename.endswith(filetype)]

    # Check constraint compatibility once, against the first file, rather
    # than on every iteration.
    if constraint is not None and not _constraint_compatible(
            constraint, iris.load_cube(paths[0])):
        constraint = _fix_partial_datetime(constraint)

    for filename in paths:
        try:
            loaded_cubes.append(iris.load_cube(filename, constraint))
            cube_files.append(filename)
        except (MergeError, ConstraintMismatchError):
            for cube in iris.load_raw(filename, constraint):
                if isinstance(cube.standard_name, str):
                    # Append the cube itself, not the whole CubeList
                    # returned by iris.load_raw.
                    loaded_cubes.append(cube)
                    cube_files.append(filename)
    loaded_cubes.sort(key=sort_by_earliest_date)
    cube_files.sort(key=file_sort_by_earliest_date)
    return loaded_cubes, cube_files
Example #7
def get_cube(url, name_list, bbox=None, time=None, units=None, callback=None,
             constraint=None):
    """Only `url` and `name_list` are mandatory.  The kw args are:
    `bbox`, `callback`, `time`, `units`, `constraint`."""

    cubes = iris.load_raw(url, callback=callback)

    in_list = lambda cube: cube.standard_name in name_list
    cubes = CubeList([cube for cube in cubes if in_list(cube)])
    if not cubes:
        raise ValueError('Cube does not contain {!r}'.format(name_list))
    else:
        cube = cubes.merge_cube()

    if constraint:
        cube = cube.extract(constraint)
        if not cube:
            raise ValueError('No cube using {!r}'.format(constraint))
    if bbox:
        cube = subset(cube, bbox)
        if not cube:
            raise ValueError('No cube using {!r}'.format(bbox))
    if time:
        if isinstance(time, datetime):
            start, stop = time, None
        elif isinstance(time, tuple):
            start, stop = time[0], time[1]
        else:
            raise ValueError('Time must be start or (start, stop).'
                             '  Got {!r}'.format(time))
        cube = time_slice(cube, start, stop)
    if units:
        if cube.units != units:
            cube.convert_units(units)
    return cube
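A hedged example of calling this get_cube variant (the URL and names are
illustrative; `subset` and `time_slice` are assumed to be defined elsewhere
in the same module):

from datetime import datetime

url = 'http://example.com/thredds/dodsC/model_best.ncd'  # hypothetical
name_list = ['sea_water_temperature', 'sea_surface_temperature']
cube = get_cube(url, name_list,
                time=datetime(2014, 7, 7),
                units='celsius')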
Example #8
    def test_number_of_raw_cubes(self):
        # Test the constraints generate the correct number of raw cubes.
        raw_cubes = iris.load_raw(self.theta_path)
        self.assertEqual(len(raw_cubes), 38)

        raw_cubes = iris.load_raw(self.theta_path, [self.level_10])
        self.assertEqual(len(raw_cubes), 1)

        raw_cubes = iris.load_raw(self.theta_path, [self.theta])
        self.assertEqual(len(raw_cubes), 38)

        raw_cubes = iris.load_raw(self.dec_path, [self.level_30])
        self.assertEqual(len(raw_cubes), 4)

        raw_cubes = iris.load_raw(self.dec_path, [self.theta])
        self.assertEqual(len(raw_cubes), 38)
Example #9
def stations_keys(config, key='station_name'):
    save_dir = os.path.join(os.path.abspath(config['run_name']))
    fname = os.path.join(save_dir, '{}.nc'.format('OBS_DATA'))
    cubes = iris.load_raw(fname)
    observations = [cube2series(cube) for cube in cubes]
    return {obs._metadata['station_code']: obs._metadata[key] for
            obs in observations}
Example #11
def get_cube(url, name_list=None, bbox=None, callback=None,
             time=None, units=None, constraint=None):
    cubes = iris.load_raw(url, callback=callback)
    if constraint:
        cubes = cubes.extract(constraint)
    if name_list:
        cubes = CubeList([cube for cube in cubes
                          if cube.standard_name in name_list])
        if not cubes:
            raise ValueError('Cube does not contain {!r}'.format(name_list))
    # Merge whatever survived the filters; merging unconditionally also
    # avoids `cube` being undefined when `name_list` is not given.
    cube = cubes.merge_cube()
    if bbox:
        cube = intersection(cube, bbox)
    if time:
        if isinstance(time, datetime):
            start, stop = time, None
        elif isinstance(time, tuple):
            start, stop = time[0], time[1]
        else:
            raise ValueError('Time must be start or (start, stop).'
                             '  Got {!r}'.format(time))
        cube = time_slice(cube, start, stop)
    if units:
        if cube.units != units:
            cube.convert_units(units)
    return cube
Example #12
def woa_profile(
    lon, lat, variable="temperature", time_period="annual", resolution="1"
):
    """
    Return an iris.cube instance from a World Ocean Atlas variable at a
    given lon, lat point.

    Parameters
    ----------
    lon, lat: float
        point positions to extract the interpolated profile.
    Choose data `variable` from:
        'temperature', 'salinity', 'silicate', 'phosphate',
        'nitrate', 'oxygen_saturation', 'dissolved_oxygen', or
        'apparent_oxygen_utilization'.
    Choose `time_period` from:
        01-12: January to December
        13-16: seasonal (North Hemisphere `Winter`, `Spring`, `Summer`, and `Autumn` respectively)
        00: Annual
    Choose `resolution` from:
        '5', '1', or '1/4' degrees (str)

    Returns
    -------
    Iris.cube instance with the climatology.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from oceans.datasets import woa_profile
    >>> cube = woa_profile(-143, 10, variable='temperature',
    ...                    time_period='annual', resolution='5')
    >>> fig, ax = plt.subplots(figsize=(2.25, 5))
    >>> z = cube.coord(axis='Z').points
    >>> l = ax.plot(cube[0, :].data, z)
    >>> ax.grid(True)
    >>> ax.invert_yaxis()

    """
    import iris

    url = _woa_url(
        variable=variable, time_period=time_period, resolution=resolution
    )

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        cubes = iris.load_raw(url)

    # TODO: should we be using `an` instead of `mn`?
    v = _woa_variable(variable)
    cube = [c for c in cubes if c.var_name == f"{v}_mn"][0]
    scheme = iris.analysis.Nearest()
    sample_points = [("longitude", lon), ("latitude", lat)]
    kw = {
        "sample_points": sample_points,
        "scheme": scheme,
        "collapse_scalar": True,
    }
    return cube.interpolate(**kw)
Example #14
 def test_round_trip(self):
     cube, = iris.load(self.fname)
     with self.temp_filename(suffix='.nc') as filename:
         iris.save(cube, filename, unlimited_dimensions=[])
         round_cube, = iris.load_raw(filename)
         self.assertEqual(len(round_cube.cell_measures()), 1)
         self.assertEqual(round_cube.cell_measures()[0].measure, 'area')
Example #16
def extract_wind_air(sample_df, wind_air_data):
    '''
    :param sample_df: the data frame to augment
    :param wind_air_data: path to the wind and air pressure data
    :return: the sample data frame with two extra columns ('wind' and
        'air_pressure')
    '''
    raw = iris.load_raw(wind_air_data)
    wind_series = []
    pres_series = []
    i = 0
    for index, row in sample_df.iterrows():

        if np.mod(i, len(sample_df) // 5) == 0:
            print('Extracting wind/air on: %d/%d' % (i, len(sample_df)))
        i += 1

        date_list = row['date'].split('-')
        year = int(date_list[0])
        month = int(date_list[1])
        lat = row['LATITUDE']
        long = row['LONGITUDE']
        data = query_wind_pres(year, month, lat, long, raw[0], raw[1])
        wind_series.append(data[0])
        pres_series.append(data[1])

    sample_df['wind'] = wind_series
    sample_df['air_pressure'] = pres_series

    return sample_df
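A hedged usage sketch (the path is hypothetical; `query_wind_pres` is assumed
to be defined elsewhere in the module):

# sample_df needs 'date', 'LATITUDE' and 'LONGITUDE' columns, and gains
# 'wind' and 'air_pressure' columns.
sample_df = extract_wind_air(sample_df, '/data/wind_air_data.nc')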
Example #18
    def _test_file(self, src_path, reference_filename):
        """
        Checks the result of loading the given file spec, or creates the
        reference file if it doesn't exist.

        """
        cubes = iris.load_raw(tests.get_data_path(src_path))
        self.assertCML(cubes, ['file_load', reference_filename])
Example #19
 def test_real_data(self):
     data_path = tests.get_data_path(('PP', 'globClim1', 'theta.pp'))
     cubes = iris.load_raw(data_path)
     # Force the source 2-D cubes to load their data before the merge
     for cube in cubes:
         data = cube.data
     cubes = cubes.merge()
     self.assertCML(cubes, ['merge', 'theta.cml'])
Example #20
 def test_real_data(self):
     data_path = tests.get_data_path(("PP", "globClim1", "theta.pp"))
     cubes = iris.load_raw(data_path)
     # Force the source 2-D cubes to load their data before the merge
     for cube in cubes:
         _ = cube.data
     cubes = cubes.merge()
     self.assertCML(cubes, ["merge", "theta.cml"])
Example #21
def test_quiet_load_raw():
    from jade_utils.iris_tools import quiet_load_raw

    test_cube_path = os.path.join(os.path.dirname(__file__), 'data', 'test.nc')
    loud_cubelist = iris.load_raw(test_cube_path)
    quiet_cubelist = quiet_load_raw(test_cube_path)

    assert loud_cubelist[0] == quiet_cubelist[0]
Example #24
def load(file, callback=None):
    """Load iris cubes from files."""
    logger.debug("Loading:\n%s", file)
    raw_cubes = iris.load_raw(file, callback=callback)
    if not raw_cubes:
        raise Exception('Cannot load cubes from {0}'.format(file))
    for cube in raw_cubes:
        cube.attributes['source_file'] = file
    return raw_cubes
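A short hedged usage sketch (the filename is hypothetical):

raw_cubes = load('/data/tas_day_model_historical.nc')
# Every returned raw cube records the file it came from.
assert all(cube.attributes['source_file'] == '/data/tas_day_model_historical.nc'
           for cube in raw_cubes)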
Example #26
    def test_coord_attributes(self):
        def custom_coord_callback(cube, field, filename):
            cube.coord('time').attributes['monty'] = 'python'
            cube.coord('time').attributes['brain'] = 'hurts'

        # Load slices, decorating a coord with custom attributes
        cubes = iris.load_raw(self._data_path, callback=custom_coord_callback)
        # Merge
        merged = iris.cube.CubeList._extract_and_merge(cubes, constraints=None, strict=False, merge_unique=False)
        # Check the custom attributes are in the merged cube
        for cube in merged:
            assert(cube.coord('time').attributes['monty'] == 'python')
            assert(cube.coord('time').attributes['brain'] == 'hurts')
Example #28
    def test_coord_attributes(self):
        def custom_coord_callback(cube, field, filename):
            cube.coord("time").attributes["monty"] = "python"
            cube.coord("time").attributes["brain"] = "hurts"

        # Load slices, decorating a coord with custom attributes
        cubes = iris.load_raw(self._data_path, callback=custom_coord_callback)
        # Merge
        merged = iris.cube.CubeList(cubes).merge()
        # Check the custom attributes are in the merged cube
        for cube in merged:
            assert cube.coord("time").attributes["monty"] == "python"
            assert cube.coord("time").attributes["brain"] == "hurts"
Example #29
    def to_iris(self, **kw):
        """Load the data request into an iris.CubeList.

        Accepts any `iris.load_raw` keyword arguments.
        """
        import iris

        url = self.get_download_url(response="nc")
        data = urlopen(url, params=self.params, **self.requests_kwargs).read()
        with _tempnc(data) as tmp:
            cubes = iris.load_raw(tmp.name, **kw)
            cubes.realise_data()
            return cubes
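A hedged usage sketch, with `e` standing in for an already-configured
erddapy-style instance of the class that defines the method above; any
`iris.load_raw` keyword (such as `constraints`) is forwarded:

import iris

cubes = e.to_iris(constraints=iris.Constraint('sea_water_temperature'))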
Example #30
def load(file, callback=None):
    """Load iris cubes from files."""
    logger.debug("Loading:\n%s", file)
    with catch_warnings():
        filterwarnings(
            'ignore',
            message="Missing CF-netCDF measure variable .*",
            category=UserWarning,
            module='iris',
        )
        raw_cubes = iris.load_raw(file, callback=callback)
    if not raw_cubes:
        raise Exception('Cannot load cubes from {0}'.format(file))
    for cube in raw_cubes:
        cube.attributes['source_file'] = file
    return raw_cubes
Example #31
def quiet_load_raw(*args, **kwargs):
    """Like iris.load_raw but quieter.

    `iris.load_raw` often generates a whole bunch of warnings; this
    wrapper simply suppresses them.

    Args:
        See `iris.load_raw`.

    Returns:
        See `iris.load_raw`.
    """

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return iris.load_raw(*args, **kwargs)
Example #32
    def to_iris(self, **kw):
        """Load the data request into an iris.CubeList.

        Accepts any `iris.load_raw` keyword arguments.
        """
        import iris

        url = self.get_download_url(response="ncCF", **kw)
        data = urlopen(url, auth=self.auth).read()
        with _tempnc(data) as tmp:
            cubes = iris.load_raw(tmp, **kw)
            try:
                cubes.realise_data()
            except ValueError:
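                # Fall back to realising each cube's data individually.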
                iris.cube.CubeList([cube.data for cube in cubes])
            return cubes
Example #33
def filespecs_cubes(filespecs, raw, constraints, callback=None):
    # Yield cubes from filespecs.
    for filespec in filespecs:
        print("filespec:", filespec)
        # A filespec may glob to multiple files, each yielding multiple cubes.
        for filename in glob.iglob(filespec):
            if filename != filespec:
                print("  filename:", filename)

            if raw is True:
                cubes = iris.load_raw(filename, constraints=constraints,
                                      callback=callback)
            else:
                cubes = iris.load_cubes(filename, constraints=constraints,
                                        callback=callback)

            for cube in cubes:
                yield cube
Example #34
def load_cube(filename):
    """
    Loads the specified file into a single Iris cube

    :param str filename: The path of the file to load
    :returns: An Iris cube containing the loaded file
    :raises FileValidationError: If the file generates more than a single cube
    """
    try:
        try:
            cubes = iris.load(filename)
        except AttributeError:
            # Until https://github.com/SciTools/iris/pull/2485 is complete
            # add this fix for certain hybrid height (model level) variables
            cubes = iris.load_raw(filename)
            bounds_cubes = iris.cube.CubeList()
            data_cube = None
            for cube in cubes:
                if cube.var_name.endswith('_bnds'):
                    bounds_cubes.append(cube)
                else:
                    data_cube = cube
            if not bounds_cubes or not data_cube:
                msg = ('Unable to find data and bounds when fixing hybrid '
                       'height bounds in file: {}'.format(filename))
                raise FileValidationError(msg)
            for bounds_cube in bounds_cubes:
                coord_name = bounds_cube.long_name.replace('+1/2', '')
                bounds_coord = data_cube.coord(coord_name)
                bounds_coord.bounds = bounds_cube.data
            cubes = iris.cube.CubeList([data_cube])
    except Exception:
        msg = 'Unable to load data from file: {}'.format(filename)
        raise FileValidationError(msg)

    var_name = os.path.basename(filename).split('_')[0]

    var_cubes = cubes.extract(
        iris.Constraint(cube_func=lambda lcube: lcube.var_name == var_name))

    if not var_cubes:
        msg = ("Filename '{}' does not load to a single variable".format(
            filename))
        raise FileValidationError(msg)

    return var_cubes[0]
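A hedged usage sketch (the filename is hypothetical but follows the
'<var_name>_...' naming convention the function relies on):

# The leading 'tas' in the basename selects the variable to return.
cube = load_cube('/data/tas_Amon_model_historical_r1i1p1_185001-200512.nc')
assert cube.var_name == 'tas'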
Example #35
def load_var(var):
    pa = []
    for file in os.listdir('/group_workspaces/jasmin4/bas_climate/users/ellgil82/OFCAP/'):
        if fnmatch.fnmatch(file, '*pa000.pp'):
            pa.append(file)
    os.chdir('/group_workspaces/jasmin4/bas_climate/users/ellgil82/OFCAP/')
    raw = iris.load_raw(pa, long_name_dict[var])
    # Copy the cubes to a new cubelist, with each cube having a 1-element
    # time dimension plus auxiliary coordinates for forecast_reference_time
    # and forecast_period.
    cl = iris.cube.CubeList()
    for cube in raw:
        new_cube = iris.util.new_axis(cube, 'time')
        for coord_name in ['forecast_period', 'forecast_reference_time']:
            coord = new_cube.coord(coord_name)
            new_cube.remove_coord(coord_name)
            new_cube.add_aux_coord(coord, new_cube.coord_dims('time')[0])
        if new_cube.coord('forecast_period').points[0] != 0:
            cl.append(new_cube)
    combo_cube = cl.concatenate_cube()
    return combo_cube
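A hedged usage sketch (assumes `long_name_dict` maps the short variable name
to an iris constraint, as the module implies):

# Hypothetical call: load every matching OFCAP 'pa' file for one variable
# and concatenate the forecasts along time.
theta = load_var('theta')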
Example #36
def _get_cubes(filenames, constraints=None, callback=None):
    import iris

    # Removes warnings and prepares for future Iris change
    iris.FUTURE.netcdf_promote = True

    filenames_key = tuple(filenames)
    if filenames_key in gd.CACHED_CUBES:
        all_cubes = gd.CACHED_CUBES[filenames_key]
        # print("Reading cached files: {}".format(filenames_key))
    else:
        with demote_warnings():
            all_cubes = iris.load_raw(filenames, callback=callback)
        gd.CACHED_CUBES[filenames_key] = all_cubes
        # print("Caching files: {}".format(filenames_key))
    if constraints is not None:
        cubes = all_cubes.extract(constraints=constraints)
    else:
        cubes = all_cubes
    return cubes
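A hedged usage sketch (assumes `gd.CACHED_CUBES` is a module-level dict and
`demote_warnings` a context manager, as referenced above):

# Hypothetical call: a repeat call with the same filenames is served from
# the in-memory cache rather than re-read from disk.
cubes = _get_cubes(['/data/run1.nc', '/data/run2.nc'],
                   constraints='air_temperature')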
Example #38
    def __init__(self,
                 sel_bbox,
                 sel_time_mean,
                 sel_experiment,
                 data_dir='data'):
        """
        Args:
        * sel_bbox (dict):
            bounding box for averaging: e.g. {"lat": np.array([32, 35]), "lon": np.array([12, 14])}
        * sel_time_mean (string):
            the temporal mean method of source dataset (e.g. summer, winter, annual)
        * sel_experiment (string):
            the experiment of source dataset (e.g. rcp26, rcp45, rcp85)
        Kwargs:
        * data_dir (path):
            directory where intermediate files will be written
        """

        self.var_selector = {
            "tasmin":
            "climate change signal of daily minimum near-surface air temperature",
            "tasmax":
            "climate change signal of daily maximum near-surface air temperature",
            "rsds":
            "climate change signal of daily mean surface downwelling shortwave radiation",
            "pr":
            "relative climate change signal of {} precipitation".format(
                sel_time_mean)
        }

        self.time_selector = {"near": 0, "mid": 1, "far": 2}

        self.sel_bbox = sel_bbox
        self.sel_time_mean = sel_time_mean
        self.sel_experiment = sel_experiment
        self.data_dir = data_dir

        self.cl_ccs = iris.load_raw(
            os.path.join(
                data_dir, "ccs_{}_*_{}_*.nc".format(self.sel_time_mean,
                                                    self.sel_experiment)))
Example #40
def load_ncs(config):
    save_dir = os.path.join(os.path.abspath(config['run_name']))
    fname = '{}.nc'.format
    fname = os.path.join(save_dir, fname('OBS_DATA'))

    cubes = iris.load_raw(fname)
    data = [cube2series(cube) for cube in cubes]
    index = pd.date_range(start=config['date']['start'].replace(tzinfo=None),
                          end=config['date']['stop'].replace(tzinfo=None),
                          freq='1H')
    # Preserve metadata with `reindex`.
    observations = []
    for series in data:
        _metadata = series._metadata
        obs = series.reindex(index=index, limit=1, method='nearest')
        obs._metadata = _metadata
        observations.append(obs)

    for obs in observations:
        obs.name = obs._metadata.get('station_code')
    ALL_OBS_DATA = pd.DataFrame(observations).T

    dfs = {'OBS_DATA': ALL_OBS_DATA}
    for fname in glob(os.path.join(config['run_name'], '*.nc')):
        if 'OBS_DATA' in fname:
            continue
        else:
            model = os.path.splitext(
                os.path.split(fname)[-1])[0].split('-')[-1]  # noqa
            df = nc2df(fname, columns_name='station_code')
            # FIXME: Horrible workaround for duplicate times.
            if len(df.index.values) != len(np.unique(df.index.values)):
                kw = {'subset': 'index', 'take_last': True}
                df = df.reset_index().drop_duplicates(**kw).set_index('index')
            kw = {'method': 'time', 'limit': 2}
            df = df.reindex(index).interpolate(**kw).ix[index]
            dfs.update({model: df})
    kw = {'orient': 'items', 'intersect': False}
    dfs = pd.Panel.from_dict(dfs, **kw).swapaxes(0, 2)
    return dfs
Example #41
    def test_load_raw(self):
        fldset_1 = self.fields(c_t="015", phn="001")
        fldset_2 = self.fields(c_t="234")
        file_1 = self.save_fieldcubes(fldset_1)
        file_2 = self.save_fieldcubes(fldset_2)
        results = iris.load_raw((file_1, file_2))
        if not self.do_fast_loads:
            # Each 'raw' cube is just one field.
            expected = CubeList(fldset_1 + fldset_2)
        else:
            # 'Raw' cubes have combined (vector) times within each file.
            # The 'other' phenomenon appears separately.
            expected = CubeList(
                [
                    CubeList(fldset_1[:2]).merge_cube(),
                    CubeList(fldset_2).merge_cube(),
                    fldset_1[2],
                ]
            )

        # Again here, the order of these results is not stable:
        # it varies with random characters in the temporary filepath.
        #
        # *****************************************************************
        # *** Here, this is clearly ALSO the case for "standard" loads. ***
        # *****************************************************************
        #
        # E.G. run "test_fast_load.py -v TestCallDetails__Iris.test_load_raw" :
        # If you remove the sort operations, this fails "sometimes".
        #
        # To fix this, sort both expected and results by (first) timepoint
        # - for which purpose we made all the time values different.

        def timeorder(cube):
            return cube.coord("time").points[0]

        expected = sorted(expected, key=timeorder)
        results = sorted(results, key=timeorder)

        self.assertEqual(results, expected)
Example #42
def load_phenomena(url, name_list, callback=None, strict=False):
    """
    Return cube(s) for a certain phenomena in `name_list`.
    The `name_list` must be a collection of CF-1.6 `standard_name`s.

    If `strict` is set to True the function will return **only** one cube,
    if only one is expected to exist, otherwise an exception will be raise.
    (Similar to iris `extract_strict` method.)

    The user may also pass a `callback` function to coerce the metadata
    to CF-conventions.

    Examples
    --------
    >>> import iris
    >>> url = ("http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/"
    ...        "SABGOM_Forecast_Model_Run_Collection_best.ncd")
    >>> name_list = cf_name_list['sea_water_temperature']
    >>> cubes = load_phenomena(url, name_list)
    >>> cube = load_phenomena(url, name_list, strict=True)
    >>> isinstance(cubes, CubeList)
    True
    >>> isinstance(cube, iris.cube.Cube)
    True
    """

    cubes = iris.load_raw(url, callback=callback)
    cubes = [cube for cube in cubes if _in_list(cube, name_list)]
    cubes = _filter_none(cubes)
    cubes = CubeList(cubes)
    if not cubes:
        raise ValueError('Cannot find {!r} in {}.'.format(name_list, url))
    if strict:
        if len(cubes) == 1:
            return cubes[0]
        else:
            msg = "> 1 cube found!  Expected just one.\n {!r}".format
        raise ValueError(msg(cubes))
    return cubes
Example #46
# <codecell>

%%timeit -r 3

url = 'http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/coawst_4_use_best.ncd'
var = 'sea_water_potential_temperature'

cube = iris.load_cube(url, var)

# <codecell>

%%timeit -r 3
url = 'http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/coawst_4_use_best.ncd'
var = 'sea_water_potential_temperature'
cube = iris.load_raw(url, var)

# <codecell>

import netCDF4

# <codecell>

%%timeit -r 3
url='http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/coawst_4_use_best.ncd'
nc = netCDF4.Dataset(url)
t = nc.variables['temp']
lon = nc.variables['lon_rho']
lat = nc.variables['lat_rho']
tvar = nc.variables['time']
z = nc.variables['s_rho']
lat = lat[inbox]


Example #47
# In[19]:

print(inbox.shape)
print(inbox)
print(adcirc.nodes_in_ss.shape)
print(adcirc.nodes_in_ss)


# In[20]:

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    cubes = iris.load_raw(ncfile)


# In[21]:

ua = cubes.extract_strict('Eastward Water Velocity Amplitude')
up = cubes.extract_strict('Eastward Water Velocity Phase')
va = cubes.extract_strict('Northward Water Velocity Amplitude')
vp = cubes.extract_strict('Northward Water Velocity Phase')


# In[22]:

uamp = ua.data[0, inbox, :][:, ind_nc]
vamp = va.data[0, inbox, :][:, ind_nc]
upha = up.data[0, inbox, :][:, ind_nc]
Example #48
def woa_profile(lon, lat, variable='temperature', clim_type='00',
                resolution='1.00', full=False):
    """Return an iris.cube instance from a World Ocean Atlas 2013 variable at a
    given lon, lat point.

    Parameters
    ----------
    lon, lat: float
          point positions to extract the profile.
    Choose data `variable` from:
          'temperature', 'silicate', 'salinity', 'phosphate',
          'oxygen', 'o2sat', 'nitrate', and 'AOU'.
    Choose `clim_type` averages from:
        01-12 :: monthly
        13-16 :: seasonal (North Hemisphere Winter, Spring, Summer,
                           and Autumn respectively)
        00 :: annual
    Choose `resolution` from:
        1 (1 degree), or 4 (0.25 degrees)

    Returns
    -------
    Iris.cube instance with the climatology.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from oceans.datasets import woa_profile
    >>> cube = woa_profile(-143, 10, variable='temperature',
    ...                    clim_type='00', resolution='1.00', full=False)
    >>> fig, ax = plt.subplots(figsize=(2.25, 5))
    >>> z = cube.coord(axis='Z').points
    >>> l = ax.plot(cube[0, :].data, z)
    >>> ax.grid(True)
    >>> ax.invert_yaxis()
    """

    if variable not in ['salinity', 'temperature']:
        resolution = '1.00'
        decav = 'all'
        msg = '{} is only available at 1 degree resolution'.format
        warnings.warn(msg(variable))
    else:
        decav = 'decav'

    v = dict(temperature='t', silicate='i', salinity='s', phosphate='p',
             oxygen='o', o2sat='O', nitrate='n', AOU='A')

    r = {'1.00': '1', '0.25': '4'}

    var = v[variable]
    res = r[resolution]

    uri = ("http://data.nodc.noaa.gov/thredds/dodsC/woa/WOA13/DATA/"
           "{variable}/netcdf/{decav}/{resolution}/woa13_{decav}_{var}"
           "{clim_type}_0{res}.nc").format
    url = uri(**dict(variable=variable, decav=decav, resolution=resolution,
                     var=var, clim_type=clim_type, res=res))

    cubes = iris.load_raw(url)
    cubes = [extract_nearest_neighbour(cube, [('longitude', lon),
                                              ('latitude', lat)])
             for cube in cubes]
    cubes = iris.cube.CubeList(cubes)
    if full:
        return cubes
    else:
        cubes = [c for c in cubes if c.var_name == '{}_an'.format(var)]
        return cubes[0]
Example #49
 def test_load_raw(self):
     cube, = iris.load_raw(self.fname)
     self.assertEqual(len(cube.cell_measures()), 1)
     self.assertEqual(cube.cell_measures()[0].measure, 'area')
Example #50
def woa_subset(bbox, variable='temperature', time_period='annual', resolution='5', full=False):
    """
    Return an iris.cube instance from a World Ocean Atlas variable at a
    given lon, lat bounding box.

    Parameters
    ----------
    bbox: list, tuple
          minx, maxx, miny, maxy positions to extract.
    See `woa_profile` for the other options.

    Returns
    -------
    `iris.Cube` instance with the climatology.

    Examples
    --------
    >>> # Extract a 2D surface -- Annual temperature climatology:
    >>> import iris.plot as iplt
    >>> import matplotlib.pyplot as plt
    >>> from oceans.colormaps import cm
    >>> bbox = [2.5, 357.5, -87.5, 87.5]
    >>> cube = woa_subset(bbox, variable='temperature', time_period='annual', resolution='5')
    >>> c = cube[0, 0, ...]  # Slice singleton time and first level.
    >>> cs = iplt.pcolormesh(c, cmap=cm.avhrr)
    >>> cbar = plt.colorbar(cs)

    >>> # Extract a square around the Mariana Trench averaging into a profile.
    >>> import iris
    >>> from oceans.colormaps import get_color
    >>> colors = get_color(12)
    >>> months = 'Jan Feb Apr Mar May Jun Jul Aug Sep Oct Nov Dec'.split()
    >>> bbox = [-143, -141, 10, 12]
    >>> fig, ax = plt.subplots(figsize=(5, 5))
    >>> for month in months:
    ...     cube = woa_subset(bbox, time_period=month, variable='temperature', resolution='1')
    ...     grid_areas = iris.analysis.cartography.area_weights(cube)
    ...     c = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
    ...     z = c.coord(axis='Z').points
    ...     l = ax.plot(c[0, :].data, z, label=month, color=next(colors))
    >>> ax.grid(True)
    >>> ax.invert_yaxis()
    >>> leg = ax.legend(loc='lower left')
    >>> _ = ax.set_ylim(200, 0)

    """
    import iris

    v = _woa_variable(variable)
    url = _woa_url(variable, time_period, resolution)
    cubes = iris.load_raw(url)
    cubes = [
        cube.intersection(
            longitude=(bbox[0], bbox[1]),
            latitude=(bbox[2], bbox[3])) for cube in cubes
    ]

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        cubes = iris.cube.CubeList(cubes)

    if full:
        return cubes
    else:
        return [c for c in cubes if c.var_name == f'{v}_mn'][0]
Example #51
from datetime import datetime
from pandas import date_range
import iris
import warnings
import pyugrid


# In[8]:

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
#    ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
#              'vdatum_gulf_of_maine/adcirc54_38_orig.nc')
    url = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
              'vdatum_fl_sab/adcirc54.nc')
    cubes = iris.load_raw(url)

print(cubes)


# In[9]:

units = {'knots': 1.9438, 'm/s': 1.0}
consts = ['STEADY', 'M2', 'S2', 'N2', 'K1', 'O1', 'P1', 'M4', 'M6']

bbox = [-70.7234, -70.4532, 41.4258, 41.5643]  # Vineyard sound 2.
bbox = [-85.25, -84.75, 29.58, 29.83]  # Apalachicola Bay
halo = 0.1
ax2 = [bbox[0] - halo * (bbox[1] - bbox[0]),
       bbox[1] + halo * (bbox[1] - bbox[0]),
       bbox[2] - halo * (bbox[3] - bbox[2]),
       bbox[3] + halo * (bbox[3] - bbox[2])]
Example #52
 def test_raw_to_table_count(self):
     filename = tests.get_data_path(('FF', 'n48_multi_field_table_count'))
     cubes = iris.load_raw(filename)
     ff_header = ff.FFHeader(filename)
     table_count = ff_header.lookup_table[2]
     self.assertEqual(len(cubes), table_count)
Example #53
def woa_subset(bbox=[2.5, 357.5, -87.5, 87.5], variable='temperature',
               clim_type='00', resolution='1.00', full=False):
    """Return an iris.cube instance from a World Ocean Atlas 2013 variable at a
    given lon, lat bounding box.

    Parameters
    ----------
    bbox: list, tuple
          minx, maxx, miny, maxy positions to extract.
    Choose data `variable` from:
        `dissolved_oxygen`, `salinity`, `temperature`, `oxygen_saturation`,
        `apparent_oxygen_utilization`, `phosphate`, `silicate`, or `nitrate`.
    Choose `clim_type` averages from:
        01-12 :: monthly
        13-16 :: seasonal (North Hemisphere Winter, Spring, Summer,
                           and Autumn respectively)
        00 :: annual
    Choose `resolution` from:
        1 (1 degree), or 4 (0.25 degrees)

    Returns
    -------
    Iris.cube instance with the climatology.

    Examples
    --------
    >>> import cartopy.crs as ccrs
    >>> import matplotlib.pyplot as plt
    >>> import cartopy.feature as cfeature
    >>> from cartopy.mpl.gridliner import (LONGITUDE_FORMATTER,
    ...                                    LATITUDE_FORMATTER)
    >>> LAND = cfeature.NaturalEarthFeature('physical', 'land', '50m',
    ...                                     edgecolor='face',
    ...                                     facecolor=cfeature.COLORS['land'])
    >>> def make_map(bbox, projection=ccrs.PlateCarree()):
    ...     fig, ax = plt.subplots(figsize=(8, 6),
    ...                            subplot_kw=dict(projection=projection))
    ...     ax.set_extent(bbox)
    ...     ax.add_feature(LAND, facecolor='0.75')
    ...     ax.coastlines(resolution='50m')
    ...     gl = ax.gridlines(draw_labels=True)
    ...     gl.xlabels_top = gl.ylabels_right = False
    ...     gl.xformatter = LONGITUDE_FORMATTER
    ...     gl.yformatter = LATITUDE_FORMATTER
    ...     return fig, ax
    >>> # Extract a 2D surface -- Annual temperature climatology:
    >>> import matplotlib.pyplot as plt
    >>> from oceans.ff_tools import wrap_lon180
    >>> from oceans.colormaps import cm, get_color
    >>> import iris.plot as iplt
    >>> from oceans.datasets import woa_subset
    >>> bbox = [2.5, 357.5, -87.5, 87.5]
    >>> kw = dict(bbox=bbox, variable='temperature', clim_type='00',
    ...           resolution='0.25')
    >>> cube = woa_subset(**kw)
    >>> c = cube[0, 0, ...]  # Slice singleton time and first level.
    >>> cs = iplt.pcolormesh(c, cmap=cm.avhrr)
    >>> cbar = plt.colorbar(cs)
    >>> # Extract a square around the Mariana Trench averaging into a profile.
    >>> bbox = [-143, -141, 10, 12]
    >>> kw = dict(bbox=bbox, variable='temperature', resolution='0.25',
    ...           clim_type=None)
    >>> fig, ax = plt.subplots(figsize=(5, 5))
    >>> colors = get_color(12)
    >>> months = 'Jan Feb Apr Mar May Jun Jul Aug Sep Oct Nov Dec'.split()
    >>> months = dict(zip(months, range(12)))
    >>> for month, clim_type in months.items():
    ...     clim_type = '{0:02d}'.format(clim_type+1)
    ...     kw.update(clim_type=clim_type)
    ...     cube = woa_subset(**kw)
    ...     grid_areas = iris.analysis.cartography.area_weights(cube)
    ...     c = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN,
    ...                         weights=grid_areas)
    ...     z = c.coord(axis='Z').points
    ...     l = ax.plot(c[0, :].data, z, label=month, color=next(colors))
    >>> ax.grid(True)
    >>> ax.invert_yaxis()
    >>> leg = ax.legend(loc='lower left')
    >>> _ = ax.set_ylim(200, 0)
    """

    if variable not in ['salinity', 'temperature']:
        resolution = '1.00'
        decav = 'all'
        msg = '{} is only available at 1 degree resolution'.format
        warnings.warn(msg(variable))
    else:
        decav = 'decav'

    v = dict(temperature='t', silicate='i', salinity='s', phosphate='p',
             oxygen='o', o2sat='O', nitrate='n', AOU='A')

    r = {'1.00': '1', '0.25': '4'}

    var = v[variable]
    res = r[resolution]

    uri = ("http://data.nodc.noaa.gov/thredds/dodsC/woa/WOA13/DATA/"
           "{variable}/netcdf/{decav}/{resolution}/woa13_{decav}_{var}"
           "{clim_type}_0{res}.nc").format
    url = uri(**dict(variable=variable, decav=decav, resolution=resolution,
                     var=var, clim_type=clim_type, res=res))

    cubes = iris.load_raw(url)
    cubes = [cube.intersection(longitude=(bbox[0], bbox[1]),
                               latitude=(bbox[2], bbox[3])) for cube in cubes]
    cubes = iris.cube.CubeList(cubes)
    if full:
        return cubes
    else:
        cubes = [c for c in cubes if c.var_name == '{}_an'.format(var)]
        return cubes[0]
Example #54
cefas_dir = '/media/Win7_Data/cefas_data/'
cefas_files = ['Cefas_hindcast_monthlyvals_site_a.nc',
               'Cefas_hindcast_monthlyvals_site_b.nc',
               'Cefas_hindcast_monthlyvals_site_c.nc',
               'Cefas_hindcast_monthlyvals_site_d.nc']

moll_dir = '/media/Win7_Data/butler_data/'
moll_files = ['Chickens_IOM_Glycymeris_chronology_1941.txt',
              'Ramsey_IOM_Glycymeris_chronology_1921.txt',
              'IOM_Arctica_chronology_1516.txt',
              'Tiree_Passage_Scotland_Glycymeris_Chronology_1805.txt',
              'north_sea_arctica_f1_chron.txt']
moll_site_names = ['Isle of Man 1', 'Isle of Man 2', 'Isle of Man 3',
                   'Tiree Passage (nr Oban)', 'N. Sea (N)']

# Isle of Man Arctica: from several positions off the west coast of the IOM,
# centred around 4 50 E, 54 10 N, between 25 and 80 metres depth. For the
# full range see the map in Butler et al 2009 (EPSL).
# Isle of Man Glycymeris: Chickens is 54 06.031N, 04 23.195W, about 60 metres
# depth; Ramsey is 54 06.031N, 04 23.195W, about 35-40 metres depth (these
# are from the attached paper by Brocas et al).
# North Sea F1 Arctica: 59 23.1N, 0 31.0E, about 140 m depth (from Butler
# et al 2009 (Palaeoceanography)).
# Tiree Passage Glycymeris: 56 37N, 6 24W, 50 m water depth (unpublished).

cube0 = iris.load_raw(cefas_dir + cefas_files[0])
cube1 = iris.load_raw(cefas_dir + cefas_files[1])
cube2 = iris.load_raw(cefas_dir + cefas_files[2])
cube3 = iris.load_raw(cefas_dir + cefas_files[3])
cubes = [cube0, cube1, cube2, cube3]

cube = cube0

var_name = []
for i in np.arange(np.size(cube)):
    var_name.append(cube[i].metadata[1])

coord = cube[0].coord('time')
dt = coord.units.num2date(coord.points)
cfas_year = np.array([coord.units.num2date(value).year for value in coord.points])
cfas_month = np.array([coord.units.num2date(value).month for value in coord.points])
Example #55
 def test_merge_cell_measure_aware(self):
     cube1, = iris.load_raw(self.fname)
     cube2, = iris.load_raw(self.fname)
     cube2._cell_measures_and_dims[0][0].var_name = 'not_areat'
     cubes = CubeList([cube1, cube2]).merge()
     self.assertEqual(len(cubes), 2)
Example #56
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(bbox)
ax.coastlines(resolution='10m')
plt.tricontourf(triang, zcube.data, levels=levs)
plt.colorbar(fraction=0.046, pad=0.04)
plt.tricontour(triang, zcube.data, colors='k', levels=levs)
tstr = tvar.units.num2date(tvar.points[itime])
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
plt.title('%s: %s: %s' % (var, tstr, zcube.attributes['title']));


# In[13]:

ucube = iris.load_raw(url, 'eastward_sea_water_velocity')[0]
vcube = iris.load_raw(url, 'northward_sea_water_velocity')[0]


# In[14]:

# layer to plot (0 is surface, -1 is bottom)
klev = 0

u = ucube[itime,klev,:].data
v = vcube[itime,klev,:].data


# In[15]:

lonc = cube.mesh.face_coordinates[:,0]