Example #1
def test_grid_gradient():
    x = 4
    y = 6
    time = np.linspace(0, 2, 3)
    field = Field(
        "Test",
        data=createSimpleGrid(x, y, time),
        time=time,
        lon=np.linspace(0, x - 1, x, dtype=np.float32),
        lat=np.linspace(-y / 2, y / 2 - 1, y, dtype=np.float32),
    )

    # Calculate field gradients for testing against numpy gradients.
    grad_fields = field.gradient()

    # Create numpy fields.
    r = 6.371e6
    deg2rd = np.pi / 180.0
    numpy_grad_fields = np.gradient(np.transpose(field.data[0, :, :]), (r * np.diff(field.lat) * deg2rd)[0])

    # Arbitrarily set relative tolerance to 1%.
    assert np.allclose(
        grad_fields[0].data[0, :, :], np.array(np.transpose(numpy_grad_fields[0])), rtol=1e-2
    )  # Field gradient dx.
    assert np.allclose(
        grad_fields[1].data[0, :, :], np.array(np.transpose(numpy_grad_fields[1])), rtol=1e-2
    )  # Field gradient dy.
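
For reference, the spacing argument passed to np.gradient in the test above is the one-degree latitude step converted to metres. A minimal standalone check of that conversion (r and deg2rd copied from the test):

import numpy as np

r = 6.371e6                 # Earth radius in metres, as in the test above
deg2rd = np.pi / 180.0
step_m = r * 1.0 * deg2rd   # metres per one degree of latitude, ~111.19 km
assert np.isclose(step_m, 111194.93, rtol=1e-6)
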
Example #2
def test_add_duplicate_field(dupobject):
    data, dimensions = generate_fieldset(100, 100)
    fieldset = FieldSet.from_data(data, dimensions)
    field = Field('newfld', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat)
    fieldset.add_field(field)
    error_thrown = False
    try:
        if dupobject == 'same':
            fieldset.add_field(field)
        elif dupobject == 'new':
            field2 = Field('newfld', np.ones((2, 2)), lon=np.array([0, 1]), lat=np.array([0, 2]))
            fieldset.add_field(field2)
    except RuntimeError:
        error_thrown = True

    assert error_thrown
Example #3
def test_fieldset_defer_loading_with_diff_time_origin(tmpdir, fail, filename='test_parcels_defer_loading'):
    filepath = tmpdir.join(filename)
    data0, dims0 = generate_fieldset(10, 10, 1, 10)
    dims0['time'] = np.arange(0, 10, 1) * 3600
    fieldset_out = FieldSet.from_data(data0, dims0)
    fieldset_out.U.grid.time_origin = TimeConverter(np.datetime64('2018-04-20'))
    fieldset_out.V.grid.time_origin = TimeConverter(np.datetime64('2018-04-20'))
    data1, dims1 = generate_fieldset(10, 10, 1, 10)
    if fail:
        dims1['time'] = np.arange(0, 10, 1) * 3600
        Wtime_origin = TimeConverter(np.datetime64('2018-04-22'))
    else:
        dims1['time'] = np.arange(0, 10, 1) * 1800 + (24+25)*3600
        Wtime_origin = TimeConverter(np.datetime64('2018-04-18'))
    gridW = RectilinearZGrid(dims1['lon'], dims1['lat'], dims1['depth'], dims1['time'], time_origin=Wtime_origin)
    fieldW = Field('W', np.zeros(data1['U'].shape), grid=gridW)
    fieldset_out.add_field(fieldW)
    fieldset_out.write(filepath)
    fieldset = FieldSet.from_parcels(filepath, extra_fields={'W': 'W'})
    assert fieldset.U.creation_log == 'from_parcels'
    pset = ParticleSet.from_list(fieldset, pclass=JITParticle, lon=[0.5], lat=[0.5], depth=[0.5],
                                 time=[datetime.datetime(2018, 4, 20, 1)])
    pset.execute(AdvectionRK4_3D, runtime=delta(hours=4), dt=delta(hours=1))
Example #4
    def from_data(cls,
                  data,
                  dimensions,
                  transpose=True,
                  mesh='spherical',
                  allow_time_extrapolation=True,
                  time_periodic=False,
                  **kwargs):
        """Initialise FieldSet object from raw data

        :param data: Dictionary mapping field names to numpy arrays.
               Note that at least a 'U' and 'V' numpy array need to be given
        :param dimensions: Dictionary mapping field dimensions (lon,
               lat, depth, time) to numpy arrays.
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param transpose: Boolean whether to transpose data on read-in
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        """

        u_units, v_units = unit_converters(mesh)
        units = defaultdict(UnitConverter)
        units.update({'U': u_units, 'V': v_units})
        fields = {}
        for name, datafld in data.items():
            # Use dimensions[name] if dimensions is a dict of dicts
            dims = dimensions[name] if name in dimensions else dimensions

            lon = dims['lon']
            lat = dims['lat']
            depth = np.zeros(
                1, dtype=np.float32) if 'depth' not in dims else dims['depth']
            time = np.zeros(
                1, dtype=np.float64) if 'time' not in dims else dims['time']

            fields[name] = Field(
                name,
                datafld,
                lon,
                lat,
                depth=depth,
                time=time,
                transpose=transpose,
                units=units[name],
                allow_time_extrapolation=allow_time_extrapolation,
                time_periodic=time_periodic,
                **kwargs)
        u = fields.pop('U')
        v = fields.pop('V')
        return cls(u, v, fields=fields)
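
A minimal usage sketch for this from_data variant; the names and shapes below are hypothetical, and the class is assumed to be exposed as parcels.FieldSet. With the default transpose=True the arrays are laid out as [xdim, ydim]:

import numpy as np
from parcels import FieldSet  # assuming this classmethod is exposed on FieldSet

xdim, ydim = 4, 3
data = {'U': np.zeros((xdim, ydim), dtype=np.float32),  # at least 'U' and 'V' are required
        'V': np.zeros((xdim, ydim), dtype=np.float32)}
dimensions = {'lon': np.linspace(0., 3., xdim, dtype=np.float32),
              'lat': np.linspace(0., 2., ydim, dtype=np.float32)}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
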
Example #5
def test_add_field(xdim, ydim, tmpdir, filename='test_add'):
    filepath = tmpdir.join(filename)
    data, dimensions = generate_fieldset(xdim, ydim)
    fieldset = FieldSet.from_data(data, dimensions)
    field = Field('newfld', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat)
    fieldset.add_field(field)
    assert fieldset.newfld.data.shape == fieldset.U.data.shape
    fieldset.write(filepath)
Example #6
def test_add_field(xdim, ydim, tmpdir, filename='test_add'):
    filepath = tmpdir.join(filename)
    u, v, lon, lat, depth, time = generate_grid(xdim, ydim)
    grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
    field = Field('newfld', grid.U.data, grid.U.lon, grid.U.lat)
    grid.add_field(field)
    assert grid.newfld.data.shape == grid.U.data.shape
    grid.write(filepath)
Example #7
def test_add_field_after_pset(pset_mode, fieldtype):
    data, dimensions = generate_fieldset(100, 100)
    fieldset = FieldSet.from_data(data, dimensions)
    pset = pset_type[pset_mode]['pset'](fieldset, ScipyParticle, lon=0, lat=0)  # noqa ; to trigger fieldset.check_complete
    field1 = Field('field1', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat)
    field2 = Field('field2', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat)
    vfield = VectorField('vfield', field1, field2)
    error_thrown = False
    try:
        if fieldtype == 'normal':
            fieldset.add_field(field1)
        elif fieldtype == 'vector':
            fieldset.add_vector_field(vfield)
    except RuntimeError:
        error_thrown = True

    assert error_thrown
Example #8
def test_fieldset_defer_loading_function(zdim,
                                         scale_fac,
                                         tmpdir,
                                         filename='test_parcels_defer_loading'
                                         ):
    filepath = tmpdir.join(filename)
    data0, dims0 = generate_fieldset(3, 3, zdim, 10)
    data0['U'][:, 0, :, :] = np.nan  # setting first layer to nan, which will be changed to zero (and all other layers to 1)
    dims0['time'] = np.arange(0, 10, 1) * 3600
    dims0['depth'] = np.arange(0, zdim, 1)
    fieldset_out = FieldSet.from_data(data0, dims0)
    fieldset_out.write(filepath)
    fieldset = FieldSet.from_parcels(filepath,
                                     chunksize={
                                         'time': ('time_counter', 1),
                                         'depth': ('depthu', 1),
                                         'lat': ('y', 2),
                                         'lon': ('x', 2)
                                     })

    # testing for combination of deferred-loaded and numpy Fields
    with pytest.raises(ValueError):
        fieldset.add_field(
            Field('numpyfield',
                  np.zeros((10, zdim, 3, 3)),
                  grid=fieldset.U.grid))

    # testing for scaling factors
    fieldset.U.set_scaling_factor(scale_fac)

    dz = np.gradient(fieldset.U.depth)
    DZ = np.moveaxis(
        np.tile(dz, (fieldset.U.grid.ydim, fieldset.U.grid.xdim, 1)),
        [0, 1, 2], [1, 2, 0])

    def compute(fieldset):
        # Calculating vertical weighted average
        for f in [fieldset.U, fieldset.V]:
            for tind in f.loaded_time_indices:
                data = da.sum(f.data[tind, :] * DZ, axis=0) / sum(dz)
                data = da.broadcast_to(
                    data, (1, f.grid.zdim, f.grid.ydim, f.grid.xdim))
                f.data = f.data_concatenate(f.data, data, tind)

    fieldset.compute_on_defer = compute
    fieldset.computeTimeChunk(1, 1)
    assert isinstance(fieldset.U.data, da.core.Array)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)

    pset = ParticleSet(fieldset, JITParticle, 0, 0)

    def DoNothing(particle, fieldset, time):
        return ErrorCode.Success

    pset.execute(DoNothing, dt=3600)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
Example #9
def Create_Landmask(grid, lim=1e-45):
    def isocean(p, lim):
        return 1 if p < lim else 0

    def isshallow(p, lim):
        return 1 if p < lim else 0

    nx = grid.H.lon.size
    ny = grid.H.lat.size
    mask = np.zeros([nx, ny, 1], dtype=np.int8)
    pbar = ProgressBar()
    for i in pbar(range(nx)):
        for j in range(1, ny - 1):
            if isshallow(np.abs(grid.bathy.data[0, 2, j, i]), lim):
                mask[i, j] = 2
            if isocean(grid.H.data[0, j, i], lim):  # For each land point
                mask[i, j] = 1

    Mask = Field('LandMask', mask, grid.H.lon, grid.H.lat, transpose=True)
    Mask.interp_method = 'nearest'
    return Mask  # ClosestLon, ClosestLat
Example #10
    def from_netcdf(cls,
                    filenames,
                    variables,
                    dimensions,
                    indices={},
                    mesh='spherical',
                    allow_time_extrapolation=False,
                    **kwargs):
        """Initialises grid data from files using NEMO conventions.

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files.
        :param variables: Dictionary mapping variables to variable
               names in the netCDF file(s).
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
        """

        # Determine unit converters for all fields
        u_units, v_units = unit_converters(mesh)
        units = defaultdict(UnitConverter)
        units.update({'U': u_units, 'V': v_units})
        fields = {}
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            basepath = path.local(filenames[var])
            paths = [path.local(fp) for fp in glob(str(basepath))]
            if len(paths) == 0:
                raise IOError("Grid files not found: %s" % str(basepath))
            for fp in paths:
                if not fp.exists():
                    raise IOError("Grid file not found: %s" % str(fp))
            dimensions['data'] = name
            fields[var] = Field.from_netcdf(
                var,
                dimensions,
                paths,
                indices,
                units=units[var],
                allow_time_extrapolation=allow_time_extrapolation,
                **kwargs)
        u = fields.pop('U')
        v = fields.pop('V')
        return cls(u, v, fields=fields)
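
A usage sketch for this from_netcdf variant. The file paths and netCDF names below are hypothetical (loosely following NEMO conventions, as the docstring suggests), and the classmethod is assumed to live on the old parcels Grid class; only the structure of the three dictionaries is fixed by the signature:

from parcels import Grid  # assuming the old Grid API these examples use

filenames = {'U': 'nemo_output/U*.nc', 'V': 'nemo_output/V*.nc'}   # wildcards allowed
variables = {'U': 'vozocrtx', 'V': 'vomecrty'}
dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat', 'time': 'time_counter'}
grid = Grid.from_netcdf(filenames, variables, dimensions)
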
Example #11
def test_fieldset_nonstandardtime(calendar, cftime_datetime, tmpdir, filename='test_nonstandardtime.nc', xdim=4, ydim=6):
    filepath = tmpdir.join(filename)
    dates = [getattr(cftime, cftime_datetime)(1, m, 1) for m in range(1, 13)]
    da = xr.DataArray(np.random.rand(12, xdim, ydim),
                      coords=[dates, range(xdim), range(ydim)],
                      dims=['time', 'lon', 'lat'], name='U')
    da.to_netcdf(str(filepath))

    dims = {'lon': 'lon', 'lat': 'lat', 'time': 'time'}
    field = Field.from_netcdf(filepath, 'U', dims)
    assert field.grid.time_origin.calendar == calendar
Example #12
def test_fieldset_gradient():
    x = 4
    y = 6
    time = np.linspace(0, 2, 3, dtype=int)
    field = Field("Test", data=create_simple_fieldset(x, y, time), time=time,
                  lon=np.linspace(0, x-1, x, dtype=np.float32),
                  lat=np.linspace(-y/2, y/2-1, y, dtype=np.float32))

    # Calculate field gradients for testing against numpy gradients.
    grad_fields = field.gradient()

    # Create numpy fields.
    r = 6.371e6
    deg2rd = np.pi / 180.
    numpy_grad_fields = np.gradient(np.transpose(field.data[0, :, :]), (r * np.diff(field.lat) * deg2rd)[0])

    # Arbitrarily set relative tolerance to 1%.
    assert np.allclose(grad_fields[0].data[0, :, :], np.array(np.transpose(numpy_grad_fields[0])),
                       rtol=1e-2)  # Field gradient dx.
    assert np.allclose(grad_fields[1].data[0, :, :], np.array(np.transpose(numpy_grad_fields[1])),
                       rtol=1e-2)  # Field gradient dy.
Example #13
def Create_Landmask(grid, lim=1e-45):
    def isocean(p, lim):
        return 1 if p < lim else 0

    def isshallow(p, lim):
        return 1 if p < lim else 0

    nx = grid.H.lon.size
    ny = grid.H.lat.size
    mask = np.zeros([nx, ny, 1], dtype=np.int8)
    pbar = ProgressBar()
    for i in pbar(range(nx)):
        for j in range(1, ny - 1):
            if isshallow(np.abs(grid.bathy.data[0, 2, j, i]), lim):
                mask[i, j] = 2
            if isocean(grid.H.data[0, j, i], lim):  # For each land point
                mask[i, j] = 1

    Mask = Field('LandMask', mask, grid.H.lon, grid.H.lat, transpose=True)
    Mask.interp_method = 'nearest'
    return Mask  #ClosestLon, ClosestLat
Example #14
    def from_netcdf(cls, filenames, variables, dimensions, indices={},
                    mesh='spherical', allow_time_extrapolation=False, time_periodic=False, **kwargs):
        """Initialises FieldSet data from files using NEMO conventions.

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files,
               or be a list of files.
        :param variables: Dictionary mapping variables to variable
               names in the netCDF file(s).
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        """

        fields = {}
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            if isinstance(filenames[var], list):
                paths = filenames[var]
            else:
                paths = sorted(glob(str(filenames[var])))
            if len(paths) == 0:
                raise IOError("FieldSet files not found: %s" % str(filenames[var]))
            for fp in paths:
                if not path.exists(fp):
                    raise IOError("FieldSet file not found: %s" % str(fp))

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            dims['data'] = name
            inds = indices[var] if var in indices else indices

            fields[var] = Field.from_netcdf(var, dims, paths, inds, mesh=mesh,
                                            allow_time_extrapolation=allow_time_extrapolation,
                                            time_periodic=time_periodic, **kwargs)
        u = fields.pop('U')
        v = fields.pop('V')
        return cls(u, v, fields=fields)
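
Because dimensions may be a dictionary of dictionaries here, staggered grids with per-variable dimension names can be described directly. A sketch with hypothetical paths and NEMO-style staggered names:

filenames = {'U': ['data/U_2000.nc'], 'V': ['data/V_2000.nc']}   # hypothetical file lists
variables = {'U': 'uo', 'V': 'vo'}
dimensions = {'U': {'lon': 'glamu', 'lat': 'gphiu', 'time': 'time_counter'},
              'V': {'lon': 'glamv', 'lat': 'gphiv', 'time': 'time_counter'}}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)
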
Example #15
    def from_data(cls, data, dimensions, transpose=False, mesh='spherical',
                  allow_time_extrapolation=None, time_periodic=False, **kwargs):
        """Initialise FieldSet object from raw data

        :param data: Dictionary mapping field names to numpy arrays.
               Note that at least a 'U' and 'V' numpy array need to be given

               1. If data shape is [xdim, ydim], [xdim, ydim, zdim], [xdim, ydim, tdim] or [xdim, ydim, zdim, tdim],
                  whichever is relevant for the dataset, use the flag transpose=True
               2. If data shape is [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim],
                  use the flag transpose=False (default value)
               3. If data has any other shape, you first need to reorder it
        :param dimensions: Dictionary mapping field dimensions (lon,
               lat, depth, time) to numpy arrays.
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param transpose: Boolean whether to transpose data on read-in
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        """

        fields = {}
        for name, datafld in data.items():
            # Use dimensions[name] if dimensions is a dict of dicts
            dims = dimensions[name] if name in dimensions else dimensions

            if allow_time_extrapolation is None:
                allow_time_extrapolation = False if 'time' in dims else True

            lon = dims['lon']
            lat = dims['lat']
            depth = np.zeros(1, dtype=np.float32) if 'depth' not in dims else dims['depth']
            time = np.zeros(1, dtype=np.float64) if 'time' not in dims else dims['time']
            grid = RectilinearZGrid(lon, lat, depth, time, time_origin=TimeConverter(), mesh=mesh)

            fields[name] = Field(name, datafld, grid=grid, transpose=transpose,
                                 allow_time_extrapolation=allow_time_extrapolation, time_periodic=time_periodic, **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
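
A short sketch of the two array layouts the docstring distinguishes (all values hypothetical): the same zero field is passed once in [ydim, xdim] order with the default transpose=False, and once in [xdim, ydim] order with transpose=True:

import numpy as np
from parcels import FieldSet

xdim, ydim = 5, 4
dims = {'lon': np.linspace(0., 1., xdim, dtype=np.float32),
        'lat': np.linspace(0., 1., ydim, dtype=np.float32)}

# Case 2 of the docstring: data in [ydim, xdim] order, transpose=False (default)
data = {'U': np.zeros((ydim, xdim), dtype=np.float32),
        'V': np.zeros((ydim, xdim), dtype=np.float32)}
fs = FieldSet.from_data(data, dims, mesh='flat')

# Case 1: data in [xdim, ydim] order, so transpose=True is needed
data_t = {name: arr.T for name, arr in data.items()}
fs_t = FieldSet.from_data(data_t, dims, mesh='flat', transpose=True)
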
Example #16
def test_fieldset_defer_loading_function(zdim,
                                         scale_fac,
                                         tmpdir,
                                         filename='test_parcels_defer_loading'
                                         ):
    filepath = tmpdir.join(filename)
    data0, dims0 = generate_fieldset(3, 3, zdim, 10)
    data0['U'][:, 0, :, :] = np.nan  # setting first layer to nan, which will be changed to zero (and all other layers to 1)
    dims0['time'] = np.arange(0, 10, 1) * 3600
    dims0['depth'] = np.arange(0, zdim, 1)
    fieldset_out = FieldSet.from_data(data0, dims0)
    fieldset_out.write(filepath)
    fieldset = FieldSet.from_parcels(filepath)

    # testing for combination of deferred-loaded and numpy Fields
    fieldset.add_field(
        Field('numpyfield', np.zeros((10, zdim, 3, 3)), grid=fieldset.U.grid))

    # testing for scaling factors
    fieldset.U.set_scaling_factor(scale_fac)

    dFdx, dFdy = fieldset.V.gradient()

    dz = np.gradient(fieldset.U.depth)
    DZ = np.moveaxis(
        np.tile(dz, (fieldset.U.grid.ydim, fieldset.U.grid.xdim, 1)),
        [0, 1, 2], [1, 2, 0])

    def compute(fieldset):
        # Calculating vertical weighted average
        for f in [fieldset.U, fieldset.V]:
            for tind in f.loaded_time_indices:
                f.data[tind, :] = np.sum(f.data[tind, :] * DZ,
                                         axis=0) / sum(dz)

    fieldset.compute_on_defer = compute
    fieldset.computeTimeChunk(1, 1)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
    assert np.allclose(dFdx.data, 0)

    pset = ParticleSet(fieldset, JITParticle, 0, 0)

    def DoNothing(particle, fieldset, time, dt):
        return ErrorCode.Success

    pset.execute(DoNothing, dt=3600)
    assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.) / zdim)
    assert np.allclose(dFdx.data, 0)
Example #17
    def from_xarray_dataset(cls, ds, variables, dimensions, indices=None, mesh='spherical', allow_time_extrapolation=None,
                            time_periodic=False, deferred_load=True, **kwargs):
        """Initialises FieldSet data from xarray Datasets.

        :param ds: xarray Dataset.
               Note that the built-in Advection kernels assume that U and V are in m/s
        :param variables: Dictionary mapping parcels variable names to data variables in the xarray Dataset.
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the xarray Dataset.
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
        :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
               (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation, see also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        :param deferred_load: boolean whether to only pre-load data (in deferred mode) or
               fully load them (default: True). It is advised to deferred load the data, since in
               that case Parcels deals with a better memory management during particle set execution.
               deferred_load=False is however sometimes necessary for plotting the fields.
        """

        fields = {}
        if 'creation_log' not in kwargs.keys():
            kwargs['creation_log'] = 'from_xarray_dataset'
        for var, name in variables.items():

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            inds = indices[var] if (indices and var in indices) else indices

            fields[var] = Field.from_netcdf(None, ds[name], dimensions=dims, indices=inds, grid=None, mesh=mesh,
                                            allow_time_extrapolation=allow_time_extrapolation, var_name=var,
                                            time_periodic=time_periodic, deferred_load=deferred_load, **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
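
A self-contained sketch for from_xarray_dataset; the Dataset and its variable names ('uvel', 'vvel') are invented here, and the two mapping dictionaries translate them to parcels' U and V:

import numpy as np
import xarray as xr
from parcels import FieldSet

times = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02')]
ds = xr.Dataset(
    {'uvel': (('time', 'lat', 'lon'), np.zeros((2, 4, 5))),
     'vvel': (('time', 'lat', 'lon'), np.zeros((2, 4, 5)))},
    coords={'time': times, 'lat': np.arange(4.), 'lon': np.arange(5.)})

variables = {'U': 'uvel', 'V': 'vvel'}
dimensions = {'lon': 'lon', 'lat': 'lat', 'time': 'time'}
fieldset = FieldSet.from_xarray_dataset(ds, variables, dimensions)
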
Example #18
def Create_SEAPODYM_Diffusion_Field(H,
                                    timestep=30 * 24 * 60 * 60,
                                    sigma=0.1769952864978924,
                                    c=0.662573993401526,
                                    P=3,
                                    start_age=4,
                                    Vmax_slope=1,
                                    units='m_per_s',
                                    diffusion_boost=0,
                                    diffusion_scale=1,
                                    sig_scale=1,
                                    c_scale=1,
                                    verbose=True):
    # Old parameters sigma=0.1999858740340303, c=0.9817751085550976,
    K = np.zeros(np.shape(H.data), dtype=np.float32)
    months = start_age
    age = months * 30 * 24 * 60 * 60
    for t in range(H.time.size):
        # Increase age in months if required, to incorporate appropriate Vmax
        # months in SEAPODYM are all assumed to be 30 days long
        #age = H.time[t] - H.time[0] + start_age*30*24*60*60 # this is for 'true' ageing
        age = (start_age + t) * 30 * 24 * 60 * 60
        if age - (months * 30 * 24 * 60 * 60) >= (30 * 24 * 60 * 60):
            months += 1
        if verbose:
            print('age in days = %s' % (age / (24 * 60 * 60)))
            print("Calculating diffusivity for fish aged %s months" % months)
        if units == 'nm_per_mon':
            Dmax = (np.power(
                GetLengthFromAge(months) *
                ((30 * 24 * 60 * 60) / 1852), 2) / 4) * timestep / (
                    60 * 60 * 24 * 30)  #vmax = L for diffusion
        else:
            Dmax = (np.power(GetLengthFromAge(months), 2) /
                    4) * timestep  #fixed b parameter for diffusion
        sig_D = sigma * Dmax
        for x in range(H.lon.size):
            for y in range(H.lat.size):
                K[t, y, x] = sig_scale * sig_D * (1 - c_scale * c * np.power(
                    H.data[t, y, x], P)) * diffusion_scale + diffusion_boost

    return Field('K',
                 K,
                 H.lon,
                 H.lat,
                 time=H.time,
                 interp_method='nearest',
                 allow_time_extrapolation=True)
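
The core of the loop above is the mapping from habitat quality H to diffusivity K. Rewritten as a vectorized helper for clarity (a sketch of the 'm_per_s' branch only; length_from_age stands in for the GetLengthFromAge helper defined elsewhere in the original code):

import numpy as np

def seapodym_diffusivity(H, months, timestep, sigma, c, P, length_from_age):
    # Dmax = L(months)^2 / 4 * dt, with vmax taken equal to the body length L
    Dmax = np.power(length_from_age(months), 2) / 4. * timestep
    # K = sigma * Dmax * (1 - c * H^P), as in the inner x/y loop above
    return sigma * Dmax * (1. - c * np.power(H, P))
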
Example #19
def test_fieldset_nonstandardtime(calendar, tmpdir, filename='test_nonstandardtime.nc', xdim=4, ydim=6):
    from cftime import DatetimeNoLeap, Datetime360Day
    filepath = tmpdir.join(filename)

    if calendar == 'noleap':
        dates = [DatetimeNoLeap(0, m, 1) for m in range(1, 13)]
    else:
        dates = [Datetime360Day(0, m, 1) for m in range(1, 13)]
    da = xr.DataArray(np.random.rand(12, xdim, ydim),
                      coords=[dates, range(xdim), range(ydim)],
                      dims=['time', 'lon', 'lat'], name='U')
    da.to_netcdf(str(filepath))

    dims = {'lon': 'lon', 'lat': 'lat', 'time': 'time'}
    field = Field.from_netcdf(filepath, 'U', dims)
    assert field.grid.time_origin.calendar == 'cftime'
Example #20
    def add_data(self, data, dimensions, transpose=True, mesh='spherical',
                 allow_time_extrapolation=True, **kwargs):
        """Initialise FieldSet object from raw data

        :param data: Dictionary mapping field names to numpy arrays.
               Note that at least a 'U' and 'V' numpy array need to be given
        :param dimensions: Dictionary mapping field dimensions (lon,
               lat, depth, time) to numpy arrays.
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param transpose: Boolean whether to transpose data on read-in
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
        """

        fields = {}
        for name, datafld in data.items():
            # Use dimensions[name] if dimensions is a dict of dicts
            dims = dimensions[name] if name in dimensions else dimensions

            lon = dims['lon']
            lat = dims['lat']
            depth = np.zeros(1, dtype=np.float32) if 'depth' not in dims else dims['depth']
            time = np.zeros(1, dtype=np.float64) if 'time' not in dims else dims['time']
            grid = RectilinearZGrid('auto_gen_grid', lon, lat, depth, time, mesh=mesh)

            fields[name] = Field(name, datafld, grid=grid, transpose=transpose,
                                 allow_time_extrapolation=allow_time_extrapolation, **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        if u:
            self.add_field(u)
        if v:
            self.add_field(v)

        for f in fields.values():
            self.add_field(f)
Example #21
    def from_xarray_dataset(cls, ds, variables, dimensions, indices=None, mesh='spherical', allow_time_extrapolation=None,
                            time_periodic=False, full_load=False, **kwargs):
        """Initialises FieldSet data from xarray Datasets.

        :param ds: xarray Dataset
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the xarray Dataset.
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        :param full_load: boolean whether to fully load the data or only pre-load them. (default: False)
               It is advised not to fully load the data, since in that case Parcels deals with
               a better memory management during particle set execution.
               full_load is however sometimes necessary for plotting the fields.
        """

        fields = {}
        for var, name in variables.items():

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            inds = indices[var] if (indices and var in indices) else indices

            fields[var] = Field.from_netcdf(None, ds[name], dimensions=dims, indices=inds, grid=None, mesh=mesh,
                                            allow_time_extrapolation=allow_time_extrapolation, var_name=var,
                                            time_periodic=time_periodic, full_load=full_load, **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
Example #22
    def from_netcdf(cls, filenames, variables, dimensions,
                    mesh='spherical', **kwargs):
        """Initialises grid data from files using NEMO conventions.

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files.
        :param variables: Dictionary mapping variables to variable
               names in the netCDF file(s).
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        """
        # Determine unit converters for all fields
        u_units, v_units = unit_converters(mesh)
        units = defaultdict(UnitConverter)
        units.update({'U': u_units, 'V': v_units})
        fields = {}
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            basepath = path.local(filenames[var])
            paths = [path.local(fp) for fp in glob(str(basepath))]
            if len(paths) == 0:
                raise IOError("Grid files not found: %s" % str(basepath))
            for fp in paths:
                if not fp.exists():
                    raise IOError("Grid file not found: %s" % str(fp))
            dimensions['data'] = name
            fields[var] = Field.from_netcdf(var, dimensions, paths,
                                            units=units[var], **kwargs)
        u = fields.pop('U')
        v = fields.pop('V')
        return cls(u, v, u.depth, u.time, fields=fields)
Example #23
def Create_SEAPODYM_Grid(forcing_files, startD=None, startD_dims=None,
                         forcing_vars={'U': 'u', 'V': 'v', 'H': 'habitat'},
                         forcing_dims={'lon': 'lon', 'lat': 'lat', 'time': 'time'}, K_timestep=30*24*60*60,
                         diffusion_file=None, field_units='m_per_s',
                         diffusion_dims={'lon': 'longitude', 'lat': 'latitude', 'time': 'time', 'data': 'skipjack_diffusion_rate'},
                         scaleH=None, start_age=4, output_density=False, diffusion_scale=1, sig_scale=1, c_scale=1,
                         verbose=False):
    if startD_dims is None:
        startD_dims = forcing_dims
    if verbose:
        print("Creating Grid\nLoading files:")
        for f in forcing_files.values():
            print(f)

    grid = FieldSet.from_netcdf(filenames=forcing_files, variables=forcing_vars, dimensions=forcing_dims,
                                vmin=-200, vmax=1e34, interp_method='nearest', allow_time_extrapolation=True)
    print(forcing_files['U'])
    Depthdata = Field.from_netcdf('u', dimensions={'lon': 'longitude', 'lat': 'latitude', 'time': 'time', 'depth': 'depth'},
                                  filenames=forcing_files['U'], allow_time_extrapolation=True)
    Depthdata.name = 'bathy'
    grid.add_field(Depthdata)

    if startD is not None:
        grid.add_field(Field.from_netcdf('Start', dimensions=startD_dims,
                                         filenames=path.local(startD), vmax=1000,
                                         interp_method='nearest', allow_time_extrapolation=True))

    if output_density:
        # Add a density field that will hold particle densities
        grid.add_field(Field('Density', np.full([grid.U.lon.size, grid.U.lat.size, grid.U.time.size], -1, dtype=np.float64),
                             grid.U.lon, grid.U.lat, depth=grid.U.depth, time=grid.U.time, transpose=True))

    LandMask = Create_Landmask(grid)
    grid.add_field(LandMask)
    grid.U.data[grid.U.data > 1e5] = 0
    grid.V.data[grid.V.data > 1e5] = 0
    grid.H.data[grid.H.data > 1e5] = 0
    # Scale the H field between zero and one if required
    if scaleH is not None:
        grid.H.data /= np.max(grid.H.data)
        grid.H.data[np.where(grid.H.data < 0)] = 0
        grid.H.data *= scaleH

    # Offline calculate the 'diffusion' grid as a function of habitat
    if verbose:
        print("Creating Diffusion Field")
    if diffusion_file is None:
        K = Create_SEAPODYM_Diffusion_Field(grid.H, timestep=K_timestep, start_age=start_age,
                                            diffusion_scale=diffusion_scale, units=field_units,
                                            sig_scale=sig_scale, c_scale=c_scale, verbose=verbose)
    else:
        if verbose:
            print("Loading from file: %s" % diffusion_file)
        K = Field.from_netcdf('K', diffusion_dims, [diffusion_file], interp_method='nearest', vmax=1000000)
        if field_units == 'nm2_per_mon':
            K.data *= 1.30427305

    grid.add_field(K)

    if verbose:
        print("Calculating H Gradient Fields")
    dHdx, dHdy = getGradient(grid.H, grid.LandMask)
    grid.add_field(dHdx)
    grid.add_field(dHdy)
    #gradients = grid.H.gradient()
    #for field in gradients:
    #    grid.add_field(field)

    if verbose:
        print("Calculating Taxis Fields")
    T_Fields = Create_SEAPODYM_Taxis_Fields(grid.dH_dx, grid.dH_dy, start_age=start_age, units=field_units)
    for field in T_Fields:
        grid.add_field(field)

    if verbose:
        print("Creating combined Taxis and Advection field")
    grid.add_field(Field('TU', grid.U.data+grid.Tx.data, grid.U.lon, grid.U.lat, time=grid.U.time, vmin=-200, vmax=1e34,
                         interp_method='nearest', allow_time_extrapolation=True))  # units=unit_converters('spherical')[0]
    grid.add_field(Field('TV', grid.V.data+grid.Ty.data, grid.U.lon, grid.U.lat, time=grid.U.time, vmin=-200, vmax=1e34,
                         interp_method='nearest', allow_time_extrapolation=True))  # units=unit_converters('spherical')[1]


    if verbose:
        print("Calculating K Gradient Fields")
    #K_gradients = grid.K.gradient()
    #for field in K_gradients:
    #    grid.add_field(field)
    dKdx, dKdy = getGradient(grid.K, grid.LandMask, False)
    grid.add_field(dKdx)
    grid.add_field(dKdy)
    grid.K.interp_method = grid.dK_dx.interp_method = grid.dK_dy.interp_method = grid.H.interp_method = \
                           grid.dH_dx.interp_method = grid.dH_dy.interp_method = grid.U.interp_method = grid.V.interp_method = 'nearest'

    #grid.K.allow_time_extrapolation = grid.dK_dx.allow_time_extrapolation = grid.dK_dy.allow_time_extrapolation = \
    #                                  grid.H.allow_time_extrapolation = grid.dH_dx.allow_time_extrapolation = \
    #                                  grid.dH_dy.allow_time_extrapolation = grid.U.allow_time_extrapolation = \
    #                                  grid.V.allow_time_extrapolation = True

    return grid
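
A hypothetical invocation of the helper above; the file paths are invented, and the default forcing_vars/forcing_dims describe how the variables are expected to be named inside them:

forcing = {'U': 'forcing/u.nc', 'V': 'forcing/v.nc', 'H': 'forcing/habitat.nc'}
grid = Create_SEAPODYM_Grid(forcing, K_timestep=30*24*60*60, start_age=4, verbose=True)
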
Example #24
    def from_netcdf(cls,
                    filenames,
                    variables,
                    dimensions,
                    indices=None,
                    mesh='spherical',
                    timestamps=None,
                    allow_time_extrapolation=None,
                    time_periodic=False,
                    deferred_load=True,
                    **kwargs):
        """Initialises FieldSet object from NetCDF files

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files
               or be a list of files.
               filenames can be a list [files], a dictionary {var:[files]},
               a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),
               or a dictionary of dictionaries {var:{dim:[files]}}.
               time values are in filenames[data]
        :param variables: Dictionary mapping variables to variable names in the netCDF file(s).
               Note that the built-in Advection kernels assume that U and V are in m/s
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
               Note that negative indices are not allowed.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation, see also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param timestamps: A numpy array containing the timestamps for each of the files in filenames.
               Default is None if dimensions includes time.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        :param deferred_load: boolean whether to only pre-load data (in deferred mode) or
               fully load them (default: True). It is advised to deferred load the data, since in
               that case Parcels deals with a better memory management during particle set execution.
               deferred_load=False is however sometimes necessary for plotting the fields.
        :param netcdf_engine: engine to use for netcdf reading in xarray. Default is 'netcdf',
               but in cases where this doesn't work, setting netcdf_engine='scipy' could help
        """
        # Ensure that times are not provided both in netcdf file and in 'timestamps'.
        if timestamps is not None and 'time' in dimensions:
            logger.warning_once(
                "Time already provided, defaulting to dimensions['time'] over timestamps."
            )
            timestamps = None

        # Typecast timestamps to numpy array & correct shape.
        if timestamps is not None:
            if isinstance(timestamps, list):
                timestamps = np.array(timestamps)
            timestamps = np.reshape(timestamps, [timestamps.size, 1])

        fields = {}
        if 'creation_log' not in kwargs.keys():
            kwargs['creation_log'] = 'from_netcdf'
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            paths = filenames[var] if type(
                filenames) is dict and var in filenames else filenames
            if type(paths) is not dict:
                paths = cls.parse_wildcards(paths, filenames, var)
            else:
                for dim, p in paths.items():
                    paths[dim] = cls.parse_wildcards(p, filenames, var)

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            cls.checkvaliddimensionsdict(dims)
            inds = indices[var] if (indices and var in indices) else indices

            grid = None
            # check if grid has already been processed (i.e. if other fields have same filenames, dimensions and indices)
            for procvar, _ in fields.items():
                procdims = dimensions[
                    procvar] if procvar in dimensions else dimensions
                procinds = indices[procvar] if (
                    indices and procvar in indices) else indices
                procpaths = filenames[procvar] if isinstance(
                    filenames, dict) and procvar in filenames else filenames
                nowpaths = filenames[var] if isinstance(
                    filenames, dict) and var in filenames else filenames
                if procdims == dims and procinds == inds and procpaths == nowpaths:
                    sameGrid = False
                    if ((not isinstance(filenames, dict))
                            or filenames[procvar] == filenames[var]):
                        sameGrid = True
                    elif isinstance(filenames[procvar], dict):
                        sameGrid = True
                        for dim in ['lon', 'lat', 'depth']:
                            if dim in dimensions:
                                sameGrid *= filenames[procvar][
                                    dim] == filenames[var][dim]
                    if sameGrid:
                        grid = fields[procvar].grid
                        kwargs['dataFiles'] = fields[procvar].dataFiles
                        break
            fields[var] = Field.from_netcdf(
                paths, (var, name),
                dims,
                inds,
                grid=grid,
                mesh=mesh,
                timestamps=timestamps,
                allow_time_extrapolation=allow_time_extrapolation,
                time_periodic=time_periodic,
                deferred_load=deferred_load,
                **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
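
When the files themselves carry no usable time dimension, the timestamps argument documented above supplies one externally. A sketch with hypothetical daily files (note that dimensions deliberately has no 'time' key; this version reshapes a flat list of timestamps itself):

import numpy as np
from parcels import FieldSet

filenames = {'U': 'daily/u_*.nc', 'V': 'daily/v_*.nc'}   # hypothetical paths
variables = {'U': 'u', 'V': 'v'}
dimensions = {'lon': 'lon', 'lat': 'lat'}
timestamps = [np.datetime64('2000-01-01') + np.timedelta64(d, 'D') for d in range(3)]
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions, timestamps=timestamps)
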
Example #25
    def from_data(cls,
                  data_u,
                  lon_u,
                  lat_u,
                  data_v,
                  lon_v,
                  lat_v,
                  depth=None,
                  time=None,
                  field_data={},
                  transpose=True,
                  mesh='spherical',
                  allow_time_extrapolation=True,
                  **kwargs):
        """Initialise Grid object from raw data

        :param data_u: Zonal (U) velocity data
        :param lon_u: Longitude coordinates of the U data
        :param lat_u: Latitude coordinates of the U data
        :param data_v: Meridional (V) velocity data
        :param lon_v: Longitude coordinates of the V data
        :param lat_v: Latitude coordinates of the V data
        :param depth: Depth coordinates of all :class:`Field` objects on the grid
        :param time: Time coordinates of all :class:`Field` objects on the grid
        :param field_data: Dictionary of extra fields (name, data)
        :param transpose: Boolean whether to transpose data on read-in
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
        """

        depth = np.zeros(1, dtype=np.float32) if depth is None else depth
        time = np.zeros(1, dtype=np.float64) if time is None else time
        u_units, v_units = unit_converters(mesh)
        # Create velocity fields
        ufield = Field('U',
                       data_u,
                       lon_u,
                       lat_u,
                       depth=depth,
                       time=time,
                       transpose=transpose,
                       units=u_units,
                       allow_time_extrapolation=allow_time_extrapolation,
                       **kwargs)
        vfield = Field('V',
                       data_v,
                       lon_v,
                       lat_v,
                       depth=depth,
                       time=time,
                       transpose=transpose,
                       units=v_units,
                       allow_time_extrapolation=allow_time_extrapolation,
                       **kwargs)
        # Create additional data fields
        fields = {}
        for name, data in field_data.items():
            fields[name] = Field(
                name,
                data,
                lon_v,
                lat_u,
                depth=depth,
                time=time,
                transpose=transpose,
                allow_time_extrapolation=allow_time_extrapolation,
                **kwargs)
        return cls(ufield, vfield, fields=fields)
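
A sketch of this staggered from_data variant, in which U and V carry their own coordinate axes (all arrays hypothetical; the classmethod is assumed to live on the old parcels Grid class, and with transpose=True the data are laid out [xdim, ydim]):

import numpy as np
from parcels import Grid  # assuming the old Grid API these examples use

xdim, ydim = 4, 3
lon_u = np.linspace(0.5, 3.5, xdim, dtype=np.float32)   # U points shifted in lon
lat_u = np.linspace(0., 2., ydim, dtype=np.float32)
lon_v = np.linspace(0., 3., xdim, dtype=np.float32)
lat_v = np.linspace(0.5, 2.5, ydim, dtype=np.float32)   # V points shifted in lat
u = np.zeros((xdim, ydim), dtype=np.float32)
v = np.zeros((xdim, ydim), dtype=np.float32)
grid = Grid.from_data(u, lon_u, lat_u, v, lon_v, lat_v)
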
Example #26
    def from_netcdf(cls,
                    filenames,
                    variables,
                    dimensions,
                    indices=None,
                    mesh='spherical',
                    allow_time_extrapolation=None,
                    time_periodic=False,
                    full_load=False,
                    **kwargs):
        """Initialises FieldSet object from NetCDF files

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files,
               or be a list of files.
        :param variables: Dictionary mapping variables to variable
               names in the netCDF file(s).
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
               This flag overrides allow_time_extrapolation and sets it to False
        :param full_load: boolean whether to fully load the data or only pre-load them. (default: False)
               It is advised not to fully load the data, since in that case Parcels deals with
               a better memory management during particle set execution.
               full_load is however sometimes necessary for plotting the fields.
        """

        fields = {}
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            paths = filenames[var] if type(filenames) is dict else filenames
            if not isinstance(paths, list):
                paths = sorted(glob(str(paths)))
            if len(paths) == 0:
                raise IOError("FieldSet files not found: %s" % str(paths))
            for fp in paths:
                if not path.exists(fp):
                    raise IOError("FieldSet file not found: %s" % str(fp))

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            dims['data'] = name
            inds = indices[var] if (indices and var in indices) else indices

            grid = None
            # check if grid has already been processed (i.e. if other fields have same filenames, dimensions and indices)
            for procvar, _ in fields.items():
                procdims = dimensions[
                    procvar] if procvar in dimensions else dimensions
                procinds = indices[procvar] if (
                    indices and procvar in indices) else indices
                if (type(filenames) is not dict or filenames[procvar] == filenames[var]) \
                        and procdims == dims and procinds == inds:
                    grid = fields[procvar].grid
                    kwargs['dataFiles'] = fields[procvar].dataFiles
                    break
            fields[var] = Field.from_netcdf(
                paths,
                var,
                dims,
                inds,
                grid=grid,
                mesh=mesh,
                allow_time_extrapolation=allow_time_extrapolation,
                time_periodic=time_periodic,
                full_load=full_load,
                **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
Example #27
    def from_netcdf(cls,
                    filenames,
                    variables,
                    dimensions,
                    indices=None,
                    mesh='spherical',
                    allow_time_extrapolation=None,
                    time_periodic=False,
                    full_load=False,
                    **kwargs):
        """Initialises FieldSet object from NetCDF files

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files,
               or be a list of files.
               filenames can be a list [files], a dictionary {var:[files]},
               a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),
               or a dictionary of dictionaries {var:{dim:[files]}}.
               time values are in filenames[data]
        :param variables: Dictionary mapping variables to variable
               names in the netCDF file(s).
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
               Note that negative indices are not allowed.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot).
               Default is False if dimensions includes time, else True.
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet.
               This flag overrides allow_time_extrapolation and sets it to False.
        :param full_load: boolean whether to fully load the data or only pre-load them (default: False).
               It is advised not to fully load the data, since Parcels then manages
               memory more efficiently during particle set execution.
               full_load=True is, however, sometimes necessary for plotting the fields.
        :param netcdf_engine: engine to use for netcdf reading in xarray. Default is 'netcdf4',
               but in cases where this doesn't work, setting netcdf_engine='scipy' could help
        """

        fields = {}
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            paths = filenames[var] if isinstance(filenames, dict) and var in filenames else filenames
            if not isinstance(paths, dict):
                paths = cls.parse_wildcards(paths, filenames, var)
            else:
                for dim, p in paths.items():
                    paths[dim] = cls.parse_wildcards(p, filenames, var)

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            dims['data'] = name
            inds = indices[var] if (indices and var in indices) else indices

            grid = None
            # check if grid has already been processed (i.e. if other fields have same filenames, dimensions and indices)
            for procvar, _ in fields.items():
                procdims = dimensions[procvar] if procvar in dimensions else dimensions
                procinds = indices[procvar] if (indices and procvar in indices) else indices
                if procdims == dims and procinds == inds:
                    sameGrid = False
                    if not isinstance(filenames, dict) or filenames[procvar] == filenames[var]:
                        sameGrid = True
                    elif isinstance(filenames[procvar], dict):
                        # Grids match only if every coordinate dimension is read from the same files
                        sameGrid = all(filenames[procvar][dim] == filenames[var][dim]
                                       for dim in ['lon', 'lat', 'depth', 'data']
                                       if dim in dimensions)
                    if sameGrid:
                        grid = fields[procvar].grid
                        kwargs['dataFiles'] = fields[procvar].dataFiles
                        break
            fields[var] = Field.from_netcdf(
                paths,
                var,
                dims,
                inds,
                grid=grid,
                mesh=mesh,
                allow_time_extrapolation=allow_time_extrapolation,
                time_periodic=time_periodic,
                full_load=full_load,
                **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
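
A minimal usage sketch of the dict-of-dicts filenames form accepted above; the file
and variable names here are hypothetical:

filenames = {'U': {'lon': 'mesh.nc', 'lat': 'mesh.nc', 'data': 'u_*.nc'},
             'V': {'lon': 'mesh.nc', 'lat': 'mesh.nc', 'data': 'v_*.nc'}}
variables = {'U': 'uo', 'V': 'vo'}
dimensions = {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)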
Beispiel #28
0
def Field_from_DYM(filename,
                   name=None,
                   xlim=None,
                   ylim=None,
                   fromyear=None,
                   frommonth=0,
                   toyear=None,
                   tomonth=0):
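    """Read a SEAPODYM .dym binary file into a parcels Field.

    Optional spatial subsetting via xlim/ylim (degrees) and temporal subsetting
    via fromyear/frommonth and toyear/tomonth; velocities are converted from
    nautical miles per month to m/s on read.
    """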

    if name is None:
        name = filename.split('/')[-1]
    print("Name = %s" % name)

    def lat_to_j(lat, latmax, deltaY):
        # Convert a latitude (degrees) to a 0-based row index
        j = int((latmax - lat) / deltaY + 1.5)
        return j - 1

    def lon_to_i(lon, lonmin, deltaX):
        # Convert a longitude (degrees, wrapped into [0, 360)) to a 0-based column index
        if lon < 0:
            lon = lon + 360
        i = int((lon - lonmin) / deltaX + 1.5)
        return i - 1

    def get_tcount_start(zlevel, nlevel, date):
        # Index of the first time level at or after `date`
        n = 0
        while date > zlevel[n] and n < (nlevel - 1):
            n += 1
        return n

    def get_tcount_end(zlevel, nlevel, date):
        # Index of the last time level at or before `date`
        n = nlevel - 1
        while date < zlevel[n] and n > 0:
            n -= 1
        return n

    class DymReader:
        # Map well-known type names into struct format characters.
        typeNames = {
            'int32': 'i',
            'uint32': 'I',
            'int64': 'q',
            'uint64': 'Q',
            'float': 'f',
            'double': 'd',
            'char': 's'
        }

        DymInputSize = 4

        def __init__(self, fileName):
            self.file = open(fileName, 'rb')

        def read(self, typeName):
            typeFormat = DymReader.typeNames[typeName.lower()]
            scale = 1
            if typeFormat == 's':
                scale = self.DymInputSize
            typeSize = struct.calcsize(typeFormat) * scale
            value = self.file.read(typeSize)
            decoded = list(struct.unpack(typeFormat * scale, value))

            if typeFormat == 's':
                # struct.unpack yields one single-byte item per 's'; join them into a string
                return b''.join(decoded).decode('ascii')
            return decoded[0]

        def move(self, pos):
            # Skip `pos` bytes relative to the current file position
            self.file.seek(pos, 1)

        def close(self):
            self.file.close()

    if xlim is not None:
        x1 = xlim[0]
        x2 = xlim[1]
    else:
        x1 = x2 = 0

    if ylim is not None:
        y1 = ylim[0]
        y2 = ylim[1]
    else:
        y1 = y2 = 0

    file = DymReader(filename)

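    # .dym layout, as assumed by this reader: a scalar header (format id,
    # function id, min/max values, nlon, nlat, nlevel, start/end dates), then
    # the lon and lat coordinate grids, the time levels, an nlat x nlon land
    # mask, and finally nlevel frames of nlat x nlon float32 values.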
    # Get header
    print("-- Reading .dym file --")
    idformat = file.read('char')
    print("ID Format = %s" % idformat)
    idfunc = file.read('int32')
    print("IF Function = %s" % idfunc)
    minval = file.read('float')
    print("minval = %s" % minval)
    maxval = file.read('float')
    print("maxval = %s" % maxval)
    nlon = file.read('int32')
    print("nlon = %s" % nlon)
    nlat = file.read('int32')
    print("nlat = %s" % nlat)
    nlevel = file.read('int32')
    print("nlevel = %s" % nlevel)
    startdate = file.read('float')
    print("startdate = %s" % startdate)
    enddate = file.read('float')
    print("enddate = %s" % enddate)

    if fromyear is None:
        fromyear = np.floor(startdate)
    if toyear is None:
        toyear = np.floor(enddate)

    x = np.zeros([nlat, nlon], dtype=np.float32)
    y = np.zeros([nlat, nlon], dtype=np.float32)

    for i in range(nlat):
        for j in range(nlon):
            x[i, j] = file.read('float')

    for i in range(nlat):
        for j in range(nlon):
            y[i, j] = file.read('float')

    dx = x[0, 1] - x[0, 0]
    dy = y[0, 0] - y[1, 0]

    i1 = lon_to_i(x1, x[0, 0], dx)
    i2 = lon_to_i(x2, x[0, 0], dx)
    j1 = lat_to_j(y2, y[0, 0], dy)
    j2 = lat_to_j(y1, y[0, 0], dy)
    if xlim is None:
        i1 = 0
        i2 = nlon
    if ylim is None:
        j1 = 0
        j2 = nlat
    nlon_new = i2 - i1
    nlat_new = j2 - j1

    for j in range(nlat_new):
        for i in range(nlon_new):
            x[j, i] = x[j + j1, i + i1]
            y[j, i] = y[j + j1, i + i1]

    mask = np.zeros([nlat_new, nlon_new], dtype=np.float32)

    zlevel = np.zeros(nlevel, dtype=np.float32)
    for n in range(nlevel):
        zlevel[n] = file.read('float')
    firstdate = fromyear + (frommonth - 1) / 12  # start at the beginning of a given month
    lastdate = toyear + tomonth / 12  # stop at the end of a given month
    ts1 = get_tcount_start(zlevel, nlevel, firstdate)
    ts2 = get_tcount_end(zlevel, nlevel, lastdate)
    nlevel_new = ts2 - ts1 + 1

    zlevel_new = np.zeros(nlevel_new, dtype=np.float32)
    for n in range(nlevel_new):
        zlevel_new[n] = zlevel[n + ts1]

    for j in range(nlat):
        for i in range(nlon):
            temp = file.read('int32')
            if i2 > i >= i1 and j2 > j >= j1:
                mask[j - j1, i - i1] = temp

    data = np.zeros([nlevel_new, nlat_new, nlon_new], dtype=np.float32)
    t_count = ts1
    if t_count < 0:
        t_count = 0

    print("Start reading data time series: skipping %s time steps, reading %s time steps"
          % (t_count, nlevel_new))

    nbytetoskip = nlon * nlat * t_count * 4  # 4 bytes per float32 value
    file.move(nbytetoskip)

    for n in range(nlevel_new):
        for j in range(nlat)[::-1]:
            for i in range(nlon):
                val = file.read('float')
                if i2 > i >= i1 and j2 > j >= j1:
                    # Convert from nautical miles per month to m/s (1852 m per nm, 30-day month)
                    data[n, j - j1, i - i1] = val * 1852 / (30 * 24 * 60 * 60)

    file.close()

    # Create mid-month datetime objects for the time index
    times = [0] * nlevel_new
    for t in range(nlevel_new):
        times[t] = datetime(int(fromyear + np.floor(t / 12)), (t % 12) + 1, 15, 0, 0, 0)
    origin = datetime(1970, 1, 1, 0, 0, 0)
    times_in_s = np.array([(t - origin).total_seconds() for t in times], dtype=np.float32)

    return Field(name,
                 data,
                 lon=x[0, :],
                 lat=y[:, 0][::-1],
                 time=times_in_s,
                 time_origin=origin)
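
A minimal usage sketch of Field_from_DYM; the file name and bounds are hypothetical:

u = Field_from_DYM('skipjack_u.dym', name='U', xlim=(120, 290), ylim=(-45, 45),
                   fromyear=2000, frommonth=1, toyear=2005, tomonth=12)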
Beispiel #29
0
def Create_SEAPODYM_Grid(forcing_files,
                         startD=None,
                         startD_dims=None,
                         forcing_vars={
                             'U': 'u',
                             'V': 'v',
                             'H': 'habitat'
                         },
                         forcing_dims={
                             'lon': 'lon',
                             'lat': 'lat',
                             'time': 'time'
                         },
                         K_timestep=30 * 24 * 60 * 60,
                         diffusion_file=None,
                         diffusion_units='m2_per_s',  # assumed extra parameter; 'nm2_per_mon' triggers rescaling below
                         field_units='m_per_s',
                         diffusion_dims={
                             'lon': 'longitude',
                             'lat': 'latitude',
                             'time': 'time',
                             'data': 'skipjack_diffusion_rate'
                         },
                         scaleH=None,
                         start_age=4,
                         output_density=False,
                         diffusion_scale=1,
                         sig_scale=1,
                         c_scale=1,
                         verbose=False):
    if startD_dims is None:
        startD_dims = forcing_dims
    if verbose:
        print("Creating Grid\nLoading files:")
        for f in forcing_files.values():
            print(f)

    grid = FieldSet.from_netcdf(filenames=forcing_files,
                                variables=forcing_vars,
                                dimensions=forcing_dims,
                                vmin=-200,
                                vmax=1e34,
                                interp_method='nearest',
                                allow_time_extrapolation=True)
    print(forcing_files['U'])
    Depthdata = Field.from_netcdf('u',
                                  dimensions={
                                      'lon': 'longitude',
                                      'lat': 'latitude',
                                      'time': 'time',
                                      'depth': 'depth'
                                  },
                                  filenames=forcing_files['U'],
                                  allow_time_extrapolation=True)
    Depthdata.name = 'bathy'
    grid.add_field(Depthdata)

    if startD is not None:
        grid.add_field(
            Field.from_netcdf('Start',
                              dimensions=startD_dims,
                              filenames=path.local(startD),
                              vmax=1000,
                              interp_method='nearest',
                              allow_time_extrapolation=True))

    if output_density:
        # Add a density field that will hold particle densities
        grid.add_field(
            Field('Density',
                  np.full([grid.U.lon.size, grid.U.lat.size, grid.U.time.size],
                          -1,
                          dtype=np.float64),
                  grid.U.lon,
                  grid.U.lat,
                  depth=grid.U.depth,
                  time=grid.U.time,
                  transpose=True))

    LandMask = Create_Landmask(grid)
    grid.add_field(LandMask)
    grid.U.data[grid.U.data > 1e5] = 0
    grid.V.data[grid.V.data > 1e5] = 0
    grid.H.data[grid.H.data > 1e5] = 0
    # Scale the H field between zero and one if required
    if scaleH is not None:
        grid.H.data /= np.max(grid.H.data)
        grid.H.data[np.where(grid.H.data < 0)] = 0
        grid.H.data *= scaleH

    # Offline calculate the 'diffusion' grid as a function of habitat
    if verbose:
        print("Creating Diffusion Field")
    if diffusion_file is None:
        K = Create_SEAPODYM_Diffusion_Field(grid.H,
                                            timestep=K_timestep,
                                            start_age=start_age,
                                            diffusion_scale=diffusion_scale,
                                            units=field_units,
                                            sig_scale=sig_scale,
                                            c_scale=c_scale,
                                            verbose=verbose)
    else:
        if verbose:
            print("Loading from file: %s" % diffusion_file)
        K = Field.from_netcdf('K',
                              diffusion_dims, [diffusion_file],
                              interp_method='nearest',
                              vmax=1000000)
        if diffusion_units == 'nm2_per_mon':
            K.data *= 1.30427305  # convert nm^2/month to m^2/s

    grid.add_field(K)

    if verbose:
        print("Calculating H Gradient Fields")
    dHdx, dHdy = getGradient(grid.H, grid.LandMask)
    grid.add_field(dHdx)
    grid.add_field(dHdy)

    if verbose:
        print("Calculating Taxis Fields")
    T_Fields = Create_SEAPODYM_Taxis_Fields(grid.dH_dx,
                                            grid.dH_dy,
                                            start_age=start_age,
                                            units=field_units)
    for field in T_Fields:
        grid.add_field(field)

    if verbose:
        print("Creating combined Taxis and Advection field")
    grid.add_field(
        Field('TU',
              grid.U.data + grid.Tx.data,
              grid.U.lon,
              grid.U.lat,
              time=grid.U.time,
              vmin=-200,
              vmax=1e34,
              interp_method='nearest',
              allow_time_extrapolation=True)
    )
    grid.add_field(
        Field('TV',
              grid.V.data + grid.Ty.data,
              grid.U.lon,
              grid.U.lat,
              time=grid.U.time,
              vmin=-200,
              vmax=1e34,
              interp_method='nearest',
              allow_time_extrapolation=True)
    )

    if verbose:
        print("Calculating K Gradient Fields")
    dKdx, dKdy = getGradient(grid.K, grid.LandMask, False)
    grid.add_field(dKdx)
    grid.add_field(dKdy)
    for fld in (grid.K, grid.dK_dx, grid.dK_dy, grid.H,
                grid.dH_dx, grid.dH_dy, grid.U, grid.V):
        fld.interp_method = 'nearest'

    return grid
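
A minimal usage sketch of Create_SEAPODYM_Grid, matching the default forcing_vars
keys 'U', 'V' and 'H'; the file names are hypothetical:

forcing_files = {'U': 'seapodym_u.nc', 'V': 'seapodym_v.nc', 'H': 'seapodym_habitat.nc'}
grid = Create_SEAPODYM_Grid(forcing_files, verbose=True)
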
def createSimpleGrid(x, y, time):
    """Create a (time, x, y) array whose upper triangle is 1 at every time step."""
    field = np.zeros((time.size, x, y), dtype=np.float32)
    utri = np.triu_indices(n=x, m=y)  # indices of the upper triangle
    for t in range(time.size):
        temp = np.zeros((x, y), dtype=np.float32)
        temp[utri] = 1
        field[t, :, :] = np.reshape(temp.T, np.shape(field[t, :, :]))

    return field

if __name__ == "__main__":
    x = 4
    y = 6
    time = np.linspace(0, 2, 3)
    field = Field("Test", data=createSimpleGrid(x, y, time), time=time, lon=np.linspace(0, x-1, x),
                  lat=np.linspace(-y/2, y/2-1, y))
    print("          ----- Raw Field Data -----")
    print(np.round(field.data[0, :, :], 0))
    grad_fields = field.gradient()
    # Use numpy gradient function for comparison, using fixed spacing of latitudinal cell distance
    r = 6.371e6
    deg2rd = np.pi / 180
    numpy_grad_fields = np.gradient(np.transpose(field.data[0, :, :]), (r * np.diff(field.lat) * deg2rd)[0])
    print("          ----- Field Gradient dx -----")
    print(grad_fields[0].data[0, :, :])
    print("          ----- Field Gradient dx (numpy, will be different) -----")
    print(np.array(np.transpose(numpy_grad_fields[0])))
    print("          ----- Field Gradient dy -----")
    print(grad_fields[1].data[0, :, :])
    print("          ----- Field Gradient dy (numpy, should be the same) -----")
    print(np.array(np.transpose(numpy_grad_fields[1])))