Example #1
    def __init__(self,
                 lon,
                 lat,
                 depth,
                 time=None,
                 time_origin=None,
                 mesh='flat'):
        super(CurvilinearSGrid, self).__init__(lon, lat, time, time_origin,
                                               mesh)
        assert (isinstance(depth, np.ndarray) and len(depth.shape)
                in [3, 4]), 'depth is not a 3D or 4D numpy array'

        self.gtype = GridCode.CurvilinearSGrid
        self.depth = depth
        self.zdim = self.depth.shape[-3]
        self.z4d = len(self.depth.shape) == 4
        if self.z4d:
            # self.depth.shape[0] is 0 for S grids loaded from netcdf file
            assert self.tdim == self.depth.shape[0] or self.depth.shape[
                0] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.xdim == self.depth.shape[-1] or self.depth.shape[
                -1] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[-2] or self.depth.shape[
                -2] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
        else:
            assert self.xdim == self.depth.shape[
                -1], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[
                -2], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
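
The assertions above encode the expected memory layout of the S-grid depth array: [tdim, zdim, ydim, xdim] when it is time-dependent (4-D) and [zdim, ydim, xdim] otherwise. Below is a minimal numpy sketch of building a depth array in that layout for a hypothetical curvilinear grid; all sizes and values are made up for illustration.

import numpy as np

# Hypothetical dimension sizes (illustration only)
tdim, zdim, ydim, xdim = 2, 10, 30, 40

# Curvilinear grids carry 2-D lon/lat arrays
lon = np.zeros((ydim, xdim), dtype=np.float32)
lat = np.zeros((ydim, xdim), dtype=np.float32)

# Time-varying S-levels: one depth value per (t, z, y, x) point
depth = np.empty((tdim, zdim, ydim, xdim), dtype=np.float32)
for k in range(zdim):
    depth[:, k, :, :] = -10.0 * k   # e.g. one layer every 10 m, constant in time here

assert len(depth.shape) in [3, 4] and depth.shape == (tdim, zdim, ydim, xdim)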
Example #2
 def check_fieldsets_in_kernels(self, pyfunc):
     """
     Check the integrity of the fieldset with the kernels.
     This function is to be called from the derived class when setting up the 'pyfunc'.
     """
     if self.fieldset is not None:
         if pyfunc is AdvectionRK4_3D:
             warning = False
             if isinstance(self._fieldset.W, Field) and self._fieldset.W.creation_log != 'from_nemo' and \
                self._fieldset.W._scaling_factor is not None and self._fieldset.W._scaling_factor > 0:
                 warning = True
             if type(self._fieldset.W) in [SummedField, NestedField]:
                 for f in self._fieldset.W:
                     if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:
                         warning = True
             if warning:
                 logger.warning_once(
                     'Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n'
                     '  If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)'
                 )
         elif pyfunc is AdvectionAnalytical:
             if self._ptype.uses_jit:
                 raise NotImplementedError(
                     'Analytical Advection only works in Scipy mode')
             if self._fieldset.U.interp_method != 'cgrid_velocity':
                 raise NotImplementedError(
                     'Analytical Advection only works with C-grids')
             if self._fieldset.U.grid.gtype not in [
                     GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid
             ]:
                 raise NotImplementedError(
                     'Analytical Advection only works with Z-grids in the vertical'
                 )
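
If the warning above applies to your data (W stored positive upward while z increases downward), the field can be re-oriented before running AdvectionRK4_3D. A minimal sketch, assuming a small hand-built FieldSet on a flat mesh; the dimension sizes and values are illustrative only.

import numpy as np
from parcels import FieldSet

# Hypothetical 3-D velocity data on a flat mesh, shaped [zdim, ydim, xdim]
dims = {'lon': np.linspace(0., 1e4, 5),
        'lat': np.linspace(0., 1e4, 4),
        'depth': np.linspace(0., 100., 3)}
data = {'U': np.ones((3, 4, 5)),
        'V': np.zeros((3, 4, 5)),
        'W': np.ones((3, 4, 5))}   # W assumed positive upward in this made-up dataset
fieldset = FieldSet.from_data(data, dims, mesh='flat')

# Re-orient W so it is positive towards increasing z, as AdvectionRK4_3D assumes
fieldset.W.set_scaling_factor(-1.)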
Example #3
    def __init__(self,
                 lon,
                 lat,
                 depth,
                 time=None,
                 time_origin=None,
                 mesh='flat'):
        super(RectilinearSGrid, self).__init__(lon, lat, time, time_origin,
                                               mesh)
        assert (isinstance(depth, np.ndarray) and len(depth.shape)
                in [3, 4]), 'depth is not a 3D or 4D numpy array'

        self.gtype = GridCode.RectilinearSGrid
        self.depth = depth
        self.zdim = self.depth.shape[-3]
        self.z4d = len(self.depth.shape) == 4
        if self.z4d:
            assert self.tdim == self.depth.shape[
                0], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.xdim == self.depth.shape[
                -1], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[
                -2], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
        else:
            assert self.xdim == self.depth.shape[
                -1], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[
                -2], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
        if self.lat_flipped:
            self.depth = np.flip(self.depth, axis=-2)
Example #4
    def advancetime(self, fieldset_new):
        """Replace oldest time on FieldSet with new FieldSet
        :param fieldset_new: FieldSet snapshot with which the oldest time has to be replaced"""

        logger.warning_once("Fieldset.advancetime() is deprecated.\n \
                             Parcels deals automatically with loading only 3 time steps simultaneously\
                             such that the total allocated memory remains limited."
                            )

        advance = 0
        for gnew in fieldset_new.gridset.grids:
            gnew.advanced = False

        for fnew in fieldset_new.get_fields():
            if isinstance(fnew, VectorField):
                continue
            f = getattr(self, fnew.name)
            gnew = fnew.grid
            if not gnew.advanced:
                g = f.grid
                advance2 = g.advancetime(gnew)
                if advance2 * advance < 0:
                    raise RuntimeError(
                        "Some Fields of the Fieldset are advanced forward and other backward"
                    )
                advance = advance2
                gnew.advanced = True
            f.advancetime(fnew, advance == 1)
Example #5
File: grid.py  Project: rabernat/parcels
    def add_periodic_halo(self, zonal, meridional, halosize=5):
        """Add a 'halo' to the Grid, through extending the Grid (and lon/lat)
        similarly to the halo created for the Fields

        :param zonal: Create a halo in zonal direction (boolean)
        :param meridional: Create a halo in meridional direction (boolean)
        :param halosize: size of the halo (in grid points). Default is 5 grid points
        """
        if zonal:
            lonshift = self.lon[:, -1] - 2 * self.lon[:, 0] + self.lon[:, 1]
            if not np.allclose(self.lon[:, 1]-self.lon[:, 0], self.lon[:, -1]-self.lon[:, -2]):
                logger.warning_once("The zonal halo is located at the east and west of current grid, with a dx = lon[:,1]-lon[:,0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[:,1]-lon[:,0] != lon[:,-1]-lon[:,-2]. Is the halo computed as you expect?")
            self.lon = np.concatenate((self.lon[:, -halosize:] - lonshift[:, np.newaxis],
                                       self.lon, self.lon[:, 0:halosize] + lonshift[:, np.newaxis]),
                                      axis=len(self.lon.shape)-1)
            self.lat = np.concatenate((self.lat[:, -halosize:],
                                       self.lat, self.lat[:, 0:halosize]),
                                      axis=len(self.lat.shape)-1)
            self.xdim = self.lon.shape[1]
            self.ydim = self.lat.shape[0]
            self.zonal_periodic = True
            self.zonal_halo = halosize
        if meridional:
            if not np.allclose(self.lat[1, :]-self.lat[0, :], self.lat[-1, :]-self.lat[-2, :]):
                logger.warning_once("The meridional halo is located at the north and south of current grid, with a dy = lat[1,:]-lat[0,:] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1,:]-lat[0,:] != lat[-1,:]-lat[-2,:]. Is the halo computed as you expect?")
            latshift = self.lat[-1, :] - 2 * self.lat[0, :] + self.lat[1, :]
            self.lat = np.concatenate((self.lat[-halosize:, :] - latshift[np.newaxis, :],
                                       self.lat, self.lat[0:halosize, :] + latshift[np.newaxis, :]),
                                      axis=len(self.lat.shape)-2)
            self.lon = np.concatenate((self.lon[-halosize:, :],
                                       self.lon, self.lon[0:halosize, :]),
                                      axis=len(self.lon.shape)-2)
            self.xdim = self.lon.shape[1]
            self.ydim = self.lat.shape[0]
            self.meridional_halo = halosize
Example #6
File: grid.py  Project: pocean23/parcels
    def add_periodic_halo(self, zonal, meridional, halosize=5):
        """Add a 'halo' to the Grid, through extending the Grid (and lon/lat)
        similarly to the halo created for the Fields

        :param zonal: Create a halo in zonal direction (boolean)
        :param meridional: Create a halo in meridional direction (boolean)
        :param halosize: size of the halo (in grid points). Default is 5 grid points
        """
        if zonal:
            lonshift = (self.lon[-1] - 2 * self.lon[0] + self.lon[1])
            if not np.allclose(self.lon[1]-self.lon[0], self.lon[-1]-self.lon[-2]):
                logger.warning_once("The zonal halo is located at the east and west of current grid, with a dx = lon[1]-lon[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[1]-lon[0] != lon[-1]-lon[-2]. Is the halo computed as you expect?")
            self.lon = np.concatenate((self.lon[-halosize:] - lonshift,
                                      self.lon, self.lon[0:halosize] + lonshift))
            self.xdim = self.lon.size
            self.zonal_periodic = True
            self.zonal_halo = halosize
        if meridional:
            if not np.allclose(self.lat[1]-self.lat[0], self.lat[-1]-self.lat[-2]):
                logger.warning_once("The meridional halo is located at the north and south of current grid, with a dy = lat[1]-lat[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1]-lat[0] != lat[-1]-lat[-2]. Is the halo computed as you expect?")
            latshift = (self.lat[-1] - 2 * self.lat[0] + self.lat[1])
            self.lat = np.concatenate((self.lat[-halosize:] - latshift,
                                      self.lat, self.lat[0:halosize] + latshift))
            self.ydim = self.lat.size
            self.meridional_halo = halosize
        self.lonlat_minmax = np.array([np.nanmin(self.lon), np.nanmax(self.lon), np.nanmin(self.lat), np.nanmax(self.lat)], dtype=np.float32)
        if isinstance(self, RectilinearSGrid):
            self.add_Sdepth_periodic_halo(zonal, meridional, halosize)
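
In the 1-D case above, the halo copies the outermost halosize points and shifts them by lonshift = lon[-1] - 2*lon[0] + lon[1], i.e. the full grid extent plus one grid spacing. A small numpy sketch of the same construction on a regularly spaced longitude vector (values chosen for illustration):

import numpy as np

lon = np.arange(0., 360., 10.)                 # 36 points, spacing 10 degrees
halosize = 5
lonshift = lon[-1] - 2 * lon[0] + lon[1]       # 350 - 0 + 10 = 360, i.e. one full period

lon_halo = np.concatenate((lon[-halosize:] - lonshift,    # western halo: 310..350 shifted to -50..-10
                           lon,
                           lon[0:halosize] + lonshift))   # eastern halo: 0..40 shifted to 360..400

assert lon_halo.size == lon.size + 2 * halosize
assert np.allclose(np.diff(lon_halo), 10.)     # grid spacing is preserved across the halo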
Example #7
    def execute_python(self, pset, endtime, dt):
        """Performs the core update loop via Python"""
        # sign of dt: 0 or +1 for a forward simulation; -1 for a backward simulation
        sign_dt = np.sign(dt)

        analytical = False
        if 'AdvectionAnalytical' in self._pyfunc.__name__:
            analytical = True
            if not np.isinf(dt):
                logger.warning_once(
                    'dt is not used in AnalyticalAdvection, so it is set to np.inf'
                )
            dt = np.inf

        if self.fieldset is not None:
            for f in self.fieldset.get_fields():
                if type(f) in [VectorField, NestedField, SummedField]:
                    continue
                f.data = np.array(f.data)

        for p in pset:
            self.evaluate_particle(p,
                                   endtime,
                                   sign_dt,
                                   dt,
                                   analytical=analytical)
Example #8
    def __init__(self):
        ptype = self.getPType()
        # Explicit initialisation of all particle variables
        for v in ptype.variables:
            if isinstance(v.initial, attrgetter):
                initial = v.initial(self)
            elif isinstance(v.initial, Field):
                lon = self.getInitialValue(ptype, name='lon')
                lat = self.getInitialValue(ptype, name='lat')
                depth = self.getInitialValue(ptype, name='depth')
                time = self.getInitialValue(ptype, name='time')
                if time is None:
                    raise RuntimeError(
                        'Cannot initialise a Variable with a Field if no time provided. '
                        'Add a "time=" to ParticleSet construction')
                v.initial.fieldset.computeTimeChunk(time, 0)
                initial = v.initial[time, depth, lat, lon]
                logger.warning_once(
                    "Particle initialisation from field can be very slow as it is computed in scipy mode."
                )
            else:
                initial = v.initial
            # Enforce type of initial value
            if v.dtype != c_void_p:
                setattr(self, v.name, v.dtype(initial))

        # Placeholder for explicit error handling
        self.exception = None
Example #9
    def to_dict(self, pfile, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.

        :param pfile: ParticleFile object requesting the conversion
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        returns two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        pd = self.particle_data

        if pfile.lasttime_written != time and \
           (pfile.write_ondelete is False or deleted_only is not False):
            if pd['id'].size == 0:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)
            else:
                if deleted_only is not False:
                    to_write = deleted_only
                else:
                    to_write = _to_write_particles(pd, time)
                if np.any(to_write):
                    for var in pfile.var_names:
                        data_dict[var] = pd[var][to_write]
                    pfile.maxid_written = np.maximum(pfile.maxid_written,
                                                     np.max(data_dict['id']))

                pset_errs = (to_write & (pd['state'] != OperationCode.Delete)
                             & np.less(1e-3,
                                       np.abs(time - pd['time']),
                                       where=np.isfinite(pd['time'])))
                if np.count_nonzero(pset_errs) > 0:
                    logger.warning_once(
                        'time argument in pfile.write() is {}, but particles have time {}'
                        .format(time, pd['time'][pset_errs]))

                if time not in pfile.time_written:
                    pfile.time_written.append(time)

                if len(pfile.var_names_once) > 0:
                    first_write = (_to_write_particles(pd, time) & np.isin(
                        pd['id'], pfile.written_once, invert=True))
                    if np.any(first_write):
                        data_dict_once['id'] = np.array(
                            pd['id'][first_write]).astype(dtype=np.int64)
                        for var in pfile.var_names_once:
                            data_dict_once[var] = pd[var][first_write]
                        pfile.written_once.extend(pd['id'][first_write])

            if deleted_only is False:
                pfile.lasttime_written = time

        return data_dict, data_dict_once
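
The pset_errs test above flags particles whose stored time differs from the requested write time by more than 1e-3 (and that are not flagged for deletion), ignoring particles with non-finite times. A tiny numpy sketch of just that tolerance check, with made-up particle times; the sketch passes an explicit out= array so that entries masked by where= are well defined.

import numpy as np

time = 3600.0                                            # requested write time (seconds)
p_time = np.array([3600.0, 3600.0005, 3599.0, np.nan])   # hypothetical particle times

errs = np.less(1e-3, np.abs(time - p_time),
               out=np.zeros(p_time.shape, dtype=bool),
               where=np.isfinite(p_time))
# errs -> [False, False, True, False]: only the particle at 3599.0 is inconsistent
print(np.count_nonzero(errs))                            # 1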
Example #10
    def __enter__(self):
        """
        This function enters the physical file (equivalent to a 'with open(...)' statement) and returns a file object.
        In Dask, with dynamic loading, this is the point where we have access to the header information of the file.
        Hence, this function initializes the dynamic loading by parsing the chunksize argument and mapping the requested
        chunksizes onto the variables found in the file. For auto-chunking, educated guesses are made (e.g. with the
        dask configuration file in the background) to determine the ideal chunk sizes. This is also the point
        where, due to the chunking, the file is 'locked', meaning that it cannot be accessed simultaneously by
        another process. This is significant in a cluster setup.
        """
        if self.chunksize not in [False, None, 'auto'] and type(
                self.chunksize) is not dict:
            raise AttributeError(
                "'chunksize' is of wrong type. Parameter is expected to be a dict per data dimension, or be False, None or 'auto'."
            )
        if isinstance(self.chunksize, list):
            self.chunksize = tuple(self.chunksize)

        init_chunk_dict = None
        if self.chunksize not in [False, None]:
            init_chunk_dict = self._get_initial_chunk_dictionary()
        try:
            # Unfortunately we need an if-else here, because the lock parameter is either False or a Lock object
            # (which we would rather have auto-managed).
            # If 'lock' is not specified, the Lock object is auto-created and managed by xarray internally.
            if self.lock_file:
                self.dataset = xr.open_dataset(str(self.filename),
                                               decode_cf=True,
                                               engine=self.netcdf_engine,
                                               chunks=init_chunk_dict)
            else:
                self.dataset = xr.open_dataset(str(self.filename),
                                               decode_cf=True,
                                               engine=self.netcdf_engine,
                                               chunks=init_chunk_dict,
                                               lock=False)
            self.dataset['decoded'] = True
        except:
            logger.warning_once(
                "File %s could not be decoded properly by xarray (version %s).\n         It will be opened with no decoding. Filling values might be wrongly parsed."
                % (self.filename, xr.__version__))
            if self.lock_file:
                self.dataset = xr.open_dataset(str(self.filename),
                                               decode_cf=False,
                                               engine=self.netcdf_engine,
                                               chunks=init_chunk_dict)
            else:
                self.dataset = xr.open_dataset(str(self.filename),
                                               decode_cf=False,
                                               engine=self.netcdf_engine,
                                               chunks=init_chunk_dict,
                                               lock=False)
            self.dataset['decoded'] = False

        for inds in self.indices.values():
            if type(inds) not in [list, range]:
                raise RuntimeError(
                    'Indices for field subsetting need to be a list')
        return self
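
The try/except above is a decode-then-fallback pattern: the dataset is first opened with CF decoding and, only if that fails, reopened without decoding, recording a 'decoded' flag for later interpretation of fill values. A stripped-down sketch of the same pattern as a standalone helper; the filename in the usage line is hypothetical.

import xarray as xr

def open_with_fallback(filename, engine='netcdf4', chunks=None):
    """Open a NetCDF file with CF decoding, falling back to raw values if decoding fails."""
    try:
        ds = xr.open_dataset(filename, decode_cf=True, engine=engine, chunks=chunks)
        ds['decoded'] = True
    except Exception:
        ds = xr.open_dataset(filename, decode_cf=False, engine=engine, chunks=chunks)
        ds['decoded'] = False
    return ds

# usage (hypothetical file): ds = open_with_fallback('ocean_model_output.nc')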
Example #11
    def toDictionary(self, pfile, time, deleted_only=False):
        """
        Convert all Particle data from one time step to a python dictionary.
        :param pfile: ParticleFile object requesting the conversion
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        returns two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once

        This function depends on the specific collection in question and thus needs to be specified in the
        derived classes.
        """

        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        indices_to_write = []
        if pfile.lasttime_written != time and \
           (pfile.write_ondelete is False or deleted_only is not False):
            if self._data['id'].size == 0:
                logger.warning("ParticleSet is empty on writing as array at time %g" % time)
            else:
                if deleted_only is not False:
                    if type(deleted_only) not in [list, np.ndarray] and deleted_only in [True, 1]:
                        indices_to_write = np.where(np.isin(self._data['state'],
                                                            [OperationCode.Delete]))[0]
                    elif type(deleted_only) in [list, np.ndarray]:
                        indices_to_write = deleted_only
                else:
                    indices_to_write = _to_write_particles(self._data, time)
                if np.any(indices_to_write):
                    for var in pfile.var_names:
                        data_dict[var] = self._data[var][indices_to_write]
                    pfile.maxid_written = np.maximum(pfile.maxid_written, np.max(data_dict['id']))

                pset_errs = ((self._data['state'][indices_to_write] != OperationCode.Delete) & np.greater(np.abs(time - self._data['time'][indices_to_write]), 1e-3, where=np.isfinite(self._data['time'][indices_to_write])))
                if np.count_nonzero(pset_errs) > 0:
                    logger.warning_once('time argument in pfile.write() is {}, but particles have time {}'.format(time, self._data['time'][pset_errs]))

                # ==== this function should probably move back somewhere into the particle-file instead of the to_dict ==== #
                if time not in pfile.time_written:
                    pfile.time_written.append(time)

                if len(pfile.var_names_once) > 0:
                    first_write = (_to_write_particles(self._data, time) & _is_particle_started_yet(self._data, time) & np.isin(self._data['id'], pfile.written_once, invert=True))
                    if np.any(first_write):
                        data_dict_once['id'] = np.array(self._data['id'][first_write]).astype(dtype=np.int64)
                        for var in pfile.var_names_once:
                            data_dict_once[var] = self._data[var][first_write]
                        pfile.written_once.extend(np.array(self._data['id'][first_write]).astype(dtype=np.int64).tolist())

            if deleted_only is False:
                pfile.lasttime_written = time

        return data_dict, data_dict_once
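
When deleted_only is passed as True, the code above selects the indices of particles whose state equals OperationCode.Delete via np.where and np.isin. A compact numpy sketch of that selection, using a stand-in integer for the Delete operation code:

import numpy as np

DELETE = 101                                  # stand-in value for OperationCode.Delete
state = np.array([0, DELETE, 0, DELETE, 0])   # hypothetical particle states

indices_to_write = np.where(np.isin(state, [DELETE]))[0]
# indices_to_write -> array([1, 3])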
Example #12
    def write(self, pset, time, sync=True, deleted_only=False):
        """Write :class:`parcels.particleset.ParticleSet` data to file

        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param sync: Optional argument whether to write data to disk immediately. Default is True

        """
        if self.dataset is None:
            self.open_dataset()
        if isinstance(time, delta):
            time = time.total_seconds()
        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size > 0:

                first_write = [
                    p for p in pset if (p.fileid < 0 or len(self.idx) == 0)
                    and _is_particle_started_yet(p, time)
                ]  # len(self.idx)==0 in case pset is written to new ParticleFile
                for p in first_write:
                    p.fileid = self.lasttraj  # particle id in current file
                    self.lasttraj += 1

                self.idx = np.append(self.idx, np.zeros(len(first_write)))

                for p in pset:
                    if _is_particle_started_yet(p, time):
                        i = p.fileid
                        self.id[i, self.idx[i]] = p.id
                        self.time[i, self.idx[i]] = p.time
                        self.lat[i, self.idx[i]] = p.lat
                        self.lon[i, self.idx[i]] = p.lon
                        self.z[i, self.idx[i]] = p.depth
                        for var in self.user_vars:
                            getattr(self, var)[i,
                                               self.idx[i]] = getattr(p, var)
                        if p.state != ErrorCode.Delete and not np.allclose(
                                p.time, time):
                            logger.warning_once(
                                'time argument in pfile.write() is %g, but a particle has time %g.'
                                % (time, p.time))

                for p in first_write:
                    for var in self.user_vars_once:
                        getattr(self, var)[p.fileid] = getattr(p, var)
            else:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)

            if not deleted_only:
                self.idx += 1
                self.lasttime_written = time

        if sync:
            self.sync()
Example #13
 def cleanup_unload_lib(lib):
     # Clean up the in-memory dynamically linked libraries.
     # This is not strictly necessary, as these libraries are not that large, but it becomes useful with the
     # random naming scheme that is required on Windows to deal with updates to a Parcels kernel.
     if lib is not None:
         try:
             _ctypes.FreeLibrary(lib._handle) if platform == 'win32' else _ctypes.dlclose(lib._handle)
         except:
             logger.warning_once("compiled library already freed.")
Example #14
    def add_grid(self, field):
        grid = field.grid
        existing_grid = False
        for g in self.grids:
            if field.field_chunksize != grid.master_chunksize:
                logger.warning_once(
                    "Field chunksize and Grid master chunksize are not equal - erroneous behaviour expected."
                )
                break
            if g == grid:
                existing_grid = True
                break
            sameGrid = True
            sameDims = True
            if grid.time_origin != g.time_origin:
                sameDims = False
                continue
            for attr in ['lon', 'lat', 'depth', 'time']:
                gattr = getattr(g, attr)
                gridattr = getattr(grid, attr)
                if gattr.shape != gridattr.shape or not np.allclose(
                        gattr, gridattr):
                    sameGrid = False
                    sameDims = False
                    break
            if not sameDims:
                continue
            sameGrid &= (grid.master_chunksize == g.master_chunksize) or (
                grid.master_chunksize in [False, None]
                and g.master_chunksize in [False, None])
            if not sameGrid and sameDims and grid.master_chunksize is not None:
                res = False
                if (isinstance(grid.master_chunksize, tuple) and isinstance(g.master_chunksize, tuple)) or \
                        (isinstance(grid.master_chunksize, dict) and isinstance(g.master_chunksize, dict)):
                    res |= functools.reduce(
                        lambda i, j: i and j,
                        map(lambda m, k: m == k, grid.master_chunksize,
                            g.master_chunksize), True)
                if res:
                    sameGrid = True
                    logger.warning_once(
                        "Trying to initialize a shared grid with different chunking sizes - action prohibited. Replacing requested field_chunksize with grid's master chunksize."
                    )
                else:
                    raise ValueError(
                        "Conflict between grids of the same gridset: major grid chunksize and requested sibling-grid chunksize as well as their chunk-dimension names are not equal - Please apply the same chunksize to all fields in a shared grid!"
                    )
                break
            if sameGrid:
                existing_grid = True
                field.grid = g
                break

        if not existing_grid:
            self.grids.append(grid)
        field.igrid = self.grids.index(field.grid)
Example #15
    def convert_pset_to_dict(self, pset, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.
        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        returns two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size == 0:
                logger.warning("ParticleSet is empty on writing as array at time %g" % time)
            else:
                for var in self.var_names:
                    data_dict[var] = np.nan * np.zeros(len(pset))

                i = 0
                for p in pset:
                    if p.dt*p.time <= p.dt*time:
                        for var in self.var_names:
                            data_dict[var][i] = getattr(p, var)
                        if p.state != ErrorCode.Delete and not np.allclose(p.time, time):
                            logger.warning_once('time argument in pfile.write() is %g, but a particle has time %g.' % (time, p.time))
                        self.maxid_written = np.max([self.maxid_written, p.id])
                        i += 1

                save_ind = np.isfinite(data_dict["id"])
                for key in self.var_names:
                    data_dict[key] = data_dict[key][save_ind]

                if time not in self.time_written:
                    self.time_written.append(time)

                if len(self.var_names_once) > 0:
                    first_write = [p for p in pset if (p.id not in self.written_once) and _is_particle_started_yet(p, time)]
                    data_dict_once['id'] = np.nan * np.zeros(len(first_write))
                    for var in self.var_names_once:
                        data_dict_once[var] = np.nan * np.zeros(len(first_write))

                    i = 0
                    for p in first_write:
                        self.written_once.append(p.id)
                        data_dict_once['id'][i] = p.id
                        for var in self.var_names_once:
                            data_dict_once[var][i] = getattr(p, var)
                        i += 1

            if not deleted_only:
                self.lasttime_written = time

        return data_dict, data_dict_once
Example #16
    def execute_python(self, pset, endtime, dt):
        """Performs the core update loop via Python

        InteractionKernels do not implement ways to catch or recover from
        errors caused during execution of the kernel function(s).
        It is strongly recommended not to sample from fields inside an
        InteractionKernel.
        """
        if self.fieldset is not None:
            for f in self.fieldset.get_fields():
                if type(f) in [VectorField, NestedField, SummedField]:
                    continue
                f.data = np.array(f.data)

        reset_particle_idx = []
        for pyfunc in self._pyfunc:
            pset.compute_neighbor_tree(endtime, dt)
            active_idx = pset._active_particle_idx

            mutator = defaultdict(lambda: [])

            # Loop only over particles that are in a positive state and have started.
            for particle_idx in active_idx:
                p = pset[particle_idx]
                # Don't use particles that are not started.
                if (endtime - p.time) / dt <= -1e-7:
                    continue
                elif (endtime - p.time) / dt < 1:
                    p.dt = endtime - p.time
                    reset_particle_idx.append(particle_idx)

                neighbors = pset.neighbors_by_index(particle_idx)
                try:
                    res = pyfunc(p, pset.fieldset, p.time, neighbors, mutator)
                except Exception as e:
                    res = ErrorCode.Error
                    p.exception = e

                # InteractionKernels do not implement a way to recover
                # from errors.
                if res != StateCode.Success:
                    logger.warning_once(
                        "Some InteractionKernel was not completed successfully, likely because a Particle threw an error that was not captured."
                    )

            for particle_idx in active_idx:
                p = pset[particle_idx]
                try:
                    for mutator_func, args in mutator[p.id]:
                        mutator_func(p, *args)
                except KeyError:
                    pass
            for particle_idx in reset_particle_idx:
                pset[particle_idx].dt = dt
Example #17
File: grid.py  Project: pocean23/parcels
    def __init__(self, lon, lat, depth=None, time=None, time_origin=None, mesh='flat'):
        super(CurvilinearZGrid, self).__init__(lon, lat, time, time_origin, mesh)
        if isinstance(depth, np.ndarray):
            assert(len(depth.shape) == 1), 'depth is not a vector'

        self.gtype = GridCode.CurvilinearZGrid
        self.depth = np.zeros(1, dtype=np.float32) if depth is None else depth
        self.zdim = self.depth.size
        self.z4d = -1  # only for SGrid
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
Example #18
File: grid.py  Project: pocean23/parcels
    def __init__(self, lon, lat, time, time_origin, mesh):
        assert(isinstance(lon, np.ndarray) and len(lon.shape) == 1), 'lon is not a numpy vector'
        assert(isinstance(lat, np.ndarray) and len(lat.shape) == 1), 'lat is not a numpy vector'
        assert (isinstance(time, np.ndarray) or not time), 'time is not a numpy array'
        if isinstance(time, np.ndarray):
            assert(len(time.shape) == 1), 'time is not a vector'

        super(RectilinearGrid, self).__init__(lon, lat, time, time_origin, mesh)
        self.xdim = self.lon.size
        self.ydim = self.lat.size
        self.tdim = self.time.size
        if self.lat[-1] < self.lat[0]:
            self.lat = np.flip(self.lat, axis=0)
            self.lat_flipped = True
            logger.warning_once("Flipping lat data from North-South to South-North")
Example #19
 def __enter__(self):
     try:
          # Unfortunately we need an if-else here, because the lock parameter is either False or a Lock object
          # (which we would rather have auto-managed).
          # If 'lock' is not specified, the Lock object is auto-created and managed by xarray internally.
         self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine)
         self.dataset['decoded'] = True
     except:
         logger.warning_once("File %s could not be decoded properly by xarray (version %s).\n         "
                             "It will be opened with no decoding. Filling values might be wrongly parsed."
                             % (self.filename, xr.__version__))
         self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine)
         self.dataset['decoded'] = False
     for inds in self.indices.values():
         if type(inds) not in [list, range]:
             raise RuntimeError('Indices for field subsetting need to be a list')
     return self
Example #20
File: grid.py  Project: pocean23/parcels
 def __init__(self, lon, lat, time, time_origin, mesh):
     self.lon = lon
     self.lat = lat
     self.time = np.zeros(1, dtype=np.float64) if time is None else time
     if not self.lon.dtype == np.float32:
         logger.warning_once("Casting lon data to np.float32")
         self.lon = self.lon.astype(np.float32)
     if not self.lat.dtype == np.float32:
         logger.warning_once("Casting lat data to np.float32")
         self.lat = self.lat.astype(np.float32)
     if not self.time.dtype == np.float64:
         assert isinstance(self.time[0], (np.integer, np.floating, float, int)), 'Time vector must be an array of int or floats'
         logger.warning_once("Casting time data to np.float64")
         self.time = self.time.astype(np.float64)
     self.time_full = self.time  # needed for deferred_loaded Fields
     self.time_origin = TimeConverter() if time_origin is None else time_origin
     assert isinstance(self.time_origin, TimeConverter), 'time_origin needs to be a TimeConverter object'
     self.mesh = mesh
     self.cstruct = None
     self.cell_edge_sizes = {}
     self.zonal_periodic = False
     self.zonal_halo = 0
     self.meridional_halo = 0
     self.lat_flipped = False
     self.defer_load = False
     self.lonlat_minmax = np.array([np.nanmin(lon), np.nanmax(lon), np.nanmin(lat), np.nanmax(lat)], dtype=np.float32)
     self.periods = 0
Example #21
    def __init__(self, lon, lat, time, time_origin, mesh):
        assert (isinstance(lon, np.ndarray)
                and len(lon.shape) <= 1), 'lon is not a numpy vector'
        assert (isinstance(lat, np.ndarray)
                and len(lat.shape) <= 1), 'lat is not a numpy vector'
        assert (isinstance(time, np.ndarray)
                or not time), 'time is not a numpy array'
        if isinstance(time, np.ndarray):
            assert (len(time.shape) == 1), 'time is not a vector'

        super(RectilinearGrid, self).__init__(lon, lat, time, time_origin,
                                              mesh)
        self.xdim = self.lon.size
        self.ydim = self.lat.size
        self.tdim = self.time.size
        if self.ydim > 1 and self.lat[-1] < self.lat[0]:
            self.lat = np.flip(self.lat, axis=0)
            self.lat_flipped = True
            logger.warning_once(
                "Flipping lat data from North-South to South-North. "
                "Note that this may lead to wrong sign for meridional velocity, so tread very carefully"
            )
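
The flip above converts a North-to-South latitude vector into the South-to-North ordering that the rest of the grid code assumes; any data defined on that axis (for example the S-grid depth in Example #3, via lat_flipped) has to be flipped along the same axis to stay consistent. A tiny numpy illustration with made-up values:

import numpy as np

lat = np.array([60., 30., 0., -30.], dtype=np.float32)   # North-to-South, as in many NetCDF files
data = np.arange(12, dtype=np.float32).reshape(4, 3)     # hypothetical field on (lat, lon)

if lat[-1] < lat[0]:
    lat = np.flip(lat, axis=0)     # now South-to-North: [-30, 0, 30, 60]
    data = np.flip(data, axis=0)   # keep the field aligned with the flipped latitude axis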
Example #22
    def execute_python(self, pset, endtime, dt):
        """Performs the core update loop via Python"""
        sign_dt = np.sign(dt)

        if 'AdvectionAnalytical' in self.pyfunc.__name__:
            analytical = True
            if not np.isinf(dt):
                logger.warning_once('dt is not used in AnalyticalAdvection, so it is set to np.inf')
            dt = np.inf
        else:
            analytical = False

        particles = pset.data_accessor()

        # back up variables in case of OperationCode.Repeat
        p_var_back = {}

        if self.fieldset is not None:
            for f in self.fieldset.get_fields():
                if type(f) in [VectorField, NestedField, SummedField]:
                    continue
                f.data = np.array(f.data)

        for p in range(pset.size):
            particles.set_index(p)

            # Don't execute particles that aren't started yet
            sign_end_part = np.sign(endtime - particles.time)
            # Compute min/max dt for first timestep
            dt_pos = min(abs(particles.dt), abs(endtime - particles.time))

            # ==== numerically stable; also making sure that continuously-recovered particles do end successfully,
            # as they fulfil this condition when entering the final calculation. ==== #
            if ((sign_end_part != sign_dt) or np.isclose(dt_pos, 0)) and not np.isclose(dt, 0):
                if abs(particles.time) >= abs(endtime):
                    particles.set_state(StateCode.Success)
                continue

            while particles.state in [StateCode.Evaluate, OperationCode.Repeat] or np.isclose(dt, 0):

                for var in pset.ptype.variables:
                    p_var_back[var.name] = getattr(particles, var.name)
                try:
                    pdt_prekernels = sign_dt * dt_pos
                    particles.dt = pdt_prekernels
                    state_prev = particles.state
                    res = self.pyfunc(particles, pset.fieldset, particles.time)
                    if res is None:
                        res = StateCode.Success

                    if res is StateCode.Success and particles.state != state_prev:
                        res = particles.state

                    if not analytical and res == StateCode.Success and not np.isclose(particles.dt, pdt_prekernels):
                        res = OperationCode.Repeat

                except FieldOutOfBoundError as fse_xy:
                    res = ErrorCode.ErrorOutOfBounds
                    particles.exception = fse_xy
                except FieldOutOfBoundSurfaceError as fse_z:
                    res = ErrorCode.ErrorThroughSurface
                    particles.exception = fse_z
                except TimeExtrapolationError as fse_t:
                    res = ErrorCode.ErrorTimeExtrapolation
                    particles.exception = fse_t

                except Exception as e:
                    res = ErrorCode.Error
                    particles.exception = e

                # Handle particle time and time loop
                if res in [StateCode.Success, OperationCode.Delete]:
                    # Update time and repeat
                    particles.time += particles.dt
                    particles.update_next_dt()
                    if analytical:
                        particles.dt = np.inf
                    dt_pos = min(abs(particles.dt), abs(endtime - particles.time))

                    sign_end_part = np.sign(endtime - particles.time)
                    if res != OperationCode.Delete and not np.isclose(dt_pos, 0) and (sign_end_part == sign_dt):
                        res = StateCode.Evaluate
                    if sign_end_part != sign_dt:
                        dt_pos = 0

                    particles.set_state(res)
                    if np.isclose(dt, 0):
                        break
                else:
                    particles.set_state(res)
                    # Try again without time update
                    for var in pset.ptype.variables:
                        if var.name not in ['dt', 'state']:
                            setattr(particles, var.name, p_var_back[var.name])
                    dt_pos = min(abs(particles.dt), abs(endtime - particles.time))

                    sign_end_part = np.sign(endtime - particles.time)
                    if sign_end_part != sign_dt:
                        dt_pos = 0
                    break
Example #23
    def execute(self,
                pyfunc=AdvectionRK4,
                endtime=None,
                runtime=None,
                dt=1.,
                moviedt=None,
                recovery=None,
                output_file=None,
                movie_background_field=None,
                verbose_progress=None):
        """Execute a given kernel function over the particle set for
        multiple timesteps. Optionally also provide sub-timestepping
        for particle output.

        :param pyfunc: Kernel function to execute. This can be the name of a
                       defined Python function or a :class:`parcels.kernel.Kernel` object.
                       Kernels can be concatenated using the + operator
        :param endtime: End time for the timestepping loop.
                        It is either a datetime object or a positive double.
        :param runtime: Length of the timestepping loop. Use instead of endtime.
                        It is either a timedelta object or a positive double.
        :param dt: Timestep interval to be passed to the kernel.
                   It is either a timedelta object or a double.
                   Use a negative value for a backward-in-time simulation.
        :param moviedt:  Interval for inner sub-timestepping (leap), which dictates
                         the update frequency of animation.
                         It is either a timedelta object or a positive double.
                         None value means no animation.
        :param output_file: :mod:`parcels.particlefile.ParticleFile` object for particle output
        :param recovery: Dictionary with additional `:mod:parcels.tools.error`
                         recovery kernels to allow custom recovery behaviour in case of
                         kernel errors.
        :param movie_background_field: field plotted as background in the movie if moviedt is set.
                                       'vector' shows the velocity as a vector field.
        :param verbose_progress: Boolean for providing a progress bar for the kernel execution loop.

        """

        # check if pyfunc has changed since last compile. If so, recompile
        if self.kernel is None or (self.kernel.pyfunc is not pyfunc
                                   and self.kernel is not pyfunc):
            # Generate and store Kernel
            if isinstance(pyfunc, Kernel):
                self.kernel = pyfunc
            else:
                self.kernel = self.Kernel(pyfunc)
            # Prepare JIT kernel execution
            if self.ptype.uses_jit:
                self.kernel.remove_lib()
                self.kernel.compile(compiler=GNUCompiler())
                self.kernel.load_lib()

        # Convert all time variables to seconds
        if isinstance(endtime, delta):
            raise RuntimeError('endtime must be either a datetime or a double')
        if isinstance(endtime, datetime):
            endtime = np.datetime64(endtime)
        if isinstance(endtime, np.datetime64):
            if self.time_origin.calendar is None:
                raise NotImplementedError(
                    'If fieldset.time_origin is not a date, execution endtime must be a double'
                )
            endtime = self.time_origin.reltime(endtime)
        if isinstance(runtime, delta):
            runtime = runtime.total_seconds()
        if isinstance(dt, delta):
            dt = dt.total_seconds()
        outputdt = output_file.outputdt if output_file else np.infty
        if isinstance(outputdt, delta):
            outputdt = outputdt.total_seconds()
        if isinstance(moviedt, delta):
            moviedt = moviedt.total_seconds()

        assert runtime is None or runtime >= 0, 'runtime must be positive'
        assert outputdt is None or outputdt >= 0, 'outputdt must be positive'
        assert moviedt is None or moviedt >= 0, 'moviedt must be positive'

        # Set particle.time defaults based on sign of dt, if not set at ParticleSet construction
        for p in self:
            if np.isnan(p.time):
                mintime, maxtime = self.fieldset.gridset.dimrange('time_full')
                p.time = mintime if dt >= 0 else maxtime

        # Derive _starttime and endtime from arguments or fieldset defaults
        if runtime is not None and endtime is not None:
            raise RuntimeError(
                'Only one of (endtime, runtime) can be specified')
        _starttime = min([p.time for p in self]) if dt >= 0 else max(
            [p.time for p in self])
        if self.repeatdt is not None and self.repeat_starttime is None:
            self.repeat_starttime = _starttime
        if runtime is not None:
            endtime = _starttime + runtime * np.sign(dt)
        elif endtime is None:
            mintime, maxtime = self.fieldset.gridset.dimrange('time_full')
            endtime = maxtime if dt >= 0 else mintime

        if abs(endtime - _starttime) < 1e-5 or dt == 0 or runtime == 0:
            dt = 0
            runtime = 0
            endtime = _starttime
            logger.warning_once(
                "dt or runtime is zero, or endtime is equal to Particle.time. "
                "The kernels will be executed once, without incrementing time")

        # Initialise particle timestepping
        for p in self:
            p.dt = dt

        # First write output_file, because particles could have been added
        if output_file:
            output_file.write(self, _starttime)
        if moviedt:
            self.show(field=movie_background_field,
                      show_time=_starttime,
                      animation=True)

        if moviedt is None:
            moviedt = np.infty
        time = _starttime
        if self.repeatdt:
            next_prelease = self.repeat_starttime + (
                abs(time - self.repeat_starttime) // self.repeatdt +
                1) * self.repeatdt * np.sign(dt)
        else:
            next_prelease = np.infty if dt > 0 else -np.infty
        next_output = time + outputdt if dt > 0 else time - outputdt
        next_movie = time + moviedt if dt > 0 else time - moviedt
        next_input = self.fieldset.computeTimeChunk(time, np.sign(dt))

        tol = 1e-12
        if verbose_progress is None:
            walltime_start = time_module.time()
        if verbose_progress:
            try:
                pbar = progressbar.ProgressBar(
                    max_value=abs(endtime - _starttime)).start()
            except:  # for old versions of progressbar
                pbar = progressbar.ProgressBar(
                    maxvalue=abs(endtime - _starttime)).start()
        while (time < endtime and dt > 0) or (time > endtime
                                              and dt < 0) or dt == 0:
            if verbose_progress is None and time_module.time(
            ) - walltime_start > 10:
                # Showing progressbar if runtime > 10 seconds
                pbar = progressbar.ProgressBar(
                    max_value=abs(endtime - _starttime)).start()
                verbose_progress = True
            if dt > 0:
                time = min(next_prelease, next_input, next_output, next_movie,
                           endtime)
            else:
                time = max(next_prelease, next_input, next_output, next_movie,
                           endtime)
            self.kernel.execute(self,
                                endtime=time,
                                dt=dt,
                                recovery=recovery,
                                output_file=output_file)
            if abs(time - next_prelease) < tol:
                pset_new = ParticleSet(fieldset=self.fieldset,
                                       time=time,
                                       lon=self.repeatlon,
                                       lat=self.repeatlat,
                                       depth=self.repeatdepth,
                                       pclass=self.repeatpclass)
                for p in pset_new:
                    p.dt = dt
                self.add(pset_new)
                next_prelease += self.repeatdt * np.sign(dt)
            if abs(time - next_output) < tol:
                if output_file:
                    output_file.write(self, time)
                next_output += outputdt * np.sign(dt)
            if abs(time - next_movie) < tol:
                self.show(field=movie_background_field,
                          show_time=time,
                          animation=True)
                next_movie += moviedt * np.sign(dt)
            next_input = self.fieldset.computeTimeChunk(time, dt)
            if dt == 0:
                break
            if verbose_progress:
                pbar.update(abs(time - _starttime))

        if output_file:
            output_file.write(self, time)
        if verbose_progress:
            pbar.finish()
    def __init__(self,
                 fieldset=None,
                 pclass=JITParticle,
                 lon=None,
                 lat=None,
                 depth=None,
                 time=None,
                 repeatdt=None,
                 lonlatdepth_dtype=None,
                 pid_orig=None,
                 **kwargs):
        self.fieldset = fieldset
        if self.fieldset is None:
            logger.warning_once(
                "No FieldSet provided in ParticleSet generation. "
                "This breaks most Parcels functionality")
        else:
            self.fieldset.check_complete()
        partitions = kwargs.pop('partitions', None)

        def convert_to_array(var):
            # Convert lists and single integers/floats to one-dimensional numpy arrays
            if isinstance(var, np.ndarray):
                return var.flatten()
            elif isinstance(var, (int, float, np.float32, np.int32)):
                return np.array([var])
            else:
                return np.array(var)

        lon = np.empty(shape=0) if lon is None else convert_to_array(lon)
        lat = np.empty(shape=0) if lat is None else convert_to_array(lat)
        if isinstance(pid_orig, (type(None), type(False))):
            pid_orig = np.arange(lon.size)
        pid = pid_orig + pclass.lastID

        if depth is None:
            mindepth = self.fieldset.gridset.dimrange(
                'depth')[0] if self.fieldset is not None else 0
            depth = np.ones(lon.size) * mindepth
        else:
            depth = convert_to_array(depth)
        assert lon.size == lat.size and lon.size == depth.size, (
            'lon, lat and depth do not all have the same lengths')

        time = convert_to_array(time)
        time = np.repeat(time, lon.size) if time.size == 1 else time

        def _convert_to_reltime(time):
            if isinstance(time, np.datetime64) or (hasattr(time, 'calendar')
                                                   and time.calendar
                                                   in _get_cftime_calendars()):
                return True
            return False

        if time.size > 0 and type(time[0]) in [datetime, date]:
            time = np.array([np.datetime64(t) for t in time])
        self.time_origin = fieldset.time_origin if self.fieldset is not None else 0
        if time.size > 0 and isinstance(
                time[0], np.timedelta64) and not self.time_origin:
            raise NotImplementedError(
                'If fieldset.time_origin is not a date, time of a particle must be a double'
            )
        time = np.array([
            self.time_origin.reltime(t) if _convert_to_reltime(t) else t
            for t in time
        ])
        assert lon.size == time.size, (
            'time and positions (lon, lat, depth) do not have the same lengths.')

        if partitions is not None and partitions is not False:
            partitions = convert_to_array(partitions)

        for kwvar in kwargs:
            kwargs[kwvar] = convert_to_array(kwargs[kwvar])
            assert lon.size == kwargs[kwvar].size, (
                '%s and positions (lon, lat, depth) do not have the same lengths.' % kwvar)

        offset = np.max(pid) if len(pid) > 0 else -1
        if MPI:
            mpi_comm = MPI.COMM_WORLD
            mpi_rank = mpi_comm.Get_rank()
            mpi_size = mpi_comm.Get_size()

            if lon.size < mpi_size and mpi_size > 1:
                raise RuntimeError(
                    'Cannot initialise with fewer particles than MPI processors'
                )

            if mpi_size > 1:
                if partitions is not False:
                    if partitions is None:
                        if mpi_rank == 0:
                            coords = np.vstack((lon, lat)).transpose()
                            kmeans = KMeans(n_clusters=mpi_size,
                                            random_state=0).fit(coords)
                            partitions = kmeans.labels_
                        else:
                            partitions = None
                        partitions = mpi_comm.bcast(partitions, root=0)
                    elif np.max(partitions) >= mpi_size:
                        raise RuntimeError(
                            'Particle partitions must vary between 0 and the number of mpi procs'
                        )
                    lon = lon[partitions == mpi_rank]
                    lat = lat[partitions == mpi_rank]
                    time = time[partitions == mpi_rank]
                    depth = depth[partitions == mpi_rank]
                    pid = pid[partitions == mpi_rank]
                    for kwvar in kwargs:
                        kwargs[kwvar] = kwargs[kwvar][partitions == mpi_rank]
                offset = MPI.COMM_WORLD.allreduce(offset, op=MPI.MAX)

        self.repeatdt = repeatdt.total_seconds() if isinstance(
            repeatdt, delta) else repeatdt
        if self.repeatdt:
            if self.repeatdt <= 0:
                raise ValueError('Repeatdt should be > 0')
            if time[0] and not np.allclose(time, time[0]):
                raise ValueError(
                    'All Particle.time should be the same when repeatdt is not None'
                )
            self.repeat_starttime = time[0]
            self.repeatlon = lon
            self.repeatlat = lat
            self.repeatpid = pid - pclass.lastID
            self.repeatdepth = depth
            self.repeatpclass = pclass
            self.partitions = partitions
            self.repeatkwargs = kwargs
        pclass.setLastID(offset + 1)

        if lonlatdepth_dtype is None:
            if fieldset is not None:
                self.lonlatdepth_dtype = self.lonlatdepth_dtype_from_field_interp_method(
                    fieldset.U)
            else:
                self.lonlatdepth_dtype = np.float32
        else:
            self.lonlatdepth_dtype = lonlatdepth_dtype
        assert self.lonlatdepth_dtype in [np.float32, np.float64], \
            'lon lat depth precision should be set to either np.float32 or np.float64'
        pclass.set_lonlatdepth_dtype(self.lonlatdepth_dtype)

        self.ptype = pclass.getPType()
        self.kernel = None

        # store particle data as an array per variable (structure of arrays approach)
        self.particle_data = {}
        initialised = set()
        for v in self.ptype.variables:
            if v.name in ['xi', 'yi', 'zi', 'ti']:
                ngrid = fieldset.gridset.size if fieldset is not None else 1
                self.particle_data[v.name] = np.empty((len(lon), ngrid),
                                                      dtype=v.dtype)
            else:
                self.particle_data[v.name] = np.empty(len(lon), dtype=v.dtype)

        if lon is not None and lat is not None:
            # Initialise from lists of lon/lat coordinates
            assert self.size == len(lon) and self.size == len(lat), (
                'Size of ParticleSet does not match length of lon and lat.')

            # mimic the variables that get initialised in the constructor
            self.particle_data['lat'][:] = lat
            self.particle_data['lon'][:] = lon
            self.particle_data['depth'][:] = depth
            self.particle_data['time'][:] = time
            self.particle_data['id'][:] = pid
            self.particle_data['fileid'][:] = -1

            # special case for exceptions which can only be handled from scipy
            self.particle_data['exception'] = np.empty(self.size, dtype=object)

            initialised |= {'lat', 'lon', 'depth', 'time', 'id'}

            # any fields that were provided on the command line
            for kwvar, kwval in kwargs.items():
                if not hasattr(pclass, kwvar):
                    raise RuntimeError(
                        'Particle class does not have Variable %s' % kwvar)
                self.particle_data[kwvar][:] = kwval
                initialised.add(kwvar)

            # initialise the rest to their default values
            for v in self.ptype.variables:
                if v.name in initialised:
                    continue

                if isinstance(v.initial, Field):
                    for i in range(self.size):
                        if (time[i] is None) or (np.isnan(time[i])):
                            raise RuntimeError(
                                'Cannot initialise a Variable with a Field if no time provided. '
                                'Add a "time=" to ParticleSet construction')
                        v.initial.fieldset.computeTimeChunk(time[i], 0)
                        self.particle_data[v.name][i] = v.initial[time[i],
                                                                  depth[i],
                                                                  lat[i],
                                                                  lon[i]]
                        logger.warning_once(
                            "Particle initialisation from field can be very slow as it is computed in scipy mode."
                        )
                elif isinstance(v.initial, attrgetter):
                    self.particle_data[v.name][:] = v.initial(self)
                else:
                    self.particle_data[v.name][:] = v.initial

                initialised.add(v.name)
        else:
            raise ValueError(
                "Latitude and longitude required for generating ParticleSet")
Example #25
    def from_netcdf(cls,
                    filenames,
                    variables,
                    dimensions,
                    indices=None,
                    mesh='spherical',
                    timestamps=None,
                    allow_time_extrapolation=None,
                    time_periodic=False,
                    deferred_load=True,
                    **kwargs):
        """Initialises FieldSet object from NetCDF files

        :param filenames: Dictionary mapping variables to file(s). The
               filepath may contain wildcards to indicate multiple files
               or be a list of files.
               filenames can be a list [files], a dictionary {var:[files]},
               a dictionary {dim:[files]} (if lon, lat, depth and/or time are not stored in the same files as the data),
               or a dictionary of dictionaries {var:{dim:[files]}}.
               Time values are taken from filenames['data'].
        :param variables: Dictionary mapping variables to variable names in the netCDF file(s).
               Note that the built-in Advection kernels assume that U and V are in m/s
        :param dimensions: Dictionary mapping data dimensions (lon,
               lat, depth, time, data) to dimensions in the netCDF file(s).
               Note that dimensions can also be a dictionary of dictionaries if
               dimension names are different for each variable
               (e.g. dimensions['U'], dimensions['V'], etc).
        :param indices: Optional dictionary of indices for each dimension
               to read from file(s), to allow for reading of subset of data.
               Default is to read the full extent of each dimension.
               Note that negative indices are not allowed.
        :param mesh: String indicating the type of mesh coordinates and
               units used during velocity interpolation, see also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb:

               1. spherical (default): Lat and lon in degree, with a
                  correction for zonal velocity U near the poles.
               2. flat: No conversion, lat/lon are assumed to be in m.
        :param timestamps: A numpy array containing the timestamps for each of the files in filenames.
               Default is None if dimensions includes time.
        :param allow_time_extrapolation: boolean whether to allow for extrapolation
               (i.e. beyond the last available time snapshot)
               Default is False if dimensions includes time, else True
        :param time_periodic: boolean whether to loop periodically over the time component of the FieldSet.
               This flag overrides allow_time_extrapolation and sets it to False
        :param deferred_load: boolean whether to only pre-load data (in deferred mode) or
               fully load them (default: True). Deferred loading is advised, since Parcels can
               then manage memory more efficiently during particle set execution.
               deferred_load=False is, however, sometimes necessary for plotting the fields.
        :param netcdf_engine: engine to use for netcdf reading in xarray. Default is 'netcdf',
               but in cases where this doesn't work, setting netcdf_engine='scipy' could help
        """
        # Ensure that times are not provided both in netcdf file and in 'timestamps'.
        if timestamps is not None and 'time' in dimensions:
            logger.warning_once(
                "Time already provided, defaulting to dimensions['time'] over timestamps."
            )
            timestamps = None

        # Typecast timestamps to numpy array & correct shape.
        if timestamps is not None:
            if isinstance(timestamps, list):
                timestamps = np.array(timestamps)
            timestamps = np.reshape(timestamps, [timestamps.size, 1])

        fields = {}
        if 'creation_log' not in kwargs.keys():
            kwargs['creation_log'] = 'from_netcdf'
        for var, name in variables.items():
            # Resolve all matching paths for the current variable
            paths = filenames[var] if type(
                filenames) is dict and var in filenames else filenames
            if type(paths) is not dict:
                paths = cls.parse_wildcards(paths, filenames, var)
            else:
                for dim, p in paths.items():
                    paths[dim] = cls.parse_wildcards(p, filenames, var)

            # Use dimensions[var] and indices[var] if either of them is a dict of dicts
            dims = dimensions[var] if var in dimensions else dimensions
            cls.checkvaliddimensionsdict(dims)
            inds = indices[var] if (indices and var in indices) else indices

            grid = None
            # check if grid has already been processed (i.e. if other fields have same filenames, dimensions and indices)
            for procvar, _ in fields.items():
                procdims = dimensions[
                    procvar] if procvar in dimensions else dimensions
                procinds = indices[procvar] if (
                    indices and procvar in indices) else indices
                procpaths = filenames[procvar] if isinstance(
                    filenames, dict) and procvar in filenames else filenames
                nowpaths = filenames[var] if isinstance(
                    filenames, dict) and var in filenames else filenames
                if procdims == dims and procinds == inds and procpaths == nowpaths:
                    sameGrid = False
                    if ((not isinstance(filenames, dict))
                            or filenames[procvar] == filenames[var]):
                        sameGrid = True
                    elif isinstance(filenames[procvar], dict):
                        sameGrid = True
                        for dim in ['lon', 'lat', 'depth']:
                            if dim in dimensions:
                                sameGrid *= filenames[procvar][
                                    dim] == filenames[var][dim]
                    if sameGrid:
                        grid = fields[procvar].grid
                        kwargs['dataFiles'] = fields[procvar].dataFiles
                        break
            fields[var] = Field.from_netcdf(
                paths, (var, name),
                dims,
                inds,
                grid=grid,
                mesh=mesh,
                timestamps=timestamps,
                allow_time_extrapolation=allow_time_extrapolation,
                time_periodic=time_periodic,
                deferred_load=deferred_load,
                **kwargs)
        u = fields.pop('U', None)
        v = fields.pop('V', None)
        return cls(u, v, fields=fields)
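A hedged usage sketch of FieldSet.from_netcdf; the file paths, variable names and dimension names below are illustrative assumptions only.

# Hypothetical call: build a FieldSet from NetCDF files matched by wildcards.
from parcels import FieldSet

filenames = {'U': 'ocean_uvel_*.nc',     # wildcards resolve to multiple files per variable
             'V': 'ocean_vvel_*.nc'}
variables = {'U': 'uo', 'V': 'vo'}       # variable names inside the NetCDF files
dimensions = {'lon': 'longitude', 'lat': 'latitude',
              'depth': 'depth', 'time': 'time'}

fieldset = FieldSet.from_netcdf(filenames, variables, dimensions,
                                mesh='spherical', deferred_load=True)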
Example #26
    def convert_pset_to_dict(self, pset, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.
        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        returns two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size == 0:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)
            else:
                if deleted_only:
                    pset_towrite = pset
                elif pset[0].dt > 0:
                    pset_towrite = [
                        p for p in pset
                        if time <= p.time < time + p.dt and np.isfinite(p.id)
                    ]
                else:
                    pset_towrite = [
                        p for p in pset
                        if time + p.dt < p.time <= time and np.isfinite(p.id)
                    ]
                if len(pset_towrite) > 0:
                    for var in self.var_names:
                        data_dict[var] = np.array(
                            [getattr(p, var) for p in pset_towrite])
                    self.maxid_written = np.max(
                        [self.maxid_written,
                         np.max(data_dict['id'])])

                pset_errs = [
                    p for p in pset_towrite
                    if p.state != ErrorCode.Delete and abs(time -
                                                           p.time) > 1e-3
                ]
                for p in pset_errs:
                    logger.warning_once(
                        'time argument in pfile.write() is %g, but a particle has time %g.'
                        % (time, p.time))

                if time not in self.time_written:
                    self.time_written.append(time)

                if len(self.var_names_once) > 0:
                    first_write = [
                        p for p in pset if (p.id not in self.written_once)
                        and _is_particle_started_yet(p, time)
                    ]
                    data_dict_once['id'] = np.array(
                        [p.id for p in first_write])
                    for var in self.var_names_once:
                        data_dict_once[var] = np.array(
                            [getattr(p, var) for p in first_write])
                    self.written_once += [p.id for p in first_write]

            if not deleted_only:
                self.lasttime_written = time

        return data_dict, data_dict_once
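A brief sketch of how the two returned dictionaries might be inspected; 'pfile' (a ParticleFile instance) and 'pset' (a ParticleSet) are assumed to exist already.

# Hypothetical inspection of the per-timestep and write-once dictionaries.
data_dict, data_dict_once = pfile.convert_pset_to_dict(pset, time=0.)
for var, values in data_dict.items():        # variables written every outputdt
    print(var, values.shape)
for var, values in data_dict_once.items():   # variables written only once per particle
    print(var, values.shape)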
Example #27
def plotfield(field,
              show_time=None,
              domain=None,
              depth_level=0,
              projection=None,
              land=True,
              vmin=None,
              vmax=None,
              savefile=None,
              **kwargs):
    """Function to plot a Parcels Field

    :param show_time: Time at which to show the Field
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param depth_level: depth level to be plotted (default 0)
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """

    if type(field) is VectorField:
        spherical = field.U.grid.mesh == 'spherical'
        field = [field.U, field.V]
        plottype = 'vector'
    elif type(field) is Field:
        spherical = field.grid.mesh == 'spherical'
        field = [field]
        plottype = 'scalar'
    else:
        raise RuntimeError('field needs to be a Field or VectorField object')

    if field[0].grid.gtype in [
            GridCode.CurvilinearZGrid, GridCode.CurvilinearSGrid
    ]:
        logger.warning(
            'Field.show() does not always correctly determine the domain for curvilinear grids. '
            'Use plotting with caution and perhaps use domain argument as in the NEMO 3D tutorial'
        )

    plt, fig, ax, cartopy = create_parcelsfig_axis(spherical,
                                                   land,
                                                   projection=projection)
    if plt is None:
        return None, None, None, None  # creating axes was not possible

    data = {}
    plotlon = {}
    plotlat = {}
    for i, fld in enumerate(field):
        show_time = fld.grid.time[0] if show_time is None else show_time
        if fld.grid.defer_load:
            fld.fieldset.computeTimeChunk(show_time, 1)
        (idx, periods) = fld.time_index(show_time)
        show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
        if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
            raise TimeExtrapolationError(show_time, field=fld, msg='show_time')

        latN, latS, lonE, lonW = parsedomain(domain, fld)
        if isinstance(fld.grid, CurvilinearGrid):
            plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
        else:
            plotlon[i] = fld.grid.lon[lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN]
        if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
            raise RuntimeError(
                'VectorField needs to be on an A-grid for plotting')
        if fld.grid.time.size > 1:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(
                    fld.temporal_interpolate_fullfield(idx,
                                                       show_time))[depth_level,
                                                                   latS:latN,
                                                                   lonW:lonE]
            else:
                data[i] = np.squeeze(
                    fld.temporal_interpolate_fullfield(idx,
                                                       show_time))[latS:latN,
                                                                   lonW:lonE]
        else:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.data)[depth_level, latS:latN,
                                               lonW:lonE]
            else:
                data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]

    if plottype == 'vector':
        if field[0].interp_method == 'cgrid_velocity':
            logger.warning_once(
                'Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy'
            )
            # average the staggered U component onto A-grid points
            d = np.empty_like(data[0])
            d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
            d[-1, :] = data[0][-1, :]
            data[0] = d
            # average the staggered V component onto A-grid points
            d = np.empty_like(data[1])
            d[:, :-1] = (data[1][:, :-1] + data[1][:, 1:]) / 2.
            d[:, -1] = data[1][:, -1]
            data[1] = d

        spd = data[0]**2 + data[1]**2
        speed = np.where(spd > 0, np.sqrt(spd), 0)
        vmin = speed.min() if vmin is None else vmin
        vmax = speed.max() if vmax is None else vmax
        if isinstance(field[0].grid, CurvilinearGrid):
            x, y = plotlon[0], plotlat[0]
        else:
            x, y = np.meshgrid(plotlon[0], plotlat[0])
        u = np.where(speed > 0., data[0] / speed, 0)
        v = np.where(speed > 0., data[1] / speed, 0)
        if cartopy:
            cs = ax.quiver(np.asarray(x),
                           np.asarray(y),
                           np.asarray(u),
                           np.asarray(v),
                           speed,
                           cmap=plt.cm.gist_ncar,
                           clim=[vmin, vmax],
                           scale=50,
                           transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.quiver(x,
                           y,
                           u,
                           v,
                           speed,
                           cmap=plt.cm.gist_ncar,
                           clim=[vmin, vmax],
                           scale=50)
    else:
        vmin = data[0].min() if vmin is None else vmin
        vmax = data[0].max() if vmax is None else vmax
        assert len(data[0].shape) == 2
        if field[0].interp_method == 'cgrid_tracer':
            d = data[0][1:, 1:]
        elif field[0].interp_method == 'cgrid_velocity':
            if field[0].fieldtype == 'U':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
            elif field[0].fieldtype == 'V':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
            else:  # W
                d = data[0][1:, 1:]
        else:  # if A-grid
            d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] +
                 data[0][1:, 1:]) / 4.
            d = np.where(data[0][:-1, :-1] == 0, 0, d)
            d = np.where(data[0][1:, :-1] == 0, 0, d)
            d = np.where(data[0][1:, 1:] == 0, 0, d)
            d = np.where(data[0][:-1, 1:] == 0, 0, d)
        if cartopy:
            cs = ax.pcolormesh(plotlon[0],
                               plotlat[0],
                               d,
                               transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d)

    if cartopy is None:
        ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
        ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
    elif domain is not None:
        ax.set_extent([
            np.nanmin(plotlon[0]),
            np.nanmax(plotlon[0]),
            np.nanmin(plotlat[0]),
            np.nanmax(plotlat[0])
        ],
                      crs=cartopy.crs.PlateCarree())
    cs.cmap.set_over('k')
    cs.cmap.set_under('w')
    cs.set_clim(vmin, vmax)

    cartopy_colorbar(cs, plt, fig, ax)

    timestr = parsetimestr(field[0].grid.time_origin, show_time)
    titlestr = kwargs.pop('titlestr', '')
    if field[0].grid.zdim > 1:
        if field[0].grid.gtype in [
                GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid
        ]:
            gphrase = 'depth'
            depth_or_level = field[0].grid.depth[depth_level]
        else:
            gphrase = 'level'
            depth_or_level = depth_level
        depthstr = ' at %s %g ' % (gphrase, depth_or_level)
    else:
        depthstr = ''
    if plottype == 'vector':
        ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
    else:
        ax.set_title(titlestr + field[0].name + depthstr + timestr)

    if not spherical:
        ax.set_xlabel('Zonal distance [m]')
        ax.set_ylabel('Meridional distance [m]')

    plt.draw()

    if savefile:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()

    return plt, fig, ax, cartopy
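A sketch of a typical plotfield call, assuming an existing 'fieldset'; the domain bounds, time and colour limits are illustrative assumptions only.

# Hypothetical call: plot the zonal velocity over a sub-domain and save it.
plt, fig, ax, cartopy = plotfield(fieldset.U,
                                  show_time=0,                     # seconds since the fieldset time origin (assumption)
                                  domain={'N': 60, 'S': 30, 'E': 20, 'W': -40},
                                  vmin=0., vmax=1.5,
                                  savefile='u_field')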
Example #28
    def execute(self, pset, endtime, dt, recovery=None, output_file=None, execute_once=False):
        """Execute this Kernel over a ParticleSet for several timesteps"""
        particles = pset.data_accessor()
        for p in range(pset.size):
            particles.set_index(p)
            particles.set_state(StateCode.Evaluate)

        if abs(dt) < 1e-6 and not execute_once:
            logger.warning_once("'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)")

        def remove_deleted(pset):
            """Utility to remove all particles that signalled deletion"""
            indices = pset.particle_data['state'] == OperationCode.Delete
            if np.count_nonzero(indices) > 0 and output_file is not None:
                output_file.write(pset, endtime, deleted_only=indices)
            pset.remove_booleanvector(indices)

        if recovery is None:
            recovery = {}
        elif ErrorCode.ErrorOutOfBounds in recovery and ErrorCode.ErrorThroughSurface not in recovery:
            recovery[ErrorCode.ErrorThroughSurface] = recovery[ErrorCode.ErrorOutOfBounds]
        recovery_map = recovery_base_map.copy()
        recovery_map.update(recovery)

        if pset.fieldset is not None:
            for g in pset.fieldset.gridset.grids:
                if len(g.load_chunk) > 0:  # not the case if a field is not called in the kernel
                    g.load_chunk = np.where(g.load_chunk == 2, 3, g.load_chunk)

        # Execute the kernel over the particle set
        if self.ptype.uses_jit:
            self.execute_jit(pset, endtime, dt)
        else:
            self.execute_python(pset, endtime, dt)

        # Remove all particles that signalled deletion
        remove_deleted(pset)

        # Identify particles that threw errors
        error_particles = np.isin(pset.particle_data['state'], [StateCode.Success, StateCode.Evaluate], invert=True)
        while np.any(error_particles):
            # Apply recovery kernel
            for p in np.where(error_particles)[0]:
                particles.set_index(p)
                if particles.state == OperationCode.StopExecution:
                    return
                if particles.state == OperationCode.Repeat:
                    particles.set_state(StateCode.Evaluate)
                elif particles.state in recovery_map:
                    recovery_kernel = recovery_map[particles.state]
                    particles.set_state(StateCode.Success)
                    recovery_kernel(particles, self.fieldset, particles.time)
                    if particles.state == StateCode.Success:
                        particles.set_state(StateCode.Evaluate)
                else:
                    logger.warning_once('Deleting particle because of bug in #749 and #737')
                    particles.delete()

            # Remove all particles that signalled deletion
            remove_deleted(pset)

            # Execute core loop again to continue interrupted particles
            if self.ptype.uses_jit:
                self.execute_jit(pset, endtime, dt)
            else:
                self.execute_python(pset, endtime, dt)

            error_particles = np.isin(pset.particle_data['state'], [StateCode.Success, StateCode.Evaluate], invert=True)
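A sketch of driving this Kernel.execute with a recovery map; 'kernel', 'pset' and the DeleteParticle recovery function are assumptions for illustration. In practice this is usually called through ParticleSet.execute, but the direct call shows how the 'recovery' argument is used.

# Hypothetical recovery kernel: silently remove particles that left the domain.
from parcels import ErrorCode

def DeleteParticle(particle, fieldset, time):
    particle.delete()

kernel.execute(pset, endtime=86400., dt=60.,
               recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle},
               output_file=None)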
Example #29
    def __init__(self, fieldset, ptype, pyfunc=None, funcname=None,
                 funccode=None, py_ast=None, funcvars=None, c_include="", delete_cfiles=True):
        self.fieldset = fieldset
        self.ptype = ptype
        self._lib = None
        self.delete_cfiles = delete_cfiles
        self._cleanup_files = None
        self._cleanup_lib = None

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            warning = False
            if isinstance(fieldset.W, Field) and fieldset.W.creation_log != 'from_nemo' and \
               fieldset.W._scaling_factor is not None and fieldset.W._scaling_factor > 0:
                warning = True
            if type(fieldset.W) in [SummedField, NestedField]:
                for f in fieldset.W:
                    if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:
                        warning = True
            if warning:
                logger.warning_once('Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n'
                                    '         If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)')
        elif pyfunc is AdvectionAnalytical:
            if ptype.uses_jit:
                raise NotImplementedError('Analytical Advection only works in Scipy mode')
            if fieldset.U.interp_method != 'cgrid_velocity':
                raise NotImplementedError('Analytical Advection only works with C-grids')
            if fieldset.U.grid.gtype not in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
                raise NotImplementedError('Analytical Advection only works with Z-grids in the vertical')

        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['ParcelsRandom'] = globals()['ParcelsRandom']
                user_ctx['random'] = globals()['random']
                user_ctx['StateCode'] = globals()['StateCode']
                user_ctx['OperationCode'] = globals()['OperationCode']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning("Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = parse("")
            py_mod.body = [self.py_ast]
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc

        if version_info[0] < 3:
            numkernelargs = len(inspect.getargspec(self.pyfunc).args)
        else:
            numkernelargs = len(inspect.getfullargspec(self.pyfunc).args)

        assert numkernelargs == 3, \
            'Since Parcels v2.0, kernels take exactly 3 arguments (particle, fieldset, time), and the argument order in field interpolation is time, depth, lat, lon.'

        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for f in self.vector_field_args.values():
                Wname = f.W.ccode_name if f.W else 'not_defined'
                for sF_name, sF_component in zip([f.U.ccode_name, f.V.ccode_name, Wname], ['U', 'V', 'W']):
                    if sF_name not in self.field_args:
                        if sF_name != 'not_defined':
                            self.field_args[sF_name] = getattr(f, sF_component)
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args, self.const_args,
                                          kernel_ccode, c_include_str)
            if MPI:
                mpi_comm = MPI.COMM_WORLD
                mpi_rank = mpi_comm.Get_rank()
                basename = path.join(get_cache_dir(), self._cache_key) if mpi_rank == 0 else None
                basename = mpi_comm.bcast(basename, root=0)
                basename = basename + "_%d" % mpi_rank
            else:
                basename = path.join(get_cache_dir(), "%s_0" % self._cache_key)

            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename, 'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
    def execute(self,
                pyfunc=AdvectionRK4,
                endtime=None,
                runtime=None,
                dt=1.,
                moviedt=None,
                recovery=None,
                output_file=None,
                movie_background_field=None,
                verbose_progress=None,
                postIterationCallbacks=None,
                callbackdt=None):
        """Execute a given kernel function over the particle set for
        multiple timesteps. Optionally also provide sub-timestepping
        for particle output.

        :param pyfunc: Kernel function to execute. This can be the name of a
                       defined Python function or a :class:`parcels.kernel.Kernel` object.
                       Kernels can be concatenated using the + operator
        :param endtime: End time for the timestepping loop.
                        It is either a datetime object or a positive double.
        :param runtime: Length of the timestepping loop. Use instead of endtime.
                        It is either a timedelta object or a positive double.
        :param dt: Timestep interval to be passed to the kernel.
                   It is either a timedelta object or a double.
                   Use a negative value for a backward-in-time simulation.
        :param moviedt:  Interval for inner sub-timestepping (leap), which dictates
                         the update frequency of animation.
                         It is either a timedelta object or a positive double.
                         None value means no animation.
        :param output_file: :mod:`parcels.particlefile.ParticleFile` object for particle output
        :param recovery: Dictionary with additional `:mod:parcels.tools.error`
                         recovery kernels to allow custom recovery behaviour in case of
                         kernel errors.
        :param movie_background_field: field plotted as background in the movie if moviedt is set.
                                       'vector' shows the velocity as a vector field.
        :param verbose_progress: Boolean for providing a progress bar for the kernel execution loop.
        :param postIterationCallbacks: (Optional) Array of functions that are to be called after each iteration (post-process, non-Kernel)
        :param callbackdt: (Optional, in conjunction with 'postIterationCallbacks') timestep interval at which, at the latest, to interrupt the running kernel and invoke the post-iteration callbacks from 'postIterationCallbacks'
        """

        # check if pyfunc has changed since last compile. If so, recompile
        if self.kernel is None or (self.kernel.pyfunc is not pyfunc
                                   and self.kernel is not pyfunc):
            # Generate and store Kernel
            if isinstance(pyfunc, Kernel):
                self.kernel = pyfunc
            else:
                self.kernel = self.Kernel(pyfunc)
            # Prepare JIT kernel execution
            if self.ptype.uses_jit:
                self.kernel.remove_lib()
                cppargs = ['-DDOUBLE_COORD_VARIABLES'
                           ] if self.lonlatdepth_dtype == np.float64 else None
                self.kernel.compile(compiler=GNUCompiler(cppargs=cppargs))
                self.kernel.load_lib()

        # Convert all time variables to seconds
        if isinstance(endtime, delta):
            raise RuntimeError('endtime must be either a datetime or a double')
        if isinstance(endtime, datetime):
            endtime = np.datetime64(endtime)
        if isinstance(endtime, np.datetime64):
            if self.time_origin.calendar is None:
                raise NotImplementedError(
                    'If fieldset.time_origin is not a date, execution endtime must be a double'
                )
            endtime = self.time_origin.reltime(endtime)
        if isinstance(runtime, delta):
            runtime = runtime.total_seconds()
        if isinstance(dt, delta):
            dt = dt.total_seconds()
        outputdt = output_file.outputdt if output_file else np.infty
        if isinstance(outputdt, delta):
            outputdt = outputdt.total_seconds()
        if isinstance(moviedt, delta):
            moviedt = moviedt.total_seconds()
        if isinstance(callbackdt, delta):
            callbackdt = callbackdt.total_seconds()

        assert runtime is None or runtime >= 0, 'runtime must not be negative'
        assert outputdt is None or outputdt >= 0, 'outputdt must not be negative'
        assert moviedt is None or moviedt >= 0, 'moviedt must not be negative'

        mintime, maxtime = self.fieldset.gridset.dimrange(
            'time_full') if self.fieldset is not None else (0, 1)
        if np.any(np.isnan(self.particle_data['time'])):
            self.particle_data['time'][np.isnan(
                self.particle_data['time'])] = mintime if dt >= 0 else maxtime

        # Derive _starttime and endtime from arguments or fieldset defaults
        if runtime is not None and endtime is not None:
            raise RuntimeError(
                'Only one of (endtime, runtime) can be specified')
        _starttime = self.particle_data['time'].min(
        ) if dt >= 0 else self.particle_data['time'].max()
        if self.repeatdt is not None and self.repeat_starttime is None:
            self.repeat_starttime = _starttime
        if runtime is not None:
            endtime = _starttime + runtime * np.sign(dt)
        elif endtime is None:
            mintime, maxtime = self.fieldset.gridset.dimrange('time_full')
            endtime = maxtime if dt >= 0 else mintime

        execute_once = False
        if abs(endtime - _starttime) < 1e-5 or dt == 0 or runtime == 0:
            dt = 0
            runtime = 0
            endtime = _starttime
            logger.warning_once(
                "dt or runtime are zero, or endtime is equal to Particle.time. "
                "The kernels will be executed once, without incrementing time")
            execute_once = True

        self.particle_data['dt'][:] = dt

        # First write output_file, because particles could have been added
        if output_file:
            output_file.write(self, _starttime)
        if moviedt:
            self.show(field=movie_background_field,
                      show_time=_starttime,
                      animation=True)

        if moviedt is None:
            moviedt = np.infty
        if callbackdt is None:
            interrupt_dts = [np.infty, moviedt, outputdt]
            if self.repeatdt is not None:
                interrupt_dts.append(self.repeatdt)
            callbackdt = np.min(np.array(interrupt_dts))
        time = _starttime
        if self.repeatdt:
            next_prelease = self.repeat_starttime + (
                abs(time - self.repeat_starttime) // self.repeatdt +
                1) * self.repeatdt * np.sign(dt)
        else:
            next_prelease = np.infty if dt > 0 else -np.infty
        next_output = time + outputdt if dt > 0 else time - outputdt
        next_movie = time + moviedt if dt > 0 else time - moviedt
        next_callback = time + callbackdt if dt > 0 else time - callbackdt
        next_input = self.fieldset.computeTimeChunk(
            time, np.sign(dt)) if self.fieldset is not None else np.inf

        tol = 1e-12
        if verbose_progress is None:
            walltime_start = time_module.time()
        if verbose_progress:
            pbar = self.__create_progressbar(_starttime, endtime)
        while (time < endtime and dt > 0) or (time > endtime
                                              and dt < 0) or dt == 0:
            if verbose_progress is None and time_module.time(
            ) - walltime_start > 10:
                # Showing progressbar if runtime > 10 seconds
                if output_file:
                    logger.info('Temporary output files are stored in %s.' %
                                output_file.tempwritedir_base)
                    logger.info(
                        'You can use "parcels_convert_npydir_to_netcdf %s" to convert these '
                        'to a NetCDF file during the run.' %
                        output_file.tempwritedir_base)
                pbar = self.__create_progressbar(_starttime, endtime)
                verbose_progress = True
            if dt > 0:
                time = min(next_prelease, next_input, next_output, next_movie,
                           next_callback, endtime)
            else:
                time = max(next_prelease, next_input, next_output, next_movie,
                           next_callback, endtime)
            self.kernel.execute(self,
                                endtime=time,
                                dt=dt,
                                recovery=recovery,
                                output_file=output_file,
                                execute_once=execute_once)
            if abs(time - next_prelease) < tol:
                pset_new = ParticleSet(
                    fieldset=self.fieldset,
                    time=time,
                    lon=self.repeatlon,
                    lat=self.repeatlat,
                    depth=self.repeatdepth,
                    pclass=self.repeatpclass,
                    lonlatdepth_dtype=self.lonlatdepth_dtype,
                    partitions=False,
                    pid_orig=self.repeatpid,
                    **self.repeatkwargs)
                p = pset_new.data_accessor()
                for i in range(pset_new.size):
                    p.set_index(i)
                    p.dt = dt
                self.add(pset_new)
                next_prelease += self.repeatdt * np.sign(dt)
            if abs(time - next_output) < tol:
                if output_file:
                    output_file.write(self, time)
                next_output += outputdt * np.sign(dt)
            if abs(time - next_movie) < tol:
                self.show(field=movie_background_field,
                          show_time=time,
                          animation=True)
                next_movie += moviedt * np.sign(dt)
            # ==== insert post-process here to also allow for memory clean-up via external func ==== #
            if abs(time - next_callback) < tol:
                if postIterationCallbacks is not None:
                    for extFunc in postIterationCallbacks:
                        extFunc()
                next_callback += callbackdt * np.sign(dt)
            if time != endtime:
                next_input = self.fieldset.computeTimeChunk(time, dt)
            if dt == 0:
                break
            if verbose_progress:
                pbar.update(abs(time - _starttime))

        if output_file:
            output_file.write(self, time)
        if verbose_progress:
            pbar.finish()
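A possible end-to-end sketch of ParticleSet.execute, assuming a previously built 'fieldset'; the release positions, output file name and timestep choices are illustrative assumptions.

# Hypothetical run: advect two particles for two days and write hourly output.
from datetime import timedelta as delta
from parcels import ParticleSet, JITParticle, AdvectionRK4

pset = ParticleSet(fieldset=fieldset, pclass=JITParticle,
                   lon=[3.0, 3.5], lat=[52.0, 52.5])
output_file = pset.ParticleFile(name='trajectories.nc', outputdt=delta(hours=1))
pset.execute(AdvectionRK4,
             runtime=delta(days=2),
             dt=delta(minutes=10),
             output_file=output_file)
output_file.export()   # consolidate the temporary output into the NetCDF file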