Code example #1
File: grid.py, Project: scw/parcels
    def __init__(self,
                 lon,
                 lat,
                 depth,
                 time=None,
                 time_origin=None,
                 mesh='flat'):
        CurvilinearGrid.__init__(self, lon, lat, time, time_origin, mesh)
        assert (isinstance(depth, np.ndarray) and len(depth.shape)
                in [3, 4]), 'depth is not a 3D or 4D numpy array'

        self.gtype = GridCode.CurvilinearSGrid
        self.depth = depth
        self.zdim = self.depth.shape[-3]
        self.z4d = len(self.depth.shape) == 4
        if self.z4d:
            assert self.tdim == self.depth.shape[
                0], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.xdim == self.depth.shape[
                -1], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[
                -2], 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]'
        else:
            assert self.xdim == self.depth.shape[
                -1], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
            assert self.ydim == self.depth.shape[
                -2], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]'
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
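
The asserts above encode the expected depth layout: [zdim, ydim, xdim] for a static S-grid and [tdim, zdim, ydim, xdim] for a time-varying one. A numpy-only sketch of those layouts, with illustrative dimension sizes (not taken from parcels):

import numpy as np

# Illustrative dimension sizes; a curvilinear grid carries 2D lon/lat arrays.
tdim, zdim, ydim, xdim = 2, 10, 20, 30
depth3d = np.zeros((zdim, ydim, xdim), dtype=np.float32)        # z4d == False
depth4d = np.zeros((tdim, zdim, ydim, xdim), dtype=np.float32)  # z4d == True

# The constructor reads zdim from axis -3 in both layouts
# and checks ydim/xdim against the last two axes.
assert depth3d.shape[-3] == zdim and depth3d.shape[-2:] == (ydim, xdim)
assert depth4d.shape[-3] == zdim and depth4d.shape[-2:] == (ydim, xdim)
assert depth4d.shape[0] == tdim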
Code example #2
    def advancetime(self, fieldset_new):
        """Replace oldest time on FieldSet with new FieldSet
        :param fieldset_new: FieldSet snapshot with which the oldest time has to be replaced"""

        logger.warning_once("Fieldset.advancetime() is deprecated.\n \
                             Parcels deals automatically with loading only 3 time steps simultaneously\
                             such that the total allocated memory remains limited."
                            )

        advance = 0
        for gnew in fieldset_new.gridset.grids:
            gnew.advanced = False

        for fnew in fieldset_new.fields:
            if isinstance(fnew, VectorField):
                continue
            f = getattr(self, fnew.name)
            gnew = fnew.grid
            if not gnew.advanced:
                g = f.grid
                advance2 = g.advancetime(gnew)
                if advance2 * advance < 0:
                    raise RuntimeError(
                        "Some Fields of the Fieldset are advanced forward and other backward"
                    )
                advance = advance2
                gnew.advanced = True
            f.advancetime(fnew, advance == 1)
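
The advance2 * advance < 0 test guards against grids being stepped in opposite time directions. A stand-alone sketch of that check, assuming each grid's advancetime() returns +1 (advanced forward), -1 (advanced backward) or 0 (unchanged):

advance = 0
for advance2 in (1, 0, 1):      # hypothetical per-grid return values
    if advance2 * advance < 0:  # one grid went forward, another backward
        raise RuntimeError("Some Fields of the Fieldset are advanced forward and other backward")
    advance = advance2
print(advance)  # 1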
Code example #3
File: grid.py, Project: scw/parcels
    def add_periodic_halo(self, zonal, meridional, halosize=5):
        """Add a 'halo' to the Grid, through extending the Grid (and lon/lat)
        similarly to the halo created for the Fields

        :param zonal: Create a halo in zonal direction (boolean)
        :param meridional: Create a halo in meridional direction (boolean)
        :param halosize: size of the halo (in grid points). Default is 5 grid points
        """
        if zonal:
            lonshift = (self.lon[-1] - 2 * self.lon[0] + self.lon[1])
            if not np.allclose(self.lon[1] - self.lon[0],
                               self.lon[-1] - self.lon[-2]):
                logger.warning_once(
                    "The zonal halo is located at the east and west of current grid, with a dx = lon[1]-lon[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[1]-lon[0] != lon[-1]-lon[-2]. Is the halo computed as you expect?"
                )
            self.lon = np.concatenate(
                (self.lon[-halosize:] - lonshift, self.lon,
                 self.lon[0:halosize] + lonshift))
            self.xdim = self.lon.size
            self.zonal_periodic = True
            self.zonal_halo = halosize
        if meridional:
            if not np.allclose(self.lat[1] - self.lat[0],
                               self.lat[-1] - self.lat[-2]):
                logger.warning_once(
                    "The meridional halo is located at the north and south of current grid, with a dy = lat[1]-lat[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1]-lat[0] != lat[-1]-lat[-2]. Is the halo computed as you expect?"
                )
            latshift = (self.lat[-1] - 2 * self.lat[0] + self.lat[1])
            self.lat = np.concatenate(
                (self.lat[-halosize:] - latshift, self.lat,
                 self.lat[0:halosize] + latshift))
            self.ydim = self.lat.size
            self.meridional_halo = halosize
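
The concatenation above builds the halo by copying halosize points from each end of the axis and shifting them by one full period (domain span plus one grid spacing). A numpy-only sketch on a hypothetical 1-D zonal axis:

import numpy as np

lon = np.arange(0., 360., 10., dtype=np.float32)    # 36 points, dx = 10
halosize = 5
lonshift = lon[-1] - 2 * lon[0] + lon[1]             # = 360, one full period
lon_halo = np.concatenate((lon[-halosize:] - lonshift,
                           lon,
                           lon[0:halosize] + lonshift))
print(lon_halo[:6])   # -50, -40, -30, -20, -10, 0
print(lon_halo[-6:])  # 350, 360, 370, 380, 390, 400
print(lon_halo.size)  # 36 + 2 * halosize = 46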
Code example #4
File: grid.py, Project: raymondedwards95/parcels
    def __init__(self, lon, lat, depth, time=None, time_origin=0, mesh='flat'):
        CurvilinearGrid.__init__(self, lon, lat, time, time_origin, mesh)
        assert(isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4]), 'depth is not a 3D or 4D numpy array'

        self.gtype = GridCode.CurvilinearSGrid
        self.depth = depth
        self.zdim = self.depth.shape[2]
        self.z4d = len(self.depth.shape) == 4
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
Code example #5
File: grid.py, Project: raymondedwards95/parcels
    def __init__(self, lon, lat, depth=None, time=None, time_origin=0, mesh='flat'):
        CurvilinearGrid.__init__(self, lon, lat, time, time_origin, mesh)
        if isinstance(depth, np.ndarray):
            assert(len(depth.shape) == 1), 'depth is not a vector'

        self.gtype = GridCode.CurvilinearZGrid
        self.depth = np.zeros(1, dtype=np.float32) if depth is None else depth
        self.zdim = self.depth.size
        self.z4d = -1  # only for SGrid
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
Code example #6
File: grid.py, Project: subond/parcels
    def __init__(self, name, lon, lat, depth=None, time=None, time_origin=0, mesh='flat'):
        assert(isinstance(lon, np.ndarray) and len(lon.shape) == 1), 'lon is not a numpy vector'
        assert(isinstance(lat, np.ndarray) and len(lat.shape) == 1), 'lat is not a numpy vector'
        assert(isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4]), 'depth is not a 3D or 4D numpy array'
        assert (isinstance(time, np.ndarray) or not time), 'time is not a numpy array'
        if isinstance(time, np.ndarray):
            assert(len(time.shape) == 1), 'time is not a vector'

        self.name = name
        self.gtype = GridCode.RectilinearSGrid
        self.lon = lon
        self.lat = lat
        self.depth = depth
        self.z4d = len(depth.shape) == 4
        self.time = np.zeros(1, dtype=np.float64) if time is None else time
        if not self.lon.dtype == np.float32:
            logger.warning_once("Casting lon data to np.float32")
            self.lon = self.lon.astype(np.float32)
        if not self.lat.dtype == np.float32:
            logger.warning_once("Casting lat data to np.float32")
            self.lat = self.lat.astype(np.float32)
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
        if not self.time.dtype == np.float64:
            logger.warning_once("Casting time data to np.float64")
            self.time = self.time.astype(np.float64)
        self.time_origin = time_origin
        self.mesh = mesh
Code example #7
File: grid.py, Project: scw/parcels
    def __init__(self, lon, lat, time, time_origin, mesh):
        assert (isinstance(lon, np.ndarray)
                and len(lon.shape) == 1), 'lon is not a numpy vector'
        assert (isinstance(lat, np.ndarray)
                and len(lat.shape) == 1), 'lat is not a numpy vector'
        assert (isinstance(time, np.ndarray)
                or not time), 'time is not a numpy array'
        if isinstance(time, np.ndarray):
            assert (len(time.shape) == 1), 'time is not a vector'

        Grid.__init__(self, lon, lat, time, time_origin, mesh)
        self.xdim = self.lon.size
        self.ydim = self.lat.size
        self.tdim = self.time.size
        if self.lat[-1] < self.lat[0]:
            self.lat = np.flip(self.lat, axis=0)
            self.lat_flipped = True
            logger.warning_once(
                "Flipping lat data from North-South to South-North")
Code example #8
File: grid.py, Project: scw/parcels
    def __init__(self, lon, lat, time, time_origin, mesh):
        self.lon = lon
        self.lat = lat
        self.time = np.zeros(1, dtype=np.float64) if time is None else time
        if not self.lon.dtype == np.float32:
            logger.warning_once("Casting lon data to np.float32")
            self.lon = self.lon.astype(np.float32)
        if not self.lat.dtype == np.float32:
            logger.warning_once("Casting lat data to np.float32")
            self.lat = self.lat.astype(np.float32)
        if not self.time.dtype == np.float64:
            assert isinstance(
                self.time[0],
                (np.integer, np.floating, float,
                 int)), 'Time vector must be an array of int or floats'
            logger.warning_once("Casting time data to np.float64")
            self.time = self.time.astype(np.float64)
        self.time_origin = time_origin
        if self.time_origin:
            if isinstance(self.time_origin, datetime.datetime):
                self.time_origin = np.datetime64(self.time_origin)
            assert isinstance(
                self.time_origin, np.datetime64
            ), 'If defined, time_origin must be a datetime.datetime or a np.datetime64'
        self.mesh = mesh
        self.cstruct = None
        self.cell_edge_sizes = {}
        self.zonal_periodic = False
        self.zonal_halo = 0
        self.meridional_halo = 0
        self.lat_flipped = False
        self.defer_load = False
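
The time_origin handling above accepts either a datetime.datetime (converted on the spot) or an existing np.datetime64. A stand-alone sketch of that normalisation, with an illustrative origin:

import datetime
import numpy as np

time_origin = datetime.datetime(2000, 1, 1)          # illustrative origin
if time_origin:
    if isinstance(time_origin, datetime.datetime):
        time_origin = np.datetime64(time_origin)
    assert isinstance(time_origin, np.datetime64), \
        'If defined, time_origin must be a datetime.datetime or a np.datetime64'
print(repr(time_origin))  # numpy.datetime64('2000-01-01T00:00:00.000000')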
Code example #9
File: grid.py, Project: karolinedubin/parcels
    def __init__(self, lon, lat, time, time_origin, mesh):
        assert (isinstance(lon, np.ndarray)
                and len(lon.shape) == 1), 'lon is not a numpy vector'
        assert (isinstance(lat, np.ndarray)
                and len(lat.shape) == 1), 'lat is not a numpy vector'
        assert (isinstance(time, np.ndarray)
                or not time), 'time is not a numpy array'
        if isinstance(time, np.ndarray):
            assert (len(time.shape) == 1), 'time is not a vector'

        self.lon = lon
        self.lat = lat
        self.time = np.zeros(1, dtype=np.float64) if time is None else time
        if not self.lon.dtype == np.float32:
            logger.warning_once("Casting lon data to np.float32")
            self.lon = self.lon.astype(np.float32)
        if not self.lat.dtype == np.float32:
            logger.warning_once("Casting lat data to np.float32")
            self.lat = self.lat.astype(np.float32)
        if not self.time.dtype == np.float64:
            logger.warning_once("Casting time data to np.float64")
            self.time = self.time.astype(np.float64)
        self.xdim = self.lon.size
        self.ydim = self.lat.size
        self.tdim = self.time.size
        self.time_origin = time_origin
        self.mesh = mesh
        self.cstruct = None
Code example #10
    def __init__(self,
                 fieldset,
                 ptype,
                 pyfunc=None,
                 funcname=None,
                 funccode=None,
                 py_ast=None,
                 funcvars=None,
                 c_include=""):
        self.fieldset = fieldset
        self.ptype = ptype

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            logger.warning_once(
                'Note that positive vertical velocity is assumed DOWNWARD by AdvectionRK4_3D'
            )
        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['random'] = globals()['random']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning(
                    "Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = Module(body=[self.py_ast])
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc
        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            self.field_args = kernelgen.field_args
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            if 'UV' in self.field_args:
                fieldset = self.field_args['UV'].fieldset
                for f in ['U', 'V', 'cosU', 'sinU', 'cosV', 'sinV']:
                    if f not in self.field_args:
                        try:
                            self.field_args[f] = getattr(fieldset, f)
                        except:
                            continue
                del self.field_args['UV']
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args,
                                          self.const_args, kernel_ccode,
                                          c_include_str)

            basename = path.join(get_cache_dir(), self._cache_key)
            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename,
                                       'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
        self._lib = None
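
When pyfunc is given, the kernel source and AST come from inspect.getsource and ast.parse, as in the funccode and py_ast lines above. A self-contained sketch of that step, using textwrap.dedent as a stand-in for parcels' fix_indentation helper and a hypothetical SampleKernel:

import ast
import inspect
import textwrap

def SampleKernel(particle, fieldset, time, dt):  # hypothetical kernel function
    particle.lon += 0.1 * dt

funcname = SampleKernel.__name__
funcvars = list(SampleKernel.__code__.co_varnames)
funccode = inspect.getsource(SampleKernel.__code__)
py_ast = ast.parse(textwrap.dedent(funccode)).body[0]
print(funcname, type(py_ast).__name__, funcvars)
# SampleKernel FunctionDef ['particle', 'fieldset', 'time', 'dt']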
Code example #11
    def __init__(self,
                 name,
                 data,
                 lon,
                 lat,
                 depth=None,
                 time=None,
                 transpose=False,
                 vmin=None,
                 vmax=None,
                 time_origin=0,
                 units=None,
                 interp_method='linear',
                 allow_time_extrapolation=None):
        self.name = name
        self.data = data
        self.lon = lon
        self.lat = lat
        self.depth = np.zeros(1, dtype=np.float32) if depth is None else depth
        self.time = np.zeros(1, dtype=np.float64) if time is None else time
        self.time_origin = time_origin
        self.units = units if units is not None else UnitConverter()
        self.interp_method = interp_method
        if allow_time_extrapolation is None:
            self.allow_time_extrapolation = True if time is None else False
        else:
            self.allow_time_extrapolation = allow_time_extrapolation

        # Ensure that field data is the right data type
        if not self.data.dtype == np.float32:
            logger.warning_once("Casting field data to np.float32")
            self.data = self.data.astype(np.float32)
        if not self.lon.dtype == np.float32:
            logger.warning_once("Casting lon data to np.float32")
            self.lon = self.lon.astype(np.float32)
        if not self.lat.dtype == np.float32:
            logger.warning_once("Casting lat data to np.float32")
            self.lat = self.lat.astype(np.float32)
        if not self.depth.dtype == np.float32:
            logger.warning_once("Casting depth data to np.float32")
            self.depth = self.depth.astype(np.float32)
        if not self.time.dtype == np.float64:
            logger.warning_once("Casting time data to np.float64")
            self.time = self.time.astype(np.float64)
        if transpose:
            # Make a copy of the transposed array to enforce
            # C-contiguous memory layout for JIT mode.
            self.data = np.transpose(self.data).copy()
        if self.depth.size > 1:
            self.data = self.data.reshape((self.time.size, self.depth.size,
                                           self.lat.size, self.lon.size))
        else:
            self.data = self.data.reshape(
                (self.time.size, self.lat.size, self.lon.size))

        # Hack around the fact that NaN and ridiculously large values
        # propagate in SciPy's interpolators
        if vmin is not None:
            self.data[self.data < vmin] = 0.
        if vmax is not None:
            self.data[self.data > vmax] = 0.
        self.data[np.isnan(self.data)] = 0.

        # Variable names in JIT code
        self.ccode_data = self.name
        self.ccode_lon = self.name + "_lon"
        self.ccode_lat = self.name + "_lat"
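
The reshape and the vmin/vmax/NaN masking above can be reproduced on dummy data. A sketch of the depth.size == 1 branch, with illustrative sizes and clipping bounds:

import numpy as np

tdim, ydim, xdim = 3, 4, 5
data = np.linspace(-0.5, 1.5, tdim * ydim * xdim).astype(np.float32)
data[0] = np.nan
data = data.reshape((tdim, ydim, xdim))    # (time, lat, lon) layout

vmin, vmax = 0., 1.                        # illustrative clipping range
data[data < vmin] = 0.
data[data > vmax] = 0.
data[np.isnan(data)] = 0.                  # NaNs zeroed, as in the constructor
print(data.shape, float(data.min()) >= 0., float(data.max()) <= 1.)  # (3, 4, 5) True True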
Code example #12
    def __init__(self,
                 name,
                 data,
                 lon=None,
                 lat=None,
                 depth=None,
                 time=None,
                 grid=None,
                 transpose=False,
                 vmin=None,
                 vmax=None,
                 time_origin=0,
                 interp_method='linear',
                 allow_time_extrapolation=None,
                 time_periodic=False):
        self.name = name
        self.data = data
        if grid:
            self.grid = grid
        else:
            self.grid = RectilinearZGrid('auto_gen_grid',
                                         lon,
                                         lat,
                                         depth,
                                         time,
                                         time_origin=time_origin)
        # self.lon, self.lat, self.depth and self.time are not used anymore in parcels.
        # self.grid should be used instead.
        # Those variables are still defined for backwards compatibility with users codes.
        self.lon = self.grid.lon
        self.lat = self.grid.lat
        self.depth = self.grid.depth
        self.time = self.grid.time
        if self.grid.mesh == 'flat' or (name != 'U' and name != 'V'):
            self.units = UnitConverter()
        elif self.grid.mesh == 'spherical' and name == 'U':
            self.units = GeographicPolar()
        elif self.grid.mesh == 'spherical' and name == 'V':
            self.units = Geographic()
        else:
            raise ValueError(
                "Unsupported mesh type. Choose either: 'spherical' or 'flat'")
        self.interp_method = interp_method
        self.fieldset = None
        if allow_time_extrapolation is None:
            self.allow_time_extrapolation = True if time is None else False
        else:
            self.allow_time_extrapolation = allow_time_extrapolation

        self.time_periodic = time_periodic
        if self.time_periodic and self.allow_time_extrapolation:
            logger.warning_once(
                "allow_time_extrapolation and time_periodic cannot be used together.\n \
                                 allow_time_extrapolation is set to False")
            self.allow_time_extrapolation = False

        # Ensure that field data is the right data type
        if not self.data.dtype == np.float32:
            logger.warning_once("Casting field data to np.float32")
            self.data = self.data.astype(np.float32)
        if transpose:
            # Make a copy of the transposed array to enforce
            # C-contiguous memory layout for JIT mode.
            self.data = np.transpose(self.data).copy()
        if self.grid.depth.size > 1 and len(self.grid.depth.shape) == 1:
            self.data = self.data.reshape(
                (self.grid.time.size, self.grid.depth.size, self.grid.lat.size,
                 self.grid.lon.size))
        elif len(self.grid.depth.shape) in [3, 4]:
            self.data = self.data.reshape(
                (self.grid.time.size, self.grid.depth.shape[2],
                 self.grid.lat.size, self.grid.lon.size))
        else:
            self.data = self.data.reshape(
                (self.grid.time.size, self.grid.lat.size, self.grid.lon.size))

        # Hack around the fact that NaN and ridiculously large values
        # propagate in SciPy's interpolators
        if vmin is not None:
            self.data[self.data < vmin] = 0.
        if vmax is not None:
            self.data[self.data > vmax] = 0.
        self.data[np.isnan(self.data)] = 0.

        # Variable names in JIT code
        self.ccode_data = self.name
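
The mesh/name branching above selects a unit converter per velocity component on a spherical mesh. A sketch of that selection as a hypothetical helper that only returns the converter class name:

def pick_units(mesh, name):
    # Mirrors the branching above; returns the parcels converter name only.
    if mesh == 'flat' or (name != 'U' and name != 'V'):
        return 'UnitConverter'     # no conversion for flat meshes or tracers
    elif mesh == 'spherical' and name == 'U':
        return 'GeographicPolar'   # zonal velocity on a spherical mesh
    elif mesh == 'spherical' and name == 'V':
        return 'Geographic'        # meridional velocity on a spherical mesh
    else:
        raise ValueError("Unsupported mesh type. Choose either: 'spherical' or 'flat'")

print(pick_units('spherical', 'U'), pick_units('flat', 'T'))
# GeographicPolar UnitConverter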
Code example #13
    def execute(self,
                pyfunc=AdvectionRK4,
                starttime=None,
                endtime=None,
                dt=1.,
                runtime=None,
                interval=None,
                recovery=None,
                output_file=None,
                show_movie=False):
        """Execute a given kernel function over the particle set for
        multiple timesteps. Optionally also provide sub-timestepping
        for particle output.

        :param pyfunc: Kernel function to execute. This can be the name of a
                       defined Python function or a :class:`parcels.kernel.Kernel` object.
                       Kernels can be concatenated using the + operator
        :param starttime: Starting time for the timestepping loop. Defaults to 0.0.
        :param endtime: End time for the timestepping loop
        :param runtime: Length of the timestepping loop. Use instead of endtime.
        :param dt: Timestep interval to be passed to the kernel
        :param interval: Interval for inner sub-timestepping (leap), which dictates
                         the update frequency of file output and animation.
        :param output_file: :mod:`parcels.particlefile.ParticleFile` object for particle output
        :param recovery: Dictionary with additional :mod:`parcels.kernels.error`
                         recovery kernels to allow custom recovery behaviour in case of
                         kernel errors.
        :param show_movie: True shows particles; name of field plots that field as background
        """

        # check if pyfunc has changed since last compile. If so, recompile
        if self.kernel is None or (self.kernel.pyfunc is not pyfunc
                                   and self.kernel is not pyfunc):
            # Generate and store Kernel
            if isinstance(pyfunc, Kernel):
                self.kernel = pyfunc
            else:
                self.kernel = self.Kernel(pyfunc)
            # Prepare JIT kernel execution
            if self.ptype.uses_jit:
                self.kernel.remove_lib()
                self.kernel.compile(compiler=GNUCompiler())
                self.kernel.load_lib()

        # Convert all time variables to seconds
        if isinstance(starttime, delta):
            starttime = starttime.total_seconds()
        if isinstance(endtime, delta):
            endtime = endtime.total_seconds()
        if isinstance(runtime, delta):
            runtime = runtime.total_seconds()
        if isinstance(dt, delta):
            dt = dt.total_seconds()
        if isinstance(interval, delta):
            interval = interval.total_seconds()
        if isinstance(starttime, datetime):
            starttime = (starttime - self.time_origin).total_seconds()
        if isinstance(endtime, datetime):
            endtime = (endtime - self.time_origin).total_seconds()

        # Derive starttime, endtime and interval from arguments or fieldset defaults
        if runtime is not None and endtime is not None:
            raise RuntimeError(
                'Only one of (endtime, runtime) can be specified')
        if starttime is None:
            starttime = self.fieldset.U.grid.time[
                0] if dt >= 0 else self.fieldset.U.grid.time[-1]
        if runtime is not None:
            if runtime < 0:
                runtime = np.abs(runtime)
                logger.warning(
                    "Negating runtime because it has to be positive")
            endtime = starttime + runtime * np.sign(dt)
        else:
            if endtime is None:
                endtime = self.fieldset.U.grid.time[
                    -1] if dt >= 0 else self.fieldset.U.grid.time[0]
        if interval is None:
            interval = endtime - starttime

        # Ensure that dt and interval have the correct sign
        if endtime > starttime:  # Time-forward mode
            if dt < 0:
                dt *= -1.
                logger.warning(
                    "Negating dt because running in time-forward mode")
            if interval < 0:
                interval *= -1.
                logger.warning(
                    "Negating interval because running in time-forward mode")
        elif endtime < starttime:  # Time-backward mode
            if dt > 0.:
                dt *= -1.
                logger.warning(
                    "Negating dt because running in time-backward mode")
            if interval > 0.:
                interval *= -1.
                logger.warning(
                    "Negating interval because running in time-backward mode")

        if np.allclose(endtime,
                       starttime) or interval == 0 or dt == 0 or runtime == 0:
            timeleaps = 1
            dt = 0
            runtime = 0
            endtime = starttime
            logger.warning_once(
                "dt = 0 and endtime == starttime. The kernels will be executed once, without incrementing time"
            )
        else:
            timeleaps = int((endtime - starttime) / interval)

        # Initialise particle timestepping
        for p in self:
            p.time = starttime
            p.dt = dt
        # Execute time loop in sub-steps (timeleaps)
        assert (timeleaps >= 0)
        leaptime = starttime
        for _ in range(timeleaps):
            # First write output_file, because particles could have been added
            if output_file:
                output_file.write(self, leaptime)
            if show_movie:
                self.show(field=show_movie, show_time=leaptime)
            leaptime += interval
            self.kernel.execute(self,
                                endtime=leaptime,
                                dt=dt,
                                recovery=recovery)
        # Write out a final output_file
        if output_file:
            output_file.write(self, leaptime)
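
The loop above splits the run into timeleaps chunks of length interval, writing output (and optionally a movie frame) before each chunk. A numbers-only sketch of that bookkeeping, with illustrative values:

starttime, endtime, interval = 0., 86400., 21600.    # one day in 6-hour leaps
timeleaps = int((endtime - starttime) / interval)     # 4
leaptime = starttime
for _ in range(timeleaps):
    # output_file.write(...) and kernel.execute(...) would run here
    leaptime += interval
print(timeleaps, leaptime)  # 4 86400.0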
Code example #14
    def execute(self,
                pyfunc=AdvectionRK4,
                endtime=None,
                runtime=None,
                dt=1.,
                moviedt=None,
                recovery=None,
                output_file=None,
                movie_background_field=None):
        """Execute a given kernel function over the particle set for
        multiple timesteps. Optionally also provide sub-timestepping
        for particle output.

        :param pyfunc: Kernel function to execute. This can be the name of a
                       defined Python function or a :class:`parcels.kernel.Kernel` object.
                       Kernels can be concatenated using the + operator
        :param endtime: End time for the timestepping loop.
                        It is either a datetime object or a positive double.
        :param runtime: Length of the timestepping loop. Use instead of endtime.
                        It is either a timedelta object or a positive double.
        :param dt: Timestep interval to be passed to the kernel.
                   It is either a timedelta object or a double.
                   Use a negative value for a backward-in-time simulation.
        :param moviedt:  Interval for inner sub-timestepping (leap), which dictates
                         the update frequency of animation.
                         It is either a timedelta object or a positive double.
                         None value means no animation.
        :param output_file: :mod:`parcels.particlefile.ParticleFile` object for particle output
        :param recovery: Dictionary with additional :mod:`parcels.kernels.error`
                         recovery kernels to allow custom recovery behaviour in case of
                         kernel errors.
        :param movie_background_field: field plotted as background in the movie if moviedt is set.
                                       'vector' shows the velocity as a vector field.

        """

        # check if pyfunc has changed since last compile. If so, recompile
        if self.kernel is None or (self.kernel.pyfunc is not pyfunc
                                   and self.kernel is not pyfunc):
            # Generate and store Kernel
            if isinstance(pyfunc, Kernel):
                self.kernel = pyfunc
            else:
                self.kernel = self.Kernel(pyfunc)
            # Prepare JIT kernel execution
            if self.ptype.uses_jit:
                self.kernel.remove_lib()
                self.kernel.compile(compiler=GNUCompiler())
                self.kernel.load_lib()

        # Convert all time variables to seconds
        if isinstance(endtime, delta):
            raise RuntimeError('endtime must be either a datetime or a double')
        if isinstance(endtime, datetime):
            endtime = (endtime - self.time_origin).total_seconds()
        if isinstance(runtime, delta):
            runtime = runtime.total_seconds()
        if isinstance(dt, delta):
            dt = dt.total_seconds()
        outputdt = output_file.outputdt if output_file else np.infty
        if isinstance(outputdt, delta):
            outputdt = outputdt.total_seconds()
        if isinstance(moviedt, delta):
            moviedt = moviedt.total_seconds()

        assert runtime is None or runtime >= 0, 'runtime must be positive'
        assert outputdt is None or outputdt >= 0, 'outputdt must be positive'
        assert moviedt is None or moviedt >= 0, 'moviedt must be positive'

        # Set particle.time defaults based on sign of dt, if not set at ParticleSet construction
        for p in self:
            if np.isnan(p.time):
                p.time = self.fieldset.U.grid.time[
                    0] if dt >= 0 else self.fieldset.U.grid.time[-1]

        # Derive _starttime and endtime from arguments or fieldset defaults
        if runtime is not None and endtime is not None:
            raise RuntimeError(
                'Only one of (endtime, runtime) can be specified')
        _starttime = min([p.time for p in self]) if dt >= 0 else max(
            [p.time for p in self])
        if self.repeatdt is not None and self.repeat_starttime is None:
            self.repeat_starttime = _starttime
        if runtime is not None:
            endtime = _starttime + runtime * np.sign(dt)
        elif endtime is None:
            endtime = self.fieldset.U.grid.time[
                -1] if dt >= 0 else self.fieldset.U.grid.time[0]

        if abs(endtime - _starttime) < 1e-5 or dt == 0 or runtime == 0:
            dt = 0
            runtime = 0
            endtime = _starttime
            logger.warning_once(
                "dt or runtime are zero, or endtime is equal to Particle.time. "
                "The kernels will be executed once, without incrementing time")

        # Initialise particle timestepping
        for p in self:
            p.dt = dt

        # First write output_file, because particles could have been added
        if output_file:
            output_file.write(self, _starttime)
        if moviedt:
            self.show(field=movie_background_field, show_time=_starttime)

        if moviedt is None:
            moviedt = np.infty
        time = _starttime
        if self.repeatdt:
            next_prelease = self.repeat_starttime + (
                abs(time - self.repeat_starttime) // self.repeatdt +
                1) * self.repeatdt * np.sign(dt)
        else:
            next_prelease = np.infty * np.sign(dt)
        next_output = time + outputdt * np.sign(dt)
        next_movie = time + moviedt * np.sign(dt)
        next_input = np.infty * np.sign(dt)  # Not used yet

        tol = 1e-12
        while (time < endtime and dt > 0) or (time > endtime
                                              and dt < 0) or dt == 0:
            if dt > 0:
                time = min(next_prelease, next_input, next_output, next_movie,
                           endtime)
            else:
                time = max(next_prelease, next_input, next_output, next_movie,
                           endtime)
            self.kernel.execute(self,
                                endtime=time,
                                dt=dt,
                                recovery=recovery,
                                output_file=output_file)
            if abs(time - next_prelease) < tol:
                self.add(
                    ParticleSet(fieldset=self.fieldset,
                                time=time,
                                lon=self.repeatlon,
                                lat=self.repeatlat,
                                depth=self.repeatdepth,
                                pclass=self.repeatpclass))
                next_prelease += self.repeatdt * np.sign(dt)
            if abs(time - next_input) < tol:
                continue
            if abs(time - next_output) < tol:
                if output_file:
                    output_file.write(self, time)
                next_output += outputdt * np.sign(dt)
            if abs(time - next_movie) < tol:
                self.show(field=movie_background_field, show_time=time)
                next_movie += moviedt * np.sign(dt)
            if dt == 0:
                break

        if output_file:
            output_file.write(self, time)
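
The while loop above always advances time to the nearest upcoming event (particle release, file output, movie frame) or to endtime, and runs the kernel up to that point. A pure-Python sketch of that scheduling, with illustrative event intervals and the kernel call reduced to a comment:

import numpy as np

time, endtime, dt = 0., 10., 1.
outputdt, moviedt = 3., 4.5
next_output, next_movie, next_prelease = outputdt, moviedt, np.inf
tol = 1e-12
while time < endtime and dt > 0:
    time = min(next_prelease, next_output, next_movie, endtime)
    # self.kernel.execute(self, endtime=time, dt=dt, ...) would run here
    if abs(time - next_output) < tol:
        next_output += outputdt
    if abs(time - next_movie) < tol:
        next_movie += moviedt
print(time)  # 10.0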