Example 1
    def generate(self, py_ast, funcvars):
        # Replace occurrences of intrinsic objects in Python AST
        transformer = IntrinsicTransformer(self.fieldset, self.ptype)
        py_ast = transformer.visit(py_ast)

        # Untangle Pythonic tuple-assignment statements
        py_ast = TupleSplitter().visit(py_ast)

        # Generate C-code for all nodes in the Python AST
        self.visit(py_ast)
        self.ccode = py_ast.ccode

        # Insert variable declarations for non-intrinsics
        # Make sure that repeated variables are not declared more than
        # once. If variables occur in multiple Kernels, give a warning
        used_vars = []
        funcvars_copy = copy(funcvars)  # editing a list while looping over it is dangerous
        for kvar in funcvars:
            if kvar in used_vars:
                if kvar not in ['particle', 'fieldset', 'time']:
                    logger.warning(kvar+" declared in multiple Kernels")
                funcvars_copy.remove(kvar)
            else:
                used_vars.append(kvar)
        funcvars = funcvars_copy
        for kvar in self.kernel_vars + self.array_vars:
            if kvar in funcvars:
                funcvars.remove(kvar)
        self.ccode.body.insert(0, c.Value('ErrorCode', 'err'))
        if len(funcvars) > 0:
            self.ccode.body.insert(0, c.Value("type_coord", ", ".join(funcvars)))
        if len(transformer.tmp_vars) > 0:
            self.ccode.body.insert(0, c.Value("float", ", ".join(transformer.tmp_vars)))

        return self.ccode
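For context, the sketch below mirrors how this generator is typically driven from the JIT branch of the Kernel constructor in Example 6; KernelGenerator, LoopGenerator and the surrounding objects (fieldset, ptype, py_ast, funcvars, funcname, c_include_str) are assumed to be in scope.

from copy import deepcopy

# Sketch of the call sequence that drives this generator (cf. Example 6).
kernelgen = KernelGenerator(fieldset, ptype)
kernel_ccode = kernelgen.generate(deepcopy(py_ast), funcvars)

# The per-kernel C code is then wrapped in the outer particle loop, together
# with the collected field/constant arguments and any user-supplied C include.
loopgen = LoopGenerator(fieldset, ptype)
ccode = loopgen.generate(funcname, kernelgen.field_args, kernelgen.const_args,
                         kernel_ccode, c_include_str)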
Example 2
    def to_dict(self, pfile, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.

        :param pfile: ParticleFile object requesting the conversion
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        :returns: two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        pd = self.particle_data

        if pfile.lasttime_written != time and \
           (pfile.write_ondelete is False or deleted_only is not False):
            if pd['id'].size == 0:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)
            else:
                if deleted_only is not False:
                    to_write = deleted_only
                else:
                    to_write = _to_write_particles(pd, time)
                if np.any(to_write):
                    for var in pfile.var_names:
                        data_dict[var] = pd[var][to_write]
                    pfile.maxid_written = np.maximum(pfile.maxid_written,
                                                     np.max(data_dict['id']))

                pset_errs = (to_write & (pd['state'] != OperationCode.Delete)
                             & np.less(1e-3,
                                       np.abs(time - pd['time']),
                                       where=np.isfinite(pd['time'])))
                if np.count_nonzero(pset_errs) > 0:
                    logger.warning_once(
                        'time argument in pfile.write() is {}, but particles have time {}'
                        .format(time, pd['time'][pset_errs]))

                if time not in pfile.time_written:
                    pfile.time_written.append(time)

                if len(pfile.var_names_once) > 0:
                    first_write = (_to_write_particles(pd, time) & np.isin(
                        pd['id'], pfile.written_once, invert=True))
                    if np.any(first_write):
                        data_dict_once['id'] = np.array(
                            pd['id'][first_write]).astype(dtype=np.int64)
                        for var in pfile.var_names_once:
                            data_dict_once[var] = pd[var][first_write]
                        pfile.written_once.extend(pd['id'][first_write])

            if deleted_only is False:
                pfile.lasttime_written = time

        return data_dict, data_dict_once
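The _to_write_particles helper is not part of this excerpt. A minimal, hypothetical stand-in, based on the per-particle selection logic visible in Example 8, could look like this (synthetic data only):

import numpy as np

def _to_write_particles_sketch(pd, time):
    # Hypothetical stand-in for the _to_write_particles helper used above:
    # select particles whose own time falls inside the current output window,
    # mirroring the forward/backward selection in Example 8, and drop
    # particles without a valid id.
    forward = pd['dt'] >= 0
    in_window = np.where(forward,
                         (time <= pd['time']) & (pd['time'] < time + pd['dt']),
                         (time + pd['dt'] < pd['time']) & (pd['time'] <= time))
    return in_window & np.isfinite(pd['id'])

# Tiny synthetic check
pd = {'id': np.array([0., 1., np.nan]),
      'time': np.array([3600., 3000., 3600.]),
      'dt': np.array([300., 300., 300.])}
print(_to_write_particles_sketch(pd, 3600.))  # [ True False False]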
Example 3
    def write(self, pset, time, sync=True, deleted_only=False):
        """Write :class:`parcels.particleset.ParticleSet` data to file

        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param sync: Optional argument whether to write data to disk immediately. Default is True
        :param deleted_only: Flag to write only the deleted Particles

        """
        if self.dataset is None:
            self.open_dataset()
        if isinstance(time, delta):
            time = time.total_seconds()
        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size > 0:

                first_write = [
                    p for p in pset if (p.fileid < 0 or len(self.idx) == 0)
                    and _is_particle_started_yet(p, time)
                ]  # len(self.idx)==0 in case pset is written to new ParticleFile
                for p in first_write:
                    p.fileid = self.lasttraj  # particle id in current file
                    self.lasttraj += 1

                self.idx = np.append(self.idx, np.zeros(len(first_write)))

                for p in pset:
                    if _is_particle_started_yet(p, time):
                        i = p.fileid
                        self.id[i, self.idx[i]] = p.id
                        self.time[i, self.idx[i]] = p.time
                        self.lat[i, self.idx[i]] = p.lat
                        self.lon[i, self.idx[i]] = p.lon
                        self.z[i, self.idx[i]] = p.depth
                        for var in self.user_vars:
                            getattr(self, var)[i,
                                               self.idx[i]] = getattr(p, var)
                        if p.state != ErrorCode.Delete and not np.allclose(
                                p.time, time):
                            logger.warning_once(
                                'time argument in pfile.write() is %g, but a particle has time %g.'
                                % (time, p.time))

                for p in first_write:
                    for var in self.user_vars_once:
                        getattr(self, var)[p.fileid] = getattr(p, var)
            else:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)

            if not deleted_only:
                self.idx += 1
                self.lasttime_written = time

        if sync:
            self.sync()
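A hypothetical driver loop around this write() method; fieldset and pset are assumed to be set up elsewhere, and the pset.ParticleFile(...) factory and file name are assumptions that may differ between Parcels versions.

from datetime import timedelta as delta
from parcels import AdvectionRK4

# Snapshot the ParticleSet every output interval while advecting it.
outputdt = delta(hours=1)
pfile = pset.ParticleFile(name="trajectories.nc", outputdt=outputdt)

for _ in range(24):
    pfile.write(pset, pset[0].time)      # snapshot before each step
    pset.execute(AdvectionRK4, runtime=outputdt, dt=delta(minutes=5))
pfile.write(pset, pset[0].time)          # final snapshot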
Example 4
    def toDictionary(self, pfile, time, deleted_only=False):
        """
        Convert all Particle data from one time step to a python dictionary.
        :param pfile: ParticleFile object requesting the conversion
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        :returns: two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once

        This function depends on the specific collection in question and thus needs to be specified in specific
        derivative classes.
        """

        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        indices_to_write = []
        if pfile.lasttime_written != time and \
           (pfile.write_ondelete is False or deleted_only is not False):
            if self._data['id'].size == 0:
                logger.warning("ParticleSet is empty on writing as array at time %g" % time)
            else:
                if deleted_only is not False:
                    if type(deleted_only) not in [list, np.ndarray] and deleted_only in [True, 1]:
                        indices_to_write = np.where(np.isin(self._data['state'],
                                                            [OperationCode.Delete]))[0]
                    elif type(deleted_only) in [list, np.ndarray]:
                        indices_to_write = deleted_only
                else:
                    indices_to_write = _to_write_particles(self._data, time)
                if np.any(indices_to_write):
                    for var in pfile.var_names:
                        data_dict[var] = self._data[var][indices_to_write]
                    pfile.maxid_written = np.maximum(pfile.maxid_written, np.max(data_dict['id']))

                pset_errs = ((self._data['state'][indices_to_write] != OperationCode.Delete) & np.greater(np.abs(time - self._data['time'][indices_to_write]), 1e-3, where=np.isfinite(self._data['time'][indices_to_write])))
                if np.count_nonzero(pset_errs) > 0:
                    logger.warning_once('time argument in pfile.write() is {}, but particles have time {}'.format(time, self._data['time'][pset_errs]))

                # ==== this function should probably move back somewhere into the particle-file instead of the to_dict ==== #
                if time not in pfile.time_written:
                    pfile.time_written.append(time)

                if len(pfile.var_names_once) > 0:
                    first_write = (_to_write_particles(self._data, time) & _is_particle_started_yet(self._data, time) & np.isin(self._data['id'], pfile.written_once, invert=True))
                    if np.any(first_write):
                        data_dict_once['id'] = np.array(self._data['id'][first_write]).astype(dtype=np.int64)
                        for var in pfile.var_names_once:
                            data_dict_once[var] = self._data[var][first_write]
                        pfile.written_once.extend(np.array(self._data['id'][first_write]).astype(dtype=np.int64).tolist())

            if deleted_only is False:
                pfile.lasttime_written = time

        return data_dict, data_dict_once
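As the branching at the top of the else-block shows, deleted_only accepts three forms: False (regular output step), True/1 (write the particles currently flagged for deletion), or an explicit list/array of indices. A hedged usage sketch, with collection, pfile and time assumed to exist:

import numpy as np

# Hypothetical calls; `collection` is assumed to be the particle collection
# that owns this method, and `pfile`/`time` come from the ParticleFile doing
# the writing.

# 1. Regular output step: particles are selected via _to_write_particles().
data, data_once = collection.toDictionary(pfile, time)

# 2. Write only the particles flagged OperationCode.Delete in this step.
data, data_once = collection.toDictionary(pfile, time, deleted_only=True)

# 3. Write an explicit, already-known set of indices.
data, data_once = collection.toDictionary(pfile, time,
                                          deleted_only=np.array([4, 7, 12]))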
Example 5
    def convert_pset_to_dict(self, pset, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.
        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        :returns: two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size == 0:
                logger.warning("ParticleSet is empty on writing as array at time %g" % time)
            else:
                for var in self.var_names:
                    data_dict[var] = np.nan * np.zeros(len(pset))

                i = 0
                for p in pset:
                    if p.dt*p.time <= p.dt*time:
                        for var in self.var_names:
                            data_dict[var][i] = getattr(p, var)
                        if p.state != ErrorCode.Delete and not np.allclose(p.time, time):
                            logger.warning_once('time argument in pfile.write() is %g, but a particle has time %g.' % (time, p.time))
                        self.maxid_written = np.max([self.maxid_written, p.id])
                        i += 1

                save_ind = np.isfinite(data_dict["id"])
                for key in self.var_names:
                    data_dict[key] = data_dict[key][save_ind]

                if time not in self.time_written:
                    self.time_written.append(time)

                if len(self.var_names_once) > 0:
                    first_write = [p for p in pset if (p.id not in self.written_once) and _is_particle_started_yet(p, time)]
                    data_dict_once['id'] = np.nan * np.zeros(len(first_write))
                    for var in self.var_names_once:
                        data_dict_once[var] = np.nan * np.zeros(len(first_write))

                    i = 0
                    for p in first_write:
                        self.written_once.append(p.id)
                        data_dict_once['id'][i] = p.id
                        for var in self.var_names_once:
                            data_dict_once[var][i] = getattr(p, var)
                        i += 1

            if not deleted_only:
                self.lasttime_written = time

        return data_dict, data_dict_once
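The _is_particle_started_yet helper referenced in several of these examples is not shown. A hypothetical stand-in, based on the p.dt*p.time <= p.dt*time checks above and in Example 12:

import numpy as np
from collections import namedtuple

def _is_particle_started_yet_sketch(particle, time):
    # Hypothetical stand-in: a particle counts as started once its own clock
    # has reached the output time in its integration direction (forward or
    # backward), or when its dt is not yet defined.
    return np.isnan(particle.dt) or particle.dt * particle.time <= particle.dt * time

# Tiny synthetic check
P = namedtuple('P', ['time', 'dt'])
print(_is_particle_started_yet_sketch(P(time=0.0, dt=60.0), 3600.0))     # True
print(_is_particle_started_yet_sketch(P(time=7200.0, dt=60.0), 3600.0))  # False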
Example 6
    def __init__(self, fieldset, ptype, pyfunc=None, funcname=None,
                 funccode=None, py_ast=None, funcvars=None, c_include="", delete_cfiles=True):
        self.fieldset = fieldset
        self.ptype = ptype
        self._lib = None
        self.delete_cfiles = delete_cfiles
        self._cleanup_files = None
        self._cleanup_lib = None

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            warning = False
            if isinstance(fieldset.W, Field) and fieldset.W.creation_log != 'from_nemo' and \
               fieldset.W._scaling_factor is not None and fieldset.W._scaling_factor > 0:
                warning = True
            if type(fieldset.W) in [SummedField, NestedField]:
                for f in fieldset.W:
                    if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:
                        warning = True
            if warning:
                logger.warning_once('Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n'
                                    '         If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)')
        elif pyfunc is AdvectionAnalytical:
            if ptype.uses_jit:
                raise NotImplementedError('Analytical Advection only works in Scipy mode')
            if fieldset.U.interp_method != 'cgrid_velocity':
                raise NotImplementedError('Analytical Advection only works with C-grids')
            if fieldset.U.grid.gtype not in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
                raise NotImplementedError('Analytical Advection only works with Z-grids in the vertical')

        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['ParcelsRandom'] = globals()['ParcelsRandom']
                user_ctx['random'] = globals()['random']
                user_ctx['StateCode'] = globals()['StateCode']
                user_ctx['OperationCode'] = globals()['OperationCode']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning("Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = parse("")
            py_mod.body = [self.py_ast]
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc

        if version_info[0] < 3:
            numkernelargs = len(inspect.getargspec(self.pyfunc).args)
        else:
            numkernelargs = len(inspect.getfullargspec(self.pyfunc).args)

        assert numkernelargs == 3, \
            'Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon.'

        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for f in self.vector_field_args.values():
                Wname = f.W.ccode_name if f.W else 'not_defined'
                for sF_name, sF_component in zip([f.U.ccode_name, f.V.ccode_name, Wname], ['U', 'V', 'W']):
                    if sF_name not in self.field_args:
                        if sF_name != 'not_defined':
                            self.field_args[sF_name] = getattr(f, sF_component)
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args, self.const_args,
                                          kernel_ccode, c_include_str)
            if MPI:
                mpi_comm = MPI.COMM_WORLD
                mpi_rank = mpi_comm.Get_rank()
                basename = path.join(get_cache_dir(), self._cache_key) if mpi_rank == 0 else None
                basename = mpi_comm.bcast(basename, root=0)
                basename = basename + "_%d" % mpi_rank
            else:
                basename = path.join(get_cache_dir(), "%s_0" % self._cache_key)

            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename, 'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
Example 7
    @classmethod
    def from_particlefile(cls,
                          fieldset,
                          pclass,
                          filename,
                          restart=True,
                          restarttime=None,
                          repeatdt=None,
                          lonlatdepth_dtype=None,
                          **kwargs):
        """Initialise the ParticleSet from a netcdf ParticleFile.
        This creates a new ParticleSet based on locations of all particles written
        in a netcdf ParticleFile at a certain time. Particle IDs are preserved if restart=True

        :param fieldset: :mod:`parcels.fieldset.FieldSet` object from which to sample velocity
        :param pclass: mod:`parcels.particle.JITParticle` or :mod:`parcels.particle.ScipyParticle`
                 object that defines custom particle
        :param filename: Name of the particlefile from which to read initial conditions
        :param restart: Boolean to signal if pset is used for a restart (default is True).
               In that case, Particle IDs are preserved.
        :param restarttime: time at which the Particles will be restarted. Default is the last time written.
               Alternatively, restarttime could be a time value (including np.datetime64) or
               a callable function such as np.nanmin. The last is useful when running with dt < 0.
        :param repeatdt: Optional interval (in seconds) on which to repeat the release of the ParticleSet
        :param lonlatdepth_dtype: Floating precision for lon, lat, depth particle coordinates.
               It is either np.float32 or np.float64. Default is np.float32 if fieldset.U.interp_method is 'linear'
               and np.float64 if the interpolation method is 'cgrid_velocity'
        """

        if repeatdt is not None:
            logger.warning(
                'Note that the `repeatdt` argument is not retained from %s, and that '
                'setting a new repeatdt will start particles from the _new_ particle '
                'locations.' % filename)

        pfile = xr.open_dataset(str(filename), decode_cf=True)
        pfile_vars = [v for v in pfile.data_vars]

        vars = {}
        to_write = {}
        for v in pclass.getPType().variables:
            if v.name in pfile_vars:
                vars[v.name] = np.ma.filled(pfile.variables[v.name], np.nan)
            elif v.name not in ['xi', 'yi', 'zi', 'ti', 'dt', '_next_dt', 'depth', 'id', 'fileid', 'state'] \
                    and v.to_write:
                raise RuntimeError(
                    'Variable %s is in pclass but not in the particlefile' %
                    v.name)
            to_write[v.name] = v.to_write
        vars['depth'] = np.ma.filled(pfile.variables['z'], np.nan)
        vars['id'] = np.ma.filled(pfile.variables['trajectory'], np.nan)

        if isinstance(vars['time'][0, 0], np.timedelta64):
            vars['time'] = np.array(
                [t / np.timedelta64(1, 's') for t in vars['time']])

        if restarttime is None:
            restarttime = np.nanmax(vars['time'])
        elif callable(restarttime):
            restarttime = restarttime(vars['time'])
        else:
            restarttime = restarttime

        inds = np.where(vars['time'] == restarttime)
        for v in vars:
            if to_write[v] is True:
                vars[v] = vars[v][inds]
            elif to_write[v] == 'once':
                vars[v] = vars[v][inds[0]]
            if v not in ['lon', 'lat', 'depth', 'time', 'id']:
                kwargs[v] = vars[v]

        if restart:
            pclass.setLastID(0)  # reset to zero offset
        else:
            vars['id'] = None

        return cls(fieldset=fieldset,
                   pclass=pclass,
                   lon=vars['lon'],
                   lat=vars['lat'],
                   depth=vars['depth'],
                   time=vars['time'],
                   pid_orig=vars['id'],
                   lonlatdepth_dtype=lonlatdepth_dtype,
                   repeatdt=repeatdt,
                   **kwargs)
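A hypothetical restart based on this classmethod; fieldset is assumed to be built already and the file name is a placeholder.

import numpy as np
from parcels import ParticleSet, JITParticle

# Restart from the last time written in a previous output file; with
# restart=True the particle IDs from the file are preserved.
pset = ParticleSet.from_particlefile(fieldset, pclass=JITParticle,
                                     filename="previous_run.nc", restart=True)

# For a backward-in-time run, restart from the earliest written time instead
# of the latest by passing a callable restarttime.
pset_bwd = ParticleSet.from_particlefile(fieldset, pclass=JITParticle,
                                         filename="previous_run.nc",
                                         restarttime=np.nanmin)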
Example 8
    def convert_pset_to_dict(self, pset, time, deleted_only=False):
        """Convert all Particle data from one time step to a python dictionary.
        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param deleted_only: Flag to write only the deleted Particles
        :returns: two dictionaries: one for all variables to be written each outputdt,
         and one for all variables to be written once
        """
        data_dict = {}
        data_dict_once = {}

        time = time.total_seconds() if isinstance(time, delta) else time

        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size == 0:
                logger.warning(
                    "ParticleSet is empty on writing as array at time %g" %
                    time)
            else:
                if deleted_only:
                    pset_towrite = pset
                elif pset[0].dt > 0:
                    pset_towrite = [
                        p for p in pset
                        if time <= p.time < time + p.dt and np.isfinite(p.id)
                    ]
                else:
                    pset_towrite = [
                        p for p in pset
                        if time + p.dt < p.time <= time and np.isfinite(p.id)
                    ]
                if len(pset_towrite) > 0:
                    for var in self.var_names:
                        data_dict[var] = np.array(
                            [getattr(p, var) for p in pset_towrite])
                    self.maxid_written = np.max(
                        [self.maxid_written,
                         np.max(data_dict['id'])])

                pset_errs = [
                    p for p in pset_towrite
                    if p.state != ErrorCode.Delete and abs(time -
                                                           p.time) > 1e-3
                ]
                for p in pset_errs:
                    logger.warning_once(
                        'time argument in pfile.write() is %g, but a particle has time %g.'
                        % (time, p.time))

                if time not in self.time_written:
                    self.time_written.append(time)

                if len(self.var_names_once) > 0:
                    first_write = [
                        p for p in pset if (p.id not in self.written_once)
                        and _is_particle_started_yet(p, time)
                    ]
                    data_dict_once['id'] = np.array(
                        [p.id for p in first_write])
                    for var in self.var_names_once:
                        data_dict_once[var] = np.array(
                            [getattr(p, var) for p in first_write])
                    self.written_once += [p.id for p in first_write]

            if not deleted_only:
                self.lasttime_written = time

        return data_dict, data_dict_once
Example 9
def plotfield(field,
              show_time=None,
              domain=None,
              depth_level=0,
              projection=None,
              land=True,
              vmin=None,
              vmax=None,
              savefile=None,
              **kwargs):
    """Function to plot a Parcels Field

    :param show_time: Time at which to show the Field
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param depth_level: depth level to be plotted (default 0)
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """

    if type(field) is VectorField:
        spherical = field.U.grid.mesh == 'spherical'
        field = [field.U, field.V]
        plottype = 'vector'
    elif type(field) is Field:
        spherical = field.grid.mesh == 'spherical'
        field = [field]
        plottype = 'scalar'
    else:
        raise RuntimeError('field needs to be a Field or VectorField object')

    if field[0].grid.gtype in [
            GridCode.CurvilinearZGrid, GridCode.CurvilinearSGrid
    ]:
        logger.warning(
            'Field.show() does not always correctly determine the domain for curvilinear grids. '
            'Use plotting with caution and perhaps use domain argument as in the NEMO 3D tutorial'
        )

    plt, fig, ax, cartopy = create_parcelsfig_axis(spherical,
                                                   land,
                                                   projection=projection)
    if plt is None:
        return None, None, None, None  # creating axes was not possible

    data = {}
    plotlon = {}
    plotlat = {}
    for i, fld in enumerate(field):
        show_time = fld.grid.time[0] if show_time is None else show_time
        if fld.grid.defer_load:
            fld.fieldset.computeTimeChunk(show_time, 1)
        (idx, periods) = fld.time_index(show_time)
        show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
        if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
            raise TimeExtrapolationError(show_time, field=fld, msg='show_time')

        latN, latS, lonE, lonW = parsedomain(domain, fld)
        if isinstance(fld.grid, CurvilinearGrid):
            plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
        else:
            plotlon[i] = fld.grid.lon[lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN]
        if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
            raise RuntimeError(
                'VectorField needs to be on an A-grid for plotting')
        if fld.grid.time.size > 1:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(
                    fld.temporal_interpolate_fullfield(idx,
                                                       show_time))[depth_level,
                                                                   latS:latN,
                                                                   lonW:lonE]
            else:
                data[i] = np.squeeze(
                    fld.temporal_interpolate_fullfield(idx,
                                                       show_time))[latS:latN,
                                                                   lonW:lonE]
        else:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.data)[depth_level, latS:latN,
                                               lonW:lonE]
            else:
                data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]

    if plottype == 'vector':
        if field[0].interp_method == 'cgrid_velocity':
            logger.warning_once(
                'Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy'
            )
            d = np.empty_like(data[0])
            d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
            d[-1, :] = data[0][-1, :]
            data[0] = d
            d = np.empty_like(data[0])
            d[:, :-1] = (data[0][:, :-1] + data[0][:, 1:]) / 2.
            d[:, -1] = data[0][:, -1]
            data[1] = d

        spd = data[0]**2 + data[1]**2
        speed = np.where(spd > 0, np.sqrt(spd), 0)
        vmin = speed.min() if vmin is None else vmin
        vmax = speed.max() if vmax is None else vmax
        if isinstance(field[0].grid, CurvilinearGrid):
            x, y = plotlon[0], plotlat[0]
        else:
            x, y = np.meshgrid(plotlon[0], plotlat[0])
        u = np.where(speed > 0., data[0] / speed, 0)
        v = np.where(speed > 0., data[1] / speed, 0)
        if cartopy:
            cs = ax.quiver(np.asarray(x),
                           np.asarray(y),
                           np.asarray(u),
                           np.asarray(v),
                           speed,
                           cmap=plt.cm.gist_ncar,
                           clim=[vmin, vmax],
                           scale=50,
                           transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.quiver(x,
                           y,
                           u,
                           v,
                           speed,
                           cmap=plt.cm.gist_ncar,
                           clim=[vmin, vmax],
                           scale=50)
    else:
        vmin = data[0].min() if vmin is None else vmin
        vmax = data[0].max() if vmax is None else vmax
        assert len(data[0].shape) == 2
        if field[0].interp_method == 'cgrid_tracer':
            d = data[0][1:, 1:]
        elif field[0].interp_method == 'cgrid_velocity':
            if field[0].fieldtype == 'U':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
            elif field[0].fieldtype == 'V':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
            else:  # W
                d = data[0][1:, 1:]
        else:  # if A-grid
            d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] +
                 data[0][1:, 1:]) / 4.
            d = np.where(data[0][:-1, :-1] == 0, 0, d)
            d = np.where(data[0][1:, :-1] == 0, 0, d)
            d = np.where(data[0][1:, 1:] == 0, 0, d)
            d = np.where(data[0][:-1, 1:] == 0, 0, d)
        if cartopy:
            cs = ax.pcolormesh(plotlon[0],
                               plotlat[0],
                               d,
                               transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d)

    if cartopy is None:
        ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
        ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
    elif domain is not None:
        ax.set_extent([
            np.nanmin(plotlon[0]),
            np.nanmax(plotlon[0]),
            np.nanmin(plotlat[0]),
            np.nanmax(plotlat[0])
        ],
                      crs=cartopy.crs.PlateCarree())
    cs.cmap.set_over('k')
    cs.cmap.set_under('w')
    cs.set_clim(vmin, vmax)

    cartopy_colorbar(cs, plt, fig, ax)

    timestr = parsetimestr(field[0].grid.time_origin, show_time)
    titlestr = kwargs.pop('titlestr', '')
    if field[0].grid.zdim > 1:
        if field[0].grid.gtype in [
                GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid
        ]:
            gphrase = 'depth'
            depth_or_level = field[0].grid.depth[depth_level]
        else:
            gphrase = 'level'
            depth_or_level = depth_level
        depthstr = ' at %s %g ' % (gphrase, depth_or_level)
    else:
        depthstr = ''
    if plottype == 'vector':
        ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
    else:
        ax.set_title(titlestr + field[0].name + depthstr + timestr)

    if not spherical:
        ax.set_xlabel('Zonal distance [m]')
        ax.set_ylabel('Meridional distance [m]')

    plt.draw()

    if savefile:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()

    return plt, fig, ax, cartopy
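A hypothetical call to plotfield(): show the zonal velocity of a FieldSet at the first time level and surface depth level, restricted to a sub-domain, and save the figure. fieldset is assumed to exist and the domain values are placeholders.

# Domain keys follow the 'N'/'S'/'E'/'W' convention parsed by parsedomain().
domain = {'N': 60, 'S': 40, 'E': 10, 'W': -10}
plt, fig, ax, cartopy = plotfield(fieldset.U, show_time=0, domain=domain,
                                  depth_level=0, vmax=1.0, savefile='u_field')
if plt is not None:
    plt.show()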
Example 10
    def __init__(self, fieldset, ptype, pyfunc=None, funcname=None,
                 funccode=None, py_ast=None, funcvars=None, c_include="", delete_cfiles=True):
        super(KernelSOA, self).__init__(fieldset=fieldset, ptype=ptype, pyfunc=pyfunc, funcname=funcname, funccode=funccode, py_ast=py_ast, funcvars=funcvars, c_include=c_include, delete_cfiles=delete_cfiles)

        # Derive meta information from pyfunc, if not given
        self.check_fieldsets_in_kernels(pyfunc)

        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(BaseKernel.fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['ParcelsRandom'] = globals()['ParcelsRandom']
                user_ctx['random'] = globals()['random']
                user_ctx['StateCode'] = globals()['StateCode']
                user_ctx['OperationCode'] = globals()['OperationCode']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning("Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = parse("")
            py_mod.body = [self.py_ast]
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self._pyfunc = user_ctx[self.funcname]
        else:
            self._pyfunc = pyfunc

        numkernelargs = self.check_kernel_signature_on_version()

        assert numkernelargs == 3, \
            'Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon.'

        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for f in self.vector_field_args.values():
                Wname = f.W.ccode_name if f.W else 'not_defined'
                for sF_name, sF_component in zip([f.U.ccode_name, f.V.ccode_name, Wname], ['U', 'V', 'W']):
                    if sF_name not in self.field_args:
                        if sF_name != 'not_defined':
                            self.field_args[sF_name] = getattr(f, sF_component)
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(self._c_include):
                with open(self._c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = self._c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args, self.const_args,
                                          kernel_ccode, c_include_str)

            src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files()
            if type(src_file_or_files) in (list, dict, tuple, np.ndarray):
                self.dyn_srcs = src_file_or_files
            else:
                self.src_file = src_file_or_files
Example 11
 def _get_initial_chunk_dictionary(self):
     """
     [private function - not to be called from outside the class]
     Super-function that maps and correlates the requested chunksize with the requested parcels dimensions, variables
     and the NetCDF-available dimensions. Thus, it takes care to remove chunksize arguments that are not in the
     Parcels- or NetCDF dimensions, or whose chunking would be omitted due to an empty chunk dimension.
     The function returns the corrected chunksize dictionary. The function also initializes the chunk_map.
     The chunk map maps the array index dimension to the requested chunksize.
     Apart from resolving the different requested versions of the chunksize, the function also test-executes the
     chunk request. If this initial test fails, as a last resort, we execute a heuristic to map the requested
     parcels dimensions to the dimension signature of the most-parameterized NetCDF variable, and heuristically
     try to map its parameters to the parcels dimensions with the class-wide name-map.
     """
     # ==== check-opening requested dataset to access metadata                   ==== #
     # ==== file-opening and dimension-reading does not require a decode or lock ==== #
     self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks={}, lock=False)
     self.dataset['decoded'] = False
     # ==== self.dataset temporarily available ==== #
     init_chunk_dict = {}
     init_chunk_map = {}
     if isinstance(self.chunksize, dict):
         init_chunk_dict, init_chunk_map = self._get_initial_chunk_dictionary_by_dict_()
     elif self.chunksize == 'auto':
         av_mem = psutil.virtual_memory().available
         chunk_cap = av_mem * (1/8) * (1/3)
         if 'array.chunk-size' in da_conf.config.keys():
             chunk_cap = da_utils.parse_bytes(da_conf.config.get('array.chunk-size'))
         else:
             predefined_cap = da_conf.get('array.chunk-size')
             if predefined_cap is not None:
                 chunk_cap = da_utils.parse_bytes(predefined_cap)
             else:
                 logger.info_once("Unable to locate chunking hints from dask, thus estimating the max. chunk size heuristically. Please consider defining the 'chunk-size' for 'array' in your local dask configuration file (see http://oceanparcels.org/faq.html#field_chunking_config and https://docs.dask.org).")
         loni, lonname, lonvalue = self._is_dimension_in_dataset('lon')
         lati, latname, latvalue = self._is_dimension_in_dataset('lat')
         if lati is not None and loni is not None and lati >= 0 and loni >= 0:
             pDim = int(math.floor(math.sqrt(chunk_cap/np.dtype(np.float64).itemsize)))
             init_chunk_dict[latname] = min(latvalue, pDim)
             init_chunk_map[lati] = min(latvalue, pDim)
             init_chunk_dict[lonname] = min(lonvalue, pDim)
             init_chunk_map[loni] = min(lonvalue, pDim)
         timei, timename, timevalue = self._is_dimension_in_dataset('time')
         if timei is not None and timei >= 0:
             init_chunk_dict[timename] = min(1, timevalue)
             init_chunk_map[timei] = min(1, timevalue)
         depthi, depthname, depthvalue = self._is_dimension_in_dataset('depth')
         if depthi is not None and depthi >= 0:
             init_chunk_dict[depthname] = max(1, depthvalue)
             init_chunk_map[depthi] = max(1, depthvalue)
     # ==== closing check-opened requested dataset ==== #
     self.dataset.close()
     # ==== check if the chunksize reading is successful. if not, load the file ONCE really into memory and ==== #
     # ==== deduce the chunking from the array dims.                                                         ==== #
     if len(init_chunk_dict) == 0 and self.chunksize not in [False, None, 'auto']:
         self.autochunkingfailed = True
         raise DaskChunkingError(self.__class__.__name__, "No correct mapping found between Parcels- and NetCDF dimensions! Please correct the 'FieldSet(..., chunksize={...})' parameter and try again.")
     else:
         self.autochunkingfailed = False
     try:
         self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False)
         if isinstance(self.chunksize, dict):
             self.chunksize = init_chunk_dict
     except:
         logger.warning("Chunking with init_chunk_dict = {} failed - Executing Dask chunking 'failsafe'...".format(init_chunk_dict))
          # only attempt the failsafe parse if automatic chunking had not already failed
          if not self.autochunkingfailed:
              init_chunk_dict = self._failsafe_parse_()
          self.autochunkingfailed = True
         if isinstance(self.chunksize, dict):
             self.chunksize = init_chunk_dict
     finally:
         self.dataset.close()
         self.chunk_mapping = init_chunk_map
     self.dataset = None
     # ==== self.dataset not available ==== #
     return init_chunk_dict
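The dictionary resolved here corresponds to the chunksize argument passed when building a FieldSet. A hedged sketch with placeholder file, variable and dimension names; the exact layout of the chunksize dictionary (plain sizes vs. (netcdf-dimension, size) tuples) depends on the Parcels version.

from parcels import FieldSet

filenames = {'U': 'ocean_u.nc', 'V': 'ocean_v.nc'}
variables = {'U': 'uo', 'V': 'vo'}
dimensions = {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'}

# Let the 'auto' branch above pick chunk sizes from the dask configuration ...
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions,
                                chunksize='auto')

# ... or request explicit chunks per parcels dimension (the dict branch above).
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions,
                                chunksize={'time': ('time', 1),
                                           'lat': ('latitude', 128),
                                           'lon': ('longitude', 128)})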
Example 12
    def write(self, pset, time, sync=True, deleted_only=False):
        """Write :class:`parcels.particleset.ParticleSet` 
        All data from one time step is saved to one NPY-file using a python 
        dictionary. The data is saved in the folder 'out'.

        :param pset: ParticleSet object to write
        :param time: Time at which to write ParticleSet
        :param sync: Optional argument whether to write data to disk immediately. Default is True
        :param deleted_only: Flag to write only the deleted Particles

        """
        if self.dataset is None:
            self.open_dataset()
        if isinstance(time, delta):
            time = time.total_seconds()
        if self.lasttime_written != time and \
           (self.write_ondelete is False or deleted_only is True):
            if pset.size > 0:

                first_write = [p for p in pset if (p.fileid < 0 or len(self.idx) == 0) and (p.dt*p.time <= p.dt*time or np.isnan(p.dt))]  # len(self.idx)==0 in case pset is written to new ParticleFile
                for p in first_write:
                    p.fileid = self.lasttraj
                    self.lasttraj += 1

                self.idx = np.append(self.idx, np.zeros(len(first_write)))

                size = len(pset)
                
                # dictionary for temporary hold data                
                tmp = {}
                tmp["ids"], tmp["time"], tmp["lat"], tmp["lon"], tmp["z"] =\
                        map(lambda x: np.zeros(x), [size,size,size,size,size])
                
                for var in self.user_vars:
                    tmp[var] = np.zeros(size)
                
                for key in tmp.keys():
                    tmp[key][:] = np.nan 
                
                i = 0
                for p in pset:

                    if p.dt*p.time <= p.dt*time: 
                        tmp["ids"][i] = p.id
                        tmp["time"][i] = time
                        tmp["lat"][i] = p.lat
                        tmp["lon"][i] = p.lon
                        tmp["z"][i]   = p.depth
                        for var in self.user_vars:
                            tmp[var][i] = getattr(p, var)
                        if p.state != ErrorCode.Delete and not np.allclose(p.time, time):
                            logger.warning_once('time argument in pfile.write() is %g, but a particle has time %g.' % (time, p.time))
                        i += 1
                
                if not os.path.exists(self.npy_path):
                    os.mkdir(self.npy_path)
                
                save_ind = np.isfinite(tmp["ids"])
                for key in tmp.keys():
                    tmp[key] = tmp[key][save_ind]

                np.save(os.path.join(self.npy_path,str(time)),tmp)
                
                for p in first_write:
                    for var in self.user_vars_once:
                        getattr(self, var)[p.fileid] = getattr(p, var)
            else:
                logger.warning("ParticleSet is empty on writing as array at time %g" % time)

            if not deleted_only:
                self.idx += 1
                self.lasttime_written = time

        if sync:
            self.sync()
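A minimal sketch of reading back one of the per-timestep NPY dictionaries written by this method; the folder and time stamp are placeholders, and np.save appends the '.npy' extension.

import numpy as np

# The dictionary was saved with np.save, so allow_pickle is needed on load
# and .item() unwraps the 0-d object array back into a dict.
snapshot = np.load("out/3600.0.npy", allow_pickle=True).item()
print(snapshot["ids"], snapshot["lon"], snapshot["lat"], snapshot["z"])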
Example 13
    def __init__(self,
                 fieldset,
                 ptype,
                 pyfunc=None,
                 funcname=None,
                 funccode=None,
                 py_ast=None,
                 funcvars=None,
                 c_include=""):
        self.fieldset = fieldset
        self.ptype = ptype
        self._lib = None

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            logger.warning_once(
                'Note that positive vertical velocity is assumed DOWNWARD by AdvectionRK4_3D'
            )
        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['random'] = globals()['random']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning(
                    "Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = Module(body=[self.py_ast])
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc
        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for fname in self.vector_field_args:
                f = getattr(fieldset, fname)
                Wname = f.W.name if f.W else 'not_defined'
                for sF in [f.U.name, f.V.name, Wname]:
                    if sF not in self.field_args:
                        try:
                            self.field_args[sF] = getattr(fieldset, sF)
                        except:
                            continue
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args,
                                          self.const_args, kernel_ccode,
                                          c_include_str)

            basename = path.join(get_cache_dir(), self._cache_key)
            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename,
                                       'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
Example 14
    def __init__(self,
                 fieldset,
                 ptype,
                 pyfunc=None,
                 funcname=None,
                 funccode=None,
                 py_ast=None,
                 funcvars=None,
                 c_include=""):
        self.fieldset = fieldset
        self.ptype = ptype
        self._lib = None

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            warning = False
            if isinstance(fieldset.W, Field) and fieldset.W.creation_log != 'from_nemo' and \
               fieldset.W._scaling_factor is not None and fieldset.W._scaling_factor > 0:
                warning = True
            if type(fieldset.W) in [SummedField, NestedField]:
                for f in fieldset.W:
                    if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:
                        warning = True
            if warning:
                logger.warning_once(
                    'Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n'
                    '         If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)'
                )
        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['random'] = globals()['random']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning(
                    "Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = Module(body=[self.py_ast])
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc
        assert len(inspect.getargspec(self.pyfunc).args) == 3, \
            'Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon.'

        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for fname in self.vector_field_args:
                f = getattr(fieldset, fname)
                Wname = f.W.name if f.W else 'not_defined'
                for sF in [f.U.name, f.V.name, Wname]:
                    if sF not in self.field_args:
                        try:
                            self.field_args[sF] = getattr(fieldset, sF)
                        except:
                            continue
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args,
                                          self.const_args, kernel_ccode,
                                          c_include_str)

            basename = path.join(get_cache_dir(), self._cache_key)
            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename,
                                       'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
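A minimal kernel that satisfies the three-argument assertion used in these constructors: exactly (particle, fieldset, time), with field interpolation in [time, depth, lat, lon] order. The 'u_sample' particle Variable and the sampling of fieldset.U are assumptions for illustration only.

def SampleU(particle, fieldset, time):
    # Conforming kernel signature; field access uses the
    # [time, depth, lat, lon] ordering enforced since Parcels v2.0.
    particle.u_sample = fieldset.U[time, particle.depth, particle.lat, particle.lon]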