Example #1
    def test_repr_str(self):
        """
        tests that repr() and str() work on a Field object
        """
        repr(Field('test'))
        str(Field('test'))
        assert True
Example #2
class UniformDistribution(Serializable):
    'Uniform Probability Distribution'

    _state = copy.deepcopy(Serializable._state)
    _state += [Field('low', save=True, update=True),
               Field('high', save=True, update=True)]
    _schema = UniformDistributionSchema

    def __init__(self, low=0., high=0.1):
        '''
        :param low: the lower bound of the Uniform distribution
        :param high: the upper bound of the Uniform distribution
        '''
        self.low = low
        self.high = high
        self._check_uniform_args()

    def _check_uniform_args(self):
        if None in (self.low, self.high):
            raise TypeError('Uniform probability distribution requires '
                            'low and high')

    def _uniform(self, np_array):
        np_array[:] = np.random.uniform(self.low, self.high, len(np_array))

    def set_values(self, np_array):
        self._uniform(np_array)
Example #3
class NormalDistribution(Serializable):
    'Normal Probability Distribution'

    _state = copy.deepcopy(Serializable._state)
    _state += [Field('mean', save=True, update=True),
               Field('sigma', save=True, update=True)]
    _schema = NormalDistributionSchema

    def __init__(self, mean=0., sigma=0.1):
        '''
        :param mean: the mean of the normal distribution
        :param sigma: the standard deviation of the normal distribution
        '''
        self.mean = mean
        self.sigma = sigma
        self._check_normal_args()

    def _check_normal_args(self):
        if None in (self.mean, self.sigma):
            raise TypeError('Normal probability distribution requires '
                            'mean and sigma')

    def _normal(self, np_array):
        np_array[:] = np.random.normal(self.mean, self.sigma, len(np_array))

    def set_values(self, np_array):
        self._normal(np_array)
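
Both distribution classes share the same set_values contract: they fill a
pre-allocated numpy array in place. A minimal usage sketch (assuming numpy
is imported as np and the two classes above are importable):

    values = np.zeros(1000, dtype=np.float64)
    UniformDistribution(low=0., high=1.).set_values(values)
    NormalDistribution(mean=0., sigma=0.1).set_values(values)  # overwrites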
Example #4
    def test_field_eq(self):
        """
        tests equality of Field object
        """
        assert Field('test') == Field('test')

        # all fields must match for equality
        assert Field('test') != Field('test', isdatafile=True)
Example #5
    def test_state_init(self):
        '''
        Test initialization of _state
        '''
        _state = State(read=read, update=update, save=save,
                       field=(Field('field0', read=True),
                              Field('field1', save=True)))

        all_fields = []
        all_fields.extend(save)
        all_fields.extend(update)
        all_fields.extend(read)
        all_fields = list(set(all_fields))
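        # the '+ 2' below accounts for the two Field objects passed in
        # via the 'field' keyword argument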
        assert len(_state.fields) == len(all_fields) + 2
Example #6
class HalfLifeWeatherer(Weatherer, Serializable):
    '''
    Give half-life for all components and decay accordingly
    '''
    _schema = HalfLifeWeathererSchema
    _state = copy.deepcopy(Weatherer._state)
    _state += Field('half_lives', save=True, update=True)

    def __init__(self, half_lives=(15. * 60, ), **kwargs):
        '''
        The half_lives are a property of HalfLifeWeatherer. If

          len(half_lives) != gnome.array_types.mass_components.shape[0]

        then only as many elements of half_lives are kept as there are
        mass components. The default number of mass components is 5; it
        is possible to change the default, but not easily done.
        HalfLifeWeatherer is currently used mostly for testing, so this
        will change if it becomes more widely used and users need to
        change the default number of mass components.

        half_lives could be constants or something more complex, like a
        function of time (not implemented yet). 'half_lives' is not stored
        in data_arrays since it is neither time-varying nor varying per LE.
        '''
        super(HalfLifeWeatherer, self).__init__(**kwargs)
        self.half_lives = half_lives

    @property
    def half_lives(self):
        return self._half_lives

    @half_lives.setter
    def half_lives(self, half_lives):
        self._half_lives = np.asarray(half_lives, dtype=np.float64)

    def weather_elements(self, sc, time_step, model_time):
        '''
        weather elements over time_step
        '''
        if not self.active:
            return
        if sc.num_released == 0:
            return

        for _, data in sc.itersubstancedata(self.array_types):
            hl = self._halflife(data['mass_components'], self.half_lives,
                                time_step)
            data['mass_components'][:] = hl
            data['mass'][:] = data['mass_components'].sum(1)

        sc.update_from_fatedataview()
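
The _halflife helper called in weather_elements is inherited from the base
Weatherer and not shown here; a plausible sketch as a standalone function,
assuming it applies first-order half-life decay per component:

    import numpy as np

    def _halflife(M_0, factors, time):
        # M_0: (num_LEs, num_components) mass array
        # factors: per-component half-lives in seconds
        # time: the time step in seconds
        return M_0 * (0.5 ** (np.float64(time) / np.asarray(factors)))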
Example #7
    def test_get_field_by_attribute(self):
        """
        tests the fields can also be obtained by the attributes that are
        set to True.
        get_field_by_attribute function
        """
        _state = State(read=read, update=update, save=save)
        _state.add_field(Field('test', isdatafile=True))

        for field in _state.get_field_by_attribute('read'):
            assert field.name in read

        for field in _state.get_field_by_attribute('update'):
            assert field.name in update

        for field in _state.get_field_by_attribute('save'):
            assert field.name in save

        for field in _state.get_field_by_attribute('isdatafile'):
            assert field.name in ['test']
Example #8
    def test_state_add_field(self):
        """
        Tests the add_field functionality to add a field to _state object.
        This can also be a list of field objects.
        """
        _state = State()
        _state += Field('test')

        assert len(_state.fields) == 1

        f = []
        f.append(Field('filename', save=True, isdatafile=True))
        f.append(Field('topology_file', save=True, isdatafile=True))

        _state += f
        assert len(_state.fields) == 3

        for field in _state.fields:
            assert field.name in ['test', 'filename', 'topology_file']

            if field.name == 'filename' or field.name == 'topology_file':
                assert field.isdatafile
                assert field.save
                assert not field.update
                assert not field.read
            else:
                assert not field.isdatafile
                assert not field.save
                assert not field.update
                assert not field.read

        with pytest.raises(ValueError):
            _state += Field('test')

        with pytest.raises(ValueError):
            _state += [Field('test1'), Field('test1')]
Example #9
class WeatheringOutput(Outputter, Serializable):
    '''
    class that outputs GNOME weathering results.
    The output is the aggregation of properties for all LEs (aka Mass Balance)
    for a particular time step.
    There are a number of different things we would like to graph:
    - Evaporation
    - Dissolution
    - Dissipation
    - Biodegradation
    - ???

    However, at this time we will simply try to implement an outputter for
    the half-life Weatherer. The output format follows::

        {
        "type": "WeatheringGraphs",
        "half_life": {"properties": {"mass_components": <Component values>,
                                     "mass": <total Mass value>,
                                     }
                      },
        ...
        }

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state += [Field('output_dir', update=True, save=True)]
    _schema = WeatheringOutputSchema

    def __init__(
            self,
            output_dir=None,  # default is to not output to file
            **kwargs):
        '''
        :param str output_dir=None: output directory for the JSON output files

        use super to pass optional \*\*kwargs to base class __init__ method
        '''
        self.output_dir = output_dir
        self.units = {
            'default': 'kg',
            'avg_density': 'kg/m^3',
            'avg_viscosity': 'm^2/s'
        }
        super(WeatheringOutput, self).__init__(**kwargs)

    def write_output(self, step_num, islast_step=False):
        '''
        Weathering data is only output for forecast spill container, not
        the uncertain spill container. This is because Weathering has its
        own uncertainty and mixing the two was giving weird results. The
        cloned models that are modeling weathering uncertainty do not include
        the uncertain spill container.
        '''
        super(WeatheringOutput, self).write_output(step_num, islast_step)

        if not self._write_step:
            return None

        # return a dict - json of the mass_balance data
        # weathering outputter should only apply to forecast spill_container
        sc = self.cache.load_timestep(step_num).items()[0]

        dict_ = {}
        dict_.update(sc.mass_balance)

        output_info = {'time_stamp': sc.current_time_stamp.isoformat()}
        output_info.update(sc.mass_balance)

        # output_info.update({'area': hull_area(sc['positions'][sc['status_codes'] == oil_status.in_water])})
        self.logger.debug(self._pid + 'step_num: {0}'.format(step_num))
        for name, val in dict_.iteritems():
            msg = ('\t{0}: {1}'.format(name, val))
            self.logger.debug(msg)

        if self.output_dir:
            output_filename = self.output_to_file(output_info, step_num)
            output_info.update({'output_filename': output_filename})

        return output_info

    def output_to_file(self, json_content, step_num):
        file_format = 'mass_balance_{0:06d}.json'
        filename = os.path.join(self.output_dir, file_format.format(step_num))

        with open(filename, 'w') as outfile:
            dump(json_content, outfile, indent=True)

        return filename

    def clean_output_files(self):
        if self.output_dir:
            files = glob(os.path.join(self.output_dir, 'mass_balance_*.json'))
            for f in files:
                os.remove(f)

    def rewind(self):
        'remove previously written files'
        super(WeatheringOutput, self).rewind()
        self.clean_output_files()

    def __getstate__(self):
        '''
            This is to support pickle.dumps() inside the uncertainty model
            subprocesses.
            We need to be able to pickle our weathering outputters so that
            our uncertainty subprocesses can send them back to the parent
            process through a message queue.
            And the cache attribute (specifically, the ElementCache.lock
            attribute) can not be pickled, and instead produces a
            RuntimeError.

            (Note: The __setstate__() probably doesn't need to recreate the
                   ElementCache since it will be created inside the
                   Model.setup_model_run() function.)
        '''
        odict = self.__dict__.copy()  # copy the dict since we change it
        del odict['cache']  # remove cache entry

        return odict
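
The docstring above notes that __setstate__() probably does not need to
recreate the ElementCache. If one were defined, a minimal sketch (an
assumption, not part of the original class) would just restore the dict and
leave 'cache' unset until Model.setup_model_run() reassigns it:

    def __setstate__(self, state):
        # restore everything except 'cache', which the model recreates
        self.__dict__.update(state)
        self.cache = None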
Example #10
class NaturalDispersion(Weatherer, Serializable):
    _state = copy.deepcopy(Weatherer._state)
    _state += [Field('water', save=True, update=True, save_reference=True),
               Field('waves', save=True, update=True, save_reference=True)]
    _schema = WeathererSchema

    def __init__(self,
                 waves=None,
                 water=None,
                 **kwargs):
        '''
        :param water: gnome.environment.Water object which contains
            things like water temperature
        :param waves: waves object for obtaining wave_height, etc. at a
            given time
        '''
        self.waves = waves
        self.water = water

        super(NaturalDispersion, self).__init__(**kwargs)
        self.array_types.update({'viscosity': viscosity,
                                 'mass':  mass,
                                 'density': density,
                                 'fay_area': fay_area,
                                 'frac_water': frac_water,
                                 'droplet_avg_size': droplet_avg_size,
                                 })

    def prepare_for_model_run(self, sc):
        '''
        add dispersion and sedimentation keys to mass_balance
        Assumes all spills have the same type of oil
        '''
        # create 'natural_dispersion' and 'sedimentation' keys
        # if they don't exist
        # let's only define this the first time
        if self.on:
            super(NaturalDispersion, self).prepare_for_model_run(sc)
            sc.mass_balance['natural_dispersion'] = 0.0
            sc.mass_balance['sedimentation'] = 0.0

    def prepare_for_model_step(self, sc, time_step, model_time):
        '''
        Set/update arrays used by dispersion module for this timestep:

        '''
        super(NaturalDispersion, self).prepare_for_model_step(sc,
                                                              time_step,
                                                              model_time)

        if not self.active:
            return

    def weather_elements(self, sc, time_step, model_time):
        '''
        weather elements over time_step
        - sets 'natural_dispersion' and 'sedimentation' in sc.mass_balance
        '''
        if not self.active:
            return

        if sc.num_released == 0:
            return

        # from the waves module; one call returns all the wave parameters
        waves_values = self.waves.get_value(model_time)
        wave_height = waves_values[0]
        frac_breaking_waves = waves_values[2]
        disp_wave_energy = waves_values[3]

        visc_w = self.waves.water.kinematic_viscosity
        rho_w = self.waves.water.density

        # web has different units
        sediment = self.waves.water.get('sediment', unit='kg/m^3')

        for substance, data in sc.itersubstancedata(self.array_types):
            if len(data['mass']) == 0:
                # substance does not contain any surface_weathering LEs
                continue

            V_entrain = constants.volume_entrained
            ka = constants.ka  # oil sticking term

            disp = np.zeros((len(data['mass'])), dtype=np.float64)
            sed = np.zeros((len(data['mass'])), dtype=np.float64)
            droplet_avg_size = data['droplet_avg_size']

            disperse_oil(time_step,
                         data['frac_water'],
                         data['mass'],
                         data['viscosity'],
                         data['density'],
                         data['fay_area'],
                         disp,
                         sed,
                         droplet_avg_size,
                         frac_breaking_waves,
                         disp_wave_energy,
                         wave_height,
                         visc_w,
                         rho_w,
                         sediment,
                         V_entrain,
                         ka)

            sc.mass_balance['natural_dispersion'] += np.sum(disp[:])

            if data['mass'].sum() > 0:
                disp_mass_frac = np.sum(disp[:]) / data['mass'].sum()
                if disp_mass_frac > 1:
                    disp_mass_frac = 1
            else:
                disp_mass_frac = 0

            data['mass_components'] = ((1 - disp_mass_frac) *
                                       data['mass_components'])
            data['mass'] = data['mass_components'].sum(1)

            sc.mass_balance['sedimentation'] += np.sum(sed[:])

            if data['mass'].sum() > 0:
                sed_mass_frac = np.sum(sed[:]) / data['mass'].sum()
                if sed_mass_frac > 1:
                    sed_mass_frac = 1
            else:
                sed_mass_frac = 0

            data['mass_components'] = ((1 - sed_mass_frac) *
                                       data['mass_components'])
            data['mass'] = data['mass_components'].sum(1)

            self.logger.debug('{0} Amount Dispersed for {1}: {2}'
                              .format(self._pid,
                                      substance.name,
                                      sc.mass_balance['natural_dispersion']))

        sc.update_from_fatedataview()

    def disperse_oil(self, time_step,
                     frac_water,
                     mass,
                     viscosity,
                     density,
                     fay_area,
                     disp_out,
                     sed_out,
                     frac_breaking_waves,
                     disp_wave_energy,
                     wave_height,
                     visc_w,
                     rho_w,
                     sediment,
                     V_entrain,
                     ka):
        '''
            Right now we just want to recreate what the lib_gnome dispersion
            function is doing...but in python.
            This will allow us to more easily refactor, and we can always
            then put it back into lib_gnome if necessary.
            (TODO: Not quite finished with the function yet.)
        '''
        D_e = disp_wave_energy
        f_bw = frac_breaking_waves
        H_rms = wave_height

        # dispersion term at current time.
        C_disp = D_e ** 0.57 * f_bw

        for i, (rho, mass, visc, Y, A) in enumerate(zip(density, mass,
                                                        viscosity, frac_water,
                                                        fay_area)):
            pass

    def serialize(self, json_='webapi'):
        """
        'water'/'waves' property is saved as references in save file
        """
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()
        serial = schema.serialize(toserial)

        if json_ == 'webapi':
            if self.waves:
                serial['waves'] = self.waves.serialize(json_)
            if self.water:
                serial['water'] = self.water.serialize(json_)

        return serial

    @classmethod
    def deserialize(cls, json_):
        """
        Append correct schema for water / waves
        """
        if not cls.is_sparse(json_):
            schema = cls._schema()
            dict_ = schema.deserialize(json_)

            if 'water' in json_:
                obj = json_['water']['obj_type']
                dict_['water'] = (eval(obj).deserialize(json_['water']))

            if 'waves' in json_:
                obj = json_['waves']['obj_type']
                dict_['waves'] = (eval(obj).deserialize(json_['waves']))

            return dict_
        else:
            return json_
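
The clamp-and-rescale pattern in weather_elements repeats for dispersion and
sedimentation; a minimal sketch of it factored into a standalone helper
(hypothetical name, not part of the class):

    import numpy as np

    def remove_mass_fraction(data, removed):
        # scale mass_components down by the fraction of mass removed,
        # clamped to at most 1 (cannot remove more than is present)
        total = data['mass'].sum()
        frac = min(np.sum(removed) / total, 1.0) if total > 0 else 0.0
        data['mass_components'] = (1 - frac) * data['mass_components']
        data['mass'] = data['mass_components'].sum(1)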
Example #11
class PyGrid(Serializable):

    _def_count = 0

    _state = copy.deepcopy(Serializable._state)
    _schema = PyGridSchema
    _state.add_field(
        [Field('filename', save=True, update=True, isdatafile=True)])

    def __new__(cls, *args, **kwargs):
        '''
        If you construct a PyGrid object directly, you will always
        get one of the child types based on your input
        '''
        if cls is not PyGrid_U and cls is not PyGrid_S:
            if 'faces' in kwargs:
                cls = PyGrid_U
            else:
                cls = PyGrid_S
#         cls.obj_type = c.obj_type
        return super(type(cls), cls).__new__(cls, *args, **kwargs)

    def __init__(self, filename=None, *args, **kwargs):
        '''
        Init common to all PyGrid types. This constructor will take all the kwargs of both
        pyugrid.UGrid and pysgrid.SGrid. See their documentation for details

        :param filename: Name of the file this grid was constructed from, if available.
        '''
        super(PyGrid, self).__init__(**kwargs)
        if 'name' in kwargs:
            self.name = kwargs['name']
        else:
            self.name = self.name + '_' + str(type(self)._def_count)
        self.obj_type = str(type(self).__bases__[0])
        self.filename = filename
        type(self)._def_count += 1

    @classmethod
    def load_grid(cls, filename, topology_var):
        '''
        Redirect to grid-specific loading routine.
        '''
        if hasattr(topology_var, 'face_node_connectivity') or isinstance(
                topology_var, dict) and 'faces' in topology_var.keys():
            cls = PyGrid_U
            return cls.from_ncfile(filename)
        else:
            cls = PyGrid_S
            return cls.load_grid(filename)

    @classmethod
    def from_netCDF(cls,
                    filename=None,
                    dataset=None,
                    grid_type=None,
                    grid_topology=None,
                    *args,
                    **kwargs):
        '''
        :param filename: File containing a grid
        :param dataset: Takes precedence over filename, if provided.
        :param grid_type: Must be provided if Dataset does not have a 'grid_type' attribute, or valid topology variable
        :param grid_topology: A dictionary mapping of grid attribute to variable name. Takes precedence over discovered attributes
        :param **kwargs: All kwargs to SGrid or UGrid are valid, and take precedence over all.
        :returns: Instance of PyGrid_U, PyGrid_S, or PyGrid_R
        '''
        gf = dataset if filename is None else _get_dataset(filename, dataset)
        if gf is None:
            raise ValueError('No filename or dataset provided')

        cls = PyGrid._get_grid_type(gf, grid_topology, grid_type)
        init_args, gf_vars = cls._find_required_grid_attrs(
            filename, dataset=dataset, grid_topology=grid_topology)
        return cls(**init_args)

    @classmethod
    def _find_required_grid_attrs(
        cls,
        filename,
        dataset=None,
        grid_topology=None,
    ):
        '''
        This function is the top level 'search for attributes' function. If there are any
        common attributes to all potential grid types, they will be sought here.

        This function returns a dict, which maps an attribute name to a netCDF4
        Variable or numpy array object extracted from the dataset. When called from
        PyGrid_U or PyGrid_S, this function should provide all the kwargs needed to
        create a valid instance.
        '''
        gf_vars = dataset.variables if dataset is not None else _get_dataset(
            filename).variables
        init_args = {}
        init_args['filename'] = filename
        node_attrs = ['node_lon', 'node_lat']
        node_coord_names = [['node_lon', 'node_lat'], ['lon', 'lat'],
                            ['lon_psi', 'lat_psi']]
        composite_node_names = ['nodes', 'node']
        if grid_topology is None:
            for n1, n2 in node_coord_names:
                if n1 in gf_vars and n2 in gf_vars:
                    init_args[node_attrs[0]] = gf_vars[n1][:]
                    init_args[node_attrs[1]] = gf_vars[n2][:]
                    break
            if node_attrs[0] not in init_args:
                for n in composite_node_names:
                    if n in gf_vars:
                        v = gf_vars[n][:].reshape(-1, 2)
                        init_args[node_attrs[0]] = v[:, 0]
                        init_args[node_attrs[1]] = v[:, 1]
                        break
            if node_attrs[0] not in init_args:
                raise ValueError('Unable to find node coordinates.')
        else:
            for n, v in grid_topology.items():
                if n in node_attrs:
                    init_args[n] = gf_vars[v][:]
                if n in composite_node_names:
                    v = gf_vars[n][:].reshape(-1, 2)
                    init_args[node_attrs[0]] = v[:, 0]
                    init_args[node_attrs[1]] = v[:, 1]
        return init_args, gf_vars

    @classmethod
    def new_from_dict(cls, dict_):
        dict_.pop('json_')
        filename = dict_['filename']
        rv = cls.from_netCDF(filename)
        rv.__class__._restore_attr_from_save(rv, dict_)
        rv._id = dict_.pop('id') if 'id' in dict_ else rv.id
        rv.__class__._def_count -= 1
        return rv

    @staticmethod
    def _get_grid_type(dataset, grid_topology=None, grid_type=None):
        sgrid_names = ['sgrid', 'pygrid_s', 'staggered', 'curvilinear', 'roms']
        ugrid_names = ['ugrid', 'pygrid_u', 'triangular', 'unstructured']
        if grid_type is not None:
            if grid_type.lower() in sgrid_names:
                return PyGrid_S
            elif grid_type.lower() in ugrid_names:
                return PyGrid_U
            else:
                raise ValueError(
                    'Specified grid_type not recognized/supported')
        if grid_topology is not None:
            if 'faces' in grid_topology.keys() or grid_topology.get(
                    'grid_type', 'notype').lower() in ugrid_names:
                return PyGrid_U
            else:
                return PyGrid_S
        else:
            # no topology, so search dataset for grid_type variable
            if hasattr(dataset, 'grid_type'
                       ) and dataset.grid_type in sgrid_names + ugrid_names:
                if dataset.grid_type.lower() in ugrid_names:
                    return PyGrid_U
                else:
                    return PyGrid_S
            else:
                # no grid type explicitly specified. is a topology variable present?
                topology = PyGrid._find_topology_var(None, dataset=dataset)
                if topology is not None:
                    if hasattr(topology, 'node_coordinates') and not hasattr(
                            topology, 'node_dimensions'):
                        return PyGrid_U
                    else:
                        return PyGrid_S
                else:
                    # no topology variable either, so generate and try again.
                    # if no defaults are found, _gen_topology will raise an error
                    try:
                        u_init_args, u_gf_vars = PyGrid_U._find_required_grid_attrs(
                            None, dataset)
                        return PyGrid_U
                    except ValueError:
                        s_init_args, s_gf_vars = PyGrid_S._find_required_grid_attrs(
                            None, dataset)
                        return PyGrid_S

    @staticmethod
    def _find_topology_var(filename, dataset=None):
        gf = _get_dataset(filename, dataset)
        gts = []
        for v in gf.variables:
            if hasattr(v, 'cf_role') and 'topology' in v.cf_role:
                gts.append(v)
#         gts = gf.get_variables_by_attributes(cf_role=lambda t: t is not None and 'topology' in t)
        if len(gts) != 0:
            return gts[0]
        else:
            return None

    @property
    def shape(self):
        return self.node_lon.shape

    def __eq__(self, o):
        if self is o:
            return True
        for n in ('nodes', 'faces'):
            if hasattr(self, n) and hasattr(o, n) and getattr(
                    self, n) is not None and getattr(o, n) is not None:
                s = getattr(self, n)
                s2 = getattr(o, n)
                if s.shape != s2.shape or np.any(s != s2):
                    return False
        return True

    def serialize(self, json_='webapi'):
        return Serializable.serialize(self, json_=json_)

    def _write_grid_to_file(self, pth):
        self.save_as_netcdf(pth)

    def save(self, saveloc, references=None, name=None):
        '''
        INCOMPLETE
        Write the grid to file or to zip,
        then call save method using super
        '''
        #         name = self.name
        #         saveloc = os.path.splitext(name)[0] + '_grid.GRD'

        if zipfile.is_zipfile(saveloc):
            if self.filename is None:
                self._write_grid_to_file(saveloc)
                self._write_grid_to_zip(saveloc, saveloc)
                self.filename = saveloc
#             else:
#                 self._write_grid_to_zip(saveloc, self.filename)
        else:
            if self.filename is None:
                self._write_grid_to_file(saveloc)
                self.filename = saveloc
        return super(PyGrid, self).save(saveloc, references, name)

    def draw_to_plot(self, plt, features=None, style=None):
        def_style = {
            'node': {
                'color': 'green',
                'linestyle': 'dashed',
                'marker': 'o'
            },
            'center': {
                'color': 'blue',
                'linestyle': 'solid'
            },
            'edge1': {
                'color': 'purple'
            },
            'edge2': {
                'color': 'olive'
            }
        }
        if features is None:
            features = ['node']
        if style is None:
            style = def_style
        for f in features:
            s = style[f]
            lon, lat = self._get_grid_attrs(f)
            plt.plot(lon, lat, **s)
            plt.plot(lon.T, lat.T, **s)
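
A minimal usage sketch for the factory path described in from_netCDF (the
file name is a placeholder): the classmethod inspects the dataset, picks
PyGrid_U or PyGrid_S, and builds the grid from the discovered node
coordinates:

    grid = PyGrid.from_netCDF(filename='my_grid.nc')
    print grid.shape  # node_lon.shape of the resulting grid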
Example #12
class Evaporation(Weatherer, Serializable):
    _state = copy.deepcopy(Weatherer._state)
    _state += [
        Field('water', save=True, update=True, save_reference=True),
        Field('wind', save=True, update=True, save_reference=True)
    ]
    _schema = WeathererSchema

    def __init__(self, water=None, wind=None, **kwargs):
        '''
        :param water: gnome.environment.Water object which contains
            things like water temperature
        :param wind: wind object for obtaining speed at specified time
        :type wind: Wind API, specifically must have get_value(time) method
        '''
        self.water = water
        self.wind = wind

        if water is not None and wind is not None:
            make_default_refs = False
        else:
            make_default_refs = True

        super(Evaporation, self).__init__(make_default_refs=make_default_refs,
                                          **kwargs)
        self.array_types.update({
            'area', 'evap_decay_constant', 'frac_water', 'frac_lost',
            'init_mass'
        })

    def prepare_for_model_run(self, sc):
        '''
        add evaporated key to mass_balance
        for now also add 'density' key here
        Assumes all spills have the same type of oil
        '''
        # create 'evaporated' key if it doesn't exist
        # let's only define this the first time
        if self.on:
            super(Evaporation, self).prepare_for_model_run(sc)

            sc.mass_balance['evaporated'] = 0.0
            msg = ("{0._pid} init 'evaporated' key to 0.0").format(self)
            self.logger.debug(msg)

    def _mass_transport_coeff(self, model_time):
        '''
        Is wind a function of only model_time? How about time_step?
        At present, yes, since wind only contains timeseries data

            K = c * U ** 0.78 if U <= 10 m/s
            K = 0.06 * c * U ** 2 if U > 10 m/s

        If K is expressed in m/sec, then Buchanan and Hurford set c = 0.0025
        U is wind_speed 10m above the surface

        .. note:: wind speed is at least 1 m/s.
        '''
        wind_speed = max(1, self.wind.get_value(model_time)[0])
        c_evap = 0.0025  # if wind_speed in m/s
        if wind_speed <= 10.0:
            return c_evap * wind_speed**0.78
        else:
            return 0.06 * c_evap * wind_speed**2

    def _set_evap_decay_constant(self, model_time, data, substance, time_step):
        # used to compute the evaporation decay constant
        K = self._mass_transport_coeff(model_time)
        water_temp = self.water.get('temperature', 'K')

        f_diff = 1.0
        if 'frac_water' in data:
            # frac_water content in emulsion is a per-element quantity but is
            # currently not being set by anything. Fix once we initialize
            # and properly set frac_water
            f_diff = (1.0 - data['frac_water'])

        vp = substance.vapor_pressure(water_temp)

        #mw = substance.molecular_weight
        # evaporation expects mw in kg/mol, database is in g/mol
        mw = substance.molecular_weight / 1000.

        sum_mi_mw = (data['mass_components'][:, :len(vp)] / mw).sum(axis=1)
        # d_numer = -1/rho * f_diff.reshape(-1, 1) * K * vp
        # d_denom = (data['thickness'] * constants.gas_constant *
        #            water_temp * sum_frac_mw).reshape(-1, 1)
        # data['evap_decay_constant'][:, :len(vp)] = d_numer/d_denom
        #
        # Do computation together so we don't need to make intermediate copies
        # of data - left sum_frac_mw, which is a copy but easier to
        # read/understand
        data['evap_decay_constant'][:, :len(vp)] = \
            ((-data['area'] * f_diff * K /
              (constants.gas_constant * water_temp * sum_mi_mw)).reshape(-1, 1)
             * vp)

        self.logger.debug(self._pid + 'max decay: {0}, min decay: {1}'.format(
            np.max(data['evap_decay_constant']),
            np.min(data['evap_decay_constant'])))
        if np.any(data['evap_decay_constant'] > 0.0):
            raise ValueError("Error in Evaporation routine. One of the"
                             " exponential decay constant is positive")

    def weather_elements(self, sc, time_step, model_time):
        '''
        weather elements over time_step

        - sets 'evaporation' in sc.mass_balance
        - currently also sets 'density' in sc.mass_balance but may update
          this as we add more weatherers and perhaps density gets set elsewhere

        The following differential equation models the rate of change of
        each pseudocomponent of oil::

            dm(t)/dt = -(1 - fw) * A/B * m(t)

        Over a time-step, A and B are assumed constant. m(t) is the component
        mass at the beginning of the timestep; m(t + Dt) is the mass at the
        end of the timestep::

            m(t + Dt) = m(t) * exp(-L * Dt)
            L := (1 - fw) * A/B

        Define properties for each pseudocomponent of oil and constants::

            vp: vapor pressure
            mw: molecular weight

        The following quantities are defined for a given blob of oil. The
        thickness of the blob is same for all LEs regardless of how many LEs
        are used to model the blob::

            area: area computed from fay spreading
            m_i: mass of component 'i'
            sum_m_mw: sum(m_i/mw_i) over all components

        effect of wind - mass transport coefficient::

            K: See _mass_transport_coeff()

        Finally, Evaporation of component 'i' for blob of oil::

            A = area * K * vp
            B = gas_constant * water_temp * sum_m_mw

        L becomes::

            L = (1 - fw) * area * K * vp/(gas_constant * water_temp * sum_m_mw)
        '''
        if not self.active:
            return
        if sc.num_released == 0:
            return

        for substance, data in sc.itersubstancedata(self.array_types):
            if len(data['mass']) == 0:
                continue

            # set evap_decay_constant array
            self._set_evap_decay_constant(model_time, data, substance,
                                          time_step)
            mass_remain = self._exp_decay(data['mass_components'],
                                          data['evap_decay_constant'],
                                          time_step)

            sc.mass_balance['evaporated'] += \
                np.sum(data['mass_components'][:, :] - mass_remain[:, :])

            # log amount evaporated at each step
            self.logger.debug(self._pid +
                              'amount evaporated for {0}: {1}'.format(
                                  substance.name,
                                  np.sum(data['mass_components'][:, :] -
                                         mass_remain[:, :])))

            data['mass_components'][:] = mass_remain
            data['mass'][:] = data['mass_components'].sum(1)

            # add frac_lost
            data['frac_lost'][:] = 1 - data['mass'] / data['init_mass']
        sc.update_from_fatedataview()

    def serialize(self, json_='webapi'):
        """
        Since 'wind'/'water' property is saved as references in save file
        need to add appropriate node to WindMover schema for 'webapi'
        """
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()

        if json_ == 'webapi':
            if self.wind:
                schema.add(WindSchema(name='wind'))
            if self.water:
                schema.add(WaterSchema(name='water'))

        return schema.serialize(toserial)

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for wind object
        """
        schema = cls._schema()

        if 'wind' in json_:
            schema.add(WindSchema(name='wind'))

        if 'water' in json_:
            schema.add(WaterSchema(name='water'))

        return schema.deserialize(json_)
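
The _exp_decay helper used in weather_elements is inherited and not shown; a
plausible sketch, assuming it applies m(t + Dt) = m(t) * exp(lambda * Dt)
with the (non-positive) decay constants computed above:

    import numpy as np

    def _exp_decay(M_0, lambda_, dt):
        # lambda_ holds the per-component evap_decay_constant values,
        # which are <= 0, so mass can only decrease
        return M_0 * np.exp(lambda_ * dt)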
Example #13
class MapFromBNA(RasterMap):
    """
    A raster land-water map, created from a BNA file
    """
    _state = copy.deepcopy(RasterMap._state)
    _state.update(['map_bounds', 'spillable_area'], save=False)
    _state.add(save=['refloat_halflife'], update=['refloat_halflife'])
    _state.add_field(
        Field('filename',
              isdatafile=True,
              save=True,
              read=True,
              test_for_eq=False))
    _schema = MapFromBNASchema

    def __init__(self, filename, raster_size=1024 * 1024, **kwargs):
        """
        Creates a GnomeMap (specifically a RasterMap) from a bna file.
        It is expected that you will get the spillable area and map bounds
        from the BNA -- if they exist

        Required arguments:

        :param filename: full path to a BNA file
        :param refloat_halflife: the half-life (in hours) for the re-floating.
        :param raster_size: the total number of pixels (bytes) to make the
                            raster -- the actual size will match the
                            aspect ratio of the bounding box of the land

        Optional arguments (kwargs):

        :param map_bounds: The polygon bounding the map -- could be larger or
                           smaller than the land raster
        :param spillable_area: The polygon bounding the spillable_area
        :param id: unique ID of the object. Using UUID as a string.
                   This is only used when loading object from save file.
        :type id: string
        """
        self.filename = filename
        polygons = haz_files.ReadBNA(filename, 'PolygonSet')
        map_bounds = None
        self.name = kwargs.pop('name', os.path.split(filename)[1])

        # find the spillable area and map bounds:
        # and create a new polygonset without them
        #  fixme -- adding a "pop" method to PolygonSet might be better
        #      or a gnome_map_data object...

        just_land = PolygonSet()  # and lakes....
        spillable_area = PolygonSet()

        for p in polygons:
            if p.metadata[1].lower() == 'spillablearea':
                spillable_area.append(p)

            elif p.metadata[1].lower() == 'map bounds':
                map_bounds = p
            else:
                just_land.append(p)

        # now draw the raster map with a map_canvas:
        # determine the size:

        BB = just_land.bounding_box

        # create spillable area and  bounds if they weren't in the BNA
        if map_bounds is None:
            map_bounds = BB.AsPoly()

        if len(spillable_area) == 0:
            spillable_area.append(map_bounds)

        # user defined spillable_area, map_bounds overrides data obtained
        # from polygons

        # todo: should there be a check between spillable_area read from BNA
        # versus what the user entered. if this is within spillable_area for
        # BNA, then include it? else ignore
        spillable_area = kwargs.pop('spillable_area', spillable_area)
        map_bounds = kwargs.pop('map_bounds', map_bounds)

        # stretch the bounding box, to get approximate aspect ratio in
        # projected coords.

        aspect_ratio = (np.cos(BB.Center[1] * np.pi / 180) *
                        (BB.Width / BB.Height))
        w = int(np.sqrt(raster_size * aspect_ratio))
        h = int(raster_size / w)

        canvas = BW_MapCanvas((w, h), land_polygons=just_land)
        canvas.draw_background()

        # canvas.save_background("raster_map_test.png")

        # # get the bitmap as a numpy array:

        bitmap_array = canvas.as_array()

        # __init__ the  RasterMap

        # hours
        RasterMap.__init__(self,
                           bitmap_array,
                           canvas.projection,
                           map_bounds=map_bounds,
                           spillable_area=spillable_area,
                           **kwargs)

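
For intuition on the raster sizing above, a worked example with the default
raster_size and an illustrative aspect ratio of 2.0:

    import numpy as np

    raster_size = 1024 * 1024                      # 1,048,576 pixels
    aspect_ratio = 2.0                             # width / height of land BB
    w = int(np.sqrt(raster_size * aspect_ratio))   # 1448
    h = int(raster_size / w)                       # 724
    # w * h = 1,048,352, i.e. raster_size to within one row of pixels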
Example #14
class CatsMover(CurrentMoversBase, Serializable):

    _state = copy.deepcopy(CurrentMoversBase._state)

    _update = ['scale', 'scale_refpoint', 'scale_value',
               'up_cur_uncertain', 'down_cur_uncertain',
               'right_cur_uncertain', 'left_cur_uncertain',
               'uncertain_eddy_diffusion', 'uncertain_eddy_v0']
    _create = []
    _create.extend(_update)
    _state.add(update=_update, save=_create)
    _state.add_field([Field('filename', save=True, read=True, isdatafile=True,
                            test_for_eq=False),
                      Field('tide', save=True, update=True,
                            save_reference=True)])

    _schema = CatsMoverSchema

    def __init__(self, filename, tide=None, uncertain_duration=48,
                 **kwargs):
        """
        Uses super to invoke base class __init__ method.

        :param filename: file containing currents patterns for Cats

        Optional parameters (kwargs).
        Defaults are defined by CyCatsMover object.

        :param tide: a gnome.environment.Tide object to be attached to
                     CatsMover
        :param scale: a boolean to indicate whether to scale value at
                      reference point or not
        :param scale_value: value used for scaling at reference point
        :param scale_refpoint: reference location (long, lat, z). The scaling
                               applied to all data is determined by scaling
                               the raw value at this location.

        :param uncertain_duration: how often a given uncertain element
                                   gets reset
        :param uncertain_time_delay: when the uncertainty kicks in.
        :param up_cur_uncertain: Scale for uncertainty along the flow
        :param down_cur_uncertain: Scale for uncertainty along the flow
        :param right_cur_uncertain: Scale for uncertainty across the flow
        :param left_cur_uncertain: Scale for uncertainty across the flow
        :param uncertain_eddy_diffusion: Diffusion coefficient for
                                         eddy diffusion. Default is 0.
        :param uncertain_eddy_v0: Default is .1 (Check that this is still used)
        Remaining kwargs are passed onto Mover's __init__ using super.
        See Mover documentation for remaining valid kwargs.
        """
        if not os.path.exists(filename):
            raise ValueError('Path for Cats filename does not exist: {0}'
                             .format(filename))

        self._filename = filename

        # check if this is stored with cy_cats_mover?
        self.mover = CyCatsMover()
        self.mover.text_read(filename)
        self.name = os.path.split(filename)[1]

        self._tide = None
        if tide is not None:
            self.tide = tide

        self.scale = kwargs.pop('scale', self.mover.scale_type)
        self.scale_value = kwargs.pop('scale_value',
                                      self.mover.scale_value)

        self.up_cur_uncertain = kwargs.pop('up_cur_uncertain', .3)
        self.down_cur_uncertain = kwargs.pop('down_cur_uncertain', -.3)
        self.right_cur_uncertain = kwargs.pop('right_cur_uncertain', .1)
        self.left_cur_uncertain = kwargs.pop('left_cur_uncertain', -.1)
        self.uncertain_eddy_diffusion = kwargs.pop('uncertain_eddy_diffusion',
                                                   0)
        self.uncertain_eddy_v0 = kwargs.pop('uncertain_eddy_v0', .1)
        # TODO: no need to check for None since properties that are None
        # are not persisted

        if 'scale_refpoint' in kwargs:
            self.scale_refpoint = kwargs.pop('scale_refpoint')
            self.mover.compute_velocity_scale()

        if (self.scale and
            self.scale_value != 0.0 and
                self.scale_refpoint is None):
            raise TypeError("Provide a reference point in 'scale_refpoint'.")

        super(CatsMover, self).__init__(uncertain_duration, **kwargs)

    def __repr__(self):
        return 'CatsMover(filename={0})'.format(self.filename)

    # Properties
    filename = property(lambda self: basename(self._filename),
                        lambda self, val: setattr(self, '_filename', val))

    scale = property(lambda self: bool(self.mover.scale_type),
                     lambda self, val: setattr(self.mover,
                                               'scale_type',
                                               int(val)))

    scale_value = property(lambda self: self.mover.scale_value,
                           lambda self, val: setattr(self.mover,
                                                     'scale_value',
                                                     val))

    up_cur_uncertain = property(lambda self: self.mover.up_cur_uncertain,
                                lambda self, val: setattr(self.mover,
                                                          'up_cur_uncertain',
                                                          val))

    down_cur_uncertain = property(lambda self: self.mover.down_cur_uncertain,
                                  lambda self, val:
                                  setattr(self.mover, 'down_cur_uncertain',
                                          val))

    right_cur_uncertain = property(lambda self: self.mover.right_cur_uncertain,
                                   lambda self, val:
                                   setattr(self.mover, 'right_cur_uncertain',
                                           val))

    left_cur_uncertain = property(lambda self: self.mover.left_cur_uncertain,
                                  lambda self, val:
                                  setattr(self.mover, 'left_cur_uncertain',
                                          val))

    uncertain_eddy_diffusion = property(lambda self:
                                        self.mover.uncertain_eddy_diffusion,
                                        lambda self, val:
                                        setattr(self.mover,
                                                'uncertain_eddy_diffusion',
                                                val))

    uncertain_eddy_v0 = property(lambda self: self.mover.uncertain_eddy_v0,
                                 lambda self, val: setattr(self.mover,
                                                           'uncertain_eddy_v0',
                                                           val))

    @property
    def ref_scale(self):
        return self.mover.ref_scale

    @property
    def scale_refpoint(self):
        return self.mover.ref_point

    @scale_refpoint.setter
    def scale_refpoint(self, val):
        '''
        Must be a tuple of length 2 or 3: (long, lat, z). If only (long, lat)
        is given, then z is set to 0
        '''
        if len(val) == 2:
            self.mover.ref_point = (val[0], val[1], 0.)
        else:
            self.mover.ref_point = val

        self.mover.compute_velocity_scale()

    @property
    def tide(self):
        return self._tide

    @tide.setter
    def tide(self, tide_obj):
        if not isinstance(tide_obj, Tide):
            raise TypeError('tide must be of type environment.Tide')

        if isinstance(tide_obj.cy_obj, CyShioTime):
            self.mover.set_shio(tide_obj.cy_obj)
        elif isinstance(tide_obj.cy_obj, CyOSSMTime):
            self.mover.set_ossm(tide_obj.cy_obj)
        else:
            raise TypeError('Tide.cy_obj attribute must be either '
                            'CyOSSMTime or CyShioTime type for CatsMover.')

        self._tide = tide_obj

    def get_grid_data(self):
        """
            Invokes the GetToplogyHdl method of TriGridVel_c object
        """
        # we are assuming cats are always triangle grids,
        # but may want to extend
        return self.get_triangles()

    def get_center_points(self):
        return self.get_triangle_center_points()

    def get_scaled_velocities(self, model_time):
        """
        Get file values scaled to ref pt value, with tide applied (if any)
        """
        velocities = self.mover._get_velocity_handle()
        ref_scale = self.ref_scale  # this needs to be computed, needs a time

        if self._tide is not None:
            time_value = self._tide.cy_obj.get_time_value(model_time)
            tide = time_value[0][0]
        else:
            tide = 1

        velocities['u'] *= ref_scale * tide
        velocities['v'] *= ref_scale * tide

        return velocities

    def serialize(self, json_='webapi'):
        """
        Since 'wind' property is saved as a reference when used in save file
        and 'save' option, need to add appropriate node to WindMover schema
        """
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()

        if json_ == 'save':
            toserial['filename'] = self._filename

        if 'tide' in toserial:
            schema.add(TideSchema(name='tide'))

        return schema.serialize(toserial)

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for wind object
        """
        if not cls.is_sparse(json_):
            schema = cls._schema()

            if 'tide' in json_:
                schema.add(TideSchema())

            return schema.deserialize(json_)
        else:
            return json_
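
A minimal construction sketch (file paths are placeholders, and it assumes
gnome.environment.Tide accepts a tide file path):

    from gnome.environment import Tide

    c_mover = CatsMover('currents.cur', tide=Tide('tide_data.txt'))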
Example #15
class Renderer(Outputter, MapCanvas):
    """
    Map Renderer

    class that writes map images for GNOME results.

    Writes the frames for the LE "movies", etc.
    """
    # This defines the colors used for the map
    #   -- they can then be referenced by name in the rest of the code.
    map_colors = [('background', (255, 255, 255)),  # white
                  ('lake', (255, 255, 255)),  # white
                  ('land', (255, 204, 153)),  # brown
                  ('LE', (0, 0, 0)),  # black
                  ('uncert_LE', (255, 0, 0)),  # red
                  ('map_bounds', (175, 175, 175)),  # grey
                  ('spillable_area', (255, 0, 0)),  # red
                  ('raster_map', (51, 102, 0)),  # dark green
                  ('raster_map_outline', (0, 0, 0)),  # black
                  ('grid_1', (51, 78, 0)),
                  ('grid_2', (175, 175, 175)),
                  ]

    background_map_name = 'background_map.png'
    foreground_filename_format = 'foreground_{0:05d}.png'
    foreground_filename_glob = 'foreground_?????.png'

    # Serialization info:
    _update = ['viewport', 'map_BB', 'image_size', 'draw_ontop']
    _create = ['image_size', 'projection', 'draw_ontop']

    _create.extend(_update)
    _state = copy.deepcopy(Outputter._state)
    _state.add(save=_create, update=_update)
    _state.add_field(Field('map_filename',
                           isdatafile=True,
                           save=True,
                           read=True,
                           test_for_eq=False))
    _state.add_field(Field('output_dir', save=True, update=True,
                           test_for_eq=False))
    _schema = RendererSchema

    @classmethod
    def new_from_dict(cls, dict_):
        """
        change projection_type from string to correct type for loading from
        save file
        """
        if 'projection' in dict_:
            # todo:
            # The 'projection' isn't stored as a nested object - should
            # revisit this and see if we can make it consistent with nested
            # objects ... but this works!
            # creates an instance of the projection class
            proj_inst = class_from_objtype(dict_.pop('projection'))()
            # then creates the object
            obj = cls(projection=proj_inst, **dict_)
        else:
            obj = super(Renderer, cls).new_from_dict(dict_)
        return obj

    def __init__(self,
                 map_filename=None,
                 output_dir='./',
                 image_size=(800, 600),
                 projection=None,
                 viewport=None,
                 map_BB=None,
                 land_polygons=None,
                 draw_back_to_fore=True,
                 draw_map_bounds=False,
                 draw_spillable_area=False,
                 cache=None,
                 output_timestep=None,
                 output_zero_step=True,
                 output_last_step=True,
                 output_start_time=None,
                 draw_ontop='forecast',
                 name=None,
                 on=True,
                 formats=['png', 'gif'],
                 timestamp_attrib={},
                 **kwargs
                 ):
        """
        Init the image renderer.

        :param str map_filename=None: name of file for basemap (BNA)
        :type map_filename: str

        :param str output_dir='./': directory to output the images

        :param 2-tuple image_size=(800, 600): size of images to output

        :param projection=None: projection instance to use:
                                if None, set to projections.FlatEarthProjection()
        :type projection: a gnome.utilities.projection.Projection instance

        :param viewport: viewport of map -- what gets drawn and on what scale.
                         Default is full globe: (((-180, -90), (180, 90)))
        :type viewport: pair of (lon, lat) tuples ( lower_left, upper right )

        :param map_BB=None: bounding box of map if None, it will use the
                            bounding box of the mapfile.

        :param draw_back_to_fore=True: draw the background (map) to the
                                       foreground image when outputting
                                       the images each time step.
        :type draw_back_to_fore: boolean

        Following args are passed to base class Outputter's init:

        :param cache: sets the cache object from which to read prop. The model
            will automatically set this param

        :param output_timestep: default is None, in which case output is
            written every time write_output is called. If set, then output is
            written every output_timestep starting from model_start_time.
        :type output_timestep: timedelta object

        :param output_zero_step: default is True. If True then output for
            initial step (showing initial release conditions) is written
            regardless of output_timestep
        :type output_zero_step: boolean

        :param output_last_step: default is True. If True then output for
            final step is written regardless of output_timestep
        :type output_last_step: boolean

        :param draw_ontop: draw 'forecast' or 'uncertain' LEs on top. Default
            is to draw 'forecast' LEs, which are in black on top
        :type draw_ontop: str

        :param formats: list of formats to output.
                        Default is .png and animated .gif
        :type formats: list of strings


        Remaining kwargs are passed onto baseclass's __init__ with a direct
        call: Outputter.__init__(..)

        """
        projection = (projections.FlatEarthProjection()
                      if projection is None
                      else projection)
        # set up the canvas
        self.map_filename = map_filename
        self.output_dir = output_dir

        if map_filename is not None and land_polygons is None:
            self.land_polygons = haz_files.ReadBNA(map_filename, 'PolygonSet')
        elif land_polygons is not None:
            self.land_polygons = land_polygons
        else:
            self.land_polygons = []  # empty list so we can loop thru it

        self.last_filename = ''
        self.draw_ontop = draw_ontop
        self.draw_back_to_fore = draw_back_to_fore

        Outputter.__init__(self,
                           cache,
                           on,
                           output_timestep,
                           output_zero_step,
                           output_last_step,
                           output_start_time,
                           name,
                           output_dir
                           )

        if map_BB is None:
            if not self.land_polygons:
                map_BB = ((-180, -90), (180, 90))
            else:
                map_BB = self.land_polygons.bounding_box
        self.map_BB = map_BB

        MapCanvas.__init__(self,
                           image_size,
                           projection=projection,
                           viewport=self.map_BB)

        # assorted rendering flags:
        self.draw_map_bounds = draw_map_bounds
        self.draw_spillable_area = draw_spillable_area
        self.raster_map = None
        self.raster_map_fill = True
        self.raster_map_outline = False

        # initialize the images:
        self.add_colors(self.map_colors)
        self.background_color = 'background'

        if self.map_filename is not None:
            file_prefix = os.path.splitext(self.map_filename)[0]
            sep = '_'
        else:
            file_prefix = sep = ''
        fn = '{}{}anim.gif'.format(file_prefix, sep)
        self.anim_filename = os.path.join(output_dir, fn)

        self.formats = formats
        self.delay = 50
        self.repeat = True
        self.timestamp_attribs = {}
        self.set_timestamp_attrib(**timestamp_attrib)
        self.grids = []
        self.props = []

    @property
    def delay(self):
        return self._delay if 'gif' in self.formats else -1

    @delay.setter
    def delay(self, d):
        self._delay = d

    @property
    def repeat(self):
        return self._repeat if 'gif' in self.formats else False

    @repeat.setter
    def repeat(self, r):
        self._repeat = r

    @property
    def map_filename(self):
        return basename(self._filename) if self._filename is not None else None

    @map_filename.setter
    def map_filename(self, name):
        self._filename = name

    @property
    def draw_ontop(self):
        return self._draw_ontop

    @draw_ontop.setter
    def draw_ontop(self, val):
        if val not in ['forecast', 'uncertain']:
            raise ValueError("'draw_ontop' must be either 'forecast' or"
                             "'uncertain'. {0} is invalid.".format(val))
        self._draw_ontop = val

    def output_dir_to_dict(self):
        return os.path.abspath(self.output_dir)

    def start_animation(self, filename):
        self.animation = py_gd.Animation(filename, self.delay)
        loop_count = 0 if self.repeat else -1
        print 'Starting animation'
        self.animation.begin_anim(self.back_image, loop_count)

    def prepare_for_model_run(self, *args, **kwargs):
        """
        prepares the renderer for a model run.

        Parameters passed to base class (use super): model_start_time, cache

        Does not take any other input arguments; however, to keep the interface
        the same for all outputters, define ``**kwargs`` and pass into the
        base class

        In this case, it draws the background image and clears the previous
        images. If you want to save the previous images, a new output dir
        should be set.
        """
        super(Renderer, self).prepare_for_model_run(*args, **kwargs)

        self.clean_output_files()

        self.draw_background()
        for ftype in self.formats:
            if ftype == 'gif':
                self.start_animation(self.anim_filename)
            else:
                self.save_background(os.path.join(self.output_dir,
                                                  self.background_map_name),
                                     file_type=ftype)

    def set_timestamp_attrib(self, **kwargs):
        """
        Function to set details of the timestamp's appearance when printed.
        These details are stored as a dict.

        Recognized attributes:

        :param on: Turn the draw function on or off
        :type on: Boolean

        :param dt_format: Format string for strftime to format the timestamp
        :type dt_format: String

        :param background: Color of the text background.
                           Color must be present in foreground palette

        :type background: str

        :param color: Color of the font. Note that the color must be present
                      in the foreground palette

        :type color: str

        :param size: Size of the font, one of 'tiny', 'small', 'medium', 'large', 'giant'

        :type size: str

        :param position: x, y pixel coordinates of where to draw the timestamp.

        :type position: tuple

        :param align: The reference point of the text bounding box.
                      One of: 'lt' (left top), 'ct', 'rt', 'l', 'r', 'rb', 'cb', 'lb'

        :type align: str

        """
        self.timestamp_attribs.update(kwargs)


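    # Usage sketch (hypothetical values, not part of the original source):
    # with a Renderer instance `r`, the timestamp appearance could be set as
    #
    #     r.set_timestamp_attrib(on=True, dt_format='%Y-%m-%d %H:%M',
    #                            color='black', background='white',
    #                            size='small', align='cb')
    #
    # each keyword lands in self.timestamp_attribs, which draw_timestamp()
    # below reads back, with defaults for anything unset.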
    def draw_timestamp(self, time):
        """
        Function that draws the timestamp to the foreground.
        Uses self.timestamp_attribs to determine its appearance.

        :param time: the datetime object representing the timestamp
        :type time: datetime
        """
        d = self.timestamp_attribs

        if not d.get('on', True):
            return

        # fall back to the documented defaults for anything not set;
        # keys match the attributes documented in set_timestamp_attrib()
        dt_format = d.get('dt_format', '%c')
        background = d.get('background', 'white')
        color = d.get('color', 'black')
        size = d.get('size', 'small')
        position = d.get('position', (self.fore_image.width / 2,
                                      self.fore_image.height))
        align = d.get('align', 'cb')

        self.fore_image.draw_text(
            time.strftime(dt_format), position, size, color, align, background)

    def clean_output_files(self):

        # clear out the output dir:
        try:
            os.remove(os.path.join(self.output_dir,
                                   self.background_map_name))
        except OSError:
            # it's not there to delete..
            pass

        try:
            os.remove(self.anim_filename)
        except OSError:
            # it's not there to delete..
            pass

        for name in glob.glob(os.path.join(self.output_dir,
                                           self.foreground_filename_glob)):
            os.remove(name)

    def draw_background(self):
        """
        Draws the background image -- just land for now

        This should be called whenever the scale changes
        """
        # create a new background image
        self.clear_background()
        self.draw_land()
        if self.raster_map is not None:
            self.draw_raster_map()
        self.draw_graticule()
        self.draw_tags()
        self.draw_grids()

    def add_grid(self, grid,
                 on=True,
                 color='grid_1',
                 width=2):
        layer = GridVisLayer(grid, self.projection, on, color, width)
        self.grids.append(layer)

    def draw_grids(self):
        for grid in self.grids:
            grid.draw_to_image(self.back_image)

    def add_vec_prop(self,
                     prop,
                     on=True,
                     color='LE',
                     mask_color='uncert_LE',
                     size=3,
                     width=1,
                     scale=1000):
        layer = GridPropVisLayer(prop, self.projection, on, color, mask_color, size, width, scale)
        self.props.append(layer)

    def draw_props(self, time):
        for prop in self.props:
            prop.draw_to_image(self.fore_image, time)

    def draw_masked_nodes(self, grid, time):
        if grid.appearance['on'] and grid.appearance['mask'] is not None:
            var = grid.appearance['mask']
            masked_nodes = grid.masked_nodes(time, var)
            dia = grid.appearance['n_size']
            unmasked_nodes = np.ascontiguousarray(
                masked_nodes.compressed().reshape(-1, 2))
            self.draw_points(unmasked_nodes, dia, 'black')
            masked = np.ascontiguousarray(
                masked_nodes[masked_nodes.mask].data.reshape(-1, 2))
            self.draw_points(masked, dia, 'uncert_LE')
#             for i in range(0, grid.nodes.shape[0]):
#                 if masked_nodes.mask[i, 0] and masked_nodes.mask[i, 1]:
#                     self.draw_points(
#                         grid.nodes[i], diameter=dia, color='uncert_LE')
#                 else:
#                     self.draw_points(
#                         grid.nodes[i], diameter=dia, color='black')

    def draw_land(self):
        """
        Draws the land map to the internal background image.
        """
        for poly in self.land_polygons:
            metadata = poly.metadata

            if metadata[1].strip().lower() == 'map bounds':
                if self.draw_map_bounds:
                    self.draw_polygon(poly,
                                      line_color='map_bounds',
                                      fill_color=None,
                                      line_width=2,
                                      background=True)
            elif metadata[1].strip().lower().replace(' ', '') == 'spillablearea':
                if self.draw_spillable_area:
                    self.draw_polygon(poly,
                                      line_color='spillable_area',
                                      fill_color=None,
                                      line_width=2,
                                      background=True)
            elif metadata[2] == '2':
                # this is a lake
                self.draw_polygon(poly, fill_color='lake', background=True)
            else:
                self.draw_polygon(poly,
                                  fill_color='land', background=True)

        return None

    def draw_elements(self, sc):
        """
        Draws the individual elements to a foreground image

        :param sc: a SpillContainer object to draw

        """
        # TODO: add checks for the other status flags!

        if sc.num_released > 0:  # nothing to draw if no elements
            if sc.uncertain:
                color = 'uncert_LE'
            else:
                color = 'LE'

            positions = sc['positions']

            # which ones are on land?
            on_land = sc['status_codes'] == oil_status.on_land
            self.draw_points(positions[on_land],
                             diameter=2,
                             color='black',
                             # color=color,
                             shape="x")
            # draw the four pixels for the elements not on land and
            # not off the map
            self.draw_points(positions[~on_land],
                             diameter=2,
                             color=color,
                             shape="round")

    def draw_raster_map(self):
        """
        draws the raster map used for beaching to the image.

        draws a grid for the pixels

        this is pretty slow, but only used for diagnostics.
        (not bad for just the lines)
        """
        if self.raster_map is not None:
            raster_map = self.raster_map
            projection = raster_map.projection
            w, h = raster_map.basebitmap.shape

            if self.raster_map_outline:
                # vertical lines
                for i in range(w):
                    coords = projection.to_lonlat(np.array(((i, 0.0),
                                                            (i, h)),
                                                           dtype=np.float64))
                    self.draw_polyline(coords, background=True,
                                       line_color='raster_map_outline')
                # horizontal lines
                for i in range(h):
                    coords = projection.to_lonlat(np.array(((0.0, i),
                                                            (w, i)),
                                                           dtype=np.float64))
                    self.draw_polyline(coords, background=True,
                                       line_color='raster_map_outline')

            if self.raster_map_fill:
                for i in range(w):
                    for j in range(h):
                        if raster_map.basebitmap[i, j] == 1:
                            rect = projection.to_lonlat(np.array(((i, j),
                                                                  (i + 1, j),
                                                                  (i + 1, j + 1),
                                                                  (i, j + 1)),
                                                                 dtype=np.float64))
                            self.draw_polygon(rect, fill_color='raster_map',
                                              background=True)

    def write_output(self, step_num, islast_step=False):
        """
        Render the map image, according to current parameters.

        :param step_num: the model step number you want rendered.
        :type step_num: int

        :param islast_step: default is False. Flag that indicates that step_num
            is last step. If 'output_last_step' is True then this is written
            out
        :type islast_step: bool

        :returns: A dict of info about this step number if this step
            is to be output, None otherwise.
            'step_num': step_num
            'image_filename': filename
            'time_stamp': time_stamp # as ISO string

        use super to call base class write_output method

        If this is the last step, then data is written; otherwise
        prepare_for_model_step determines whether to write the output for
        this step based on output_timestep
        """

        super(Renderer, self).write_output(step_num, islast_step)

        if not self._write_step:
            return None

        image_filename = os.path.join(self.output_dir,
                                      self.foreground_filename_format
                                      .format(step_num))

        self.clear_foreground()
        if self.draw_back_to_fore:
            self.copy_back_to_fore()

        # draw data for self.draw_ontop second so it draws on top
        scp = self.cache.load_timestep(step_num).items()
        if len(scp) == 1:
            self.draw_elements(scp[0])
        else:
            if self.draw_ontop == 'forecast':
                self.draw_elements(scp[1])
                self.draw_elements(scp[0])
            else:
                self.draw_elements(scp[0])
                self.draw_elements(scp[1])

        time_stamp = scp[0].current_time_stamp
        self.draw_timestamp(time_stamp)
        self.draw_props(time_stamp)

        for ftype in self.formats:
            if ftype == 'gif':
                self.animation.add_frame(self.fore_image, self.delay)
            else:
                self.save_foreground(image_filename, file_type=ftype)
        self.last_filename = image_filename

        return {'image_filename': image_filename,
                'time_stamp': time_stamp}

    def write_output_post_run(self, **kwargs):
        super(Renderer, self).write_output_post_run(**kwargs)

        if 'gif' in self.formats:
            self.animation.close_anim()

    def _draw(self, step_num):
        """
        create a small function so data arrays are garbage collected from
        memory after this function exits - it returns current_time_stamp
        """

        # draw data for self.draw_ontop second so it draws on top
        scp = self.cache.load_timestep(step_num).items()
        if len(scp) == 1:
            self.draw_elements(scp[0])
        else:
            if self.draw_ontop == 'forecast':
                self.draw_elements(scp[1])
                self.draw_elements(scp[0])
            else:
                self.draw_elements(scp[0])
                self.draw_elements(scp[1])

        return scp[0].current_time_stamp

    def projection_to_dict(self):
        """
        store projection class as a string for now since that is all that
        is required for persisting
        todo: This may not be the case for all projection classes, but keep
        simple for now so we don't have to make the projection classes
        serializable
        """
        return '{0}.{1}'.format(self.projection.__module__,
                                self.projection.__class__.__name__)

    def serialize(self, json_='webapi'):
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()

        if json_ == 'save':
            toserial['map_filename'] = self._filename

        return schema.serialize(toserial)

    def save(self, saveloc, references=None, name=None):
        '''
        update the 'output_dir' key in the json to point to directory
        inside saveloc, then save the json - do not copy image files or
        image directory over
        '''
        json_ = self.serialize('save')
        out_dir = os.path.split(json_['output_dir'])[1]
        # store output_dir relative to saveloc
        json_['output_dir'] = os.path.join('./', out_dir)

        return self._json_to_saveloc(json_, saveloc, references, name)

    @classmethod
    def loads(cls, json_data, saveloc, references=None):
        '''
        loads object from json_data

        prepend saveloc path to 'output_dir' and create output_dir in saveloc,
        then call super to load object
        '''
        if zipfile.is_zipfile(saveloc):
            saveloc = os.path.split(saveloc)[0]

        os.mkdir(os.path.join(saveloc, json_data['output_dir']))
        json_data['output_dir'] = os.path.join(saveloc,
                                               json_data['output_dir'])

        return super(Renderer, cls).loads(json_data, saveloc, references)
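A minimal usage sketch for the renderer above (hedged: the BNA file name and the `model` instance are hypothetical; only constructor arguments documented in __init__ are used):

# hedged sketch, assuming a BNA coastline file './coast.bna' and an
# existing gnome Model instance named `model` (both hypothetical)
r = Renderer(map_filename='./coast.bna',
             output_dir='./images',
             image_size=(800, 600),
             formats=['png', 'gif'],
             draw_ontop='forecast')
model.outputters += r  # one frame per step, plus the animated gif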
Ejemplo n.º 16
0
class WeatheringData(Weatherer, Serializable):
    '''
    Serves to initialize weathering data arrays and also to update data
    arrays like density and viscosity.
    Doesn't have an id like other gnome objects. It isn't exposed to the
    application since the Model will automatically instantiate it if there
    are any weathering objects defined.

    Use this to manage data_arrays associated with weathering that are not
    initialized anywhere else. Rather than defining initializers for every
    single array, let WeatheringData set/initialize/update these arrays.
    '''
    _state = copy.deepcopy(Weatherer._state)
    # UI does not need to manipulate - if make_default_refs is True as is the
    # default, it'll automatically get the default Water object
    _state += Field('water', save=True, update=False, save_reference=True)

    _schema = WeathererSchema

    def __init__(self, water, **kwargs):
        '''
        initialize object.

        :param water: requires a water object
        :type water: gnome.environment.Water

        Optional arguments (kwargs): these get passed to the base class via super
        '''
        super(WeatheringData, self).__init__(**kwargs)

        self.water = water
        self.array_types = {
            'fate_status', 'positions', 'status_codes', 'density', 'viscosity',
            'mass_components', 'mass', 'oil_density', 'oil_viscosity',
            'init_mass', 'frac_water', 'frac_lost', 'age'
        }

        # following used to update viscosity
        self.visc_curvfit_param = 1.5e3  # units are sec^0.5 / m
        self.visc_f_ref = 0.84

    def prepare_for_model_run(self, sc):
        '''
        1. initialize standard keys:
           avg_density, floating, amount_released, avg_viscosity to 0.0
        2. set init_density for all ElementType objects in each Spill
        3. set spreading thickness limit based on viscosity of oil at
           water temperature which is constant for now.
        '''
        # nothing released yet - set everything to 0.0
        for key in ('avg_density', 'floating', 'amount_released',
                    'avg_viscosity'):
            sc.mass_balance[key] = 0.0

    def initialize_data(self, sc, num_released):
        '''
        If on is False, then arrays should not be included - don't initialize

        1. initialize all weathering data arrays
        2. update aggregated data in sc.mass_balance dict
        '''
        if not self.on:
            return

        for substance, data in sc.itersubstancedata(self.array_types,
                                                    fate='all'):
            'update properties only if elements are released'
            if len(data['density']) == 0:
                continue

            # could also use 'age' but better to use an uninitialized var since
            # we might end up changing 'age' to something other than 0
            # model should only call initialize_data if new particles were
            # released
            new_LEs_mask = data['density'] == 0

            if np.any(new_LEs_mask):
                self._init_new_particles(new_LEs_mask, data, substance)

        sc.update_from_fatedataview(fate='all')

        # also initialize/update aggregated data
        self._aggregated_data(sc, num_released)

    def weather_elements(self, sc, time_step, model_time):
        '''
        Update intrinsic property data arrays: density, viscosity.
        In a model step, this is the last thing that happens. All the
        weatherers update 'mass_components' so mass_fraction will have changed
        at the end of the timestep. Update the density and viscosity
        accordingly.
        '''
        if not self.active:
            return

        water_rho = self.water.get('density')

        for substance, data in sc.itersubstancedata(self.array_types,
                                                    fate='all'):
            'update properties only if elements are released'
            if len(data['density']) == 0:
                continue

            k_rho = self._get_k_rho_weathering_dens_update(substance)

            # sub-select mass_components array by substance.num_components.
            # Currently, physics for modeling multiple spills with different
            # substances is not correctly done in the same model. However,
            # let's put some basic code in place so the data arrays can in
            # fact contain two substances and the code does not raise
            # exceptions. mass_components are zero padded for the substance
            # which has fewer pseudocomponents. Subselecting mass_components
            # array by [mask, :substance.num_components] ensures numpy
            # operations work
            mass_frac = \
                (data['mass_components'][:, :substance.num_components] /
                 data['mass'].reshape(len(data['mass']), -1))

            # check if density becomes > water, set it equal to water in this
            # case - 'density' is for the oil-water emulsion
            oil_rho = k_rho * (substance.component_density * mass_frac).sum(1)

            # oil/water emulsion density
            new_rho = (data['frac_water'] * water_rho +
                       (1 - data['frac_water']) * oil_rho)

            if np.any(new_rho > self.water.density):
                new_rho[new_rho > self.water.density] = self.water.density
                self.logger.info(
                    '{0} during update, density is larger '
                    'than water density - set to water density'.format(
                        self._pid))

            data['density'] = new_rho
            data['oil_density'] = oil_rho

            # following implementation results in an extra array called
            # fw_d_fref but is easy to read
            v0 = substance.kvis_at_temp(self.water.get('temperature', 'K'))

            if v0 is not None:
                kv1 = self._get_kv1_weathering_visc_update(v0)
                fw_d_fref = data['frac_water'] / self.visc_f_ref

                data['viscosity'] = (v0 * np.exp(kv1 * data['frac_lost']) *
                                     (1 + (fw_d_fref /
                                           (1.187 - fw_d_fref)))**2.49)
                data['oil_viscosity'] = (v0 * np.exp(kv1 * data['frac_lost']))

        sc.update_from_fatedataview(fate='all')

        # also initialize/update aggregated data
        self._aggregated_data(sc, 0)

    def _aggregated_data(self, sc, new_LEs):
        '''
        aggregated properties that are not set by any other weatherer are
        set here. The following keys in sc.mass_balance are set here:
            'avg_density',
            'avg_viscosity',
            'floating',
            'amount_released',
        todo: amount_released and beached can probably get set by
            SpillContainer. The trajectory only case will probably also care
            about amount 'beached'.
        '''
        # update avg_density from density array
        # wasted cycles at present since all values in density for given
        # timestep should be the same, but that will likely change
        # Any optimization in doing the following?:
        #   (sc['mass'] * sc['density']).sum()/sc['mass'].sum()
        # todo: move weighted average to utilities
        # also added a check for 'mass' == 0, edge case
        if len(sc.substances) > 1:
            self.logger.warning('{0} current code is not valid for '
                                'multiple weathering substances'.format(
                                    self._pid))
        elif len(sc.substances) == 0:
            # should not happen with the Web API. Just log a warning for now
            self.logger.warning('{0} weathering is on but found no '
                                'weatherable substances.'.format(self._pid))
        else:
            # avg_density, avg_viscosity applies to elements that are on the
            # surface and being weathered
            data = sc.substancefatedata(sc.substances[0],
                                        {'mass', 'density', 'viscosity'})

            if data['mass'].sum() > 0.0:
                sc.mass_balance['avg_density'] = \
                    np.sum(data['mass']/data['mass'].sum() * data['density'])
                sc.mass_balance['avg_viscosity'] = \
                    np.sum(data['mass']/data['mass'].sum() * data['viscosity'])
            else:
                self.logger.info("{0} sum of 'mass' array went to 0.0".format(
                    self._pid))

        # floating includes LEs marked to be skimmed + burned + dispersed
        # todo: remove fate_status and add 'surface' to status_codes. LEs
        # marked to be skimmed, burned, dispersed will also be marked as
        # 'surface' so following can get cleaned up.
        sc.mass_balance['floating'] = \
            (sc['mass'][sc['fate_status'] == fate.surface_weather].sum() +
             sc['mass'][sc['fate_status'] == fate.non_weather].sum() -
             sc['mass'][sc['status_codes'] == oil_status.on_land].sum() -
             sc['mass'][sc['status_codes'] == oil_status.to_be_removed].sum() +
             sc['mass'][sc['fate_status'] & fate.skim == fate.skim].sum() +
             sc['mass'][sc['fate_status'] & fate.burn == fate.burn].sum() +
             sc['mass'][sc['fate_status'] & fate.disperse == fate.disperse].sum())

        # add 'non_weathering' key if any mass is released for nonweathering
        # particles.
        nonweather = sc['mass'][sc['fate_status'] == fate.non_weather].sum()
        sc.mass_balance['non_weathering'] = nonweather

        if new_LEs > 0:
            amount_released = np.sum(sc['mass'][-new_LEs:])

            if 'amount_released' in sc.mass_balance:
                sc.mass_balance['amount_released'] += amount_released
            else:
                sc.mass_balance['amount_released'] = amount_released

    def _init_new_particles(self, mask, data, substance):
        '''
        initialize new particles released together in a given timestep

        :param mask: mask gives only the new LEs in data arrays
        :type mask: numpy bool array
        :param data: dict containing numpy arrays
        :param substance: OilProps object defining the substance spilled
        '''
        water_temp = self.water.get('temperature', 'K')
        density = substance.density_at_temp(water_temp)

        if density > self.water.get('density'):
            msg = ("{0} will sink at given water temperature: {1} {2}. "
                   "Set density to water density".format(
                       substance.name,
                       self.water.get('temperature',
                                      self.water.units['temperature']),
                       self.water.units['temperature']))
            self.logger.error(msg)

            data['density'][mask] = self.water.get('density')
            data['oil_density'][mask] = self.water.get('density')
        else:
            data['density'][mask] = density
            data['oil_density'][mask] = density

        # initialize mass_components -
        # sub-select mass_components array by substance.num_components.
        # Currently, the physics for modeling multiple spills with different
        # substances is not being correctly done in the same model. However,
        # let's put some basic code in place so the data arrays can in fact
        # contain two substances and the code does not raise exceptions. The
        # mass_components are zero padded for the substance which has fewer
        # pseudocomponents. Subselecting mass_components array by
        # [mask, :substance.num_components] ensures numpy operations work
        data['mass_components'][mask, :substance.num_components] = \
            (np.asarray(substance.mass_fraction, dtype=np.float64) *
             (data['mass'][mask].reshape(len(data['mass'][mask]), -1)))

        data['init_mass'][mask] = data['mass'][mask]

        substance_kvis = substance.kvis_at_temp(water_temp)
        if substance_kvis is not None:
            'make sure we do not add NaN values'
            data['viscosity'][mask] = substance_kvis
            data['oil_viscosity'][mask] = substance_kvis

        # initialize the fate_status array based on positions and status_codes
        self._init_fate_status(mask, data)

    def _init_fate_status(self, update_LEs_mask, data):
        '''
        initialize fate_status for newly released LEs or refloated LEs
        For refloated LEs, the mask should apply to non_weather LEs.
        Currently, the 'status_codes' is separate from 'fate_status' and we
        don't want to reset the 'fate_status' of LEs that have been marked
        as 'skim' or 'burn' or 'disperse'. This should only apply for newly
        released LEs (currently marked as non_weather since that's the default)
        and for refloated LEs which should also have been marked as non_weather
        when they beached.
        '''
        surf_mask = np.logical_and(
            update_LEs_mask,
            np.logical_and(data['positions'][:, 2] == 0,
                           data['status_codes'] == oil_status.in_water))
        subs_mask = np.logical_and(
            update_LEs_mask,
            np.logical_and(data['positions'][:, 2] > 0,
                           data['status_codes'] == oil_status.in_water))

        # set status for new_LEs correctly
        data['fate_status'][surf_mask] = fate.surface_weather
        data['fate_status'][subs_mask] = fate.subsurf_weather

    @lru_cache(1)
    def _get_kv1_weathering_visc_update(self, v0):
        '''
        kv1 is constant.
        It defines the exponential change in viscosity as the oil weathers,
        due to the fraction lost to evaporation/dissolution:
            v(t) = v0 * exp(kv1 * f_lost_evap_diss)

        kv1 = sqrt(v0) * 1500
        if kv1 < 1, then return 1
        if kv1 > 10, then return 10

        Since this is fixed for an oil, it only needs to be computed once. Use
        lru_cache on this function to cache the result for a given initial
        viscosity: v0
        '''
        kv1 = np.sqrt(v0) * self.visc_curvfit_param

        if kv1 < 1:
            kv1 = 1
        elif kv1 > 10:
            kv1 = 10

        return kv1

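    # Worked check (hypothetical viscosities): v0 = 1e-5 m^2/s gives
    # kv1 = sqrt(1e-5) * 1500 ~= 4.74, inside [1, 10], so it is used as-is;
    # a very light oil with v0 = 1e-7 m^2/s gives kv1 ~= 0.47, which is
    # clipped up to 1.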
    @lru_cache(1)
    def _get_k_rho_weathering_dens_update(self, substance):
        '''
        use lru_cache on substance. substance is an OilProps object; if this
        object stays the same, then return the cached value for k_rho.
        This depends on initial mass fractions, initial density and fixed
        component densities.
        '''
        # update density/viscosity/relative_buoyancy/area for previously
        # released elements
        rho0 = substance.density_at_temp(self.water.get('temperature', 'K'))

        # dimensionless constant
        k_rho = (rho0 /
                 (substance.component_density * substance.mass_fraction).sum())

        return k_rho

    def serialize(self, json_='webapi'):
        '''
            'water' property is saved as a reference in the save file
        '''
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()
        serial = schema.serialize(toserial)

        if json_ == 'webapi':
            if self.water:
                serial['water'] = self.water.serialize(json_)

        return serial

    @classmethod
    def deserialize(cls, json_):
        '''
            Append correct schema for water
        '''
        if not cls.is_sparse(json_):
            schema = cls._schema()
            dict_ = schema.deserialize(json_)

            if 'water' in json_:
                obj = json_['water']['obj_type']
                dict_['water'] = (eval(obj).deserialize(json_['water']))

            return dict_
        else:
            return json_
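The density update in weather_elements() above is just a mass-fraction-weighted mixture capped at the water density. A standalone sketch of that arithmetic (plain numpy, hypothetical values; not part of the original class):

import numpy as np

water_rho = 1025.0                     # kg/m^3, seawater (hypothetical)
oil_rho = np.array([890.0, 915.0])     # per-element weathered oil density
frac_water = np.array([0.2, 0.6])      # water fraction of the emulsion

# oil/water emulsion density, as computed in weather_elements()
new_rho = frac_water * water_rho + (1 - frac_water) * oil_rho

# cap at the water density so the emulsion never exceeds it
new_rho = np.minimum(new_rho, water_rho)
# new_rho -> [917., 981.]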
Ejemplo n.º 17
0
class IceGeoJsonOutput(Outputter, Serializable):
    '''
    Class that outputs GNOME ice velocity results for each ice mover
    in a geojson format.  The output is a collection of Features.
    Each Feature contains a Point object with associated properties.
    Following is the output format - the data in <> are the results
    for each element.
    ::
        {
         "time_stamp": <TIME IN ISO FORMAT>,
         "step_num": <OUTPUT ASSOCIATED WITH THIS STEP NUMBER>,
         "feature_collections": {<mover_id>: {"type": "FeatureCollection",
                                              "features": [{"type": "Feature",
                                                            "id": <PARTICLE_ID>,
                                                            "properties": {"ice_fraction": <FRACTION>,
                                                                           "ice_thickness": <METERS>,
                                                                           "water_velocity": [u, v],
                                                                           "ice_velocity": [u, v]
                                                                           },
                                                            "geometry": {"type": "Point",
                                                                         "coordinates": [<LONG>, <LAT>]
                                                                         },
                                                            },
                                                            ...
                                                           ],
                                              },
                                 ...
                                 }
        }
    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state.add_field(
        Field('ice_movers', save=True, update=True, iscollection=True))

    _schema = IceGeoJsonSchema

    def __init__(self, ice_movers, **kwargs):
        '''
        :param list ice_movers: A list or collection of ice mover objects.

        use super to pass optional \*\*kwargs to base class __init__ method
        '''
        self.ice_movers = ice_movers

        super(IceGeoJsonOutput, self).__init__(**kwargs)

    def write_output(self, step_num, islast_step=False):
        'dump data in geojson format'
        super(IceGeoJsonOutput, self).write_output(step_num, islast_step)

        if self.on is False or not self._write_step:
            return None

        # grab the last spill container -- the uncertain one, if present
        for sc in self.cache.load_timestep(step_num).items():
            pass

        model_time = date_to_sec(sc.current_time_stamp)

        geojson = {}
        for mover in self.ice_movers:
            mover_triangles = self.get_triangles(mover)
            ice_coverage, ice_thickness = mover.get_ice_fields(model_time)

            geojson[mover.id] = []
            geojson[mover.id].append(
                self.get_coverage_fc(ice_coverage, mover_triangles))
            geojson[mover.id].append(
                self.get_thickness_fc(ice_thickness, mover_triangles))

        # default geojson should not output data to file
        output_info = {
            'time_stamp': sc.current_time_stamp.isoformat(),
            'feature_collections': geojson
        }

        return output_info

    def get_coverage_fc(self, coverage, triangles):
        return self.get_grouped_fc_from_1d_array(coverage,
                                                 triangles,
                                                 'coverage',
                                                 decimals=2)

    def get_thickness_fc(self, thickness, triangles):
        return self.get_grouped_fc_from_1d_array(thickness,
                                                 triangles,
                                                 'thickness',
                                                 decimals=1)

    def get_grouped_fc_from_1d_array(self, values, triangles, property_name,
                                     decimals):
        rounded = values.round(decimals=decimals)
        unique = np.unique(rounded)

        features = []
        for u in unique:
            matching = np.where(rounded == u)
            matching_triangles = (triangles[matching])

            dtype = matching_triangles.dtype.descr
            shape = matching_triangles.shape + (len(dtype), )

            coordinates = (matching_triangles.view(
                dtype='<f8').reshape(shape).tolist())

            prop_fmt = '{{:.{}f}}'.format(decimals)
            properties = {'{}'.format(property_name): prop_fmt.format(u)}

            feature = Feature(id="1",
                              properties=properties,
                              geometry=MultiPolygon(coordinates=coordinates))
            features.append(feature)

        return FeatureCollection(features)

    def get_rounded_ice_values(self, coverage, thickness):
        return np.vstack(
            (coverage.round(decimals=2), thickness.round(decimals=1))).T

    def get_unique_ice_values(self, ice_values):
        '''
        In order to make numpy perform this function fast, we will use a
        contiguous structured array using a view of a void type that
        joins the whole row into a single item.
        '''
        dtype = np.dtype(
            (np.void, ice_values.dtype.itemsize * ice_values.shape[1]))
        voidtype_array = np.ascontiguousarray(ice_values).view(dtype)

        _, idx = np.unique(voidtype_array, return_index=True)

        return ice_values[idx]

    def get_matching_ice_values(self, ice_values, v):
        return np.where((ice_values == v).all(axis=1))

    def get_triangles(self, mover):
        '''
        The triangle data that we get from the mover is in the form of
        indices into the points array.
        So we get our triangle data and points array, and then build our
        triangle coordinates by reference.
        '''
        points = self.get_points(mover)

        if mover.mover._is_triangle_grid():
            data = self.get_triangle_data(mover)
            dtype = data[0].dtype.descr
            unstructured_type = dtype[0][1]
            unstructured = (data.view(dtype=unstructured_type).reshape(
                -1, len(dtype))[:, :3])

            triangles = points[unstructured]
            return triangles

        else:
            data = self.get_cell_data(mover)
            dtype = data[0].dtype.descr
            unstructured_type = dtype[0][1]
            unstructured = (data.view(dtype=unstructured_type).reshape(
                -1, len(dtype))[:, 1:])

            cells = points[unstructured]
            return cells

    def get_triangle_data(self, mover):
        return mover.mover._get_triangle_data()

    def get_cell_data(self, mover):
        return mover.mover._get_cell_data()

    def get_points(self, mover):
        points = (mover.mover._get_points().astype([('long', '<f8'),
                                                    ('lat', '<f8')]))
        points['long'] /= 10**6
        points['lat'] /= 10**6

        return points

    def rewind(self):
        'remove previously written files'
        super(IceGeoJsonOutput, self).rewind()

    def ice_movers_to_dict(self):
        '''
        a dict containing 'obj_type' and 'id' for each object in
        list/collection
        '''
        return self._collection_to_dict(self.ice_movers)

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for current mover
        """
        schema = cls._schema()
        _to_dict = schema.deserialize(json_)

        if 'ice_movers' in json_:
            _to_dict['ice_movers'] = []
            for i, cm in enumerate(json_['ice_movers']):
                cm_cls = class_from_objtype(cm['obj_type'])
                cm_dict = cm_cls.deserialize(json_['ice_movers'][i])

                _to_dict['ice_movers'].append(cm_dict)

        return _to_dict
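get_unique_ice_values() above uses a common numpy idiom: each row is viewed as one opaque void-typed item so np.unique can deduplicate whole rows at once. A self-contained sketch with hypothetical data:

import numpy as np

# hypothetical (coverage, thickness) pairs, one row per triangle
ice_values = np.array([[0.5, 1.2],
                       [0.5, 1.2],
                       [0.7, 0.9]])

# view each row as a single void item so np.unique compares whole rows
dtype = np.dtype((np.void, ice_values.dtype.itemsize * ice_values.shape[1]))
voidtype_array = np.ascontiguousarray(ice_values).view(dtype)

_, idx = np.unique(voidtype_array, return_index=True)
# ice_values[idx] holds the two unique rows: [0.5, 1.2] and [0.7, 0.9]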
Ejemplo n.º 18
0
class TrajectoryGeoJsonOutput(Outputter, Serializable):
    '''
    class that outputs GNOME results in a geojson format. The output is a
    collection of Features. Each Feature contains a Point object with
    associated properties. Following is the format for a particle - the
    data in <> are the results for each element.
    ::
        {
        "type": "FeatureCollection",
        "features": [
            {
                "geometry": {
                    "type": "Point",
                    "coordinates": [
                        <LONGITUDE>,
                        <LATITUDE>
                    ]
                },
                "type": "Feature",
                "id": <PARTICLE_ID>,
                "properties": {
                    "current_time": <TIME IN SEC SINCE EPOCH>,
                    "status_code": <>,
                    "spill_id": <UUID OF SPILL OBJECT THAT RELEASED PARTICLE>,
                    "depth": <DEPTH>,
                    "spill_type": <FORECAST OR UNCERTAIN>,
                    "step_num": <OUTPUT ASSOCIATED WITH THIS STEP NUMBER>
                }
            },
            ...
        ]
        }

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state += [
        Field('round_data', update=True, save=True),
        Field('round_to', update=True, save=True),
        Field('output_dir', update=True, save=True)
    ]
    _schema = TrajectoryGeoJsonSchema

    def __init__(self, round_data=True, round_to=4, output_dir=None, **kwargs):
        '''
        :param bool round_data=True: if True, then round the numpy arrays
            containing float to number of digits specified by 'round_to'.
            Default is True
        :param int round_to=4: round float arrays to these number of digits.
            Default is 4.
        :param str output_dir=None: output directory for geojson files. Default
            is None since data is returned in dict for webapi. For using
            write_output_post_run(), this must be set

        use super to pass optional \*\*kwargs to base class __init__ method
        '''
        self.round_data = round_data
        self.round_to = round_to
        self.output_dir = output_dir

        super(TrajectoryGeoJsonOutput, self).__init__(**kwargs)

    def write_output(self, step_num, islast_step=False):
        'dump data in geojson format'
        super(TrajectoryGeoJsonOutput,
              self).write_output(step_num, islast_step)

        if not self._write_step:
            return None

        # one feature per element was replaced with one multipoint feature
        # per status code, because client performance is much more stable
        # with one feature per step rather than (n) features per step
        features = []
        for sc in self.cache.load_timestep(step_num).items():
            time = date_to_sec(sc.current_time_stamp)
            position = self._dataarray_p_types(sc['positions'])
            status = self._dataarray_p_types(sc['status_codes'])
            sc_type = 'uncertain' if sc.uncertain else 'forecast'

            # break elements into multipoint features based on their status code
            # evaporated : 10
            # in_water : 2
            # not_released : 0
            # off_maps : 7
            # on_land : 3
            # to_be_removed : 12
            points = {}
            for ix, pos in enumerate(position):
                st_code = status[ix]

                if st_code not in points:
                    points[st_code] = []

                points[st_code].append(pos[:2])

            for k in points:
                feature = Feature(geometry=MultiPoint(points[k]),
                                  id="1",
                                  properties={
                                      'sc_type': sc_type,
                                      'status_code': k,
                                  })
                if sc.uncertain:
                    features.insert(0, feature)
                else:
                    features.append(feature)

        geojson = FeatureCollection(features)
        # default geojson should not output data to file
        # read data from file and send it to web client
        output_info = {
            'time_stamp': sc.current_time_stamp.isoformat(),
            'feature_collection': geojson
        }

        if self.output_dir:
            output_filename = self.output_to_file(geojson, step_num)
            output_info.update({'output_filename': output_filename})

        return output_info

    def output_to_file(self, json_content, step_num):
        file_format = 'geojson_{0:06d}.geojson'
        filename = os.path.join(self.output_dir, file_format.format(step_num))

        with open(filename, 'w') as outfile:
            dump(json_content, outfile, indent=True)

        return filename

    def _dataarray_p_types(self, data_array):
        '''
        return array as list with appropriate python dtype
        This is partly to make sure the dtype of the list elements is a python
        data type else geojson fails
        '''
        p_type = type(np.asscalar(data_array.dtype.type(0)))

        if p_type is long:
            'geojson expects int - it fails for a long'
            p_type = int

        if p_type is float and self.round_data:
            data = data_array.round(self.round_to).astype(p_type).tolist()
        else:
            data = data_array.astype(p_type).tolist()
        return data

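    # Example of _dataarray_p_types() behavior (hypothetical arrays): an
    # int32 'status_codes' array [2, 3] becomes the plain Python list
    # [2, 3]; with round_data=True, a float64 position such as
    # [-124.123456, 46.123456, 0.0] is rounded to round_to=4 digits,
    # giving [-124.1235, 46.1235, 0.0]. geojson needs native Python types,
    # not numpy scalars.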
    def rewind(self):
        'remove previously written files'
        super(TrajectoryGeoJsonOutput, self).rewind()
        self.clean_output_files()

    def clean_output_files(self):
        if self.output_dir:
            files = glob(os.path.join(self.output_dir, 'geojson_*.geojson'))
            for f in files:
                os.remove(f)
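A standalone sketch of the per-status grouping that write_output() performs (hypothetical positions and status codes; Feature, FeatureCollection and MultiPoint are the same geojson types used by the class):

from geojson import Feature, FeatureCollection, MultiPoint

# hypothetical (lon, lat) positions with status codes (in_water=2, on_land=3)
positions = [(-124.1, 46.2), (-124.2, 46.3), (-124.3, 46.1)]
status = [2, 3, 2]

# group positions into one MultiPoint per status code
points = {}
for pos, st_code in zip(positions, status):
    points.setdefault(st_code, []).append(pos)

features = [Feature(geometry=MultiPoint(pts), id="1",
                    properties={'sc_type': 'forecast', 'status_code': k})
            for k, pts in points.items()]
fc = FeatureCollection(features)  # one feature per status, as in the class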
Ejemplo n.º 19
0
class CurrentGeoJsonOutput(Outputter, Serializable):
    '''
    Class that outputs GNOME current velocity results for each current mover
    in a geojson format.  The output is a collection of Features.
    Each Feature contains a Point object with associated properties.
    Following is the output format - the data in <> are the results
    for each element.
    ::
        {
         "time_stamp": <TIME IN ISO FORMAT>,
         "step_num": <OUTPUT ASSOCIATED WITH THIS STEP NUMBER>,
         "feature_collections": {<mover_id>: {"type": "FeatureCollection",
                                              "features": [{"type": "Feature",
                                                            "id": <PARTICLE_ID>,
                                                            "properties": {"velocity": [u, v]
                                                                           },
                                                            "geometry": {"type": "Point",
                                                                         "coordinates": [<LONG>, <LAT>]
                                                                         },
                                                        },
                                                        ...
                                                       ],
                                          },
                             ...
                             }
        }

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state.add_field(
        Field('current_movers', save=True, update=True, iscollection=True))

    _schema = CurrentGeoJsonSchema

    def __init__(self, current_movers, **kwargs):
        '''
        :param list current_movers: A list or collection of current grid mover
                                    objects.

        use super to pass optional \*\*kwargs to base class __init__ method
        '''
        self.current_movers = current_movers

        super(CurrentGeoJsonOutput, self).__init__(**kwargs)

    def write_output(self, step_num, islast_step=False):
        'dump data in geojson format'
        super(CurrentGeoJsonOutput, self).write_output(step_num, islast_step)

        if self.on is False or not self._write_step:
            return None

        # grab the last spill container -- the uncertain one, if present
        for sc in self.cache.load_timestep(step_num).items():
            pass

        model_time = date_to_sec(sc.current_time_stamp)

        geojson = {}
        for cm in self.current_movers:
            features = []
            centers = cm.get_center_points()
            velocities = cm.get_scaled_velocities(model_time)

            current_vals = np.hstack(
                (centers.view(dtype='<f8').reshape(-1, 2),
                 velocities.view(dtype='<f8').reshape(-1,
                                                      2))).reshape(-1, 2, 2)

            for c, v in current_vals:
                feature = Feature(geometry=Point(list(c)),
                                  id="1",
                                  properties={'velocity': list(v)})
                features.append(feature)

            geojson[cm.id] = FeatureCollection(features)

        # default geojson should not output data to file
        # read data from file and send it to web client
        output_info = {
            'time_stamp': sc.current_time_stamp.isoformat(),
            'feature_collections': geojson
        }

        return output_info

    def get_rounded_velocities(self, velocities):
        return np.vstack((velocities['u'].round(decimals=1),
                          velocities['v'].round(decimals=1))).T

    def get_unique_velocities(self, velocities):
        '''
        In order to make numpy perform this function fast, we will use a
        contiguous structured array using a view of a void type that
        joins the whole row into a single item.
        '''
        dtype = np.dtype(
            (np.void, velocities.dtype.itemsize * velocities.shape[1]))
        voidtype_array = np.ascontiguousarray(velocities).view(dtype)

        _, idx = np.unique(voidtype_array, return_index=True)

        return velocities[idx]

    def get_matching_velocities(self, velocities, v):
        return np.where((velocities == v).all(axis=1))

    def rewind(self):
        'remove previously written files'
        super(CurrentGeoJsonOutput, self).rewind()

    def current_movers_to_dict(self):
        '''
        a dict containing 'obj_type' and 'id' for each object in
        list/collection
        '''
        return self._collection_to_dict(self.current_movers)
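The hstack/view/reshape in write_output() above pairs each cell center with its velocity. A self-contained sketch with two hypothetical cells:

import numpy as np

centers = np.array([(-124.0, 46.0), (-124.5, 46.5)],
                   dtype=[('long', '<f8'), ('lat', '<f8')])
velocities = np.array([(0.1, 0.2), (0.3, -0.1)],
                      dtype=[('u', '<f8'), ('v', '<f8')])

# drop the field structure, then interleave into (center, velocity) pairs
current_vals = np.hstack((centers.view(dtype='<f8').reshape(-1, 2),
                          velocities.view(dtype='<f8').reshape(-1, 2))
                         ).reshape(-1, 2, 2)
# current_vals[0] -> [[-124. ,  46. ], [  0.1,   0.2]]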
Ejemplo n.º 20
0
class NetCDFOutput(Outputter, Serializable):
    """
    A NetCDFOutput object is used to write the model's data to a NetCDF file.
    It inherits from Outputter class and implements the same interface.

    This class is meant to be used within the Model, to be added to list of
    outputters.

    >>> model = gnome.model.Model(...)
    >>> model.outputters += gnome.netcdf_outputter.NetCDFOutput(
    ...     os.path.join(base_dir, 'sample_model.nc'), which_data='most')

    `which_data` flag is used to set which data to add to the netcdf file:

    'standard' : the basic stuff most people would want

    'most': everything the model is tracking except the internal-use-only arrays

    'all': everything tracked by the model (mostly used for diagnostics of save files)


    .. note::
       cf_attributes is a class attribute: a dict
       that contains the global attributes per CF convention

       The attribute `.arrays_to_output` is a set of the data arrays that
       will be added to the netcdf file. Array names may be added to or
       removed from this set before a model run to customize what gets
       output: `the_netcdf_outputter.arrays_to_output.add('rise_vel')`

       Since some of the names of the netcdf variables are different from the
       names in the SpillContainer data_arrays, this list uses the netcdf names
    """
    which_data_lu = {'standard', 'most', 'all'}
    compress_lu = {True, False}

    cf_attributes = {'comment': 'Particle output from the NOAA PyGnome model',
                     'source': 'PyGnome version {0}'.format(__version__),
                     'references': 'TBD',
                     'feature_type': 'particle_trajectory',
                     'institution': 'NOAA Emergency Response Division',
                     'conventions': 'CF-1.6',
                     }

    # the set of arrays we usually output -- i.e. the default
    standard_arrays = ['latitude',
                       'longitude',  # pulled from the 'positions' array
                       'depth',
                       'status_codes',
                       'spill_num',
                       'id',
                       'mass',
                       'age',
                       ]

    # the list of arrays that we usually don't want -- i.e. for internal use
    # these will get skipped if "most" is asked for
    # "all" will output everything.
    usually_skipped_arrays = ['next_positions',
                              'last_water_positions',
                              'windages',
                              'windage_range',
                              'windage_persist',
                              'mass_components',
                              'half_lives',
                              ]

    # define _state for serialization
    _state = copy.deepcopy(Outputter._state)

    # data file should not be moved to save file location!
    _state.add_field([Field('netcdf_filename', save=True, update=True,
                            test_for_eq=False),
                      Field('which_data', save=True, update=True),
                      # Field('netcdf_format', save=True, update=True),
                      Field('compress', save=True, update=True),
                      Field('_start_idx', save=True),
                      Field('_middle_of_run', save=True),
                      ])
    _schema = NetCDFOutputSchema

    def __init__(self,
                 netcdf_filename,
                 which_data='standard',
                 compress=True,
                 **kwargs):
        """
        Constructor for NetCDFOutput object. It reads data from cache and
        writes it to a NetCDF4 format file using the CF convention

        :param netcdf_filename: Required parameter. The filename in which to
            store the NetCDF data.
        :type netcdf_filename: str or unicode

        :param which_data='standard':
            If 'standard', write only standard data.
            If 'most', write everything except the attributes we know are
            for internal model use.
            If 'all', write all data to NetCDF -- usually only for diagnostics.
            Default is 'standard'.
            These are defined in the standard_arrays and usually_skipped_arrays
            attributes
        :type which_data: string -- one of {'standard', 'most', 'all'}

        Optional arguments passed on to base class (kwargs):

        :param cache: sets the cache object from which to read data. The model
            will automatically set this param

        :param output_timestep: default is None in which case every time the
            write_output is called, output is written. If set, then output is
            written every output_timestep starting from model_start_time.
        :type output_timestep: timedelta object

        :param output_zero_step: default is True. If True then output for
            initial step (showing initial release conditions) is written
            regardless of output_timestep
        :type output_zero_step: boolean

        :param output_last_step: default is True. If True then output for
            final step is written regardless of output_timestep
        :type output_last_step: boolean

        use super to pass optional kwargs to base class __init__ method
        """
        self._check_filename(netcdf_filename)
        self._netcdf_filename = netcdf_filename

        # uncertain file is only written out if model is uncertain
        name, ext = os.path.splitext(self.netcdf_filename)
        self._u_netcdf_filename = '{0}_uncertain{1}'.format(name, ext)

        self.name = os.path.split(netcdf_filename)[1]

        # flag to keep track of _state of the object - is True after calling
        # prepare_for_model_run
        self._middle_of_run = False

        if which_data.lower() in self.which_data_lu:
            self._which_data = which_data.lower()
        else:
            raise ValueError('which_data must be one of: '
                             '{"standard", "most", "all"}')

        self.arrays_to_output = set(self.standard_arrays)

        # this is only updated in prepare_for_model_run if which_data is
        # 'all' or 'most'
        # self.arr_types = None
        self._format = 'NETCDF4'

        if compress in self.compress_lu:
            self._compress = compress
        else:
            raise ValueError('compress must be one of: {True, False}')

        # 1k is about right for 1000LEs and one time step.
        # up to 0.5MB tested better for large datasets, but
        # we don't want to have far-too-large files for the
        # smaller ones
        # The default in netcdf4 is 1 -- which works really badly
        self._chunksize = 1024

        # need to keep track of starting index for writing data since variable
        # number of particles are released
        self._start_idx = 0

        # define NetCDF variable attributes that are instance attributes here.
        # They are set in prepare_for_model_run():
        # 'spills_map' is set based on the names of the spills as defined by
        # the user; the time 'units' are seconds since model_start_time
        self._var_attributes = {
            'spill_num': {'spills_map': ''},
            'time': {'units': ''}
        }

        super(NetCDFOutput, self).__init__(**kwargs)

    @property
    def middle_of_run(self):
        return self._middle_of_run

    @property
    def netcdf_filename(self):
        return self._netcdf_filename

    @netcdf_filename.setter
    def netcdf_filename(self, new_name):
        if self.middle_of_run:
            raise AttributeError('This attribute cannot be changed in the '
                                 'middle of a run')
        else:
            self._check_filename(new_name)
            self._netcdf_filename = new_name

    @property
    def uncertain_filename(self):
        '''
        if uncertain SpillContainer is present, write its data out to this file
        '''
        return self._u_netcdf_filename

    @property
    def which_data(self):
        return self._which_data

    @which_data.setter
    def which_data(self, value):
        'change output data but cannot change in middle of run.'
        if value == self._which_data:
            return
        if self.middle_of_run:
            raise AttributeError('This attribute cannot be changed in the '
                                 'middle of a run')

        if value in self.which_data_lu:
            self._which_data = value
        else:
            raise ValueError('which_data must be one of: '
                             '{"standard", "most", "all"}')

    @property
    def chunksize(self):
        return self._chunksize

    @chunksize.setter
    def chunksize(self, value):
        if self.middle_of_run:
            raise AttributeError('chunksize can not be set '
                                 'in the middle of a run')
        else:
            self._chunksize = value

    @property
    def compress(self):
        return self._compress

    @compress.setter
    def compress(self, value):
        if self.middle_of_run:
            raise AttributeError('This attribute cannot be changed in the '
                                 'middle of a run')

        if value in self.compress_lu:
            self._compress = value
        else:
            raise ValueError('compress must be one of: {True, False}')

    @property
    def netcdf_format(self):
        return self._format

    def _update_var_attributes(self, spills):
        '''
        update instance specific self._var_attributes
        '''
        names = " ".join(["{0}: {1}, ".format(ix, spill.name)
                          for ix, spill in enumerate(spills)])
        self._var_attributes['spill_num']['spills_map'] = names
        self._var_attributes['time']['units'] = \
            ('seconds since {0}').format(self._model_start_time.isoformat())

    def _initialize_rootgrp(self, rootgrp, sc):
        'create dimensions for root group and set cf_attributes'
        # fixme: why remove the "T" ??
        rootgrp.setncatts(self.cf_attributes)   # class level attributes
        rootgrp.setncattr('creation_date',  # instance attribute
                          datetime.now().replace(microsecond=0).isoformat())

        # array sizes of weathering processes + mass_components will vary
        # depending on spills. If there are no spills then no weathering
        # data arrays to write - certainly no data to write
        weathering_sz = None

        # create the dimensions we need
        # not sure if it's a convention or if dimensions
        # need to be named...
        dims = [('time', None),     # unlimited
                ('data', None),     # unlimited
                ('two', 2),
                ('three', 3)]

        if 'mass_components' in sc:
            # get it from array shape
            weathering_sz = (sc.num_released, sc['mass_components'].shape[1])
            dims.append(('weathering', weathering_sz[1]))

        for dim in dims:
            rootgrp.createDimension(dim[0], dim[1])

        return rootgrp

    def _update_arrays_to_output(self, sc):
        'create list of variables that we want to put in the file'
        if self.which_data in ('all', 'most'):
            # get shape and dtype from initialized numpy arrays instead
            # of array_types because some array type shapes are None
            for var_name in sc.data_arrays:
                if var_name != 'positions':
                    # handled by latitude, longitude, depth
                    self.arrays_to_output.add(var_name)

            if self.which_data == 'most':
                # remove the ones we don't want
                for var_name in self.usually_skipped_arrays:
                    self.arrays_to_output.discard(var_name)

    def prepare_for_model_run(self,
                              model_start_time,
                              spills,
                              **kwargs):
        """
        .. function:: prepare_for_model_run(model_start_time,
                                            spills,
                                            **kwargs)

        Write global attributes and define dimensions and variables for the
        NetCDF file. This must be done in prepare_for_model_run because if
        the model _state changes, it is rewound and re-run from the beginning.

        If there are existing output files, they are deleted here.

        This takes more than the standard 'cache' argument. Some of these are
        required arguments - they contain None for defaults because a
        non-default argument cannot follow a default argument. Since cache is
        already the 2nd positional argument for the Renderer object, the
        required non-default arguments must be defined following 'cache'.

        If uncertainty is on, then the SpillContainerPair object contains
        identical _data_arrays in both the certain and uncertain
        SpillContainers; the data itself is different, but they contain the
        same type of data arrays. If uncertain, the data arrays for the
        uncertain spill container are written to
        netcdf_filename + '_uncertain.nc'

        :param spills: If 'which_data' flag is set to 'all' or 'most', then
            model must provide the model.spills object
            (SpillContainerPair object) so NetCDF variables can be
            defined for the remaining data arrays.
            If spills is None, but which_data flag is 'all' or
            'most', a ValueError will be raised.
            It does not make sense to write 'all' or 'most' but not
            provide 'model.spills'.
        :type spills: gnome.spill_container.SpillContainerPair object.

        .. note::
            Does not take any other input arguments; however, to keep the
            interface the same for all outputters, define ``**kwargs`` in case
            future outputters require different arguments.

        use super to pass model_start_time, cache=None and
        remaining kwargs to base class method
        """
        super(NetCDFOutput, self).prepare_for_model_run(model_start_time,
                                                        spills, **kwargs)
        if not self.on:
            return

        self.clean_output_files()

        self._update_var_attributes(spills)

        for sc in self.sc_pair.items():
            if sc.uncertain:
                file_ = self._u_netcdf_filename
            else:
                file_ = self.netcdf_filename

            self._file_exists_error(file_)

            # create the netcdf files and write the standard stuff:
            with nc.Dataset(file_, 'w', format=self._format) as rootgrp:

                self._initialize_rootgrp(rootgrp, sc)

                # create a dict with dims {2: 'two', 3: 'three' ...}
                # use this to define the NC variable's shape in code below
                d_dims = {len(dim): name
                          for name, dim in rootgrp.dimensions.iteritems()
                          if len(dim) > 0}

                # create the time/particle_count variables
                self._create_nc_var(rootgrp, 'time', np.float64,
                                    ('time', ), (self._chunksize,))
                self._create_nc_var(rootgrp, 'particle_count', np.int32,
                                    ('time', ), (self._chunksize,))

                self._update_arrays_to_output(sc)

                for var_name in self.arrays_to_output:
                    # the special cases:
                    if var_name in ('latitude', 'longitude', 'depth'):
                        # these don't  map directly to an array_type
                        dt = world_point_type
                        shape = ('data', )
                        chunksz = (self._chunksize,)
                    else:
                        # in prepare_for_model_run, nothing is released but
                        # numpy arrays are initialized with 0 elements so use
                        # the arrays to get shape and dtype instead of the
                        # array_types since array_type could contain None for
                        # shape
                        dt = sc[var_name].dtype

                        if len(sc[var_name].shape) == 1:
                            shape = ('data',)
                            chunksz = (self._chunksize,)
                        else:
                            y_sz = d_dims[sc[var_name].shape[1]]
                            shape = ('data', y_sz)
                            chunksz = (self._chunksize, sc[var_name].shape[1])

                    self._create_nc_var(rootgrp, var_name, dt, shape, chunksz)

                # Add subgroup for mass_balance - could do it w/o subgroup
                if sc.mass_balance:
                    grp = rootgrp.createGroup('mass_balance')
                    # give this grp a dimension for time
                    grp.createDimension('time', None)  # unlimited
                    for key in sc.mass_balance:
                        self._create_nc_var(grp,
                                            var_name=key,
                                            dtype='float',
                                            shape=('time',),
                                            chunksz=(256,),
                                            # smaller chunksize for mass_balance
                                            )

        # need to keep track of starting index for writing data since variable
        # number of particles are released
        self._start_idx = 0
        self._middle_of_run = True

    def _create_nc_var(self, grp, var_name, dtype, shape, chunksz):
        # fixme: why is this even here? it's wrapping a single call???
        if dtype == np.bool:
            # this is not primitive so it is not understood
            # Make it 8-bit unsigned - numpy stores True/False in 1 byte
            dtype = 'u1'

        try:
            if var_name != "non_weathering":
                # fixme: TOTAL Kludge --
                # failing with a bad chunksize error for this particular
                # variable. I have no idea why!!!!
                var = grp.createVariable(var_name,
                                         dtype,
                                         shape,
                                         zlib=self._compress,
                                         chunksizes=chunksz,
                                         )
            else:
                var = grp.createVariable(var_name,
                                         dtype,
                                         shape,
                                         zlib=self._compress,
                                         )
        except RuntimeError as err:
            msg = "\narguments are:"
            msg += "var_name: %s\n" % var_name
            msg += "dtype: %s\n" % dtype
            msg += "shape: %s\n" % shape
            msg += "dims: %s\n" % grp.dimensions
            # msg += "shape_dim: %s\n" % grp.dimensions[shape[0]]
            msg += "zlib: %s\n" % self._compress
            msg += "chunksizes: %s\n" % chunksz
            err.args = (err.args[0] + msg,)
            raise err

        if var_name in var_attributes:
            var.setncatts(var_attributes[var_name])

        if var_name in self._var_attributes:
            var.setncatts(self._var_attributes[var_name])

        return var

    def write_output(self, step_num, islast_step=False):
        """
        Write NetCDF output at the end of the step

        :param int step_num: the model step number you want rendered.
        :param bool islast_step: Default is False.
                                 Flag that indicates that step_num is
                                 last step.
                                 If 'output_last_step' is True then this is
                                 written out

        Use super to call base class write_output method
        """
        super(NetCDFOutput, self).write_output(step_num, islast_step)

        if self.on is False or not self._write_step:
            return None

        for sc in self.cache.load_timestep(step_num).items():
            if sc.uncertain and self._u_netcdf_filename is not None:
                file_ = self._u_netcdf_filename
            else:
                file_ = self.netcdf_filename

            time_stamp = sc.current_time_stamp

            with nc.Dataset(file_, 'a') as rootgrp:
                rg_vars = rootgrp.variables
                idx = len(rg_vars['time'])

                rg_vars['time'][idx] = nc.date2num(time_stamp,
                                                   rg_vars['time'].units,
                                                   rg_vars['time'].calendar)
                pc = rg_vars['particle_count']
                pc[idx] = len(sc)

                _end_idx = self._start_idx + pc[idx]

                # add the data:
                for var_name in self.arrays_to_output:
                    # special case positions:
                    if var_name == 'longitude':
                        rg_vars['longitude'][self._start_idx:_end_idx] = sc['positions'][:, 0]
                    elif var_name == 'latitude':
                        rg_vars['latitude'][self._start_idx:_end_idx] = sc['positions'][:, 1]
                    elif var_name == 'depth':
                        rg_vars['depth'][self._start_idx:_end_idx] = sc['positions'][:, 2]
                    else:
                        rg_vars[var_name][self._start_idx:_end_idx] = sc[var_name]

                # write mass_balance data
                if sc.mass_balance:
                    grp = rootgrp.groups['mass_balance']
                    for key, val in sc.mass_balance.iteritems():
                        if key not in grp.variables:
                            self._create_nc_var(grp,
                                                key, 'float', ('time', ),
                                                (self._chunksize,)
                                                )
                        grp.variables[key][idx] = val

        self._start_idx = _end_idx  # set _start_idx for the next timestep

        return {'netcdf_filename': (self.netcdf_filename,
                                    self._u_netcdf_filename),
                'time_stamp': time_stamp}

    def clean_output_files(self):
        '''
        deletes output files that may be around

        called by prepare_for_model_run

        here in case it needs to be called from elsewhere
        '''
        try:
            os.remove(self.netcdf_filename)
        except OSError:
            pass  # it must not be there
        try:
            os.remove(self._u_netcdf_filename)
        except OSError:
            pass  # it must not be there

    def rewind(self):
        '''
        reset a few parameter and call base class rewind to reset
        internal variables.
        '''
        super(NetCDFOutput, self).rewind()

        self._middle_of_run = False
        self._start_idx = 0

    @classmethod
    def read_data(klass,
                  netcdf_file,
                  time=None,
                  index=None,
                  which_data='standard'):
        """
        Read and create standard data arrays for a netcdf file that was created
        with NetCDFOutput class. Make it a class method since it is
        independent of an instance of the Outputter. The method is put with
        this class because the NetCDF functionality for PyGnome data with CF
        standard is captured here.

        :param str netcdf_file: Name of the NetCDF file from which to read
            the data
        :param datetime time: timestamp at which the data is desired. Looks in
            the netcdf data's 'time' array and finds the closest time to this
            and outputs this data. If both 'time' and 'index' are None, return
            data if file only contains one 'time' else raise an error
        :param int index: Index of the 'time' variable (or time_step). This is
            only used if 'time' is None. If both 'time' and 'index' are None,
            return data if file only contains one 'time' else raise an error
        :param which_data='standard': Which data arrays are desired options are
            ('standard', 'most', 'all', [list_of_array_names])
        :type which_data: string or sequence of strings.

        :return: A dict containing standard data closest to the indicated
            'time'. Standard data is defined as follows:

        Standard data arrays are numpy arrays of size N, where N is number of
        particles released at time step of interest. They are defined by the
        class attribute "standard_arrays", currently:

            'current_time_stamp': datetime object associated with this data
            'positions'         : NX3 array. NetCDF variables: 'longitude', 'latitude', 'depth'
            'status_codes'      : NX1 array. NetCDF variable: 'status_codes'
            'spill_num'         : NX1 array. NetCDF variable: 'spill_num'
            'id'                : NX1 array of particle id. NetCDF variable 'id'
            'mass'              : NX1 array showing 'mass' of each particle

        standard_arrays = ['latitude',
                           'longitude', # pulled from the 'positions' array
                           'depth',
                           'status_codes',
                           'spill_num',
                           'id',
                           'mass',
                           'age',
                           ]

        """
        if not os.path.exists(netcdf_file):
            raise IOError('File not found: {0}'.format(netcdf_file))

        arrays_dict = {}
        with nc.Dataset(netcdf_file) as data:
            _start_ix = 0

            # first find the time index in which we are interested
            time_ = data.variables['time']
            if time is None and index is None:
                # there should only be 1 time in file. Read and
                # return data associated with it
                if len(time_) > 1:
                    raise ValueError('More than one time found in netcdf '
                                     'file. Please specify time/index for '
                                     'which data is desired')
                else:
                    index = 0
            else:
                if time is not None:
                    time_offset = nc.date2num(time, time_.units,
                                              calendar=time_.calendar)
                    if time_offset < 0:
                        'desired time is before start of model'
                        index = 0
                    else:
                        index = abs(time_[:] - time_offset).argmin()
                elif index is not None:
                    if index < 0:
                        index = len(time_) + index

            for idx in range(index):
                _start_ix += data.variables['particle_count'][idx]

            _stop_ix = _start_ix + data.variables['particle_count'][index]
            elem = data.variables['particle_count'][index]

            c_time = nc.num2date(time_[index], time_.units,
                                 calendar=time_.calendar)

            arrays_dict['current_time_stamp'] = np.array(c_time)

            # figure out what arrays to read in:
            if which_data == 'standard':
                data_arrays = set(klass.standard_arrays)
                # swap out positions:
                for name in ('latitude', 'longitude', 'depth'):
                    data_arrays.discard(name)
                data_arrays.add('positions')
            elif which_data == 'all':
                # pull them from the nc file
                data_arrays = set(data.variables.keys())
                # remove the irrelevant ones:
                for name in ('time', 'particle_count',
                             'latitude', 'longitude', 'depth'):
                    data_arrays.discard(name)
                data_arrays.add('positions')
            else:  # should be list of data arrays
                data_arrays = set(which_data)

            # get the data
            for array_name in data_arrays:
                # special case time and positions:
                if array_name == 'positions':
                    positions = np.zeros((elem, 3), dtype=world_point_type)
                    positions[:, 0] = \
                        data.variables['longitude'][_start_ix:_stop_ix]
                    positions[:, 1] = \
                        data.variables['latitude'][_start_ix:_stop_ix]
                    positions[:, 2] = \
                        data.variables['depth'][_start_ix:_stop_ix]

                    arrays_dict['positions'] = positions
                else:
                    arrays_dict[array_name] = \
                        data.variables[array_name][_start_ix:_stop_ix]

            # get mass_balance
            weathering_data = {}
            if 'mass_balance' in data.groups:
                mb = data.groups['mass_balance']
                for key, val in mb.variables.iteritems():
                    'assume SI units'
                    weathering_data[key] = val[index]

        return (arrays_dict, weathering_data)

    def save(self, saveloc, references=None, name=None):
        '''
        See baseclass :meth:`~gnome.persist.Savable.save`

        update netcdf_filename to point to saveloc, then call base class save
        using super
        '''
        json_ = self.serialize('save')
        fname = os.path.split(json_['netcdf_filename'])[1]
        json_['netcdf_filename'] = os.path.join('./', fname)
        return self._json_to_saveloc(json_, saveloc, references, name)

    @classmethod
    def loads(cls, json_data, saveloc, references=None):
        '''
        loads object from json_data

        update path to 'netcdf_filename' in json_data, then finish loading
        by calling super class' load method

        :param saveloc: location of data files. Setup path of netcdf_filename
            to this location

        Optional parameter

        :param references: references object - if this is called by the Model,
            it will pass a references object. It is not required.
        '''
        json_data['netcdf_filename'] = \
            os.path.join(saveloc, json_data['netcdf_filename'])

        return super(NetCDFOutput, cls).loads(json_data, saveloc, references)
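
A minimal usage sketch for the outputter above (hedged: the import path and
the model wiring follow typical PyGnome usage but are assumptions here; the
file name and the 'rise_vel' array are illustrative):

    from datetime import timedelta

    from gnome.outputters import NetCDFOutput

    netcdf_out = NetCDFOutput('trajectory.nc',
                              which_data='most',
                              output_timestep=timedelta(hours=1))

    # customize the set of output arrays before the run starts:
    netcdf_out.arrays_to_output.add('rise_vel')

    # model.outputters += netcdf_out   # then run the model as usual

    # afterwards, read back the data closest to the last time step:
    # arrays, mass_balance = NetCDFOutput.read_data('trajectory.nc', index=-1)
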
Example #21
class ComponentMover(CurrentMoversBase, Serializable):

    # _state = copy.deepcopy(CyMover._state)
    _state = copy.deepcopy(CurrentMoversBase._state)

    _update = ['scale_refpoint',
               'pat1_angle', 'pat1_speed', 'pat1_speed_units',
               'pat1_scale_to_value',
               'pat2_angle', 'pat2_speed', 'pat2_speed_units',
               'pat2_scale_to_value', 'scale_by']
    _create = []
    _create.extend(_update)
    _state.add(update=_update, save=_create)
    _state.add_field([Field('filename1', save=True, read=True, isdatafile=True,
                            test_for_eq=False),
                      Field('filename2', save=True, read=True, isdatafile=True,
                            test_for_eq=False),
                      Field('wind', save=True, update=True,
                            save_reference=True)])

    _schema = ComponentMoverSchema

    def __init__(self, filename1, filename2=None, wind=None,
                 **kwargs):
        """
        Uses super to invoke base class __init__ method.

        :param filename1: file containing currents for the first Cats pattern

        Optional parameters (kwargs).
        Defaults are defined by CyCatsMover object.

        :param filename2: file containing currents for the second Cats pattern

        :param wind: A gnome.environment.Wind object to be used to drive the
                     CatsMovers.  Will want a warning that mover will
                     not be active without a wind
        :param scale: A boolean to indicate whether to scale value
                      at reference point or not
        :param scale_value: Value used for scaling at reference point
        :param scale_refpoint: Reference location (long, lat, z).
                               The scaling applied to all data is determined
                               by scaling the raw value at this location.

        Remaining kwargs are passed onto Mover's __init__ using super.
        See Mover documentation for remaining valid kwargs.
        """

        if not os.path.exists(filename1):
            raise ValueError('Path for Cats filename1 does not exist: {0}'
                             .format(filename1))

        if filename2 is not None:
            if not os.path.exists(filename2):
                raise ValueError('Path for Cats filename2 does not exist: {0}'
                                 .format(filename2))

        self.filename1 = filename1
        self.filename2 = filename2

        self.mover = CyComponentMover()
        self.mover.text_read(filename1, filename2)

        self._wind = None
        if wind is not None:
            self.wind = wind

        # TODO: no need to check for None since properties that are None
        #       are not persisted

        # I think this is required...
        if 'scale_refpoint' in kwargs:
            self.scale_refpoint = kwargs.pop('scale_refpoint')

        super(ComponentMover, self).__init__(**kwargs)

    def __repr__(self):
        """
        unambiguous representation of object
        """
        return 'ComponentMover(filename={0})'.format(self.filename1)

    # Properties
    pat1_angle = property(lambda self: self.mover.pat1_angle,
                          lambda self, val: setattr(self.mover, 'pat1_angle',
                                                    val))

    pat1_speed = property(lambda self: self.mover.pat1_speed,
                          lambda self, val: setattr(self.mover, 'pat1_speed',
                                                    val))

    pat1_speed_units = property(lambda self: self.mover.pat1_speed_units,
                                lambda self, val: setattr(self.mover,
                                                          'pat1_speed_units',
                                                          val))

    pat1_scale_to_value = property(lambda self: self.mover.pat1_scale_to_value,
                                   lambda self, val:
                                   setattr(self.mover, 'pat1_scale_to_value',
                                           val))

    pat2_angle = property(lambda self: self.mover.pat2_angle,
                          lambda self, val: setattr(self.mover, 'pat2_angle',
                                                    val))

    pat2_speed = property(lambda self: self.mover.pat2_speed,
                          lambda self, val: setattr(self.mover, 'pat2_speed',
                                                    val))

    pat2_speed_units = property(lambda self: self.mover.pat2_speed_units,
                                lambda self, val: setattr(self.mover,
                                                          'pat2_speed_units',
                                                          val))

    pat2_scale_to_value = property(lambda self: self.mover.pat2_scale_to_value,
                                   lambda self, val:
                                   setattr(self.mover, 'pat2_scale_to_value',
                                           val))

    scale_by = property(lambda self: self.mover.scale_by,
                        lambda self, val: setattr(self.mover, 'scale_by', val))

    extrapolate = property(lambda self: self.mover.extrapolate,
                           lambda self, val: setattr(self.mover,
                                                     'extrapolate',
                                                     val))

    use_averaged_winds = property(lambda self: self.mover.use_averaged_winds,
                                  lambda self, val:
                                  setattr(self.mover, 'use_averaged_winds',
                                          val))

    wind_power_factor = property(lambda self: self.mover.wind_power_factor,
                                 lambda self, val: setattr(self.mover,
                                                           'wind_power_factor',
                                                           val))

    past_hours_to_average = property(lambda self: (self.mover
                                                   .past_hours_to_average),
                                     lambda self, val:
                                     setattr(self.mover,
                                             'past_hours_to_average', val))

    scale_factor_averaged_winds = property(lambda self: self.mover.scale_factor_averaged_winds,
                                           lambda self, val: setattr(self.mover,
                                                                     'scale_factor_averaged_winds',
                                                                     val))

    use_original_scale_factor = property(lambda self: self.mover.use_original_scale_factor,
                                         lambda self, val: setattr(self.mover,
                                                                   'use_original_scale_factor',
                                                                   val))

    @property
    def scale_refpoint(self):
        return self.mover.ref_point

    @scale_refpoint.setter
    def scale_refpoint(self, val):
        '''
        Must be a tuple of length 2 or 3: (long, lat, z). If only (long, lat)
        is given, then z is set to 0.
        '''
        if len(val) == 2:
            self.mover.ref_point = (val[0], val[1], 0.)
        else:
            self.mover.ref_point = val

    @property
    def wind(self):
        return self._wind

    @wind.setter
    def wind(self, wind_obj):
        if not isinstance(wind_obj, Wind):
            raise TypeError('wind must be of type environment.Wind')

        self.mover.set_ossm(wind_obj.ossm)
        self._wind = wind_obj

    def get_grid_data(self):
        """
            Invokes the GetToplogyHdl method of TriGridVel_c object
        """
        return self.get_triangles()

    def get_center_points(self):
        return self.get_triangle_center_points()

    def get_scaled_velocities(self, model_time):
        """
        Get file values scaled to ref pt value, with tide applied (if any)
        """
        return self.mover._get_velocity_handle()

    def serialize(self, json_='webapi'):
        """
        Since the 'wind' property is saved as a reference when used with the
        save file 'save' option, we need to add the appropriate node to the
        ComponentMover schema
        """
        dict_ = self.to_serialize(json_)
        schema = self.__class__._schema()

        if json_ == 'webapi' and 'wind' in dict_:
            schema.add(WindSchema(name='wind'))

        return schema.serialize(dict_)

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for wind object
        """
        schema = cls._schema()

        if 'wind' in json_:
            # for 'webapi', there will be nested Wind structure
            # for 'save' option, there should be no nested 'wind'. It is
            # removed, loaded and added back after deserialization
            schema.add(WindSchema())

        return schema.deserialize(json_)
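
A hedged wiring sketch for the mover above (the Cats file names are
placeholders, and constant_wind is assumed to be the usual
gnome.environment helper):

    from gnome.environment import constant_wind

    # a steady 10 knot wind from 270 degrees to drive the component patterns
    wind = constant_wind(10., 270., 'knots')

    c_mover = ComponentMover('tides_pat1.cur', 'tides_pat2.cur', wind=wind,
                             scale_refpoint=(-124.5, 48.3, 0.))
    c_mover.pat1_angle = 315.      # degrees
    c_mover.pat1_speed = 19.24     # in pat1_speed_units
    # model.movers += c_mover
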
Example #22
class IceMover(CurrentMoversBase, Serializable):

    _update = ['uncertain_cross', 'uncertain_along',
               'current_scale', 'extrapolate']
    _save = ['uncertain_cross', 'uncertain_along',
             'current_scale', 'extrapolate']
    _state = copy.deepcopy(CurrentMoversBase._state)

    _state.add(update=_update, save=_save)
    _state.add_field([Field('filename', save=True, read=True,
                            isdatafile=True, test_for_eq=False),
                      Field('topology_file', save=True, read=True,
                            isdatafile=True, test_for_eq=False)])

    _schema = IceMoverSchema

    def __init__(self, filename,
                 topology_file=None,
                 extrapolate=False,
                 time_offset=0,
                 **kwargs):
        """
        Initialize an IceMover

        :param filename: absolute or relative path to the data file:
                         could be netcdf or filelist
        :param topology_file=None: absolute or relative path to topology file.
                                   If not given, the IceMover will
                                   compute the topology from the data file.
        :param active_start: datetime when the mover should be active
        :param active_stop: datetime after which the mover should be inactive
        :param current_scale: Value to scale current data
        :param uncertain_duration: how often does a given uncertain element
                                   get reset
        :param uncertain_time_delay: when does the uncertainty kick in.
        :param uncertain_cross: Scale for uncertainty perpendicular to the flow
        :param uncertain_along: Scale for uncertainty parallel to the flow
        :param extrapolate: Allow current data to be extrapolated
                            before and after file data
        :param time_offset: Time zone shift if data is in GMT

        uses super, super(IceMover,self).__init__(\*\*kwargs)
        """

        # NOTE: will need to add uncertainty parameters and other dialog fields
        #       use super with kwargs to invoke base class __init__

        # if child is calling, the self.mover is set by child - do not reset
        if type(self) == IceMover:
            self.mover = CyIceMover()

        if not os.path.exists(filename):
            raise ValueError('Path for current file does not exist: {0}'
                             .format(filename))

        if topology_file is not None:
            if not os.path.exists(topology_file):
                raise ValueError('Path for Topology file does not exist: {0}'
                                 .format(topology_file))

        # check if this is stored with cy_ice_mover?
        self.filename = filename
        self.name = os.path.split(filename)[1]

        # check if this is stored with cy_ice_mover?
        self.topology_file = topology_file
        self.mover.text_read(filename, topology_file)

        self.extrapolate = extrapolate
        self.mover.extrapolate_in_time(extrapolate)

        self.mover.offset_time(time_offset * 3600.)

        super(IceMover, self).__init__(**kwargs)

    def __repr__(self):
        return ('IceMover('
                'uncertain_duration={0.uncertain_duration}, '
                'uncertain_time_delay={0.uncertain_time_delay}, '
                'uncertain_cross={0.uncertain_cross}, '
                'uncertain_along={0.uncertain_along}, '
                'active_start={1.active_start}, '
                'active_stop={1.active_stop}, '
                'on={1.on})'
                .format(self.mover, self))

    def __str__(self):
        return ('IceMover - current _state.\n'
                '  uncertain_duration={0.uncertain_duration}\n'
                '  uncertain_time_delay={0.uncertain_time_delay}\n'
                '  uncertain_cross={0.uncertain_cross}\n'
                '  uncertain_along={0.uncertain_along}\n'
                '  active_start time={1.active_start}\n'
                '  active_stop time={1.active_stop}\n'
                '  current on/off status={1.on}'
                .format(self.mover, self))

    # Define properties using lambda functions, accessible via fget/fset
    # as follows:
    uncertain_cross = property(lambda self: self.mover.uncertain_cross,
                               lambda self, val: setattr(self.mover,
                                                         'uncertain_cross',
                                                         val))

    uncertain_along = property(lambda self: self.mover.uncertain_along,
                               lambda self, val: setattr(self.mover,
                                                         'uncertain_along',
                                                         val))

    current_scale = property(lambda self: self.mover.current_scale,
                             lambda self, val: setattr(self.mover,
                                                       'current_scale',
                                                       val))

    extrapolate = property(lambda self: self.mover.extrapolate,
                           lambda self, val: setattr(self.mover,
                                                     'extrapolate',
                                                     val))

    time_offset = property(lambda self: self.mover.time_offset / 3600.,
                           lambda self, val: setattr(self.mover,
                                                     'time_offset',
                                                     val * 3600.))

    def get_grid_data(self):
        if self.mover._is_triangle_grid():
            return self.get_triangles()
        else:
            return self.get_cells()

    def get_grid_bounding_box(self, grid_data=None, box_to_merge=None):
        '''
            Return a bounding box surrounding the grid data.

            :param grid_data: The point data of our grid.
            :type grid_data: A sequence of 3-tuples or 4-tuples containing
                             (long, lat) pairs.

            :param box_to_merge: A bounding box to surround in combination
                                 with our grid data.  This allows us to pad
                                 the bounding box that we generate.
            :type box_to_merge: A bounding box (extent) of the form:
                                ((left, bottom),
                                 (right, top))
        '''
        if grid_data is None:
            grid_data = self.get_grid_data()

        dtype = grid_data.dtype.descr
        unstructured_type = dtype[0][1]

        longs = (grid_data
                 .view(dtype=unstructured_type)
                 .reshape(-1, len(dtype))[:, 0])
        lats = (grid_data
                .view(dtype=unstructured_type)
                .reshape(-1, len(dtype))[:, 1])

        left, right = longs.min(), longs.max()
        bottom, top = lats.min(), lats.max()

        if (box_to_merge is not None and
                len(box_to_merge) == 2 and
                [len(p) for p in box_to_merge] == [2, 2]):
            if left > box_to_merge[0][0]:
                left = box_to_merge[0][0]

            if right < box_to_merge[1][0]:
                right = box_to_merge[1][0]

            if bottom > box_to_merge[0][1]:
                bottom = box_to_merge[0][1]

            if top < box_to_merge[1][1]:
                top = box_to_merge[1][1]

        return ((left, bottom), (right, top))

    def get_center_points(self):
        if self.mover._is_triangle_grid():
            return self.get_triangle_center_points()
        else:
            return self.get_cell_center_points()

    def get_scaled_velocities(self, model_time):
        """
        :param model_time=0:
        """
        num_tri = self.mover.get_num_triangles()

        if self.mover._is_triangle_grid():
            num_cells = num_tri
        else:
            num_cells = num_tri / 2

        vels = np.zeros(num_cells, dtype=basic_types.velocity_rec)
        self.mover.get_scaled_velocities(model_time, vels)

        return vels

    def get_ice_velocities(self, model_time):
        """
        :param model_time=0:
        """
        num_tri = self.mover.get_num_triangles()

        vels = np.zeros(num_tri, dtype=basic_types.velocity_rec)
        self.mover.get_ice_velocities(model_time, vels)

        return vels

    def get_movement_velocities(self, model_time):
        """
        :param model_time=0:
        """
        num_tri = self.mover.get_num_triangles()

        vels = np.zeros(num_tri, dtype=basic_types.velocity_rec)
        self.mover.get_movement_velocities(model_time, vels)

        return vels

    def get_ice_fields(self, model_time):
        """
        :param model_time=0:
        """
        num_tri = self.mover.get_num_triangles()
        num_cells = num_tri / 2

        frac_coverage = np.zeros(num_cells, dtype=np.float64)
        thickness = np.zeros(num_cells, dtype=np.float64)

        self.mover.get_ice_fields(model_time, frac_coverage, thickness)

        return frac_coverage, thickness

    def export_topology(self, topology_file):
        """
        :param topology_file=None: absolute or relative path where
                                   topology file will be written.
        """
        if topology_file is None:
            raise ValueError('Topology file path required: {0}'
                             .format(topology_file))

        self.mover.export_topology(topology_file)

    def extrapolate_in_time(self, extrapolate):
        """
        :param extrapolate=false: allow current data to be extrapolated
                                  before and after file data.
        """
        self.mover.extrapolate_in_time(extrapolate)
        self.extrapolate = extrapolate

    def offset_time(self, time_offset):
        """
        :param time_offset=0: allow data to be in GMT with a time zone offset
                              (hours).
        """
        self.mover.offset_time(time_offset * 3600.)

    def get_offset_time(self):
        """
        Return the time zone offset (hours) currently applied to the data.
        """
        return (self.mover.get_offset_time()) / 3600.
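
A hedged sketch of querying the IceMover above for ice fields (the file
name is a placeholder; whether the underlying Cython mover expects a
datetime or seconds for model_time is not shown in this listing, so a
datetime is assumed):

    from datetime import datetime

    ice_mover = IceMover('ice_currents.nc', extrapolate=True)

    t = datetime(2015, 5, 14, 0)
    frac_coverage, thickness = ice_mover.get_ice_fields(t)  # per grid cell
    ice_vels = ice_mover.get_ice_velocities(t)   # velocity_rec per triangle
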
Example #23
class GridCurrentMover(CurrentMoversBase, Serializable):

    _update = ['uncertain_cross', 'uncertain_along',
               'current_scale', 'extrapolate', 'time_offset']
    _save = ['uncertain_cross', 'uncertain_along',
             'current_scale', 'extrapolate', 'time_offset']
    _state = copy.deepcopy(CurrentMoversBase._state)

    _state.add(update=_update, save=_save)
    _state.add_field([Field('filename', save=True, read=True,
                            isdatafile=True, test_for_eq=False),
                      Field('topology_file', save=True, read=True,
                            isdatafile=True, test_for_eq=False),
                      Field('is_data_on_cells', save=False, read=True)])

    _schema = GridCurrentMoverSchema

    def __init__(self, filename,
                 topology_file=None,
                 extrapolate=False,
                 time_offset=0,
                 current_scale=1,
                 uncertain_along=0.5,
                 uncertain_across=0.25,
                 num_method='Euler',
                 **kwargs):
        """
        Initialize a GridCurrentMover

        :param filename: absolute or relative path to the data file:
                         could be netcdf or filelist
        :param topology_file=None: absolute or relative path to topology file.
                                   If not given, the GridCurrentMover will
                                   compute the topology from the data file.
        :param active_start: datetime when the mover should be active
        :param active_stop: datetime after which the mover should be inactive
        :param current_scale: Value to scale current data
        :param uncertain_duration: how often does a given uncertain element
                                   get reset
        :param uncertain_time_delay: when does the uncertainty kick in.
        :param uncertain_cross: Scale for uncertainty perpendicular to the flow
        :param uncertain_along: Scale for uncertainty parallel to the flow
        :param extrapolate: Allow current data to be extrapolated
                            before and after file data
        :param time_offset: Time zone shift if data is in GMT
        :param num_method: Numerical method for calculating movement delta.
                           Default: 'Euler';
                           option: Runge-Kutta 4 ('RK4')

        uses super, super(GridCurrentMover,self).__init__(\*\*kwargs)
        """
        # if child is calling, the self.mover is set by child - do not reset
        if type(self) == GridCurrentMover:
            self.mover = CyGridCurrentMover()

        if not os.path.exists(filename):
            raise ValueError('Path for current file does not exist: {0}'
                             .format(filename))

        if topology_file is not None:
            if not os.path.exists(topology_file):
                raise ValueError('Path for Topology file does not exist: {0}'
                                 .format(topology_file))

        super(GridCurrentMover, self).__init__(**kwargs)

        # check if this is stored with cy_gridcurrent_mover?
        self.filename = filename
        self.name = os.path.split(filename)[1]

        # check if this is stored with cy_gridcurrent_mover?
        self.topology_file = topology_file
        self.current_scale = current_scale
        self.uncertain_along = uncertain_along
        self.uncertain_cross = uncertain_across

        self.mover.text_read(filename, topology_file)
        self.mover.extrapolate_in_time(extrapolate)
        self.mover.offset_time(time_offset * 3600.)

        if type(self) != CurrentCycleMover:
            self.real_data_start = sec_to_datetime(self.mover.get_start_time())
            self.real_data_stop = sec_to_datetime(self.mover.get_end_time())

        self.num_method = num_method

        # super(GridCurrentMover, self).__init__(**kwargs)

        if self.topology_file is None:
            self.topology_file = filename + '.dat'
            self.export_topology(self.topology_file)

    def __repr__(self):
        return ('GridCurrentMover('
                'uncertain_duration={0.uncertain_duration},'
                'uncertain_time_delay={0.uncertain_time_delay}, '
                'uncertain_cross={0.uncertain_cross}, '
                'uncertain_along={0.uncertain_along}, '
                'active_start={1.active_start}, '
                'active_stop={1.active_stop}, '
                'on={1.on})'
                .format(self.mover, self))

    def __str__(self):
        return ('GridCurrentMover - current _state.\n'
                '  uncertain_duration={0.uncertain_duration}\n'
                '  uncertain_time_delay={0.uncertain_time_delay}\n'
                '  uncertain_cross={0.uncertain_cross}\n'
                '  uncertain_along={0.uncertain_along}\n'
                '  active_start time={1.active_start}\n'
                '  active_stop time={1.active_stop}\n'
                '  current on/off status={1.on}'
                .format(self.mover, self))

    # Define properties using lambda functions, accessible via fget/fset
    # as follows:
    uncertain_cross = property(lambda self: self.mover.uncertain_cross,
                               lambda self, val: setattr(self.mover,
                                                         'uncertain_cross',
                                                         val))

    uncertain_along = property(lambda self: self.mover.uncertain_along,
                               lambda self, val: setattr(self.mover,
                                                         'uncertain_along',
                                                         val))

    current_scale = property(lambda self: self.mover.current_scale,
                             lambda self, val: setattr(self.mover,
                                                       'current_scale',
                                                       val))

    extrapolate = property(lambda self: self.mover.extrapolate,
                           lambda self, val: setattr(self.mover,
                                                     'extrapolate',
                                                     val))

    time_offset = property(lambda self: self.mover.time_offset / 3600.,
                           lambda self, val: setattr(self.mover,
                                                     'time_offset',
                                                     val * 3600.))

    @property
    def num_method(self):
        return self._num_method

    @num_method.setter
    def num_method(self, val):
        self.mover.num_method = val
        self._num_method = val

    @property
    def is_data_on_cells(self):
        return self.mover._is_data_on_cells()

    def get_grid_data(self):
        """
            The main function for getting grid data from the mover
        """
        if self.mover._is_triangle_grid():
            return self.get_triangles()
        else:
            return self.get_cells()

    def get_center_points(self):
        if self.mover._is_triangle_grid():
            if self.mover._is_data_on_cells():
                return self.get_triangle_center_points()
            else:
                return self.get_points()
        else:
            return self.get_cell_center_points()

    def get_scaled_velocities(self, time):
        """
        :param time=0:
        """
        num_tri = self.mover.get_num_triangles()
        # will need to update this for regular grids
        if self.mover._is_triangle_grid():
            if self.mover._is_data_on_cells():
                num_cells = num_tri
            else:
                num_vertices = self.mover.get_num_points()
                num_cells = num_vertices
        else:
            num_cells = num_tri / 2

        vels = np.zeros(num_cells, dtype=basic_types.velocity_rec)

        self.mover.get_scaled_velocities(time, vels)

        return vels

    def export_topology(self, topology_file):
        """
        :param topology_file=None: absolute or relative path where
                                   topology file will be written.
        """
        if topology_file is None:
            raise ValueError('Topology file path required: {0}'
                             .format(topology_file))

        self.mover.export_topology(topology_file)

    def extrapolate_in_time(self, extrapolate):
        """
        :param extrapolate=false: allow current data to be extrapolated
                                  before and after file data.
        """
        self.mover.extrapolate_in_time(extrapolate)

    def offset_time(self, time_offset):
        """
        :param offset_time=0: allow data to be in GMT with a time zone offset
                              (hours).
        """
        self.mover.offset_time(time_offset * 3600.)

    def get_offset_time(self):
        """
        :param offset_time=0: allow data to be in GMT with a time zone offset
                              (hours).
        """
        return self.mover.get_offset_time() / 3600.

    def get_start_time(self):
        """
        This will be the real_data_start time (seconds).
        """
        return self.mover.get_start_time()

    def get_end_time(self):
        """
        This will be the real_data_stop time (seconds).
        """
        return self.mover.get_end_time()

    def get_num_method(self):
        return self.mover.num_method
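
A hedged sketch for the GridCurrentMover above, using the RK4 option named
in the docstring (file names are placeholders):

    curr_mover = GridCurrentMover('ny_currents.nc',
                                  topology_file='ny_currents.dat',
                                  num_method='RK4')
    curr_mover.extrapolate_in_time(True)  # use data beyond its time range

    print(curr_mover.is_data_on_cells)    # True if data lives on grid cells
    # model.movers += curr_mover
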
Example #24
class Waves(Environment, serializable.Serializable):
    """
    class to compute the wave height for a time series

    At the moment it only handles a single point (not spatially variable),
    but it may be extended in the future
    """
    _ref_as = 'waves'
    _state = copy.deepcopy(Environment._state)
    _state += [
        Field('water', save=True, update=True, save_reference=True),
        Field('wind', save=True, update=True, save_reference=True)
    ]
    _schema = WavesSchema

    _state['name'].test_for_eq = False

    def __init__(self, wind=None, water=None, **kwargs):
        """
        wind and water must be set before running the model; however, these
        can be set after object construction

        :param wind: A wind object to get the wind speed.
                     This should be a moving average wind object.
        :type wind: a Wind type, or equivalent

        :param water: water properties, specifically fetch and wave height
        :type water: environment.Water object.

        .. note:: must take **kwargs since the base class supports more
            inputs, like 'name'. The new_from_dict() alternate constructor
            will invoke __init__ with the arguments supported by the base
            class
        """

        self.wind = wind
        self.water = water

        # turn off make_default_refs if references are defined and
        # make_default_refs is False
        if self.water is not None and self.wind is not None:
            kwargs['make_default_refs'] = \
                kwargs.pop('make_default_refs', False)

        super(Waves, self).__init__(**kwargs)

    # def update_water(self):
    #     """
    #     updates values from water object

    #     this should be called when you want to make sure new data is Used

    #     note: yes, this is kludgy, but it avoids calling self.water.fetch all over the place
    #     """
    #     self.wave_height = self.water.wave_height
    #     self.fetch = self.water.fetch
    #     self.density = self.water.density

    def get_value(self, time):
        """
        return the rms wave height, peak period and percent wave breaking
        at a given time. Does not currently support location-variable waves.

        :param time: the time you want the wave data for 
        :type time: datetime.datetime object

        :returns: wave_height, peak_period, whitecap_fraction,
                  dissipation_energy

        Units:
          wave_height: meters (RMS height)
          peak_period: seconds
          whitecap_fraction: unit-less fraction
          dissipation_energy: not sure!! # fixme!
        """
        # make sure we are up to date with the water object
        wave_height = self.water.wave_height

        if wave_height is None:
            U = self.wind.get_value(time)[0]  # only need velocity
            H = self.compute_H(U)
        else:  # user specified a wave height
            H = wave_height
            U = self.comp_psuedo_wind(H)
        Wf = self.comp_whitecap_fraction(U)
        T = self.comp_period(U)

        De = self.disp_wave_energy(H)

        return H, T, Wf, De
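
    # Hedged usage sketch (names are illustrative; assumes Wind and
    # Water objects constructed elsewhere):
    #
    #   waves = Waves(wind=some_wind, water=some_water)
    #   H, T, Wf, De = waves.get_value(model_time)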

    def get_emulsification_wind(self, time):
        """
        Return the right wind for the wave climate

        If a wave height was specified, then you need the greater of the
        real or pseudo wind.

        If not, then you need the actual wind.

        The idea here is that if there is a low wind, but the user specified
        waves, we really want emulsification that makes sense for the waves.
        But if the actual wind is stronger than that for the given wave
        height, we should use the actual wind.

        fixme: I'm not sure this is right -- if we stick with the wave energy given
        by the user for dispersion, why not for emulsification?

        """
        wave_height = self.water.wave_height
        U = self.wind.get_value(time)[0]  # only need velocity
        if wave_height is None:
            return U
        else:  # user specified a wave height
            return max(U, self.comp_psuedo_wind(wave_height))

    # def get_pseudo_wind(self, time):
    #     wave_height = self.water.wave_height
    #     if wave_height is None:
    #         U = self.wind.get_value(time)[0]  # only need velocity
    #         H = self.compute_H(U)
    #     else:  # user specified a wave height
    #         H = wave_height
    #     U = self.comp_psuedo_wind(H)

    #     return U

    def compute_H(self, U):
        """
        compute the wave height

        :param U: wind speed
        :type U: floating point number in m/s units

        :returns Hrms: RMS wave height in meters
        """
        fetch = self.water.fetch
        ## wind stress factor
        ## Transition at U = 4.433049525859078 for linear scale with wind speed.
        ##   4.433049525859078 is where the solutions match
        ws = 0.71 * U**1.23 if U < 4.433049525859078 else U  # wind stress factor

        # 2268*ws**2 is the limit of the fetch-limited case.
        if fetch is not None and fetch < 2268 * ws**2:  # fetch limited case
            H = 0.0016 * sqrt(fetch / g) * ws
            H = 0.0016 * sqrt(fetch / g) * ws
        else:  # fetch unlimited
            H = 0.243 * ws * ws / g

        Hrms = 0.707 * H

        # arbitrary limit at 30 m -- about the largest waves recorded
        # fixme -- this really depends on water depth -- should take that into account?
        return Hrms if Hrms < 30.0 else 30.0
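
    # Where does the transition speed come from? It is the root of
    # 0.71 * U**1.23 == U, i.e. U = (1 / 0.71)**(1 / 0.23)
    # ~= 4.4330495 m/s, so the wind stress factor ws is continuous
    # across the low-wind / high-wind branches.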

    def comp_psuedo_wind(self, H):
        """
        Compute the wind speed to use for the whitecap fraction

        Used if the wave height is specified.

        Unlimited fetch is assumed: this is the reverse of compute_H

        :param H: given wave height.
        """

        ##U_h = 2.0286*g*sqrt(H/g) # Bill's version
        U_h = sqrt(g * H / 0.243)
        if U_h < 4.433049525859078:  # check if low wind case
            U_h = (U_h / 0.71)**0.813008
        return U_h

    def comp_whitecap_fraction(self, U):
        """
        compute the white capping fraction

        This and wave height drives dispersion

        This is based on the formula in:
        Lehr and Simecek-Beatty
        The Relation of Langmuir Circulation Processes to the Standard Oil Spill Spreading, Dispersion and Transport Algorithms
        Spill Sci. and Tech. Bull, 6:247-253 (2000)
        (maybe typo -- didn't match)

        Should look in:  Ocean Waves Breaking and Marine Aerosol Fluxes
                         By Stanislaw R. Massel

        """

        ## Monahan (JPO, 1971) time constant characterizing exponential
        ## whitecap decay. The saltwater value for Tm is 3.85 sec while
        ## the freshwater value is 2.54 sec.
        #  interpolate with salinity:
        Tm = 0.03742857 * self.water.salinity + 2.54

        if U < 4.0:  # m/s
            ## linear fit from 0 to the 4m/s value from Ding and Farmer
            ## maybe should be a exponential / quadratic fit?
            ## or zero less than 3, then a sharp increase to 4m/s?
            fw = (0.0125 * U) / Tm
        else:
            #fw = (0.01*U + 0.01) / Tm  # Ding and Farmer (JPO 1994)
            # old ADIOS had a .5 factor - not sure why but we'll keep it for now
            fw = .5 * (0.01 * U + 0.01) / Tm  # Ding and Farmer (JPO 1994)

        return fw if fw <= 1.0 else 1.0  # only with U > 200m/s!
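
    # Sanity check of the salinity interpolation above (assuming 35 psu
    # seawater): 0.03742857 * 35 + 2.54 ~= 3.85 s, the saltwater value,
    # and 0.03742857 * 0 + 2.54 = 2.54 s, the freshwater value.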

    def comp_period(self, U):
        """
        Compute the mean wave period 
        """
        # wind stress factor
        ## fixme: check for discontinuity at large fetch..
        ##        Is this a bit low??? 32 m/s -> T=15.7 s
        wave_height = self.water.wave_height
        fetch = self.water.fetch
        if wave_height is None:
            ws = U * 0.71 * U**1.23  ## fixme -- linear for large windspeed?
            if (fetch is None) or (fetch >= 2268 * ws**2):  # fetch unlimited
                T = 0.83 * ws
            else:
                T = 0.06238 * (fetch * ws)**0.3333333333  # eq 3-34 (SPM?)
        else:  # user-specified wave height
            T = 7.508 * sqrt(wave_height)
        return T

    def disp_wave_energy(self, H):
        """
        Compute the dissipative wave energy
        """
        # fixme: does this really only depend on height?
        return 0.0034 * self.water.density * g * H**2
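
    # e.g. for seawater (density ~1025 kg/m^3, g ~9.8 m/s^2) and
    # H = 1 m: 0.0034 * 1025 * 9.8 * 1**2 ~= 34.15 (units uncertain,
    # per the fixme above).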

    def serialize(self, json_='webapi'):
        """
        Since the 'wind'/'water' properties are saved as references in the
        save file, we need to add the appropriate nodes to the Waves schema
        for 'webapi'
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()
        if json_ == 'webapi':
            if self.wind:
                # add wind schema
                schema.add(WindSchema(name='wind'))
            if self.water:
                schema.add(WaterSchema(name='water'))

        serial = schema.serialize(toserial)

        return serial

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for wind object
        """
        schema = cls._schema()
        if 'wind' in json_:
            schema.add(WindSchema(name='wind'))

        if 'water' in json_:
            schema.add(WaterSchema(name='water'))
        _to_dict = schema.deserialize(json_)

        return _to_dict
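
    # Hedged round-trip sketch (assumes a fully configured Waves
    # object named waves):
    #
    #   json_ = waves.serialize('webapi')   # includes wind/water nodes
    #   dict_ = Waves.deserialize(json_)    # schema rebuilt to match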

# wind.get_timeseries(self, datetime=None, units=None, format='r-theta')

    def prepare_for_model_run(self, model_time):
        if self.wind is None:
            msg = "wind object not defined for " + self.__class__.__name__
            raise ReferencedObjectNotSet(msg)

        if self.water is None:
            msg = "water object not defined for " + self.__class__.__name__
            raise ReferencedObjectNotSet(msg)
Ejemplo n.º 25
0
class CurrentCycleMover(GridCurrentMover, Serializable):
    _state = copy.deepcopy(GridCurrentMover._state)
    _state.add_field([Field('tide', save=True, update=True,
                            save_reference=True)])

    _schema = CurrentCycleMoverSchema

    def __init__(self,
                 filename,
                 topology_file=None,
                 **kwargs):
        """
        Initialize a CurrentCycleMover

        :param filename: Absolute or relative path to the data file:
                         could be netcdf or filelist
        :param topology_file=None: Absolute or relative path to topology file.
                                   If not given, the GridCurrentMover will
                                   compute the topology from the data file.
        :param tide: A gnome.environment.Tide object to be attached to
                     CurrentCycleMover
        :param active_start: datetime when the mover should be active
        :param active_stop: datetime after which the mover should be inactive
        :param current_scale: Value to scale current data
        :param uncertain_duration: How often does a given uncertain element
                                   get reset
        :param uncertain_time_delay: when does the uncertainty kick in.
        :param uncertain_cross: Scale for uncertainty perpendicular to the flow
        :param uncertain_along: Scale for uncertainty parallel to the flow
        :param extrapolate: Allow current data to be extrapolated
                            before and after file data
        :param time_offset: Time zone shift if data is in GMT

        uses super: super(CurrentCycleMover,self).__init__(**kwargs)
        """

        # NOTE: will need to add uncertainty parameters
        #       and other dialog fields.
        #       use super with kwargs to invoke base class __init__
        self.mover = CyCurrentCycleMover()

        self._tide = None

        tide = kwargs.pop('tide', None)
        if tide is not None:
            self.tide = tide

        super(CurrentCycleMover, self).__init__(filename=filename,
                                                topology_file=topology_file,
                                                **kwargs)

    def __repr__(self):
        return ('CurrentCycleMover(uncertain_duration={0.uncertain_duration}, '
                'uncertain_time_delay={0.uncertain_time_delay}, '
                'uncertain_cross={0.uncertain_cross}, '
                'uncertain_along={0.uncertain_along}, '
                'active_start={1.active_start}, '
                'active_stop={1.active_stop}, '
                'on={1.on})'
                .format(self.mover, self))

    def __str__(self):
        return ('CurrentCycleMover - current _state.\n'
                '  uncertain_duration={0.uncertain_duration}\n'
                '  uncertain_time_delay={0.uncertain_time_delay}\n'
                '  uncertain_cross={0.uncertain_cross}\n'
                '  uncertain_along={0.uncertain_along}\n'
                '  active_start time={1.active_start}\n'
                '  active_stop time={1.active_stop}\n'
                '  current on/off status={1.on}'
                .format(self.mover, self))

    @property
    def tide(self):
        return self._tide

    @tide.setter
    def tide(self, tide_obj):
        if not isinstance(tide_obj, Tide):
            raise TypeError('tide must be of type environment.Tide')

        if isinstance(tide_obj.cy_obj, CyShioTime):
            self.mover.set_shio(tide_obj.cy_obj)
        elif isinstance(tide_obj.cy_obj, CyOSSMTime):
            self.mover.set_ossm(tide_obj.cy_obj)
        else:
            raise TypeError('Tide.cy_obj attribute must be either '
                            'CyOSSMTime or CyShioTime type '
                            'for CurrentCycleMover.')

        self._tide = tide_obj

    @property
    def is_data_on_cells(self):
        return None

    def serialize(self, json_='webapi'):
        """
        Since the 'tide' property is saved as a reference in the save file
        (the 'save' option), we need to add the appropriate node to the
        CurrentCycleMover schema
        """
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()

        if json_ == 'webapi' and 'tide' in toserial:
            schema.add(TideSchema(name='tide'))

        return schema.serialize(toserial)

    @classmethod
    def deserialize(cls, json_):
        """
        append correct schema for tide object
        """
        schema = cls._schema()

        if 'tide' in json_:
            schema.add(TideSchema())

        return schema.deserialize(json_)
Ejemplo n.º 26
0
class Outputter(Serializable):
    '''
    base class for all outputters
    Since this outputter doesn't do anything, it'll never be used as part
    of a gnome model. As such, it should never need to be serialized
    '''
    _state = copy.deepcopy(Serializable._state)
    _state += (Field('on', save=True, update=True),
               Field('output_zero_step', save=True, update=True),
               Field('output_last_step', save=True, update=True),
               Field('output_timestep', save=True, update=True))
    _schema = BaseSchema

    def __init__(self,
                 cache=None,
                 on=True,
                 output_timestep=None,
                 output_zero_step=True,
                 output_last_step=True,
                 name=None):
        """
        sets attributes for all outputters, like output_timestep, cache

        :param cache: sets the cache object from which to read data. The model
            will automatically set this param

        :param output_timestep: default is None in which case every time the
            write_output is called, output is written. If set, then output is
            written every output_timestep starting from model_start_time.
        :type output_timestep: timedelta object

        :param output_zero_step: default is True. If True then output for
            initial step (showing initial release conditions) is written
            regardless of output_timestep
        :type output_zero_step: boolean

        :param output_last_step: default is True. If True then output for
            final step is written regardless of output_timestep
        :type output_last_step: boolean
        """
        self.cache = cache
        self.on = on
        self.output_zero_step = output_zero_step
        self.output_last_step = output_last_step
        if output_timestep:
            self._output_timestep = int(output_timestep.total_seconds())
        else:
            self._output_timestep = None
        self.sc_pair = None     # set in prepare_for_model_run

        if name:
            self.name = name

        # reset internally used variables
        self.rewind()

    @property
    def output_timestep(self):
        if self._output_timestep is not None:
            return timedelta(seconds=self._output_timestep)
        else:
            return None

    @output_timestep.setter
    def output_timestep(self, value):
        '''
        make it a property so that internally we keep it in seconds (easier
        to work with), but let the user set it as a timedelta object, since
        that's probably easier for the user
        '''
        if value is None:
            self._output_timestep = None
        else:
            self._output_timestep = int(value.total_seconds())
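
    # For example: outputter.output_timestep = timedelta(hours=2)
    # stores 7200 seconds internally; reading the property back
    # returns timedelta(seconds=7200).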

    def prepare_for_model_run(self,
                              model_start_time=None,
                              spills=None,
                              **kwargs):
        """
        This method gets called by the model at the beginning of a new run.
        Do what you need to do to prepare.

        :param model_start_time: (Required) start time of the model run. NetCDF
            time units calculated with respect to this time.
        :type model_start_time: datetime.datetime object
        :param spills: (Required) model.spills object (SpillContainerPair)
        :type spills: gnome.spill_container.SpillContainerPair object

        Optional argument - in case cache needs to be updated

        :param cache=None: Sets the cache object to be used for the data.
            If None, it will use the one already set up.
        :type cache: As defined in cache module (gnome.utilities.cache).
            Currently only ElementCache is defined/used.

        also added **kwargs since a derived class like NetCDFOutput could
        require additional variables.

        .. note:: base class doesn't use model_start_time or spills, but
            multiple outputters need spills and netcdf needs
            model_start_time, so just set them here
        """
        self._model_start_time = model_start_time
        self.sc_pair = spills
        cache = kwargs.pop('cache', None)
        if cache is not None:
            self.cache = cache

        if self._output_timestep is None:
            self._write_step = True

        self._dt_since_lastoutput = 0

    def prepare_for_model_step(self, time_step, model_time):
        """
        This method gets called by the model at the beginning of each time step
        Do what you need to do to prepare for a new model step

        base class method checks to see if data for model_time should be
        output. It sets the self._write_step flag to True once the time
        elapsed since the last output reaches self._output_timestep

        It also updates the _dt_since_lastoutput internal variable if the data
        from this step will be written to output

        :param time_step: time step in seconds
        :param model_time: current model time as datetime object

        .. note:: The write_output() method will be called after the Model
            calls model_step_is_done(). Let's set the _write_step flag here
            and update the _dt_since_lastoutput variable

        """
        if self._output_timestep is not None:
            self._write_step = False
            self._dt_since_lastoutput += time_step
            if self._dt_since_lastoutput >= self._output_timestep:
                self._write_step = True
                self._dt_since_lastoutput = (self._dt_since_lastoutput %
                                             self._output_timestep)
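
    # Worked example of the accumulation above (hypothetical values):
    # with _output_timestep = 3600 s and a model time_step of 900 s,
    # _write_step becomes True on every 4th call and any remainder
    # carries over via the modulo arithmetic.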

    def model_step_is_done(self):
        '''
        This method gets called by the model after everything else is done
        in a time step. Put any code needed for clean-up, etc, here.
        The write_output method is called by Model after all processing.
        '''
        pass

    def write_output(self, step_num, islast_step=False):
        """
        called by the model at the end of each time step
        This is the last operation after model_step_is_done()

        :param step_num: the model step number you want rendered.
        :type step_num: int

        :param islast_step: default is False. Flag that indicates that step_num
            is last step. If 'output_last_step' is True then this is written
            out
        :type islast_step: bool

        """
        if (step_num == 0 and self.output_zero_step):
            self._write_step = True

        if (islast_step and self.output_last_step):
            self._write_step = True

        if (self._write_step and self.cache is None):
            raise ValueError('cache object is not defined. It is required'
                             ' prior to calling write_output')

    def rewind(self):
        '''
        Called by model.rewind()

        Reset variables set during prepare_for_model_run() to init conditions
        Make sure all child classes call parent rewind() first!
        '''
        self._model_start_time = None
        self._dt_since_lastoutput = None
        self._write_step = True

    def write_output_post_run(self, model_start_time, num_time_steps,
                              **kwargs):
        """
        If the model has already been run and the data is cached, then use
        this function to write output. In this case, num_time_steps is known
        so pass it into this function.

        :param model_start_time: (Required) start time of the model run. NetCDF
            time units calculated with respect to this time.
        :type model_start_time: datetime.datetime object

        :param num_time_steps: (Required) total number of time steps for the
            run. Currently this is known and fixed.
        :type num_time_steps: int

        Optional argument - depending on the outputter, the following may be
        required. For instance, the 'spills' are required by NetCDFOutput,
        GeoJson, but not Renderer in prepare_for_model_run(). The **kwargs
        here are those required by prepare_for_model_run() for an outputter

        :param cache=None: Sets the cache object to be used for the data.
            If None, it will use the one already set up.
        :type cache: As defined in cache module (gnome.utilities.cache).
            Currently only ElementCache is defined/used.

        :param uncertain: is there uncertain data to write. Used by
            NetCDFOutput to setup attributes for uncertain data file
        :type uncertain: bool

        :param spills: SpillContainerPair object containing spill information
            Used by both the NetCDFOutput and by GeoJson to obtain spill_id
            from spill_num
        :type spills: This is the Model's spills attribute which refers to the
            SpillContainerPair object

        Follows the iteration in Model().step() for each step_num
        """
        self.prepare_for_model_run(model_start_time, **kwargs)
        model_time = model_start_time
        last_step = False

        for step_num in range(num_time_steps):
            if 0 < step_num < num_time_steps - 1:
                next_ts = (self.cache.load_timestep(step_num).items()[0].
                           current_time_stamp)
                ts = next_ts - model_time
                self.prepare_for_model_step(ts.seconds, model_time)

            if step_num == num_time_steps - 1:
                last_step = True

            self.write_output(step_num, last_step)
            model_time = (self.cache.load_timestep(step_num).items()[0].
                          current_time_stamp)
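
# A minimal sketch of a concrete outputter following the pattern above
# (illustrative only; the class name and output dict are not from gnome):
class EchoOutputter(Outputter):
    'example subclass: reports the timestamp of each written step'

    def write_output(self, step_num, islast_step=False):
        super(EchoOutputter, self).write_output(step_num, islast_step)

        if not self._write_step:
            return None

        sc = self.cache.load_timestep(step_num).items()[0]
        return {'time_stamp': sc.current_time_stamp.isoformat()}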
Ejemplo n.º 27
0
class WeatheringOutput(Outputter, Serializable):
    '''
    class that outputs GNOME weathering results.
    The output is the aggregation of properties for all LEs (aka Mass Balance)
    for a particular time step.
    There are a number of different things we would like to graph:
    - Evaporation
    - Dissolution
    - Dissipation
    - Biodegradation
    - ???

    However at this time we will simply try to implement an outputter for the
    halflife Weatherer.
    Following is the output format.

        {
        "type": "WeatheringGraphs",
        "half_life": {"properties": {"mass_components": <Component values>,
                                     "mass": <total Mass value>,
                                     }
                      },
            ...
        }

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state += [Field('output_dir', update=True, save=True)]
    _schema = WeatheringOutputSchema

    def __init__(
            self,
            output_dir=None,  # default is to not output to file
            **kwargs):
        '''
        :param str output_dir=None: output directory for the JSON files;
            the default of None means no file output

        use super to pass optional \*\*kwargs to base class __init__ method
        '''
        self.output_dir = output_dir
        self.units = {
            'default': 'kg',
            'avg_density': 'kg/m^3',
            'avg_viscosity': 'm^2/s'
        }
        super(WeatheringOutput, self).__init__(**kwargs)

    def write_output(self, step_num, islast_step=False):
        '''
        Weathering data is only output for forecast spill container, not
        the uncertain spill container. This is because Weathering has its
        own uncertainty and mixing the two was giving weird results. The
        cloned models that are modeling weathering uncertainty do not include
        the uncertain spill container.
        '''
        super(WeatheringOutput, self).write_output(step_num, islast_step)

        if not self._write_step:
            return None

        # return a dict - json of the mass_balance data
        # weathering outputter should only apply to forecast spill_container
        sc = self.cache.load_timestep(step_num).items()[0]
        dict_ = {}
        dict_.update(sc.mass_balance)

        output_info = {'time_stamp': sc.current_time_stamp.isoformat()}
        output_info.update(dict_)
        self.logger.debug(self._pid + 'step_num: {0}'.format(step_num))
        for name, val in dict_.iteritems():
            msg = ('\t{0}: {1}'.format(name, val))
            self.logger.debug(msg)

        if self.output_dir:
            output_filename = self.output_to_file(output_info, step_num)
            output_info.update({'output_filename': output_filename})

        return output_info

    def output_to_file(self, json_content, step_num):
        file_format = 'mass_balance_{0:06d}.json'
        filename = os.path.join(self.output_dir, file_format.format(step_num))

        with open(filename, 'w') as outfile:
            dump(json_content, outfile, indent=True)

        return filename
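
    # e.g. step_num=12 with output_dir='./output' writes
    # './output/mass_balance_000012.json'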

    def clean_output_files(self):
        if self.output_dir:
            files = glob(os.path.join(self.output_dir, 'mass_balance_*.json'))
            for f in files:
                os.remove(f)

    def rewind(self):
        'remove previously written files'
        super(WeatheringOutput, self).rewind()
        self.clean_output_files()
Ejemplo n.º 28
0
class KMZOutput(Outputter, Serializable):
    '''
    class that outputs GNOME results in a kmz format.

    Suitable for Google Earth, and semi-suitable for MarPlot

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state += [
        Field('filename', update=True, save=True),
    ]
    _schema = KMZSchema

    time_formatter = '%m/%d/%Y %H:%M'

    def __init__(self, filename, **kwargs):
        '''
        :param str filename: full path and basename of the kmz file to write

        uses super to pass optional \*\*kwargs to base class __init__ method
        '''
        ## a little check:
        self._check_filename(filename)
        # strip off the .kml or .kmz extension, if present
        if filename.endswith(('.kml', '.kmz')):
            filename = filename[:-4]

        self.filename = filename + ".kmz"
        self.kml_name = os.path.split(filename)[-1] + ".kml"

        super(KMZOutput, self).__init__(**kwargs)

    def prepare_for_model_run(self, model_start_time, spills, **kwargs):
        """
        .. function:: prepare_for_model_run(model_start_time,
                                            cache=None,
                                            uncertain=False,
                                            spills=None,
                                            **kwargs)

        Write the headers, png files, etc for the KMZ file

        This must be done in prepare_for_model_run because if model _state
        changes, it is rewound and re-run from the beginning.

        If there are existing output files, they are deleted here.

        This takes more than standard 'cache' argument. Some of these are
        required arguments - they contain None for defaults because non-default
        argument cannot follow default argument. Since cache is already 2nd
        positional argument for Renderer object, the required non-default
        arguments must be defined following 'cache'.

        If uncertainty is on, then SpillContainerPair object contains
        identical _data_arrays in both certain and uncertain SpillContainer's,
        the data itself is different, but they contain the same type of data
        arrays. If uncertain, then data arrays for uncertain spill container
        are written to the KMZ file.

        .. note::
            Does not take any other input arguments; however, to keep the
            interface the same for all outputters, define kwargs in case
            future outputters require different arguments.
        """
        super(KMZOutput, self).prepare_for_model_run(model_start_time, spills,
                                                     **kwargs)

        if not self.on:
            return

        self.delete_output_files()
        # shouldn't be required if the above worked!
        self._file_exists_error(self.filename)

        # create a list to hold what will be the contents of the kml
        self.kml = [
            kmz_templates.header_template.format(
                caveat=kmz_templates.caveat,
                kml_name=self.kml_name,
                valid_timestring=model_start_time.strftime(
                    self.time_formatter),
                issued_timestring=datetime.now().strftime(self.time_formatter),
            )
        ]

        # # netcdf outputter has this --  not sure why
        # self._middle_of_run = True

    def write_output(self, step_num, islast_step=False):
        """dump a timestep's data into the kmz file"""

        super(KMZOutput, self).write_output(step_num, islast_step)

        if not self.on or not self._write_step:
            return None

        # add to the kml list:
        for sc in self.cache.load_timestep(
                step_num).items():  # loop through uncertain and certain LEs
            ## extract the data
            start_time = sc.current_time_stamp
            if self.output_timestep is None:
                end_time = start_time + timedelta(seconds=self.model_timestep)
            else:
                end_time = start_time + self.output_timestep
            start_time = start_time.isoformat()
            end_time = end_time.isoformat()

            positions = sc['positions']
            water_positions = positions[sc['status_codes'] ==
                                        oil_status.in_water]
            beached_positions = positions[sc['status_codes'] ==
                                          oil_status.on_land]

            data_dict = {
                'certain': "Uncertainty" if sc.uncertain else "Best Guess",
            }
            self.kml.append(
                kmz_templates.build_one_timestep(water_positions,
                                                 beached_positions, start_time,
                                                 end_time, sc.uncertain))

        if islast_step:  # now we really write the file:
            self.kml.append(kmz_templates.footer)
            with zipfile.ZipFile(self.filename,
                                 'w',
                                 compression=zipfile.ZIP_DEFLATED) as kmzfile:
                kmzfile.writestr('dot.png', base64.b64decode(DOT))
                kmzfile.writestr('x.png', base64.b64decode(X))
                # write the kml file
                kmzfile.writestr(self.kml_name,
                                 "".join(self.kml).encode('utf8'))

        # output_filename = self.output_to_file(geojson, step_num)
        output_info = {
            'time_stamp': sc.current_time_stamp.isoformat(),
            'output_filename': self.filename
        }

        return output_info

    def rewind(self):
        '''
        reset a few parameter and call base class rewind to reset
        internal variables.
        '''
        super(KMZOutput, self).rewind()

        self._middle_of_run = False
        self._start_idx = 0

    def delete_output_files(self):
        '''
        deletes output files that may be around

        called by prepare_for_model_run

        here in case it needs to be called from elsewhere
        '''
        try:
            os.remove(self.filename)
        except OSError:
            pass  # it must not be there
Ejemplo n.º 29
0
class ShapeOutput(Outputter, Serializable):
    '''
    class that outputs GNOME results (particles) in a shapefile format.

    '''
    _state = copy.deepcopy(Outputter._state)

    # need a schema and also need to override save so output_dir
    # is saved correctly - maybe point it to saveloc
    _state += [
        Field('filename', update=True, save=True),
    ]
    _schema = ShapeSchema

    time_formatter = '%m/%d/%Y %H:%M'

    def __init__(self, filename, **kwargs):
        '''
        :param str filename: full path and basename of the shape files

        uses super to pass optional \*\*kwargs to base class __init__ method
        '''
        # # a little check:
        self._check_filename(filename)

        filename = filename.split(".zip")[0].split(".shp")[0]

        self.filename = filename
        self.filedir = os.path.dirname(filename)

        super(ShapeOutput, self).__init__(**kwargs)

    def prepare_for_model_run(self, model_start_time, spills, **kwargs):
        """
        .. function:: prepare_for_model_run(model_start_time,
                                            cache=None,
                                            uncertain=False,
                                            spills=None,
                                            **kwargs)

        Write the headers, etc. for the shape files

        This must be done in prepare_for_model_run because if model _state
        changes, it is rewound and re-run from the beginning.

        If there are existing output files, they are deleted here.

        This takes more than standard 'cache' argument. Some of these are
        required arguments - they contain None for defaults because non-default
        argument cannot follow default argument. Since cache is already 2nd
        positional argument for Renderer object, the required non-default
        arguments must be defined following 'cache'.

        If uncertainty is on, then SpillContainerPair object contains
        identical _data_arrays in both certain and uncertain SpillContainer's,
        the data itself is different, but they contain the same type of data
        arrays. If uncertain, then data arrays for the uncertain spill
        container are written to the shape file.

        .. note::
            Does not take any other input arguments; however, to keep the
            interface the same for all outputters, define kwargs in case
            future outputters require different arguments.
        """
        super(ShapeOutput, self).prepare_for_model_run(model_start_time,
                                                       spills, **kwargs)

        if not self.on:
            return

        self.delete_output_files()
        # shouldn't be required if the above worked!
        self._file_exists_error(self.filename + '.zip')

        # info for prj file
        epsg = 'GEOGCS["WGS 84",'
        epsg += 'DATUM["WGS_1984",'
        epsg += 'SPHEROID["WGS 84",6378137,298.257223563]]'
        epsg += ',PRIMEM["Greenwich",0],'
        epsg += 'UNIT["degree",0.0174532925199433]]'
        self.epsg = epsg

        for sc in self.sc_pair.items():
            w = shp.Writer(shp.POINT)
            w.autobalance = 1

            w.field('Year', 'C')
            w.field('Month', 'C')
            w.field('Day', 'C')
            w.field('Hour', 'C')
            w.field('LE id', 'N')
            w.field('Depth', 'N')
            w.field('Mass', 'N')
            w.field('Age', 'N')
            w.field('Status_Code', 'N')

            if sc.uncertain:
                self.w_u = w
            else:
                self.w = w

    def write_output(self, step_num, islast_step=False):
        """dump a timestep's data into the kmz file"""

        super(ShapeOutput, self).write_output(step_num, islast_step)

        if not self.on or not self._write_step:
            return None

        uncertain = False

        for sc in self.cache.load_timestep(step_num).items():

            curr_time = sc.current_time_stamp

            if sc.uncertain:
                uncertain = True
                for k, p in enumerate(sc['positions']):
                    self.w_u.point(p[0], p[1])
                    self.w_u.record(curr_time.year, curr_time.month,
                                    curr_time.day, curr_time.hour, sc['id'][k],
                                    p[2], sc['mass'][k], sc['age'][k],
                                    sc['status_codes'][k])
            else:
                for k, p in enumerate(sc['positions']):
                    self.w.point(p[0], p[1])
                    self.w.record(curr_time.year, curr_time.month,
                                  curr_time.day, curr_time.hour, sc['id'][k],
                                  p[2], sc['mass'][k], sc['age'][k],
                                  sc['status_codes'][k])

        if islast_step:  # now we really write the files:

            if uncertain:
                shapefilenames = [self.filename, self.filename + '_uncert']
            else:
                shapefilenames = [self.filename]

            for fn in shapefilenames:
                # save with the writer that matches this file, so the
                # certain data is still written when uncertainty is on
                writer = self.w_u if fn.endswith('_uncert') else self.w
                writer.save(fn)
                zfilename = fn + '.zip'

                prj_file = open("%s.prj" % fn, "w")
                prj_file.write(self.epsg)
                prj_file.close()

                zipf = zipfile.ZipFile(zfilename, 'w')
                for suf in ['shp', 'prj', 'dbf', 'shx']:
                    f = os.path.split(fn)[-1] + '.' + suf
                    zipf.write(os.path.join(self.filedir, f), arcname=f)
                    os.remove(fn + '.' + suf)
                zipf.close()

        output_info = {
            'time_stamp': sc.current_time_stamp.isoformat(),
            'output_filename': self.filename + '.zip'
        }

        return output_info

    def rewind(self):
        '''
        reset a few parameter and call base class rewind to reset
        internal variables.
        '''
        super(ShapeOutput, self).rewind()

        self._middle_of_run = False
        self._start_idx = 0

    def delete_output_files(self):
        '''
        deletes output files that may be around

        called by prepare_for_model_run

        here in case it needs to be called from elsewhere
        '''
        try:
            os.remove(self.filename + '.zip')
            os.remove(self.filename + '_uncert.zip')
        except OSError:
            pass  # it must not be there
Ejemplo n.º 30
0
def test_state_init_field():
    ''' test init if a single Field object is given as opposed to a list '''

    _state = State(field=Field('field0', save=True))
    assert len(_state.fields) == 1