def test_datetime_array():
    """
    test time_utils conversion works for python datetime object

    Note: the original built the array with ``np.zeros((3, ), dtype=datetime)``,
    which produces an object array filled with integer zeros -- not datetime
    objects -- so the conversion was never exercised on real datetimes.
    Build the array from an actual datetime value instead (matching the
    other variants of this test).
    """
    # a concrete, reproducible datetime value to fill the array with
    x = np.array([datetime.utcfromtimestamp(time_utils.zero_time())] * 3,
                 dtype=datetime)
    xn = _convert(x)
    assert np.all(time_utils.round_time(x, roundTo=1) == xn)
Example #2
0
def test_datetime_array():
    """
    test time_utils conversion works for python datetime object
    """
    base = datetime.utcfromtimestamp(time_utils.zero_time())
    x = np.array([base, base, base], dtype=datetime)
    xn = _convert(x)

    rounded = time_utils.round_time(x, roundTo=1)
    assert np.all(rounded == xn)
Example #3
0
def test_datetime_array():
    """
    test time_utils conversion works for python datetime object
    """
    zero_dt = datetime.utcfromtimestamp(time_utils.zero_time())
    x = np.array([zero_dt for _ in range(3)], dtype=datetime)
    xn = _convert(x)

    assert (time_utils.round_time(x, roundTo=1) == xn).all()
Example #4
0
    def test_sec_to_date(self):
        """
        Uses time_utils.secondsToDate_noDST to
        convert the time in seconds back to a datetime object and make
        sure it round-trips to the rounded original time.
        """
        # round to the nearest second so sub-second noise in self.now
        # does not break the equality comparison
        tgt = time_utils.round_time(dt=self.now, roundTo=1)
        act = time_utils.sec_to_date(self.pySec)

        # parenthesized print works under both Python 2 and Python 3;
        # the original bare `print` statements were Python-2-only syntax
        print('')
        print('expected:\t' + str(tgt))
        print('actual:  \t' + str(act))
        assert tgt == act
Example #5
0
    def test_sec_to_date(self):
        """
        Uses time_utils.secondsToDate_noDST to
        convert the time in seconds back to a datetime object and make
        sure it round-trips to the rounded original time.
        """
        # rounding removes sub-second noise from self.now before comparing
        tgt = time_utils.round_time(dt=self.now, roundTo=1)
        act = time_utils.sec_to_date(self.pySec)

        # use print as a function so the test parses on both Python 2 and 3
        # (the original used Python-2-only print statements)
        print('')
        print('expected:\t' + str(tgt))
        print('actual:  \t' + str(act))
        assert tgt == act
Example #6
0
def test_scalar_input():
    """
    test time_utils conversion return a scalar if that is what the user input

    always returns a numpy object
    """
    now = datetime.now()

    converted = _convert(now)
    assert isinstance(converted, datetime)

    rounded = time_utils.round_time(now, roundTo=1)
    assert isinstance(rounded, datetime)
    assert rounded == converted
Example #7
0
def test_scalar_input():
    """
    test time_utils conversion return a scalar if that is what the user input

    always returns a numpy object
    """
    x = datetime.now()
    xn = _convert(x)

    # both the converted value and the rounded original must still be
    # plain datetime scalars, and they must agree
    x = time_utils.round_time(x, roundTo=1)
    for value in (xn, x):
        assert isinstance(value, datetime)
    assert x == xn
Example #8
0
    def test_get_model_gets_a_valid_model(self):
        """
        GET on the base url returns the expected default model fields.
        """
        self.create_model()
        response = self.testapp.get(self.base_url, status=200)
        body = response.json_body

        # server rounds the model start time to the nearest hour
        expected_start = round_time(datetime.datetime.now(), 3600).isoformat()

        for field, expected in (('uncertain', False),
                                ('start_time', expected_start),
                                ('time_step', 0.25),
                                ('duration_days', 1),
                                ('duration_hours', 0),
                                ('surface_release_spills', []),
                                ('wind_movers', [])):
            self.assertEqual(body[field], expected)
Example #9
0
    def __init__(
        self,
        time_step=timedelta(minutes=15),
        start_time=None,
        duration=timedelta(days=1),
        weathering_substeps=1,
        map=None,
        uncertain=False,
        cache_enabled=False,
        id=None,
    ):
        """
        Initializes a model. All arguments have a default.

        :param time_step=timedelta(minutes=15): model time step in seconds
                                                or as a timedelta object
        :param start_time=None: start time of model, datetime object.
                                Defaults to now, rounded to the nearest hour.
        :param duration=timedelta(days=1): How long to run the model,
                                           a timedelta object.
        :param int weathering_substeps=1: How many weathering substeps to
                                          run inside a single model time step.
        :param map=None: The land-water map. Defaults to a fresh
                         gnome.map.GnomeMap().
        :param uncertain=False: Flag for setting uncertainty.
        :param cache_enabled=False: Flag for setting whether the model should
                                    cache results to disk.
        :param id: Unique Id identifying the newly created mover (a UUID as a
                   string).  This is used when loading an object from a
                   persisted model
        """
        # Compute defaults per call.  The original signature evaluated
        # round_time(datetime.now(), 3600) and gnome.map.GnomeMap() in the
        # `def` line, which runs once at import time: every Model then
        # shared a single GnomeMap instance (mutable default argument) and
        # got a start time frozen at import time rather than creation time.
        if start_time is None:
            start_time = round_time(datetime.now(), 3600)
        if map is None:
            map = gnome.map.GnomeMap()

        self.__restore__(time_step, start_time, duration, weathering_substeps, map, uncertain, cache_enabled)

        self._gnome_id = gnome.GnomeId(id)

        # register callback with OrderedCollection
        self.movers.register_callback(self._callback_add_mover, ("add", "replace"))

        self.weatherers.register_callback(self._callback_add_weatherer, ("add", "replace"))
Example #10
0
    def __init__(self,
                 time_step=timedelta(minutes=15),
                 start_time=None,
                 duration=timedelta(days=1),
                 weathering_substeps=1,
                 map=None,
                 uncertain=False,
                 cache_enabled=False,
                 name=None):
        '''

        Initializes a model.
        All arguments have a default.

        :param time_step=timedelta(minutes=15): model time step in seconds
            or as a timedelta object
        :param start_time=None: start time of model, datetime
            object. Defaults to now, rounded to the nearest hour.
        :param duration=timedelta(days=1): How long to run the model,
            a timedelta object.
        :param int weathering_substeps=1: How many weathering substeps to
            run inside a single model time step.
        :param map=None: The land-water map.
        :param uncertain=False: Flag for setting uncertainty.
        :param cache_enabled=False: Flag for setting whether the model should
            cache results to disk.
        '''
        # The start_time default must be computed per call: evaluating
        # round_time(datetime.now(), 3600) in the signature runs it once at
        # import time, so every model created later would get a stale
        # start time frozen at import.
        if start_time is None:
            start_time = round_time(datetime.now(), 3600)

        self.__restore__(time_step, start_time, duration,
                         weathering_substeps,
                         uncertain, cache_enabled, map, name)

        # register callback with OrderedCollection
        self.movers.register_callback(self._callback_add_mover,
                                      ('add', 'replace'))

        self.weatherers.register_callback(self._callback_add_weatherer,
                                          ('add', 'replace'))
Example #11
0
    def __init__(
        self,
        time_step=timedelta(minutes=15),
        start_time=None,
        duration=timedelta(days=1),
        map=None,
        uncertain=False,
        cache_enabled=False,
        id=None,
        ):
        """
        Initializes a model. All arguments have a default.

        :param time_step=timedelta(minutes=15): model time step in seconds or as a timedelta object
        :param start_time=None: start time of model, datetime object. Defaults to now, rounded to the nearest hour
        :param duration=timedelta(days=1): how long to run the model, a timedelta object
        :param map=None: the land-water map; defaults to a fresh gnome.map.GnomeMap() with no land-water
        :param uncertain=False: flag for setting uncertainty
        :param cache_enabled=False: flag for setting whether the model should cache results to disk.
        :param id: Unique Id identifying the newly created mover (a UUID as a string).
                   This is used when loading an object from a persisted model
        """
        # Evaluate defaults per call.  Putting round_time(datetime.now(), 3600)
        # and gnome.map.GnomeMap() in the `def` line evaluates them once at
        # import time, so all models would share one GnomeMap instance
        # (mutable default argument) and a stale "now" start time.
        if start_time is None:
            start_time = round_time(datetime.now(), 3600)
        if map is None:
            map = gnome.map.GnomeMap()

        self.__restore__(
            time_step,
            start_time,
            duration,
            map,
            uncertain,
            cache_enabled,
            id,
            )

        # register callback with OrderedCollection

        self.movers.register_callback(self._callback_add_mover, ('add',
                'replace'))
Example #12
0
    def prepare_for_model_run(self, model_start_time,
                              spills, uncertain=False,
                              **kwargs):
        """
        .. function:: prepare_for_model_run(model_start_time,
                                            cache=None,
                                            uncertain=False,
                                            spills=None,
                                            **kwargs)

        Write global attributes and define dimensions and variables for NetCDF
        file. This must be done in prepare_for_model_run because if model _state
        changes, it is rewound and re-run from the beginning.

        This takes more than standard 'cache' argument. Some of these are
        required arguments - they contain None for defaults because non-default
        argument cannot follow default argument. Since cache is already 2nd
        positional argument for Renderer object, the required non-default
        arguments must be defined following 'cache'.

        If uncertainty is on, then UncertainSpillPair object contains
        identical _data_arrays in both certain and uncertain SpillContainer's,
        the data itself is different, but they contain the same type of data
        arrays.

        :param bool uncertain: Default is False. Model automatically sets this
                               based on whether uncertainty is on or off.
                               If this is True then an uncertain data is
                               written to netcdf_filename + '_uncertain.nc'

        :param spills: If 'which_data' flag is set to 'all' or 'most', then
            model must provide the model.spills object
            (SpillContainerPair object) so NetCDF variables can be
            defined for the remaining data arrays.
            If spills is None, but which_data flag is 'all' or
            'most', a ValueError will be raised.
            It does not make sense to write 'all' or 'most' but not
            provide 'model.spills'.
        :type spills: gnome.spill_container.SpillContainerPair object.

        .. note::
            Does not take any other input arguments; however, to keep the
            interface the same for all outputters, define kwargs in case
            future outputters require different arguments.

        use super to pass model_start_time, cache=None and
        remaining kwargs to base class method
        """
        super(NetCDFOutput, self).prepare_for_model_run(model_start_time,
                                                        **kwargs)
        # remember the flag: it decides below whether a second,
        # '_uncertain' netCDF file is created alongside the main one
        self._uncertain = uncertain

        self._update_spill_names(spills)
        if self._uncertain:
            # derive the uncertain file name from the main one, e.g.
            # 'out.nc' -> 'out_uncertain.nc'
            name, ext = os.path.splitext(self.netcdf_filename)
            self._u_netcdf_filename = '{0}_uncertain{1}'.format(name, ext)
            filenames = (self.netcdf_filename, self._u_netcdf_filename)
        else:
            filenames = (self.netcdf_filename,)

        for file_ in filenames:
            # NOTE(review): presumably raises if the file already exists,
            # so existing output is never clobbered -- confirm in helper
            self._nc_file_exists_error(file_)
            ## create the netcdf files and write the standard stuff:
            with nc.Dataset(file_, 'w', format=self._format) as rootgrp:
                ## fixme: why remove the "T" ??
                self.cf_attributes['creation_date'] = \
                            round_time(datetime.now(), roundTo=1).isoformat()
                rootgrp.setncatts(self.cf_attributes)
                    #rootgrp.comment = self.cf_attributes['comment']
                    #rootgrp.source = self.cf_attributes['source']
                    #rootgrp.references = self.cf_attributes['references']
                    #rootgrp.feature_type = self.cf_attributes['feature_type']
                    #rootgrp.institution = self.cf_attributes['institution']
                    #rootgrp.convention = self.cf_attributes['conventions']

                # create the dimensions we need
                # not sure if it's a convention or if dimensions
                # need to be names...
                rootgrp.createDimension('time')  # unlimited
                rootgrp.createDimension('data')  # unlimited
                rootgrp.createDimension('two', 2)
                rootgrp.createDimension('three', 3)
                rootgrp.createDimension('four', 4)
                rootgrp.createDimension('five', 5)

                # create the time variable
                time_ = rootgrp.createVariable('time',
                                               np.float64,
                                               ('time', ),
                                               zlib=self._compress,
                                               chunksizes=(self._chunksize,))
                time_.setncatts(var_attributes['time'])
                # CF convention: time stored as seconds since model start
                time_.units = 'seconds since {0}'.format(
                        self._model_start_time.isoformat())

                # create the particle count variable
                pc_ = rootgrp.createVariable('particle_count',
                                            np.int32,
                                            ('time', ),
                                            zlib=self._compress,
                                            chunksizes=(self._chunksize,))
                pc_.setncatts(var_attributes['particle_count'])

                ## create the list of variables that we want to put in the file
                if self.which_data in ('all', 'most'):
                    for var_name in spills.items()[0].array_types:
                        if var_name != 'positions':
                            # handled by latitude, longitude, depth
                            self.arrays_to_output.add(var_name)

                    if self.which_data == 'most':
                        # remove the ones we don't want
                        for var_name in self.usually_skipped_arrays:
                            self.arrays_to_output.discard(var_name)

                for var_name in self.arrays_to_output:
                    # the special cases:
                    if var_name in ('latitude', 'longitude', 'depth'):
                        # these don't  map directly to an array_type
                        dt = world_point_type
                        shape = ('data', )
                        chunksizes = (self._chunksize,)
                    else:
                        at = getattr(array_types, var_name)
                        dt = at.dtype

                        # total kludge for the cases we happen to have...
                        # each per-element array width maps to one of the
                        # fixed dimensions ('two'..'five') declared above
                        if at.shape == (5,):
                            shape = ('data', 'five')
                            chunksizes = (self._chunksize, 5)
                        elif at.shape == (4,):
                            shape = ('data', 'four')
                            chunksizes = (self._chunksize, 4)
                        elif at.shape == (3,):
                            shape = ('data', 'three')
                            chunksizes = (self._chunksize, 3)
                        elif at.shape == (2,):
                            shape = ('data', 'two')
                            chunksizes = (self._chunksize, 2)
                        else:
                            shape = ('data',)
                            chunksizes = (self._chunksize,)

                    var = rootgrp.createVariable(var_name,
                                                 dt,
                                                 shape,
                                                 zlib=self._compress,
                                                 chunksizes=chunksizes,
                                                 )

                    # add attributes
                    if var_name in var_attributes:
                        var.setncatts(var_attributes[var_name])
                    elif var_name in self._var_attributes:
                        var.setncatts(self._var_attributes[var_name])

        # need to keep track of starting index for writing data since variable
        # number of particles are released
        self._start_idx = 0
        self._middle_of_run = True
Example #13
0
    def prepare_for_model_run(
        self,
        model_start_time,
        cache=None,
        uncertain=False,
        spills=None,
        **kwargs
        ):
        """
        .. function:: prepare_for_model_run(model_start_time,
                cache=None, uncertain=False, spills=None,
                **kwargs)

        Write global attributes and define dimensions and variables for NetCDF
        file. This must be done in prepare_for_model_run because if model state
        changes, it is rewound and re-run from the beginning.

        This takes more than standard 'cache' argument. Some of these are
        required arguments - they contain None for defaults because non-default
        argument cannot follow default argument. Since cache is already 2nd
        positional argument for Renderer object, the required non-default
        arguments must be defined following 'cache'.

        If uncertainty is on, then UncertainSpillPair object contains
        identical _data_arrays in both certain and uncertain SpillContainer's,
        the data itself is different, but they contain the same type of data
        arrays.

        :param uncertain: Default is False. Model automatically sets this based
            on whether uncertainty is on or off. If this is True then a
            uncertain data is written to netcdf_filename + '_uncertain.nc'
        :type uncertain: bool
        :param spills: If 'all_data' flag is True, then model must provide the
            model.spills object (SpillContainerPair object) so NetCDF variables
            can be defined for the remaining data arrays. If spills is None,
            but all_data flag is True, a ValueError will be raised. It does not
            make sense to write 'all_data' but not provide 'model.spills'.
        :type spills: gnome.spill_container.SpillContainerPair object.

        .. note::
        Does not take any other input arguments; however, to keep the interface
            the same for all outputters, define **kwargs incase future
            outputters require different arguments.

        use super to pass model_start_time, cache=None and
        remaining **kwargs to base class method
        """

        super(NetCDFOutput, self).prepare_for_model_run(model_start_time,
                cache, **kwargs)

        # fail fast: writing 'all_data' is impossible without the model's
        # spills object to enumerate the extra data arrays
        if self.all_data and spills is None:
            raise ValueError("'all_data' flag is True, however spills is None."
                " Please provide valid model.spills so we know which"
                " additional data to write.")

        self._uncertain = uncertain

        if self._uncertain:
            # derive the uncertain file name, e.g. 'out.nc' -> 'out_uncertain.nc'
            (name, ext) = os.path.splitext(self.netcdf_filename)
            self._u_netcdf_filename = '{0}_uncertain{1}'.format(name,
                    ext)
            filenames = (self.netcdf_filename, self._u_netcdf_filename)
        else:
            filenames = (self.netcdf_filename, )

        for file_ in filenames:
            # NOTE(review): presumably raises if the file already exists -- confirm
            self._nc_file_exists_error(file_)
            with nc.Dataset(file_, 'w', format=self._format) as rootgrp:
                rootgrp.comment = self.cf_attributes['comment']
                # 'T' separator replaced with a space in the ISO timestamp
                rootgrp.creation_date = \
                    time_utils.round_time(datetime.now(),
                        roundTo=1).isoformat().replace('T', ' ')
                rootgrp.source = self.cf_attributes['source']
                rootgrp.references = self.cf_attributes['references']
                rootgrp.feature_type = self.cf_attributes['feature_type'
                        ]
                rootgrp.institution = self.cf_attributes['institution']
                rootgrp.convention = self.cf_attributes['conventions']

                # size 0 makes both dimensions unlimited (grow as written)
                rootgrp.createDimension('time', 0)
                rootgrp.createDimension('data', 0)

                time_ = rootgrp.createVariable('time', np.double,
                        ('time', ), zlib=self._compress)
                # CF convention: time stored as seconds since model start
                time_.units = 'seconds since {0}'.format(
                        self._model_start_time.isoformat().replace('T', ' '))
                time_.long_name = 'time'
                time_.standard_name = 'time'
                time_.calendar = 'gregorian'
                time_.comment = 'unspecified time zone'

                pc = rootgrp.createVariable('particle_count', np.int32,
                        ('time', ), zlib=self._compress)
                pc.units = '1'
                pc.long_name = 'number of particles in a given timestep'
                pc.ragged_row_count = 'particle count at nth timestep'

                # one netCDF variable per entry in data_vars
                # (Python 2 dict iteration via iteritems)
                for (key, val) in self.data_vars.iteritems():
                    # don't pop since it maybe required twice
                    var = rootgrp.createVariable(key, val.get('dtype'),
                            ('data', ), zlib=self._compress)

                    # iterate over remaining attributes

                    # NOTE(review): list comprehension used purely for its
                    # side effect (setattr); result discarded
                    [setattr(var, key2, val2) for (key2, val2) in
                     val.iteritems() if key2 != 'dtype']

                if self.all_data:
                    rootgrp.createDimension('world_point', 3)
                    self.arr_types = dict()

                    # collect the spill's array types that are not already
                    # handled by the standard data variables above
                    at = spills.items()[0].array_types
                    [self.arr_types.update({key: atype}) for (key,
                     atype) in at.iteritems() if key
                     not in self.arr_types and key
                     not in self.standard_data]

                    # create variables

                    for (key, val) in self.arr_types.iteritems():
                        if len(val.shape) == 0:
                            # scalar per particle
                            rootgrp.createVariable(key, val.dtype,
                                    'data', zlib=self._compress)
                        elif val.shape[0] == 3:
                            # 3-vector per particle (world point)
                            rootgrp.createVariable(key, val.dtype,
                                    ('data', 'world_point'),
                                    zlib=self._compress)
                        else:
                            raise ValueError('{0} has an undefined dimension:'
                                             ' {1}'.format(key, val.shape))

        # need to keep track of starting index for writing data since variable
        # number of particles are released
        self._start_idx = 0
        self._middle_of_run = True