Esempio n. 1
0
    def test_add_replace_callback(self):
        'register one callback with multiple events (add, replace)'

        # The same callback (_add_callback) is wired to BOTH events, so a
        # replace is expected to flip the add_callback flag, not
        # replace_callback.

        # lets work with a mutable type

        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._add_callback, events=('add', 'replace'))

        # check everything if False initially
        self._reset_ObjToAdd_init_state()

        oc += self.to_add

        for obj in oc:
            # adding fired the registered callback; the other flags stay off
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback

        rep = ObjToAdd()
        oc[s_id(self.to_add[0])] = rep

        for obj in oc:
            # the replacement routed through the same callback, so
            # add_callback is set and replace_callback remains False
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
    def test_add_replace_callback(self):
        "register one callback with multiple events (add, replace)"

        # Variant of the same test (double-quoted strings); behavior is
        # identical: one callback serves both 'add' and 'replace'.

        # lets work with a mutable type

        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._add_callback, events=("add", "replace"))

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        oc += self.to_add

        for obj in oc:
            # adding fired the registered callback only
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback

        rep = ObjToAdd()
        oc[s_id(self.to_add[0])] = rep

        for obj in oc:
            # replace also routed through _add_callback
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
 def test_index(self):
     # index() tracks replacements and deletions positionally.
     # NOTE(review): this variant keys lookups with the builtin id();
     # other copies of this test in the file use s_id() -- confirm which
     # helper the targeted OrderedCollection version expects.
     oc = OrderedCollection([1, 2, 3, 4, 5])
     assert oc.index(id(3)) == 2
     oc[id(3)] = 6
     assert oc.index(id(6)) == 2
     del oc[id(6)]
     # after the delete the next element takes over index 2
     assert oc.index(id(4)) == 2
    def test_remove_callback(self):
        "test remove callback is invoked after removing an object"

        oc = OrderedCollection(dtype=ObjToAdd)  # lets work with a mutable type
        oc.register_callback(self._rm_callback, events="remove")
        oc.register_callback(self._add_callback, events="add")

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        oc += self.to_add

        del oc[s_id(self.to_add[0])]

        # the deleted object saw both the add and the remove callbacks,
        # but never a replace
        assert self.to_add[0].rm_callback
        assert self.to_add[0].add_callback
        assert not self.to_add[0].replace_callback

        self.to_add[0].reset()  # reset all to false
        oc += self.to_add[0]  # let's add this back in

        for obj in oc:
            # re-adding fires only the 'add' callback again
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
Esempio n. 5
0
    def __restore__(self, time_step, start_time, duration, weathering_substeps, map, uncertain, cache_enabled):
        """
        Take out initialization that does not register the callback here.
        This is because new_from_dict will use this to restore the model _state
        when doing a midrun persistence.

        :param time_step: model time step; assigning it below triggers rewind()
        :param start_time: model start time
        :param duration: total run duration
        :param weathering_substeps: number of weathering sub-steps per step
        :param map: land-water map object (note: shadows the builtin `map`)
        :param uncertain: flag for uncertainty spills
        :param cache_enabled: whether results are cached to disk
        """

        # making sure basic stuff is in place before properties are set
        self.environment = OrderedCollection(dtype=Environment)
        self.movers = OrderedCollection(dtype=Mover)
        self.weatherers = OrderedCollection(dtype=Weatherer)

        # contains both certain/uncertain spills
        self.spills = SpillContainerPair(uncertain)

        self._cache = gnome.utilities.cache.ElementCache()
        self._cache.enabled = cache_enabled

        # list of output objects
        self.outputters = OrderedCollection(dtype=Outputter)

        # default to now, rounded to the nearest hour
        self._start_time = start_time
        self._duration = duration
        self.weathering_substeps = weathering_substeps
        self._map = map
        self.time_step = time_step  # this calls rewind() !
Esempio n. 6
0
    def test_with_movers(self):
        """End-to-end exercise of OrderedCollection holding Mover objects:
        init/iter/getitem/len, both add forms, delete, and replace."""
        mover_1 = SimpleMover(velocity=(1.0, -1.0, 0.0))
        mover_2 = SimpleMover(velocity=(1.0, -1.0, 0.0))
        mover_3 = SimpleMover(velocity=(1.0, -1.0, 0.0))
        mover_4 = SimpleMover(velocity=(1.0, -1.0, 0.0))

        # test our init, iter, get, and len methods
        mymovers = OrderedCollection([mover_1, mover_2], dtype=Mover)
        assert [m for m in mymovers] == [mover_1, mover_2]
        assert mymovers[mover_1.id] == mover_1
        assert len(mymovers) == 2

        # test our add methods
        mymovers = OrderedCollection(dtype=Mover)
        mymovers += mover_1
        mymovers += mover_2
        assert [m for m in mymovers] == [mover_1, mover_2]

        mymovers = OrderedCollection(dtype=Mover)
        mymovers += [mover_1, mover_2]
        assert [m for m in mymovers] == [mover_1, mover_2]

        # test our del method
        mymovers = OrderedCollection([mover_1, mover_2, mover_3], dtype=Mover)
        del mymovers[mover_2.id]
        assert [m for m in mymovers] == [mover_1, mover_3]

        # test our replace method -- replacement keeps position, old key gone
        mymovers = OrderedCollection([mover_1, mover_2, mover_3], dtype=Mover)
        mymovers[mover_2.id] = mover_4
        assert [m for m in mymovers] == [mover_1, mover_4, mover_3]
        assert mymovers[mover_4.id] == mover_4
Esempio n. 7
0
def test_values():
    'OrderedCollection().values() works like a dict.values()'
    # use an explicit list: on Python 3, bare range() is immutable and
    # `del x[-2]` would raise TypeError (on Python 2 this is a no-op wrap)
    x = list(range(5))
    oc = OrderedCollection(x)
    del x[-2]
    del oc[-2]
    # values() must iterate in the same (post-delete) order as the list
    for ix, v in enumerate(oc.values()):
        assert x[ix] == v
def test_values():
    'OrderedCollection().values() works like a dict.values()'
    # use an explicit list: on Python 3, bare range() is immutable and
    # `del x[-2]` would raise TypeError (on Python 2 this is a no-op wrap)
    x = list(range(5))
    oc = OrderedCollection(x)
    del x[-2]
    del oc[-2]
    # values() must iterate in the same (post-delete) order as the list
    for ix, v in enumerate(oc.values()):
        assert x[ix] == v
Esempio n. 9
0
def test_remake():
    'delete automatically remakes internal lists without None'
    # after deletions, remake() must compact _elems and rebuild _d_index
    collection = OrderedCollection(['p', 'q', 'ab', 'adsf', 'ss'])
    del collection[0]
    del collection[2]
    collection.remake()
    for position, item in enumerate(collection._elems):
        assert item is not None
        assert collection._d_index[s_id(item)] == position
def test_remake():
    'delete automatically remakes internal lists without None'
    # duplicate copy of the remake test: after deletions, remake() must
    # compact _elems and keep _d_index consistent with positions
    oc = OrderedCollection(['p', 'q', 'ab', 'adsf', 'ss'])
    del oc[0]
    del oc[2]
    oc.remake()
    for ix, elem in enumerate(oc._elems):
        assert elem is not None
        assert oc._d_index[s_id(elem)] == ix
Esempio n. 11
0
    def test_ne(self):
        'Test comparison operator (not equal)'

        # same elements, different order -> not equal
        assert (OrderedCollection([1, 2, 3, 4, 5])
                != OrderedCollection([2, 1, 3, 4, 5]))

        # different lengths -> not equal
        assert (OrderedCollection([1, 2, 3, 4, 5])
                != OrderedCollection([1, 2, 3, 4]))

        # a plain list never compares equal to an OrderedCollection
        assert OrderedCollection([1, 2, 3, 4, 5]) != [1, 2, 3, 4, 5]
def test_clear():
    'test clear()'
    oc = OrderedCollection(range(4))
    oc.clear()

    assert len(oc) == 0
    assert oc._elems == []   # there should be no None's
    assert oc._d_index == {}
    # dtype survives a clear(), so type checking still applies afterwards
    assert oc.dtype is int
    with raises(TypeError):
        oc += 1.0
def test_remake():
    "remakes internal lists without None entries"
    oc = OrderedCollection(["p", "q", "ab", "adsf", "ss"])
    del oc[0]
    del oc[3]
    # deletion leaves None placeholders until remake() compacts the list
    assert oc._elems[0] is None
    assert oc._elems[3] is None
    oc.remake()
    for ix, elem in enumerate(oc._elems):
        assert elem is not None
        # NOTE(review): this variant reads oc._index; other copies of this
        # test in the file use oc._d_index -- confirm which attribute the
        # targeted OrderedCollection version actually exposes
        assert oc._index[s_id(elem)] == ix
Esempio n. 14
0
def test_clear():
    'test clear()'
    # duplicate copy of the clear() test
    oc = OrderedCollection(range(4))
    oc.clear()

    assert len(oc) == 0
    assert oc._elems == []  # there should be no None's
    assert oc._d_index == {}
    # dtype survives a clear(), so type checking still applies afterwards
    assert oc.dtype is int
    with raises(TypeError):
        oc += 1.0
Esempio n. 15
0
    def test_int_to_dict(self):
        '''added a to_dict() method - test this method for int dtype.
        Tests the try, except is working correctly'''
        items = range(5)
        oc = OrderedCollection(items)
        dict_ = oc.to_dict()

        assert dict_['dtype'] == int
        for (i, item) in enumerate(items):
            # for ints only the bare class name is serialized (compare the
            # full dotted module path used for Mover objects in test_to_dict)
            assert dict_['items'][i][0] \
                == '{0}'.format(item.__class__.__name__)
            assert dict_['items'][i][1] == i
 def test_add(self):
     # add() appends a type-checked element; wrong dtype raises TypeError
     collection = OrderedCollection([1, 2, 3, 4, 5])
     collection.add(6)
     assert list(collection) == [1, 2, 3, 4, 5, 6]
     with pytest.raises(TypeError):
         collection.add('not an int')
Esempio n. 17
0
    def test_to_dict(self):
        'added a to_dict() method - test this method'

        items = [SimpleMover(velocity=(i * 0.5, -1.0, 0.0)) for i in
                 range(2)]
        items.extend([RandomMover() for i in range(2)])
        mymovers = OrderedCollection(items, dtype=Mover)
        dict_ = mymovers.to_dict()

        assert dict_['dtype'] == mymovers.dtype
        for (i, mv) in enumerate(items):
            # each item serializes as (full dotted type name, ordinal index)
            assert dict_['items'][i][0] \
                == '{0}.{1}'.format(mv.__module__, mv.__class__.__name__)
            assert dict_['items'][i][1] == i
Esempio n. 18
0
    def test_update(self):
        """update() replaces collection contents from a cstruct/payload list:
        plain values for simple dtypes, serialized dicts for objects."""
        int_oc = OrderedCollection([1, 2, 3, 4, 5])
        upd_cstruct = [2, 3, 4]
        int_oc.update(upd_cstruct)
        assert int_oc._elems == [2, 3, 4]

        obj_oc = OrderedCollection(
            [DemoObj.demo(), DemoObj.demo(),
             DemoObj.demo()])
        obj0 = obj_oc[0]
        obj1 = obj_oc[1]
        assert obj_oc[0] is not obj_oc[1]
        upd_list = [o.serialize() for o in obj_oc
                    ]  #this is like the payload from the web client
        upd_list[0]['name'] = '2nd DemoObj'
        upd_list[1]['foo_float'] = 55
        # swap the first two payload entries and drop the third, so update()
        # must reorder existing objects by id and shrink the collection
        temp = upd_list[0]
        upd_list[0] = upd_list[1]
        upd_list[1] = temp
        del upd_list[2]
        obj_oc.update(upd_list)
        # identity preserved: existing objects were reordered, not recreated
        assert obj_oc[0] is obj1
        assert obj_oc[1] is obj0
        assert obj_oc[0].foo_float == 55
        assert obj_oc[0].id == upd_list[0]['id']
        assert obj_oc[1].name == '2nd DemoObj'
        assert obj_oc[1].id == upd_list[1]['id']
        assert len(obj_oc) == 2
Esempio n. 19
0
    def __init__(self,
                 grid=None,
                 depth=None,
                 grid_file=None,
                 data_file=None,
                 dataset=None,
                 varnames=None,
                 **kwargs):
        # NOTE(review): dataset and varnames are accepted but never read in
        # this body, and are not forwarded to super().__init__ -- confirm
        # whether they are consumed elsewhere or are dead parameters here.
        super(GridVectorProp, self).__init__(**kwargs)
        if isinstance(self.variables, list):
            self.variables = OrderedCollection(elems=self.variables,
                                               dtype=EnvProp)
        # borrow grid/depth/file info from the first variable when the
        # caller did not supply them (assumes self.variables is non-empty)
        if isinstance(self.variables[0], GriddedProp):
            self.grid = self.variables[0].grid if grid is None else grid
            self.depth = self.variables[0].depth if depth is None else depth
            self.grid_file = self.variables[
                0].grid_file if grid_file is None else grid_file
            self.data_file = self.variables[
                0].data_file if data_file is None else data_file

#         self._check_consistency()
        self._result_memo = OrderedDict()
        # expose each variable as a named attribute, in the class-defined
        # component order (comp_order must not exceed len(self.variables))
        for i, comp in enumerate(self.__class__.comp_order):
            setattr(self, comp, self.variables[i])
Esempio n. 20
0
 def test_getitem_byindex(self):
     # integer indexing tracks replacements and deletions positionally
     oc = OrderedCollection(['x', 'a', 'p', 'd'])
     assert oc[1] == 'a'
     oc[s_id('a')] = 'b'
     assert oc[1] == 'b'
     del oc[1]
     # after the delete, the next element slides into index 1
     assert oc[1] == 'p'
Esempio n. 21
0
    def test_iadd(self):
        # += accepts both a single element and a list of elements
        coll = OrderedCollection([1, 2, 3, 4, 5])
        coll += 6
        assert list(coll) == [1, 2, 3, 4, 5, 6]

        coll += [7, 8, 9]
        assert list(coll) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
Esempio n. 22
0
    def test_remove(self):
        oc = OrderedCollection([1, 2, 3, 4, 5])
        # removing an unknown ID raises KeyError
        with raises(KeyError):
            oc.remove(s_id(6))
        oc.remove(s_id(4))  # remove keyed by the stored object's ID
        assert [i for i in oc] == [1, 2, 3, 5]

        oc.remove(2)  # remove by integer position (drops the 3)
        assert [i for i in oc] == [1, 2, 5]
Esempio n. 23
0
    def test_to_dict(self, json_):
        'added a to_dict() method - test this method'

        # json_ is a fixture/parameter selecting the serialization flavor;
        # the shared _to_dict_assert helper does the actual checking
        items = [SimpleMover(velocity=(i * 0.5, -1.0, 0.0)) for i in range(2)]
        items.extend([RandomMover() for i in range(2)])

        mymovers = OrderedCollection(items, dtype=Mover)
        self._to_dict_assert(mymovers, items, json_)
Esempio n. 24
0
    def test_setitem_exceptions(self):
        'Use add to add an element'
        oc = OrderedCollection([1, 2, 3, 4, 5])
        # assigning through an unknown object ID raises KeyError...
        with raises(KeyError):
            oc[s_id(6)] = 6

        # ...and assigning past the end by index raises IndexError
        with raises(IndexError):
            oc[5] = 6
Esempio n. 25
0
def test_getslice():
    # getting a slice returns a new list
    # use an explicit list: on Python 3, range() is not a list, so the
    # comparisons and slicing semantics below would silently change
    l_ = list(range(6))
    oc = OrderedCollection(l_)
    b = oc[:3]
    b[0] = 10
    # mutating the slice must not affect (or equal) the source data
    assert b != l_
    # extended slices work too and match list slicing
    assert l_[::2] == oc[::2]
    def test_replace(self):
        # NOTE(review): this variant keys replace() with builtin id();
        # other copies in the file use s_id() -- confirm the key helper.
        oc = OrderedCollection([1, 2, 3, 4, 5])
        # replacing a key not in the collection appends the new value
        oc.replace(id(6), 6)
        assert [i for i in oc] == [
            1,
            2,
            3,
            4,
            5,
            6,
            ]
        # replacing an existing key swaps in place
        oc.replace(id(4), 7)
        assert [i for i in oc] == [
            1,
            2,
            3,
            7,
            5,
            6,
            ]
        assert oc[id(7)] == 7
        with pytest.raises(KeyError):

            # our key should also be gone after the delete

            oc[id(4)]
        with pytest.raises(TypeError):
            oc.replace(id(7), 'not an int')
Esempio n. 27
0
    def test_init(self):
        # dtype may be inferred from the elements or passed explicitly
        oc = OrderedCollection([1, 2, 3, 4, 5])
        assert oc.dtype == int
        oc = OrderedCollection([1, 2, 3, 4, 5], int)
        assert oc.dtype == int
        oc = OrderedCollection(dtype=int)
        assert oc.dtype == int

        with raises(TypeError):

            # either a populated list or a dtype is required

            oc = OrderedCollection()

        with raises(TypeError):
            oc = OrderedCollection('not a list')

        with raises(TypeError):

            # either a populated list or a dtype is required

            oc = OrderedCollection([])

        with raises(TypeError):
            # elements must match the declared dtype
            oc = OrderedCollection([1, 2, 3, 4, 5], float)
    def test_remove(self):
        # duplicate copy of the remove test: remove by ID, then by index
        oc = OrderedCollection([1, 2, 3, 4, 5])
        with raises(KeyError):
            oc.remove(s_id(6))
        oc.remove(s_id(4))
        assert [i for i in oc] == [1, 2, 3, 5]

        oc.remove(2)  # by integer position (drops the 3)
        assert [i for i in oc] == [1, 2, 5]
Esempio n. 29
0
    def test_replace(self):
        # replace() accepts either an object ID or an integer position
        coll = OrderedCollection([1, 2, 3, 4, 5])
        coll.replace(s_id(4), 7)  # replace by object ID
        coll.replace(0, 0)  # replace by index
        assert list(coll) == [0, 2, 3, 7, 5]
        assert coll[s_id(7)] == 7

        # the replaced object's key must no longer resolve
        with raises(KeyError):
            coll[s_id(4)]

        # dtype is still enforced on the replacement value
        with raises(TypeError):
            coll.replace(s_id(7), 'not an int')
Esempio n. 30
0
    def __init__(self, uncertain=False):
        """
        Initialize a SpillContainer.

        :param uncertain: whether this container holds uncertainty spills
        """
        super(SpillContainer, self).__init__(uncertain=uncertain)
        self.spills = OrderedCollection(dtype=gnome.spill.Spill)
        self.rewind()

        # don't want user to add to array_types in middle of run. Since its
        # not possible to throw an error in this case, let's just make it a
        # bit difficult to do.
        # dict must be updated via prepar_for_model_run() only at beginning of
        # run. Make self._array_types an an instance variable
        self._reset_arrays()
Esempio n. 31
0
    def test_index(self):
        'behaves like index for a list'
        oc = OrderedCollection([1, 2, 3, 4, 5])
        # index() accepts either the element itself or its s_id key
        assert oc.index(3) == 2
        assert oc.index(s_id(3)) == 2

        oc[s_id(3)] = 6
        # a replacement keeps the original position
        assert oc.index(6) == 2
        assert oc.index(s_id(6)) == 2

        del oc[s_id(6)]
        # after the delete, the following element takes over index 2
        assert oc.index(4) == 2
        assert oc.index(s_id(4)) == 2
Esempio n. 32
0
    def test_add_callback(self):
        '''
            test add callback is invoked after adding an object or
            list of objects
        '''

        # lets work with a mutable type
        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._add_callback, events='add')

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        # both the list form and the single-object form of += must fire
        # the 'add' callback
        oc += self.to_add
        oc += ObjToAdd()

        for obj in oc:
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
    def test_add_callback(self):
        '''
            test add callback is invoked after adding an object or
            list of objects

            (duplicate copy of the same test)
        '''

        # lets work with a mutable type
        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._add_callback, events='add')

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        oc += self.to_add
        oc += ObjToAdd()

        for obj in oc:
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
Esempio n. 34
0
    def test_remove_callback(self):
        'test remove callback is invoked after removing an object'

        oc = OrderedCollection(dtype=ObjToAdd)  # lets work with a mutable type
        oc.register_callback(self._rm_callback, events='remove')
        oc.register_callback(self._add_callback, events='add')

        # check everything if False initially
        self._reset_ObjToAdd_init_state()

        oc += self.to_add

        del oc[s_id(self.to_add[0])]

        # the deleted object saw both the add and the remove callbacks
        assert self.to_add[0].rm_callback
        assert self.to_add[0].add_callback
        assert not self.to_add[0].replace_callback

        self.to_add[0].reset()  # reset all to false
        oc += self.to_add[0]  # let's add this back in

        for obj in oc:
            # re-adding fires only the 'add' callback again
            assert obj.add_callback
            assert not obj.rm_callback
            assert not obj.replace_callback
    def test_update(self):
        """update() replaces collection contents from a payload list
        (duplicate, unformatted copy of the test_update above)."""
        int_oc = OrderedCollection([1, 2, 3, 4, 5])
        upd_cstruct = [2,3,4]
        int_oc.update(upd_cstruct)
        assert int_oc._elems == [2,3,4]

        obj_oc = OrderedCollection([DemoObj.demo(), DemoObj.demo(), DemoObj.demo()])
        obj0 = obj_oc[0]
        obj1 = obj_oc[1]
        assert obj_oc[0] is not obj_oc[1]
        upd_list = [o.serialize() for o in obj_oc] #this is like the payload from the web client
        upd_list[0]['name'] = '2nd DemoObj'
        upd_list[1]['foo_float'] = 55
        # swap the first two payload entries and drop the third
        temp = upd_list[0]
        upd_list[0] = upd_list[1]
        upd_list[1] = temp
        del upd_list[2]
        obj_oc.update(upd_list)
        # identity preserved: objects reordered by id, not recreated
        assert obj_oc[0] is obj1
        assert obj_oc[1] is obj0
        assert obj_oc[0].foo_float == 55
        assert obj_oc[0].id == upd_list[0]['id']
        assert obj_oc[1].name == '2nd DemoObj'
        assert obj_oc[1].id == upd_list[1]['id']
        assert len(obj_oc) == 2
 def test_index(self):
     "behaves like index for a list"
     # duplicate copy: index() accepts the element or its s_id key
     oc = OrderedCollection([1, 2, 3, 4, 5])
     assert oc.index(3) == 2
     assert oc.index(s_id(3)) == 2
     oc[s_id(3)] = 6
     assert oc.index(6) == 2
     assert oc.index(s_id(6)) == 2
     del oc[s_id(6)]
     # after the delete, the following element takes over index 2
     assert oc.index(4) == 2
     assert oc.index(s_id(4)) == 2
 def test_replace(self):
     # duplicate copy: replace() accepts an object ID or an integer index
     oc = OrderedCollection([1, 2, 3, 4, 5])
     oc.replace(s_id(4), 7)  # replace by object ID
     oc.replace(0, 0)  # replace by index
     assert [i for i in oc] == [0, 2, 3, 7, 5]
     assert oc[s_id(7)] == 7
     with raises(KeyError):
         # our key should also be gone after the delete
         oc[s_id(4)]
     with raises(TypeError):
         oc.replace(s_id(7), "not an int")
Esempio n. 38
0
    def test_replace_callback(self):
        'test replace callback is invoked after replacing an object'

        # lets work with a mutable type

        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._replace_callback, events='replace')

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        oc += self.to_add
        rep = ObjToAdd()
        oc[s_id(self.to_add[0])] = rep

        for obj in oc:
            # no 'add' callback was registered, so add flags stay False;
            # only the replacement object saw the replace callback
            assert not obj.add_callback
            assert not obj.rm_callback
            if id(obj) == id(rep):
                assert obj.replace_callback
            else:
                assert not obj.replace_callback
    def test_replace_callback(self):
        "test replace callback is invoked after replacing an object"

        # duplicate copy of the replace-callback test

        # lets work with a mutable type

        oc = OrderedCollection(dtype=ObjToAdd)
        oc.register_callback(self._replace_callback, events="replace")

        # check everything if False initially

        self._reset_ObjToAdd_init_state()

        oc += self.to_add
        rep = ObjToAdd()
        oc[s_id(self.to_add[0])] = rep

        for obj in oc:
            # only the replacement object saw the replace callback
            assert not obj.add_callback
            assert not obj.rm_callback
            if id(obj) == id(rep):
                assert obj.replace_callback
            else:
                assert not obj.replace_callback
Esempio n. 40
0
def test_str():
    """
    __str__ and __repr__ methods should return something handy

    not really a test, but it runs the code
    """
    mymovers = OrderedCollection([
        SimpleMover(velocity=(1.0, -1.0, 0.0)),
        SimpleMover(velocity=(2.0, -1.0, 0.0)),
        SimpleMover(velocity=(3.0, -1.0, 0.0)),
        SimpleMover(velocity=(4.0, -1.0, 0.0))
    ])

    # use the print() call form: valid on both Python 2 and Python 3
    # (the old "print repr(...)" statement form is a SyntaxError on py3)
    print(repr(mymovers))
    print(str(mymovers))

    assert repr(mymovers).startswith("OrderedCollection(")
    assert str(mymovers).startswith("OrderedCollection(")
def test_remake_emptyoc():
    "empty OC"
    # remake() on a collection that never held anything must not raise
    empty = OrderedCollection(dtype=int)
    empty.remake()
Esempio n. 42
0
class Model(serializable.Serializable):

    """
    PyGNOME Model Class

    Top-level simulation driver: owns the movers, spills, environment,
    outputters and map collections and steps the simulation through time.
    """

    # attribute names that may be updated on an existing model
    _update = [
        'time_step',
        'start_time',
        'duration',
        'uncertain',
        'movers',
        'environment',
        'spills',
        'map',
        'outputters',
        'cache_enabled',
        ]
    # creation uses the same attribute set as update
    _create = []
    _create.extend(_update)
    state = copy.deepcopy(serializable.Serializable.state)
    state.add(create=_create, update=_update)  # no need to copy parent's state in this case

    @classmethod
    def new_from_dict(cls, dict_):
        """
        Restore model from previously persisted state.

        :param dict_: persisted model state; the collection entries
            ('environment', 'outputters', 'movers', spills) are popped out
            here and re-added to the restored model's OrderedCollections,
            the remainder is forwarded to __restore__.
        :return: a new Model instance with the mover callback re-registered
        """
        l_env = dict_.pop('environment')
        l_out = dict_.pop('outputters')
        l_movers = dict_.pop('movers')

        c_spills = dict_.pop('certain_spills')
        if 'uncertain_spills' in dict_:
            # pair up certain/uncertain spills for the SpillContainerPair
            u_spills = dict_.pop('uncertain_spills')
            l_spills = zip(c_spills, u_spills)
        else:
            l_spills = c_spills

        # bypass __init__: __restore__ sets up state without registering
        # callbacks (they are re-registered explicitly below)
        model = object.__new__(cls)
        model.__restore__(**dict_)

        # plain loops, not throwaway list comprehensions, for side effects
        for obj in l_env:
            model.environment.add(obj)
        for obj in l_out:
            model.outputters.add(obj)
        for obj in l_spills:
            model.spills.add(obj)
        for obj in l_movers:
            model.movers.add(obj)

        # register callback with OrderedCollection
        model.movers.register_callback(model._callback_add_mover,
                                       ('add', 'replace'))

        return model

    def __init__(
        self,
        time_step=timedelta(minutes=15),
        start_time=round_time(datetime.now(), 3600),
        duration=timedelta(days=1),
        map=gnome.map.GnomeMap(),
        uncertain=False,
        cache_enabled=False,
        id=None,
        ):
        """
        Initializes a model. All arguments have a default.

        NOTE(review): the mutable/stateful defaults (GnomeMap(), the
        round_time(datetime.now(), ...) value) are evaluated once at import
        time and shared across Model instances -- confirm this is intended.

        :param time_step=timedelta(minutes=15): model time step in seconds or as a timedelta object
        :param start_time=datetime.now(): start time of model, datetime object. default to now, rounded to the nearest hour
        :param duration=timedelta(days=1): how long to run the model, a timedelta object
        :param map=gnome.map.GnomeMap(): the land-water map, default is a map with no land-water
        :param uncertain=False: flag for setting uncertainty
        :param cache_enabled=False: flag for setting whether the model should cache results to disk.
        :param id: Unique Id identifying the newly created mover (a UUID as a string).
                   This is used when loading an object from a persisted model
        """
        # all real initialization happens in __restore__ (shared with
        # new_from_dict); only the callback registration lives here
        self.__restore__(
            time_step,
            start_time,
            duration,
            map,
            uncertain,
            cache_enabled,
            id,
            )

        # register callback with OrderedCollection

        self.movers.register_callback(self._callback_add_mover, ('add',
                'replace'))

    def __restore__(
        self,
        time_step,
        start_time,
        duration,
        map,
        uncertain,
        cache_enabled,
        id,
        ):
        """
        Take out initialization that does not register the callback here.
        This is because new_from_dict will use this to restore the model state
        when doing a midrun persistence.
        """

        # making sure basic stuff is in place before properties are set

        self.environment = OrderedCollection(dtype=Environment)
        self.movers = OrderedCollection(dtype=Mover)
        self.spills = SpillContainerPair(uncertain)  # contains both certain/uncertain spills
        self._cache = gnome.utilities.cache.ElementCache()
        self._cache.enabled = cache_enabled

        self.outputters = \
            OrderedCollection(dtype=gnome.outputter.Outputter)  # list of output objects
        self._start_time = start_time  # default to now, rounded to the nearest hour
        self._duration = duration
        self._map = map
        # set last: the time_step property setter also calls rewind(),
        # which requires the attributes above to exist
        self.time_step = time_step  # this calls rewind() !

        self._gnome_id = gnome.GnomeId(id)

    def reset(self, **kwargs):
        """
        Resets model to defaults -- Caution -- clears all movers, spills, etc.

        Takes same keyword arguments as __init__
        """

        self.__init__(**kwargs)

    def rewind(self):
        """
        Rewinds the model to the beginning (start_time)
        """

        # # fixme: do the movers need re-setting? -- or wait for prepare_for_model_run?

        self.current_time_step = -1  # start at -1
        self.model_time = self._start_time

        # # note: this may be redundant -- they will get reset in setup_model_run() anyway..

        self.spills.rewind()
        gnome.utilities.rand.seed(1)  # set rand before each call so windages are set correctly

        # clear the cache:

        self._cache.rewind()
        # outputters also need to forget any per-run state
        for outputter in self.outputters:
            outputter.rewind()

#    def write_from_cache(self, filetype='netcdf', time_step='all'):
#        """
#        write the already-cached data to an output files.
#        """

    # ## Assorted properties

    @property
    def uncertain(self):
        # delegated to the spill container pair
        return self.spills.uncertain

    @uncertain.setter
    def uncertain(self, uncertain_value):
        """
        only if uncertainty switch is toggled, then restart model
        """

        if self.spills.uncertain != uncertain_value:
            self.spills.uncertain = uncertain_value  # update uncertainty
            self.rewind()

    @property
    def cache_enabled(self):
        return self._cache.enabled

    @cache_enabled.setter
    def cache_enabled(self, enabled):
        self._cache.enabled = enabled

    @property
    def id(self):
        # read-only: assigned once in __restore__
        return self._gnome_id.id

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        # changing the start time invalidates any computed steps
        self._start_time = start_time
        self.rewind()

    @property
    def time_step(self):
        return self._time_step

    @time_step.setter
    def time_step(self, time_step):
        """
        sets the time step, and rewinds the model

        :param time_step: the timestep as a timedelta object or integer seconds.
        """

        try:
            self._time_step = time_step.total_seconds()
        except AttributeError:
            # not a timedelta object -- assume it's in seconds.
            self._time_step = int(time_step)

        # there is a zeroth time step
        self._num_time_steps = int(self._duration.total_seconds()
                                   // self._time_step) + 1
        self.rewind()

    @property
    def current_time_step(self):
        return self._current_time_step

    @current_time_step.setter
    def current_time_step(self, step):
        # keep model_time in sync with the step counter
        self.model_time = self._start_time + timedelta(seconds=step
                * self.time_step)
        self._current_time_step = step

    @property
    def duration(self):
        return self._duration

    @duration.setter
    def duration(self, duration):
        if duration < self._duration:  # only need to rewind if shorter than it was...

            # # fixme: actually, only need to rewind if current model time is beyond new time...

            self.rewind()
        self._duration = duration
        self._num_time_steps = int(self._duration.total_seconds()
                                   // self.time_step) + 1  # there is a zeroth time step

    @property
    def map(self):
        return self._map

    @map.setter
    def map(self, map_in):
        # swapping the map invalidates any computed steps
        self._map = map_in
        self.rewind()

    @property
    def num_time_steps(self):
        # derived from duration and time_step (includes the zeroth step)
        return self._num_time_steps

    def setup_model_run(self):
        """
        Sets up each mover for the model run

        Also prepares the outputters and hands each spill container the
        union of array_types requested by the movers.
        """

        self.spills.rewind()  # why is rewind for spills here?

        for outputter in self.outputters:
            outputter.prepare_for_model_run(model_start_time=self.start_time,
                                            cache=self._cache,
                                            uncertain=self.uncertain,
                                            spills=self.spills)

        # merge every mover's requested data arrays into one dict
        array_types = {}
        for mover in self.movers:
            mover.prepare_for_model_run()
            array_types.update(mover.array_types)

        for sc in self.spills.items():
            sc.prepare_for_model_run(array_types)

    def setup_time_step(self):
        """
        sets up everything for the current time_step:

        right now only prepares the movers -- maybe more later?.
        """

        # initialize movers differently if model uncertainty is on

        # each mover is prepared once per spill container (certain and,
        # when enabled, uncertain)
        for mover in self.movers:
            for sc in self.spills.items():
                mover.prepare_for_model_step(sc, self.time_step,
                        self.model_time)
        for outputter in self.outputters:
            outputter.prepare_for_model_step(self.time_step, self.model_time)

    def move_elements(self):
        """

        Moves elements:
         - loops through all the movers. and moves the elements
         - sets new_position array for each spill
         - calls the beaching code to beach the elements that need beaching.
         - sets the new position
        """

        # # if there are no spills, there is nothing to do:

        if len(self.spills) > 0:  # can this check be removed?
            for sc in self.spills.items():
                if sc.num_released > 0:  # can this check be removed?

                    # possibly refloat elements

                    self.map.refloat_elements(sc, self.time_step)

                    # reset next_positions

                    (sc['next_positions'])[:] = sc['positions']

                    # loop through the movers; deltas accumulate so the
                    # combined displacement of all movers is applied

                    for mover in self.movers:
                        delta = mover.get_move(sc, self.time_step,
                                self.model_time)
                        sc['next_positions'] += delta

                    # beaching may clip next_positions against the shoreline
                    self.map.beach_elements(sc)

                    # the final move to the new positions

                    (sc['positions'])[:] = sc['next_positions']

    def step_is_done(self):
        """
        Loop through movers and call model_step_is_done

        Also notifies each spill container and outputter that the step
        has completed.
        """

        for mover in self.movers:
            for sc in self.spills.items():
                mover.model_step_is_done(sc)
        for sc in self.spills.items():
            sc.model_step_is_done()

        for outputter in self.outputters:
            outputter.model_step_is_done()

    def write_output(self):
        """
        Collect output from every outputter for the current step.

        :return: dict with 'step_num' plus whatever each outputter returned
        """
        output_info = {'step_num': self.current_time_step}
        for outputter in self.outputters:
            if self.current_time_step == self.num_time_steps - 1:
                # the extra True flags the final step -- presumably so
                # outputters can finalize/close files; confirm in Outputter
                output = outputter.write_output(self.current_time_step, True)
            else:
                output = outputter.write_output(self.current_time_step)
            if output is not None:
                output_info.update(output)
        return output_info

    def step(self):
        """
        Steps the model forward (or backward) in time. Needs testing for
        hind casting.

        :return: output info dict from the outputters for this step
        :raises StopIteration: once the model has passed the last time step
        """

        for sc in self.spills.items():
            # set the current time stamp only after current_time_step is
            # incremented and before the output is written. Set it to None here
            # just so we're not carrying around the old time_stamp
            sc.current_time_stamp = None

        # it gets incremented after this check
        if self.current_time_step >= self._num_time_steps - 1:
            raise StopIteration

        if self.current_time_step == -1:
            # that's all we need to do for the zeroth time step
            self.setup_model_run()
        else:
            self.setup_time_step()
            self.move_elements()
            self.step_is_done()

        self.current_time_step += 1

        # this is where the new step begins!
        # the elements released are during the time period:
        #    self.model_time + self.time_step
        # The else part of the loop computes values for data_arrays that
        # correspond with time_stamp:
        #    self.model_time + self.time_step
        # This is the current_time_stamp attribute of the SpillContainer
        #     [sc.current_time_stamp for sc in self.spills.items()]
        for sc in self.spills.items():
            sc.current_time_stamp = self.model_time
            sc.release_elements(self.time_step, self.model_time)

        # cache the results - current_time_step is incremented but the
        # current_time_stamp in spill_containers (self.spills) is not updated
        # till we go through the prepare_for_model_step
        self._cache.save_timestep(self.current_time_step, self.spills)
        output_info = self.write_output()
        return output_info

    def __iter__(self):
        """
        Iterator protocol support: rewind the model, then hand back the
        model itself so it can be iterated over.
        """
        self.rewind()
        return self

    def next(self):
        """
        Iterator/generator protocol support (Python 2): advance one step.

        :return: the output info dict from :meth:`step`
        """
        return self.step()

    def full_run(self, rewind=True, log=False):
        """
        Do a full run of the model.

        :param rewind=True: whether to rewind the model first -- defaults to True
                            if set to false, model will be run from the current
                            step to the end
        :returns: list of outputter info dicts

        """

        if rewind:
            self.rewind()

        # run the model

        output_data = []
        while True:
            try:
                results = self.step()
                if log:
                    print results
                output_data.append(results)
            except StopIteration:
                print 'Done with the model run'
                break
        return output_data

    def movers_to_dict(self):
        """
        Serialize the movers OrderedCollection via its to_dict method.
        """
        return self.movers.to_dict()

    def environment_to_dict(self):
        """
        Serialize the environment OrderedCollection via its to_dict method.
        """
        return self.environment.to_dict()

    def spills_to_dict(self):
        """
        Serialize the spill container pair via its to_dict method.
        """
        return self.spills.to_dict()

    def outputters_to_dict(self):
        """
        Serialize the outputters OrderedCollection via its to_dict method.
        """
        return self.outputters.to_dict()

    def map_to_dict(self):
        """
        :return: tuple of (fully-qualified class name, object id) for the
                 current map
        """
        fqn = '{0}.{1}'.format(self.map.__module__,
                               self.map.__class__.__name__)
        return (fqn, self.map.id)

    def _callback_add_mover(self, obj_added):
        """ callback after mover has been added """
        # a WindMover drags its wind object into the environment collection
        if isinstance(obj_added, WindMover) and \
                obj_added.wind.id not in self.environment:
            self.environment += obj_added.wind

        # a CatsMover may drag a tide object in as well
        if isinstance(obj_added, CatsMover) and \
                obj_added.tide is not None and \
                obj_added.tide.id not in self.environment:
            self.environment += obj_added.tide

        # any change to the movers invalidates previously computed results
        self.rewind()

    def __eq__(self, other):
        """Equality: base-class comparison plus the spill container data."""
        check = super(Model, self).__eq__(other)
        if not check:
            return check

        # also check the data in spill_container object
        if type(self.spills) != type(other.spills):
            return False
        if self.spills != other.spills:
            return False

        return check

    def __ne__(self, other):
        """
        Compare inequality (!=) of two objects
        """
        return not self == other
# ---- Esempio n. 43 (scraped sample separator; score: 0) ----
class Model(Serializable):
    '''
    PyGnome Model Class

    Ties the simulation pieces together: map, movers, weatherers,
    environment objects, outputters and the (un)certain spill containers.
    '''
    # attributes exposed for update (e.g. through update_from_dict)
    # NOTE(review): 'time_step' and 'weathering_substeps' each appear twice
    # in this list -- looks unintentional; confirm before de-duplicating
    _update = ['time_step',
               'weathering_substeps',
               'start_time',
               'duration',
               'time_step',
               'uncertain',
               'cache_enabled',
               'weathering_substeps',
               'map',
               'movers',
               'weatherers',
               'environment',
               'outputters'
               ]
    # attributes persisted to save files -- everything updatable
    _create = []
    _create.extend(_update)
    _state = copy.deepcopy(Serializable._state)
    _schema = ModelSchema

    # no need to copy parent's _state in this case
    _state.add(save=_create, update=_update)

    # override __eq__ since 'spills' and 'uncertain_spills' need to be checked
    # They both have _to_dict() methods to return underlying ordered
    # collections and that would not be the correct way to check equality
    _state += [Field('spills', save=True, update=True, test_for_eq=False),
               Field('uncertain_spills', save=True, test_for_eq=False)]

    # list of OrderedCollections
    _oc_list = ['movers', 'weatherers', 'environment', 'outputters']

    @classmethod
    def new_from_dict(cls, dict_):
        '''
        Restore model from previously persisted _state

        :param dict_: dict of attributes as produced by serialize/save;
            consumed destructively (items are popped off it)
        :returns: a new Model instance with its collections and callbacks
            restored
        '''
        json_ = dict_.pop('json_')
        l_env = dict_.pop('environment', [])
        l_out = dict_.pop('outputters', [])
        l_movers = dict_.pop('movers', [])
        l_weatherers = dict_.pop('weatherers', [])
        c_spills = dict_.pop('spills', [])

        # pair certain/uncertain spills when both lists are present
        if 'uncertain_spills' in dict_:
            u_spills = dict_.pop('uncertain_spills')
            l_spills = zip(c_spills, u_spills)
        else:
            l_spills = c_spills

        # define defaults for properties that a location file may not contain
        # (introspect __init__'s signature for its keyword defaults)
        kwargs = inspect.getargspec(cls.__init__)
        default_restore = dict(zip(kwargs[0][1:], kwargs[3]))

        if json_ == 'webapi':
            # default is to enable cache
            default_restore['cache_enabled'] = True

        for key in default_restore:
            default_restore[key] = dict_.pop(key, default_restore[key])

        # create without calling __init__; __restore__ performs the base
        # (non-callback) initialization
        model = object.__new__(cls)
        model.__restore__(**default_restore)

        # if there are other values in dict_, setattr
        if json_ == 'webapi':
            model.update_from_dict(dict_)
        else:
            cls._restore_attr_from_save(model, dict_)

        [model.environment.add(obj) for obj in l_env]
        [model.outputters.add(obj) for obj in l_out]
        [model.spills.add(obj) for obj in l_spills]
        [model.movers.add(obj) for obj in l_movers]
        [model.weatherers.add(obj) for obj in l_weatherers]

        # register callback with OrderedCollection
        model.movers.register_callback(model._callback_add_mover,
                                       ('add', 'replace'))

        model.weatherers.register_callback(model._callback_add_weatherer,
                                           ('add', 'replace'))

        # restore the spill data outside this method - let's not try to find
        # the saveloc here
        return model

    def __init__(self,
                 time_step=timedelta(minutes=15),
                 start_time=round_time(datetime.now(), 3600),
                 duration=timedelta(days=1),
                 weathering_substeps=1,
                 map=None,
                 uncertain=False,
                 cache_enabled=False,
                 name=None):
        '''

        Initializes a model.
        All arguments have a default.

        :param time_step=timedelta(minutes=15): model time step in seconds
            or as a timedelta object
        :param start_time=datetime.now(): start time of model, datetime
            object. Rounded to the nearest hour.
        :param duration=timedelta(days=1): How long to run the model,
            a timedelta object.
        :param int weathering_substeps=1: How many weathering substeps to
            run inside a single model time step.
        :param map=gnome.map.GnomeMap(): The land-water map.
        :param uncertain=False: Flag for setting uncertainty.
        :param cache_enabled=False: Flag for setting whether the model should
            cache results to disk.
        :param name=None: Optional name for the model.
        '''

        # all non-callback initialization lives in __restore__ so that
        # new_from_dict can reuse it
        self.__restore__(time_step, start_time, duration,
                         weathering_substeps,
                         uncertain, cache_enabled, map, name)

        # register callback with OrderedCollection
        self.movers.register_callback(self._callback_add_mover,
                                      ('add', 'replace'))

        self.weatherers.register_callback(self._callback_add_weatherer,
                                          ('add', 'replace'))

    def __restore__(self, time_step, start_time, duration,
                    weathering_substeps, uncertain, cache_enabled, map, name):
        '''
        Take out initialization that does not register the callback here.
        This is because new_from_dict will use this to restore the model _state
        when doing a midrun persistence.

        Parameters mirror :meth:`__init__`.
        '''
        # making sure basic stuff is in place before properties are set
        self.environment = OrderedCollection(dtype=Environment)
        self.movers = OrderedCollection(dtype=Mover)
        self.weatherers = OrderedCollection(dtype=Weatherer)

        # contains both certain/uncertain spills
        self.spills = SpillContainerPair(uncertain)

        self._cache = gnome.utilities.cache.ElementCache()
        self._cache.enabled = cache_enabled

        # list of output objects
        self.outputters = OrderedCollection(dtype=Outputter)

        # default to now, rounded to the nearest hour
        self._start_time = start_time
        self._duration = duration
        self.weathering_substeps = weathering_substeps
        # fall back to a plain GnomeMap when no map was supplied
        if not map:
            map = gnome.map.GnomeMap()

        # name is optional; only set when provided
        if name:
            self.name = name

        self._map = map
        # assigning through the property also computes _num_time_steps
        self.time_step = time_step  # this calls rewind() !

    def reset(self, **kwargs):
        '''
        Reset the model back to a default state -- Caution -- this clears
        all movers, spills, etc.  Accepts the same keyword arguments as
        :meth:`__init__()`
        '''
        self.__init__(**kwargs)

    def rewind(self):
        '''
        Rewind the model to the beginning (start_time).
        '''
        # fixme: do the movers need re-setting? -- or wait for
        #        prepare_for_model_run?
        self.current_time_step = -1
        self.model_time = self._start_time

        # note: This may be redundant.  They will get reset in
        #       setup_model_run() anyway..
        self.spills.rewind()

        # re-seed before each run so windages are set reproducibly
        gnome.utilities.rand.seed(1)

        # clear the cache:
        self._cache.rewind()

        for out in self.outputters:
            out.rewind()

#    def write_from_cache(self, filetype='netcdf', time_step='all'):
#        """
#        write the already-cached data to an output files.
#        """

    @property
    def uncertain(self):
        '''
        Uncertainty flag of the model (delegates to the spill container
        pair).
        '''
        return self.spills.uncertain

    @uncertain.setter
    def uncertain(self, uncertain_value):
        '''
        Toggling the uncertainty flag rewinds the model.
        '''
        if self.spills.uncertain == uncertain_value:
            return

        self.spills.uncertain = uncertain_value  # update uncertainty
        self.rewind()

    @property
    def cache_enabled(self):
        '''
        When True, generated data is cached (delegates to the cache object).
        '''
        return self._cache.enabled

    @cache_enabled.setter
    def cache_enabled(self, enabled):
        self._cache.enabled = enabled

    @property
    def start_time(self):
        '''
        Start time of the simulation (datetime)
        '''
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        # changing the start time invalidates any computed results
        self._start_time = start_time
        self.rewind()

    @property
    def time_step(self):
        '''
        time step over which the dynamics is computed (seconds)
        '''
        return self._time_step

    @time_step.setter
    def time_step(self, time_step):
        '''
        Sets the time step, and rewinds the model

        :param time_step: The timestep can be a timedelta object
                          or integer seconds.
        '''
        # EAFP: a timedelta has total_seconds(); anything else is coerced
        # to integer seconds
        try:
            self._time_step = time_step.total_seconds()
        except AttributeError:
            self._time_step = int(time_step)

        # there is a zeroth time step
        self._num_time_steps = int(self._duration.total_seconds()
                                   // self._time_step) + 1
        self.rewind()

    @property
    def current_time_step(self):
        '''
        Current timestep of the simulation
        '''
        return self._current_time_step

    @current_time_step.setter
    def current_time_step(self, step):
        # keep model_time in sync with the step counter
        self.model_time = self._start_time + timedelta(seconds=step *
                                                       self.time_step)
        self._current_time_step = step

    @property
    def duration(self):
        '''
        total duration of the model run (timedelta)
        '''
        return self._duration

    @duration.setter
    def duration(self, duration):
        if duration < self._duration:
            # only need to rewind if shorter than it was...
            # fixme: actually, only need to rewind if current model time
            # is beyond new time...
            self.rewind()
        self._duration = duration

        # there is a zeroth time step
        self._num_time_steps = int(self._duration.total_seconds()
                                   // self.time_step) + 1

    @property
    def map(self):
        '''
        land-water map used for the simulation
        '''
        return self._map

    @map.setter
    def map(self, map_in):
        # swapping the map invalidates any computed results
        self._map = map_in
        self.rewind()

    @property
    def num_time_steps(self):
        '''
        Read-only: number of time steps, computed from
        py:attribute:`duration` and py:attribute:`time_step`
        '''
        return self._num_time_steps

    def setup_model_run(self):
        '''
        Sets up each mover for the model run

        Rebuilds the ordered collections, collects the array_types needed
        by movers and weatherers, then prepares the spill containers and
        outputters for the run.
        '''
        self.spills.rewind()  # why is rewind for spills here?

        # remake orderedcollections defined by model
        for oc in [self.movers, self.weatherers,
                   self.outputters, self.environment]:
            oc.remake()

        # union of the data arrays every mover/weatherer needs
        array_types = {}

        for mover in self.movers:
            mover.prepare_for_model_run()
            array_types.update(mover.array_types)

        for w in self.weatherers:
            w.prepare_for_model_run()
            array_types.update(w.array_types)

        for sc in self.spills.items():
            sc.prepare_for_model_run(array_types)

        # outputters need array_types, so this needs to come after those
        # have been updated.
        for outputter in self.outputters:
            outputter.prepare_for_model_run(model_start_time=self.start_time,
                                            cache=self._cache,
                                            uncertain=self.uncertain,
                                            spills=self.spills)

    def setup_time_step(self):
        '''
        Prepare movers, weatherers and outputters for the current time step.
        '''
        # initialize movers differently if model uncertainty is on
        for mover in self.movers:
            for sc in self.spills.items():
                mover.prepare_for_model_step(sc, self.time_step,
                                             self.model_time)

        for weatherer in self.weatherers:
            for sc in self.spills.items():
                # maybe we will setup a super-sampling step here???
                weatherer.prepare_for_model_step(sc, self.time_step,
                                                 self.model_time)

        for outputter in self.outputters:
            outputter.prepare_for_model_step(self.time_step, self.model_time)

    def move_elements(self):
        '''
        Move the elements of each spill container one time step:

         - refloat elements the map allows back into the water
         - copy 'positions' into 'next_positions'
         - accumulate every mover's delta into 'next_positions'
         - beach elements via the map
         - commit 'next_positions' back into 'positions'
        '''
        for sc in self.spills.items():
            if sc.num_released <= 0:  # can this check be removed?
                continue

            # possibly refloat elements
            self.map.refloat_elements(sc, self.time_step)

            # start the step from the current positions
            sc['next_positions'][:] = sc['positions']

            # accumulate the movement contributed by every mover
            for mover in self.movers:
                sc['next_positions'] += mover.get_move(sc, self.time_step,
                                                       self.model_time)

            self.map.beach_elements(sc)

            # the final move to the new positions
            sc['positions'][:] = sc['next_positions']

    def weather_elements(self):
        '''
        Weathers elements:

        - loops through all the weatherers, passing in the spill_container
          and the time range
        - a weatherer modifies the data arrays in the spill container in
          place, so a given time range should only be processed once; we
          expect a sequence of contiguous time ranges.
        - Note: with multiple sequential weathering processes some
          inaccuracy could occur.  The proposed mitigation is to
          'super-sample' the model time step into many smaller sub-steps.
        '''
        for sc in self.spills.items():
            for weatherer in self.weatherers:
                for sub_start, sub_step in self._split_into_substeps():
                    weatherer.weather_elements(sc, sub_step, sub_start)

    def _split_into_substeps(self):
        '''
        Split the current model time step into weathering sub-steps.

        :return: list of (datetime, seconds) tuples covering the whole
                 time step

        We divide evenly on second boundaries, so the integer division may
        leave a remainder; when it does, the remainder is appended as one
        extra (shorter) sub-step -- i.e. one more sub-step than requested.
        '''
        time_step = int(self._time_step)
        sub_step = time_step / self.weathering_substeps

        # offsets of the sub-step boundaries within the time step;
        # pairing each offset with the next yields (start, length)
        offsets = range(0, time_step + 1, sub_step)
        res = [(start, end - start)
               for start, end in zip(offsets, offsets[1:])]

        if sum(res[-1]) < time_step:
            # collect the remaining slice
            res.append((sum(res[-1]), time_step % sub_step))

        # convert relative offsets into absolute datetimes
        return [(self.model_time + timedelta(seconds=start), delta)
                for start, delta in res]

    def step_is_done(self):
        '''
        Notify movers, weatherers, spill containers and outputters that the
        current time step is finished.
        '''
        for mover in self.movers:
            for sc in self.spills.items():
                mover.model_step_is_done(sc)

        for weatherer in self.weatherers:
            weatherer.model_step_is_done()

        for sc in self.spills.items():
            # removes elements with oil_status.to_be_removed
            sc.model_step_is_done()

            # age remaining particles
            sc['age'][:] += self.time_step

        for outputter in self.outputters:
            outputter.model_step_is_done()

    def write_output(self):
        '''
        Ask each outputter to write its output for the current step.

        :return: merged dict of outputter info, or just
                 {'step_num': ...} when no outputter produced anything
        '''
        output_info = {}
        last_step = (self.current_time_step == self.num_time_steps - 1)

        for outputter in self.outputters:
            if last_step:
                output = outputter.write_output(self.current_time_step, True)
            else:
                output = outputter.write_output(self.current_time_step)

            if output is not None:
                output_info.update(output)

        if not output_info:
            return {'step_num': self.current_time_step}

        return output_info

    def step(self):
        '''
        Steps the model forward (or backward) in time. Needs testing for
        hind casting.

        :return: output info dict from the outputters for this step
        :raises StopIteration: once the model has passed the last time step
        '''
        for sc in self.spills.items():
            # Set the current time stamp only after current_time_step is
            # incremented and before the output is written. Set it to None here
            # just so we're not carrying around the old time_stamp
            sc.current_time_stamp = None

        # it gets incremented after this check
        if self.current_time_step >= self._num_time_steps - 1:
            raise StopIteration

        if self.current_time_step == -1:
            # that's all we need to do for the zeroth time step
            self.setup_model_run()
        else:
            self.setup_time_step()
            self.move_elements()
            self.weather_elements()
            self.step_is_done()

        self.current_time_step += 1

        # this is where the new step begins!
        # the elements released are during the time period:
        #    self.model_time + self.time_step
        # The else part of the loop computes values for data_arrays that
        # correspond with time_stamp:
        #    self.model_time + self.time_step
        # This is the current_time_stamp attribute of the SpillContainer
        #     [sc.current_time_stamp for sc in self.spills.items()]
        for sc in self.spills.items():
            sc.current_time_stamp = self.model_time

            # release particles for next step - these particles will be aged
            # in the next step
            sc.release_elements(self.time_step, self.model_time)

        # cache the results - current_time_step is incremented but the
        # current_time_stamp in spill_containers (self.spills) is not updated
        # till we go through the prepare_for_model_step
        self._cache.save_timestep(self.current_time_step, self.spills)
        output_info = self.write_output()
        return output_info

    def __iter__(self):
        '''
        Iterator protocol support: rewind, then return the model itself.
        '''
        self.rewind()
        return self

    def next(self):
        '''
        Iterator/generator protocol support (Python 2): advance one step.

        :return: the output info dict from :meth:`step`
        '''
        return self.step()

    def full_run(self, rewind=True, log=False):
        '''
        Do a full run of the model.

        :param rewind=True: whether to rewind the model first -- if set to
            false, model will be run from the current step to the end
        :returns: list of outputter info dicts
        '''
        if rewind:
            self.rewind()

        # run the model
        output_data = []
        while True:
            try:
                results = self.step()

                if log:
                    print results

                output_data.append(results)
            except StopIteration:
                print 'Done with the model run'
                break

        return output_data

    def _callback_add_mover(self, obj_added):
        'Callback after mover has been added'
        # movers that carry a wind object drag it into the environment
        if hasattr(obj_added, 'wind') and \
                obj_added.wind.id not in self.environment:
            self.environment += obj_added.wind

        # likewise for an optional tide object
        if hasattr(obj_added, 'tide') and obj_added.tide is not None and \
                obj_added.tide.id not in self.environment:
            self.environment += obj_added.tide

        self.rewind()  # rewind model if a new mover is added

    def _callback_add_weatherer(self, obj_added):
        'Callback after weatherer has been added'
        if isinstance(obj_added, Weatherer):
            # no weatherer-specific dependencies to wire up just yet
            pass

        self.rewind()  # rewind model if a new weatherer is added

    def __eq__(self, other):
        # NOTE(review): the print statements below look like leftover debug
        # output -- they write to stdout on every comparison; consider
        # removing or switching to logging
        check = super(Model, self).__eq__(other)
        print 'Model.__eq__(): super check =', check
        if check:
            # also check the data in ordered collections
            if type(self.spills) != type(other.spills):
                print 'Model.__eq__(): spill types:', (type(self.spills),
                                                       type(other.spills))
                return False

            if self.spills != other.spills:
                print 'Model.__eq__(): spills:'
                pp.pprint((self.spills, other.spills))
                return False

        return check

    def __ne__(self, other):
        'Inverse of __eq__'
        if self == other:
            return False

        return True

    '''
    Following methods are for saving a Model instance or creating a new
    model instance from a saved location
    '''
    def spills_to_dict(self):
        '''
        :return: the certain spills list from the spill container pair,
                 ready for serialization
        '''
        return self.spills.to_dict()['spills']

    def uncertain_spills_to_dict(self):
        '''
        :return: the uncertain_spills list for serialization/save files,
                 or None when the model is not running with uncertainty
        '''
        if not self.uncertain:
            return None

        return self.spills.to_dict()['uncertain_spills']

    def save(self, saveloc, references=None, name=None):
        '''
        Save the model and all its collections to saveloc.

        :param saveloc: directory to save into (it is emptied first)
        :param references: References object tracking already-saved
            objects; a fresh one is created when None
        :param name: optional name for the model's json file
        :returns: the References object used for the save
        :raises Exception: when `name` is requested but already taken in
            `references` (todo: should be a warning instead)

        Note: using references=References() as a default argument would
        share one mutable References instance across calls (it would
        outlive the Model instance), hence the None-sentinel pattern.
        '''
        if references is None:
            references = References()

        self._make_saveloc(saveloc)
        self._empty_save_dir(saveloc)
        json_ = self.serialize('save')

        # map is the only nested structure - manually relocate its data file
        self.map._move_data_file(saveloc, json_['map'])

        for oc in self._oc_list:
            coll_ = getattr(self, oc)
            self._save_collection(saveloc, coll_, references, json_[oc])

        # certain and uncertain spills are saved under separate keys
        for sc in self.spills.items():
            key = 'uncertain_spills' if sc.uncertain else 'spills'
            self._save_collection(saveloc, sc.spills, references, json_[key])

        if self.current_time_step > -1:
            # hard-coded filename - could become an attribute if users ever
            # need to change it
            self._save_spill_data(os.path.join(saveloc,
                                               'spills_data_arrays.nc'))

        # there should be no more references
        self._json_to_saveloc(json_, saveloc, references, name)
        if name and references.reference(self) != name:
            # todo: want a warning here instead of an exception
            raise Exception("{0} already exists, cannot name "
                "the model's json file: {0}".format(name))

        return references

    def _save_collection(self, saveloc, coll_, refs, coll_json):
        """
        Reference objects inside OrderedCollections. Since the OC itself
        isn't a reference but the objects in the list are a reference, do
        something a little differently here

        :param saveloc: directory the save files are written into
        :param OrderedCollection coll_: ordered collection to be saved
        :param refs: References object tracking already-saved objects
        :param coll_json: json list for this collection; each entry's
            'id' is filled in with the object's reference
        """
        for count, obj in enumerate(coll_):
            # NOTE(review): json_ is modified below but never used after --
            # obj.save() presumably re-serializes; confirm the reference
            # index actually gets persisted
            json_ = obj.serialize('save')
            for field in obj._state:
                if field.save_reference:
                    'attribute is stored as a reference to environment list'
                    if getattr(obj, field.name) is not None:
                        ref_obj = getattr(obj, field.name)
                        index = self.environment.index(ref_obj)
                        json_[field.name] = index
            obj_ref = refs.get_reference(obj)
            if obj_ref is None:
                # try following name - if 'fname' already exists in references,
                # then obj.save() assigns a different name to file
                fname = '{0.__class__.__name__}_{1}.json'.format(obj, count)
                obj.save(saveloc, refs, fname)
                coll_json[count]['id'] = refs.reference(obj)
            else:
                coll_json[count]['id'] = obj_ref

    def _save_spill_data(self, datafile):
        '''Write the data arrays for the current timestep out to NetCDF.'''
        nc_out = NetCDFOutput(datafile, which_data='all', cache=self._cache)
        nc_out.prepare_for_model_run(model_start_time=self.start_time,
                                     uncertain=self.uncertain,
                                     spills=self.spills)
        nc_out.write_output(self.current_time_step)

    def _load_spill_data(self, spill_data):
        """
        load NetCDF file and add spill data back in - designed for savefiles

        :param spill_data: path to the NetCDF file holding the saved data
            arrays; silently does nothing when the file does not exist
        """

        if not os.path.exists(spill_data):
            return

        # NOTE(review): u_spill_data is only bound when self.uncertain is
        # True; the read below relies on sc.uncertain never being True
        # otherwise, or it would raise NameError -- confirm
        if self.uncertain:
            saveloc, spill_data_fname = os.path.split(spill_data)
            spill_data_fname, ext = os.path.splitext(spill_data_fname)
            u_spill_data = os.path.join(saveloc,
                '{0}_uncertain{1}'.format(spill_data_fname, ext))

        # union of the array types declared by movers and weatherers
        array_types = {}

        for m in self.movers:
            array_types.update(m.array_types)

        for w in self.weatherers:
            array_types.update(w.array_types)

        for sc in self.spills.items():
            if sc.uncertain:
                data = NetCDFOutput.read_data(u_spill_data, time=None,
                                              which_data='all')
            else:
                data = NetCDFOutput.read_data(spill_data, time=None,
                                              which_data='all')

            sc.current_time_stamp = data.pop('current_time_stamp').item()
            sc._data_arrays = data
            sc._array_types.update(array_types)

    def _empty_save_dir(self, saveloc):
        '''
        Remove every file and directory directly under saveloc.

        Called by save() to clean the directory before new save files are
        written.
        '''
        (dirpath, dirnames, filenames) = os.walk(saveloc).next()

        for dir_ in dirnames:
            shutil.rmtree(os.path.join(dirpath, dir_))

        for file_ in filenames:
            os.remove(os.path.join(dirpath, file_))

    def serialize(self, json_='webapi'):
        '''
        Serialize the Model object, handling its special-case attributes.
        '''
        toserial = self.to_serialize(json_)
        schema = self.__class__._schema()
        o_json_ = schema.serialize(toserial)

        # map is serialized as a nested structure
        o_json_['map'] = self.map.serialize(json_)

        if json_ == 'webapi':
            # ordered collections are serialized item by item
            for attr in ('environment', 'outputters', 'weatherers', 'movers',
                         'spills'):
                o_json_[attr] = self.serialize_oc(attr, json_)

        return o_json_

    def serialize_oc(self, attr, json_='webapi'):
        '''
        Serialize a Model attribute that is an ordered collection.

        :param attr: name of the Model attribute to serialize
        :param json_: serialization target ('webapi' or 'save')
        :returns: list of serialized items; empty list when the named
            attribute is not an OrderedCollection or SpillContainerPair
        '''
        coll = getattr(self, attr)

        if not isinstance(coll, (OrderedCollection, SpillContainerPair)):
            return []

        return [item.serialize(json_) for item in coll]

    @classmethod
    def deserialize(cls, json_):
        '''
        Deserialize Model, giving its special-case attributes dedicated
        handling.

        :param json_: dict previously produced by serialize()
        :returns: dict of deserialized values
        '''
        data = cls._schema().deserialize(json_)

        # keep the raw map dict; the map object is built later by load()
        if 'map' in json_ and json_['map']:
            data['map'] = json_['map']

        if json_['json_'] == 'webapi':
            oc_attrs = ('environment', 'outputters', 'weatherers', 'movers',
                        'spills')
            for name in oc_attrs:
                if name in json_ and json_[name]:
                    data[name] = cls.deserialize_oc(json_[name])

        return data

    @classmethod
    def deserialize_oc(cls, json_):
        '''
        Check contents of ordered collections to figure out what schema to
        use.
        Basically, the json serialized ordered collection looks like a regular
        list; each item carries its fully qualified class name in 'obj_type'.

        :param json_: list of serialized (dict) items
        :returns: list of deserialized dicts, one per input item
        '''
        # local import so the module's import surface is unchanged
        import importlib

        deserial = []
        for item in json_:
            fqn = item['obj_type']

            # rpartition yields ('', '', fqn) when there is no dot, matching
            # the old reversed/rsplit logic far more directly
            scope, _, name = fqn.rpartition('.')

            # importlib.import_module performs an absolute import on both
            # Python 2 and 3; __import__(..., level=-1) was Python 2 only
            my_module = importlib.import_module(scope)
            py_class = getattr(my_module, name)

            deserial.append(py_class.deserialize(item))

        return deserial

    @classmethod
    def load(cls, saveloc, json_data, references=None):
        '''
        Load a Model from a save file location.

        :param saveloc: directory containing the save files
        :param json_data: json dict describing the saved Model
        :param references: References object tracking already loaded
            objects; a new one is created when None
        :returns: the reconstructed Model instance
        '''
        # old-style conditional: pick References() when references is None
        references = (references, References())[references is None]

        # model has no datafiles or 'save_reference' attributes so no need to
        # do anything special for it. But let's add this as a check anyway
        datafiles = cls._state.get_field_by_attribute('isdatafile')
        ref_fields = cls._state.get_field_by_attribute('save_reference')
        if (datafiles or ref_fields):
            raise Exception("Model.load() assumes none of the attributes "
                "defining the state 'isdatafile' or is 'save_reference'. "
                "If this changes, then we need to make this more robust.")

        # deserialize after removing references
        _to_dict = cls.deserialize(json_data)

        # load nested map object and add it - currently, 'load' is only used
        # for loading save files/location files, so it assumes:
        # json_data['json_'] == 'save'
        # NOTE(review): eval() of 'obj_type' assumes save files are trusted
        # input -- confirm this is never attacker-controlled data
        if ('map' in json_data):
            map_obj = eval(json_data['map']['obj_type']).load(saveloc,
                json_data['map'], references)
            _to_dict['map'] = map_obj

        # load collections
        for oc in cls._oc_list:
            if oc in _to_dict:
                _to_dict[oc] = cls._load_collection(saveloc, _to_dict[oc],
                    references)
        for spill in ['spills', 'uncertain_spills']:
            if spill in _to_dict:
                _to_dict[spill] = cls._load_collection(saveloc,
                                                       _to_dict[spill],
                                                       references)
            # also need to load spill data for mid-run save!

        model = cls.new_from_dict(_to_dict)

        # restore the per-element data arrays saved for a mid-run save
        model._load_spill_data(os.path.join(saveloc, 'spills_data_arrays.nc'))

        return model

    @classmethod
    def _load_collection(cls, saveloc, l_coll_dict, refs):
        '''
        Load a list of objects described by l_coll_dict, reusing previously
        loaded objects cached in 'refs' where possible.

        Doesn't need to be classmethod of the Model, but its only used by
        Model at present.

        :param saveloc: directory containing the per-object save files
        :param l_coll_dict: list of dicts; each dict's 'id' names an object
        :param refs: References object acting as a cache of loaded objects
        :returns: list of loaded objects, in input order
        '''
        l_coll = []
        for item in l_coll_dict:
            # retrieve once instead of twice; a falsy result means the
            # object has not been loaded yet
            obj = refs.retrieve(item['id'])
            if not obj:
                f_name = os.path.join(saveloc, item['id'])
                obj = load(f_name, refs)    # will add obj to refs
            l_coll.append(obj)

        return l_coll
 # NOTE(review): this fragment appears to be a duplicated paste of
 # SpillContainer.__init__ defined in the class below -- confirm intent
 # before relying on it.
 def __init__(self, uncertain=False):
     super(SpillContainer, self).__init__(uncertain=uncertain)
     self.spills = OrderedCollection(dtype=gnome.spill.Spill)
     self.spills.register_callback(self._spills_changed,
                                   ('add', 'replace', 'remove'))
     self.rewind()
class SpillContainer(AddLogger, SpillContainerData):
    """
    Container class for all spills -- it takes care of capturing the released
    LEs from all the spills, putting them all in a single set of arrays.

    Many of the "fields" associated with a collection of elements are optional,
    or used only by some movers, so only the ones required will be requested
    by each mover.

    The data for the elements is stored in the _data_arrays dict. They can be
    accessed by indexing. For example:

    positions = spill_container['positions'] : returns a (num_LEs, 3) array of
    world_point_types
    """
    def __init__(self, uncertain=False):
        super(SpillContainer, self).__init__(uncertain=uncertain)
        self.spills = OrderedCollection(dtype=gnome.spill.Spill)
        # invalidate the cached substances/spills structure whenever the
        # spills collection changes
        self.spills.register_callback(self._spills_changed,
                                      ('add', 'replace', 'remove'))
        self.rewind()

    def __setitem__(self, data_name, array):
        """
        Invoke base class __setitem__ method so the _data_array is set
        correctly.  In addition, create the appropriate ArrayType if it wasn't
        created by the user.
        """
        super(SpillContainer, self).__setitem__(data_name, array)
        if data_name not in self._array_types:
            # derive per-element shape/dtype from the stored array itself
            shape = self._data_arrays[data_name].shape[1:]
            dtype = self._data_arrays[data_name].dtype.type

            self._array_types[data_name] = ArrayType(shape, dtype,
                                                     name=data_name)

    def _reset_arrays(self):
        '''
        reset _array_types dict so it contains default keys/values
        '''
        gnome.array_types.reset_to_defaults(['spill_num', 'id'])

        # values are the module-level default ArrayType objects imported
        # at the top of this file
        self._array_types = {'positions': positions,
                             'next_positions': next_positions,
                             'last_water_positions': last_water_positions,
                             'status_codes': status_codes,
                             'spill_num': spill_num,
                             'id': id,
                             'mass': mass,
                             'age': age}
        self._data_arrays = {}

    def _reset__substances_spills(self):
        '''
        reset internal attributes to None and empty list []:

        1. _substances_spills: data structure to contain spills per substance
        2. _oil_comp_array_len: max number of psuedocomponents - relevant if
           more than one substance is used.
        3. _fate_data_list: list of FateDataView() objects. One object per
           substance if substance is not None

        '''
        # Initialize following either the first time it is used or in
        # prepare_for_model_run() -- it could change with each new spill
        self._substances_spills = None
        self._oil_comp_array_len = None

    def _reset__fate_data_list(self):
        # define the fate view of the data if 'fate_status' is in data arrays
        # 'fate_status' is included if weathering is on
        self._fate_data_list = []

    def reset_fate_dataview(self):
        '''
        reset data arrays for each fate_dataviewer. Each substance that is not
        None has a fate_dataviewer object.
        '''
        for viewer in self._fate_data_list:
            viewer.reset()

    def _set_substancespills(self):
        '''
        _substances could change when spills are added/deleted
        using _spills_changed callback to reset self._substance_spills to None
        If 'substance' is None, we still include it in this data structure -
        all spills that are 'on' are included. A spill that is off isn't really
        being modeled so ignore it.

        .. note::
            Should not be called in middle of run. prepare_for_model_run()
            will invoke this if self._substance_spills is None. This is another
            view of the data - it doesn't contain any state that needs to be
            persisted.
        '''
        subs = []
        spills = []
        if self._oil_comp_array_len is None:
            self._oil_comp_array_len = 1

        for spill in self.spills:
            if not spill.on:
                continue
            new_subs = spill.get('substance')
            if new_subs in subs:
                # substance already defined for another spill
                ix = subs.index(new_subs)
                spills[ix].append(spill)
            else:
                # new substance not yet defined
                subs.append(new_subs)
                spills.append([spill])

                # also set _oil_comp_array_len to substance with most
                # components? -- *not* being used right now, but make it so
                # it works correctly for testing multiple substances
                if (hasattr(new_subs, 'num_components') and
                    new_subs.num_components > self._oil_comp_array_len):
                    self._oil_comp_array_len = new_subs.num_components

        # let's reorder subs so None is in the end:
        if None in subs:
            ix = subs.index(None)
            spills.append(spills.pop(ix))
            subs.append(subs.pop(ix))

        # substance ids are simply positional indices into subs
        s_id = range(len(subs))

        # 'data' will be updated when weatherers ask for arrays they need
        # define the substances list and the list of spills for each substance
        self._substances_spills = \
            substances_spills(substances=subs, s_id=s_id, spills=spills)

        if len(self.get_substances()) > 1:
            # add an arraytype for substance if more than one substance
            self._array_types.update({'substance': substance})

        self.logger.info('{0} - number of substances: {1}'.
                         format(os.getpid(), len(self.get_substances())))

    def _set_fate_data_list(self):
        '''
        For each substance that is not None, initialize
        FateDataView(substance_id) object. The substance_id corresponds with
        self._substance_spills.s_id for each substance.
        '''
        self._fate_data_list = []
        for s_id, subs in zip(self._substances_spills.s_id,
                              self._substances_spills.substances):
            if subs is None:
                continue

            self._fate_data_list.append(FateDataView(s_id))

    def _spills_changed(self, *args):
        '''
        call back called on spills add/delete/replace
        Callback simply resets the internal _substance_spills attribute to None
        since the old _substance_spills value could now be invalid.
        '''
        self._substances_spills = None

    def _get_s_id(self, substance):
        '''
        Look in the _substances_spills data structure of substance and return
        the corresponding s_id
        '''
        try:
            ix = self._substances_spills.substances.index(substance)
        except ValueError:
            'substance is not in list'
            self.logger.debug('{0} - Substance named: {1}, not found in data '
                              'structure'.format(os.getpid(), substance.name))
            return None

        return self._substances_spills.s_id[ix]

    def _get_fatedataview(self, substance):
        '''
        return the FateDataView object associated with substance
        '''
        ix = self._get_s_id(substance)

        if ix is None:
            msg = "substance named {0} not found".format(substance.name)
            self.logger.info(msg)
            return

        # check
        view = self._fate_data_list[ix]
        if view.substance_id != ix:
            msg = "substance_id did not match as expected. Check!"
            raise ValueError(msg)

        return view

    def _array_name(self, at):
        '''
        given an array type, return the name of the array. This can be string,
        in which case, it is the name of the array so return it. If its not
        a string, then return the at.name attribute.
        '''
        # basestring: Python 2 base of both str and unicode
        if isinstance(at, basestring):
            return at
        else:
            return at.name

    def _append_array_types(self, array_types):
        '''
        append to self.array_types the input array_types.

        :param array_types: set of array_types to be appended
        :type array_types: set()

        The set contains either a name as a string, say: 'rise_vel'
        In this case, get the ArrayType from gnome.array_types.rise_vel
        Set elements could also be tuples, say: ('rise_vel': ArrayType())
        In this case the user name of the data_array and its array_type is
        specified by the tuple so append it.

        .. note:: If a tuple: ('name', ArrayType()), is given and an ArrayType
            with that name already exists in self._array_types, then it is
            overwritten.
        '''
        for array in array_types:
            if isinstance(array, basestring):
                # allow user to override an array_type that might already exist
                # in self._array_types
                try:
                    array = getattr(gat, array)
                except AttributeError:
                    msg = ("Skipping {0} - not found in gnome.array_types;"
                           " and ArrayType is not provided.").format(array)
                    self.logger.error(msg)
                    raise GnomeRuntimeError(msg)

            # must be an ArrayType of an object
            self._array_types[array.name] = array

    def _append_initializer_array_types(self, array_types):
        # for each array_types, use the name to get the associated initializer
        for name in array_types:
            for spill in self.spills:
                if spill.has_initializer(name):
                    self._append_array_types(spill.get_initializer(name).
                                             array_types)

    def _append_data_arrays(self, num_released):
        """
        initialize data arrays once spill has spawned particles
        Data arrays are set to their initial_values

        :param int num_released: number of particles released

        """
        for name, atype in self._array_types.iteritems():
            # initialize all arrays even if 0 length
            if atype.shape is None:
                # assume array type is for weather data, provide it the shape
                # per the number of components used to model the oil
                # currently, we only have one type of oil, so all spills will
                # model same number of oil_components
                a_append = atype.initialize(num_released,
                                            shape=(self._oil_comp_array_len,),
                                            initial_value=tuple([0] * self._oil_comp_array_len))
            else:
                a_append = atype.initialize(num_released)
            self._data_arrays[name] = np.r_[self._data_arrays[name], a_append]

    def _set_substance_array(self, subs_idx, num_rel_by_substance):
        '''
        -. update 'substance' array if more than one substance present. The
        value of array is the index of 'substance' in _substances_spills
        data structure
        '''
        if 'substance' in self:
            if num_rel_by_substance > 0:
                # tag only the elements just released this step
                self['substance'][-num_rel_by_substance:] = subs_idx

    def substancefatedata(self,
                          substance,
                          array_types,
                          fate='surface_weather'):
        '''
        todo: fix this so it works for type of fate requested
        return the data for specified substance
        data must contain array names specified in 'array_types'
        '''
        view = self._get_fatedataview(substance)
        return view.get_data(self, array_types, fate)

    def iterspillsbysubstance(self):
        '''
        iterate through the substances spills datastructure and return the
        spills associated with each substance. This is used by release_elements
        DataStructure contains all spills. If some spills contain None for
        substance, these will be returned
        '''
        if self._substances_spills is None:
            self._set_substancespills()
        return self._substances_spills.spills

    def itersubstancedata(self, array_types, fate='surface_weather'):
        '''
        iterates through and returns the following for each iteration:
        (substance, substance_data)

        This is used by weatherers - if a substance is None, omit it from
        the iteration.

        :param array_types: iterable containing array that should be in the
            data. This could be a set of strings corresponding with array names
            or ArrayType objects which have a name attribute
        :param fate: a string stating the type of data to be returned;
            default is 'surface_weather'
        :returns: (substance, substance_data) for each iteration
            substance: substance object
            substance_data: dict of numpy arrays associated with substance,
            filtered per the requested fate
        '''
        if self._substances_spills is None:
            self._set_substancespills()

        return zip(self.get_substances(complete=False),
                   [view.get_data(self, array_types, fate) for view in
                    self._fate_data_list])

    def update_from_fatedataview(self, substance=None,
                                 fate='surface_weather'):
        '''
        let's only update the arrays that were changed
        only update if a copy of 'data' exists. This is the case if there are
        more then one substances
        '''
        if substance is not None:
            view = self._get_fatedataview(substance)
            view.update_sc(self, fate)

        else:
            # do for all substances
            for view in self._fate_data_list:
                view.update_sc(self, fate)

    def get_substances(self, complete=True):
        '''
        return substances stored in _substances_spills structure.
        Include None if complete is True. Default is complete=True.
        '''
        if self._substances_spills is None:
            self._set_substancespills()

        if complete:
            return self._substances_spills.substances
        else:
            # filter(None, ...) drops the None (non-weathering) substance
            return filter(None, self._substances_spills.substances)

    @property
    def total_mass(self):
        '''
        return total mass spilled in 'kg'
        '''
        mass = 0
        for spill in self.spills:
            if spill.get_mass() is not None:
                mass += spill.get_mass()

        if mass == 0:
            return None
        else:
            return mass

    @property
    def substances(self):
        '''
        Returns list of substances for weathering - not including None since
        that is non-weathering.
        Currently, only one weathering substance is supported
        '''
        return self.get_substances(complete=False)

    @property
    def array_types(self):
        """
        user can modify ArrayType initial_value in middle of run. Changing
        the shape should throw an error. Change the dtype at your own risk.
        This returns a new dict so user cannot add/delete an ArrayType in
        middle of run. Use prepare_for_model_run() to do add an ArrayType.
        """
        return dict(self._array_types)

    def rewind(self):
        """
        In the rewind operation, we:
        - rewind all the spills
        - restore _array_types to contain only defaults
          - movers/weatherers could have been deleted and we don't want to
            carry associated data_arrays
          - prepare_for_model_run() will be called before the next run and
            new arrays can be given

        - purge the data arrays
          - we gather data arrays for each contained spill
          - the stored arrays are cleared, then replaced with appropriate
            empty arrays
        """
        for spill in self.spills:
            spill.rewind()
        # create a full set of zero-sized arrays. If we rewound, something
        # must have changed so let's get back to default _array_types
        self._reset_arrays()
        self._reset__substances_spills()
        self._reset__fate_data_list()
        self.initialize_data_arrays()
        self.mass_balance = {}  # reset to empty dict

    def get_spill_mask(self, spill):
        # boolean mask selecting elements released by the given spill
        return self['spill_num'] == self.spills.index(spill)

    def uncertain_copy(self):
        """
        Returns a copy of the spill_container suitable for uncertainty

        It has all the same spills, with the same ids, and the uncertain
        flag set to True
        """
        u_sc = SpillContainer(uncertain=True)
        for sp in self.spills:
            u_sc.spills += sp.uncertain_copy()

        return u_sc

    def prepare_for_model_run(self, array_types=set()):
        """
        called when setting up the model prior to 1st time step
        This is considered 0th timestep by model

        The data_arrays along with the current_time_stamp must be set in
        order to write_output() after each step.

        :param array_types: a set of additional names and/or array_types to
            append to standard array_types attribute. Set can contain only
            strings or a tuple with (string, ArrayType). See Note below.

        .. note:: set can contains strings or tuples. If set contains only
            strings, say: {'mass', 'windages'},
            then SpillContainer looks for corresponding ArrayType object
            defined in gnome.array_types for 'mass' and 'windages'.
            If set contains a tuple, say: {('mass', gnome.array_types.mass)},
            then SpillContainer uses the ArrayType defined in the tuple.

        .. note:: The SpillContainer iterates through each of the item in
            array_types and checks to see if there is an associated initializer
            in any Spill. If corresponding initializer is found, it gets the
            array_types from initializer and appends them to its own list. This
            was added for the case where 'droplet_diameter' array is
            defined/used by initializer (InitRiseVelFromDropletSizeFromDist)
            and we would like to see it in output, but no Mover/Weatherer needs
            it.
        """
        # Question - should we purge any new arrays that were added in previous
        # call to prepare_for_model_run()?
        # No! If user made modifications to _array_types before running model,
        # let's keep those. A rewind will reset data_arrays.
        self._append_array_types(array_types)
        self._append_initializer_array_types(array_types)

        if self._substances_spills is None:
            self._set_substancespills()

        # also create fate_dataview if 'fate_status' is part of arrays
        if 'fate_status' in self.array_types:
            self._set_fate_data_list()

        # 'substance' data_array may have been added so initialize after
        # _set_substancespills() is invoked
        self.initialize_data_arrays()

        # todo: maybe better to let map do this, but it does not have a
        # prepare_for_model_run() yet so can't do it there
        # need 'amount_released' here as well
        self.mass_balance['beached'] = 0.0
        self.mass_balance['off_maps'] = 0.0

    def initialize_data_arrays(self):
        """
        initialize_data_arrays() is called without input data during rewind
        and prepare_for_model_run to define all data arrays.
        At this time the arrays are empty.
        """
        for name, atype in self._array_types.iteritems():
            # Initialize data_arrays with 0 elements
            if atype.shape is None:
                num_comp = self._oil_comp_array_len
                self._data_arrays[name] = \
                    atype.initialize_null(shape=(num_comp, ))
            else:
                self._data_arrays[name] = atype.initialize_null()

    def release_elements(self, time_step, model_time):
        """
        Called at the end of a time step

        This calls release_elements on all of the contained spills, and adds
        the elements to the data arrays

        :returns: total number of particles released

        todo: may need to update the 'mass' array to use a default of 1.0 but
        will need to define it in particle units or something along those lines
        """
        total_released = 0
        # substance index - used label elements from same substance
        # used internally only by SpillContainer - could be a strided array.
        # Simpler to define it only in SpillContainer as opposed to ArrayTypes
        # 'substance': ((), np.uint8, 0)
        for ix, spills in enumerate(self.iterspillsbysubstance()):
            num_rel_by_substance = 0
            for spill in spills:
                # only spills that are included here - no need to check
                # spill.on flag
                num_rel = spill.num_elements_to_release(model_time, time_step)
                if num_rel > 0:
                    # update 'spill_num' ArrayType's initial_value so it
                    # corresponds with spill number for this set of released
                    # particles - just another way to set value of spill_num
                    # correctly
                    self._array_types['spill_num'].initial_value = \
                        self.spills.index(spill)

                    if len(self['spill_num']) > 0:
                        # unique identifier for each new element released
                        # this adjusts the _array_types initial_value since the
                        # initialize function just calls:
                        #  range(initial_value, num_released + initial_value)
                        self._array_types['id'].initial_value = \
                            self['id'][-1] + 1
                    else:
                        # always reset value of first particle released to 0!
                        # The array_types are shared globally. To initialize
                        # uncertain spills correctly, reset this to 0.
                        # To be safe, always reset to 0 when no
                        # particles are released
                        self._array_types['id'].initial_value = 0

                    # append to data arrays - number of oil components is
                    # currently the same for all spills
                    self._append_data_arrays(num_rel)
                    spill.set_newparticle_values(num_rel,
                                                 model_time,
                                                 time_step,
                                                 self._data_arrays)
                    num_rel_by_substance += num_rel

            # always reset data arrays else the changing arrays are stale
            self._set_substance_array(ix, num_rel_by_substance)

            # reset fate_dataview at each step - do it after release elements
            self.reset_fate_dataview()

            # update total elements released for substance
            total_released += num_rel_by_substance

        return total_released

    def split_element(self, ix, num, l_frac=None):
        '''
        split an element into specified number.
        For data, like mass, that gets divided, l_frac can be optionally
        provided. l_frac is a list containing fraction of component's value
        given to each new element. len(l_frac) must be equal to num and
        sum(l_frac) == 1.0

        :param ix: id of element to be split - before splitting each element
            has a unique 'id' defined in 'id' data array
        :type ix: int
        :param num: split ix into 'num' number of elements
        :type num: int
        :param l_frac: list containing fractions that sum to 1.0 with
            len(l_frac) == num
        :type l_frac: list or tuple or numpy array
        '''
        # split the first location where 'id' matches
        try:
            idx = np.where(self['id'] == ix)[0][0]
        except IndexError:
            msg = "no element with id = {0} found".format(ix)
            self.logger.warning(msg)
            raise

        for name, at in self.array_types.iteritems():
            data = self[name]
            # insert the first num-1 split elements, then overwrite the
            # original element's slot with the last one
            split_elems = at.split_element(num, self[name][idx], l_frac)
            data = np.insert(data, idx, split_elems[:-1], 0)
            data[idx + len(split_elems) - 1] = split_elems[-1]
            self._data_arrays[name] = data

        # update fate_dataview which contains this LE
        # for now we only have one type of substance
        if len(self._fate_data_list) > 1:
            msg = "split_elements assumes only one substance is being modeled"
            self.logger.error(msg)

        self._fate_data_list[0]._reset_fatedata(self, ix)

    def model_step_is_done(self):
        '''
        Called at the end of a time step
        Need to remove particles marked as to_be_removed...
        '''
        if len(self._data_arrays) == 0:
            return  # nothing to do - arrays are not yet defined.

        # LEs are marked as to_be_removed
        # C++ might care about this so leave as is
        to_be_removed = np.where(self['status_codes'] ==
                                 oil_status.to_be_removed)[0]

        if len(to_be_removed) > 0:
            for key in self._array_types.keys():
                self._data_arrays[key] = np.delete(self[key], to_be_removed,
                                                   axis=0)

    def __str__(self):
        return ('gnome.spill_container.SpillContainer\n'
                'spill LE attributes: {0}'
                .format(sorted(self._data_arrays.keys())))

    __repr__ = __str__
Esempio n. 46
0
 def test_setitem(self):
     # replace an item by id and by position index, then verify the
     # collection keeps its ordering
     oc = OrderedCollection([1, 2, 3, 4, 5])
     oc[s_id(4)] = 7
     oc[0] = 0
     assert [i for i in oc] == [0, 2, 3, 7, 5]
Esempio n. 47
0
class Model(Serializable):
    """
    PyGNOME Model Class

    Top-level simulation driver: owns the map, movers, weatherers,
    environment objects, spills and outputters, and advances them all
    through time one step at a time.
    """
    # attribute names that may be updated on an existing serialized model
    _update = [
        "time_step",
        "weathering_substeps",
        "start_time",
        "duration",
        "uncertain",
        "movers",
        "weatherers",
        "environment",
        "spills",
        "map",
        "outputters",
        "cache_enabled",
    ]
    # everything updatable may also be supplied at creation time
    _create = []
    _create.extend(_update)
    _state = copy.deepcopy(Serializable._state)

    # no need to copy parent's _state in this case
    _state.add(create=_create, update=_update)

    @classmethod
    def new_from_dict(cls, dict_):
        """
        Restore model from previously persisted _state.

        The collection-valued entries are popped off the dict first so
        __restore__ only sees scalar constructor arguments; the collections
        are then re-added one object at a time (which re-runs any
        add-side-effects).
        """
        l_env = dict_.pop("environment")
        l_out = dict_.pop("outputters")
        l_movers = dict_.pop("movers")
        l_weatherers = dict_.pop("weatherers")
        c_spills = dict_.pop("certain_spills")

        if "uncertain_spills" in dict_:
            u_spills = dict_.pop("uncertain_spills")
            # NOTE(review): Python 2 zip returns a list of pairs here;
            # under Python 3 this would be a lazy iterator — confirm target.
            l_spills = zip(c_spills, u_spills)
        else:
            l_spills = c_spills

        # bypass __init__ so callbacks are registered only once, below
        model = object.__new__(cls)
        model.__restore__(**dict_)
        [model.environment.add(obj) for obj in l_env]
        [model.outputters.add(obj) for obj in l_out]
        [model.spills.add(obj) for obj in l_spills]
        [model.movers.add(obj) for obj in l_movers]
        [model.weatherers.add(obj) for obj in l_weatherers]

        # register callback with OrderedCollection
        model.movers.register_callback(model._callback_add_mover, ("add", "replace"))

        model.weatherers.register_callback(model._callback_add_weatherer, ("add", "replace"))

        return model

    def __init__(
        self,
        time_step=timedelta(minutes=15),
        start_time=round_time(datetime.now(), 3600),
        duration=timedelta(days=1),
        weathering_substeps=1,
        map=gnome.map.GnomeMap(),
        uncertain=False,
        cache_enabled=False,
        id=None,
    ):
        """
        Initializes a model. All arguments have a default.

        :param time_step=timedelta(minutes=15): model time step in seconds
                                                or as a timedelta object
        :param start_time=datetime.now(): start time of model, datetime
                                          object. Rounded to the nearest hour.
        :param duration=timedelta(days=1): How long to run the model,
                                           a timedelta object.
        :param int weathering_substeps=1: How many weathering substeps to
                                          run inside a single model time step.
        :param map=gnome.map.GnomeMap(): The land-water map.
        :param uncertain=False: Flag for setting uncertainty.
        :param cache_enabled=False: Flag for setting whether the model should
                                    cache results to disk.
        :param id: Unique Id identifying the newly created mover (a UUID as a
                   string).  This is used when loading an object from a
                   persisted model
        """
        # NOTE(review): mutable defaults (map=GnomeMap()) are shared across
        # calls — presumably intentional here, but worth confirming.
        self.__restore__(time_step, start_time, duration, weathering_substeps, map, uncertain, cache_enabled)

        self._gnome_id = gnome.GnomeId(id)

        # register callback with OrderedCollection
        self.movers.register_callback(self._callback_add_mover, ("add", "replace"))

        self.weatherers.register_callback(self._callback_add_weatherer, ("add", "replace"))

    def __restore__(self, time_step, start_time, duration, weathering_substeps, map, uncertain, cache_enabled):
        """
        Take out initialization that does not register the callback here.
        This is because new_from_dict will use this to restore the model _state
        when doing a midrun persistence.
        """

        # making sure basic stuff is in place before properties are set
        self.environment = OrderedCollection(dtype=Environment)
        self.movers = OrderedCollection(dtype=Mover)
        self.weatherers = OrderedCollection(dtype=Weatherer)

        # contains both certain/uncertain spills
        self.spills = SpillContainerPair(uncertain)

        self._cache = gnome.utilities.cache.ElementCache()
        self._cache.enabled = cache_enabled

        # list of output objects
        self.outputters = OrderedCollection(dtype=Outputter)

        # default to now, rounded to the nearest hour
        self._start_time = start_time
        self._duration = duration
        self.weathering_substeps = weathering_substeps
        self._map = map
        self.time_step = time_step  # this calls rewind() !

    def reset(self, **kwargs):
        """
        Resets model to defaults -- Caution -- clears all movers, spills, etc.
        Takes same keyword arguments as __init__
        """
        self.__init__(**kwargs)

    def rewind(self):
        """
        Rewinds the model to the beginning (start_time)
        """

        # fixme: do the movers need re-setting? -- or wait for
        #        prepare_for_model_run?

        # -1 marks "not yet started"; step() treats it as the zeroth step
        self.current_time_step = -1
        self.model_time = self._start_time

        # note: This may be redundant.  They will get reset in
        #       setup_model_run() anyway..

        self.spills.rewind()

        # set rand before each call so windages are set correctly
        gnome.utilities.rand.seed(1)

        # clear the cache:
        self._cache.rewind()

        for outputter in self.outputters:
            outputter.rewind()

    #    def write_from_cache(self, filetype='netcdf', time_step='all'):
    #        """
    #        write the already-cached data to an output files.
    #        """

    @property
    def uncertain(self):
        # delegated to the spill container pair
        return self.spills.uncertain

    @uncertain.setter
    def uncertain(self, uncertain_value):
        """
        only if uncertainty switch is toggled, then restart model
        """
        if self.spills.uncertain != uncertain_value:
            self.spills.uncertain = uncertain_value  # update uncertainty
            self.rewind()

    @property
    def cache_enabled(self):
        return self._cache.enabled

    @cache_enabled.setter
    def cache_enabled(self, enabled):
        self._cache.enabled = enabled

    @property
    def id(self):
        return self._gnome_id.id

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        # changing the start time invalidates any progress: rewind
        self._start_time = start_time
        self.rewind()

    @property
    def time_step(self):
        return self._time_step

    @time_step.setter
    def time_step(self, time_step):
        """
        Sets the time step, and rewinds the model

        :param time_step: The timestep can be a timedelta object
                          or integer seconds.
        """
        try:
            self._time_step = time_step.total_seconds()
        except AttributeError:
            # not a timedelta: assume a number of seconds
            self._time_step = int(time_step)

        # there is a zeroth time step
        self._num_time_steps = int(self._duration.total_seconds() // self._time_step) + 1
        self.rewind()

    @property
    def current_time_step(self):
        return self._current_time_step

    @current_time_step.setter
    def current_time_step(self, step):
        # keep model_time in lock-step with the step counter
        self.model_time = self._start_time + timedelta(seconds=step * self.time_step)
        self._current_time_step = step

    @property
    def duration(self):
        return self._duration

    @duration.setter
    def duration(self, duration):
        if duration < self._duration:
            # only need to rewind if shorter than it was...
            # fixme: actually, only need to rewind if current model time
            # is beyond new time...
            self.rewind()
        self._duration = duration

        # there is a zeroth time step
        self._num_time_steps = int(self._duration.total_seconds() // self.time_step) + 1

    @property
    def map(self):
        return self._map

    @map.setter
    def map(self, map_in):
        self._map = map_in
        self.rewind()

    @property
    def num_time_steps(self):
        return self._num_time_steps

    def setup_model_run(self):
        """
        Sets up each mover for the model run
        """
        self.spills.rewind()  # why is rewind for spills here?

        # union of the array_types every mover/weatherer requests
        array_types = {}

        for mover in self.movers:
            mover.prepare_for_model_run()
            array_types.update(mover.array_types)

        for w in self.weatherers:
            w.prepare_for_model_run()
            array_types.update(w.array_types)

        for sc in self.spills.items():
            sc.prepare_for_model_run(array_types)

        # outputters need array_types, so this needs to come after those
        # have been updated.
        for outputter in self.outputters:
            outputter.prepare_for_model_run(
                model_start_time=self.start_time, cache=self._cache, uncertain=self.uncertain, spills=self.spills
            )

    def setup_time_step(self):
        """
        sets up everything for the current time_step:
        """
        # initialize movers differently if model uncertainty is on
        for m in self.movers:
            for sc in self.spills.items():
                m.prepare_for_model_step(sc, self.time_step, self.model_time)

        for w in self.weatherers:
            for sc in self.spills.items():
                # maybe we will setup a super-sampling step here???
                w.prepare_for_model_step(sc, self.time_step, self.model_time)

        for outputter in self.outputters:
            outputter.prepare_for_model_step(self.time_step, self.model_time)

    def move_elements(self):
        """
        Moves elements:
         - loops through all the movers. and moves the elements
         - sets new_position array for each spill
         - calls the beaching code to beach the elements that need beaching.
         - sets the new position
        """
        for sc in self.spills.items():
            if sc.num_released > 0:  # can this check be removed?

                # possibly refloat elements
                self.map.refloat_elements(sc, self.time_step)

                # reset next_positions
                (sc["next_positions"])[:] = sc["positions"]

                # loop through the movers; deltas accumulate additively
                for m in self.movers:
                    delta = m.get_move(sc, self.time_step, self.model_time)
                    sc["next_positions"] += delta

                self.map.beach_elements(sc)

                # the final move to the new positions
                (sc["positions"])[:] = sc["next_positions"]

    def weather_elements(self):
        """
        Weathers elements:
        - loops through all the weatherers, passing in the spill_container
          and the time range
        - a weatherer modifies the data arrays in the spill container, so a
          particular time range should not be run multiple times.  It is
          expected that we are processing a sequence of contiguous time ranges.
        - Note: If there are multiple sequential weathering processes, some
                inaccuracy could occur.  A proposed solution is to
                'super-sample' the model time step so that it will be replaced
                with many smaller time steps.  We'll have to see if this pans
                out in practice.
        """
        for sc in self.spills.items():
            for w in self.weatherers:
                for model_time, time_step in self._split_into_substeps():
                    w.weather_elements(sc, time_step, model_time)

    def _split_into_substeps(self):
        """
        :return: sequence of (datetime, timestep)
         (Note: we divide evenly on second boundaries.
                   Thus, there will likely be a remainder
                   that needs to be included.  We include
                   this remainder, which results in
                   1 more sub-step than we requested.)
        """
        time_step = int(self._time_step)
        # NOTE(review): Python 2 integer division; range() below requires an
        # int step, so under Python 3 this `/` would need to become `//`.
        sub_step = time_step / self.weathering_substeps

        indexes = [idx for idx in range(0, time_step + 1, sub_step)]
        res = [(idx, next_idx - idx) for idx, next_idx in zip(indexes, indexes[1:])]

        if sum(res[-1]) < time_step:
            # collect the remaining slice
            res.append((sum(res[-1]), time_step % sub_step))

        # convert second offsets into absolute datetimes
        res = [(self.model_time + timedelta(seconds=idx), delta) for idx, delta in res]

        return res

    def step_is_done(self):
        """
        Loop through movers and call model_step_is_done
        """
        for mover in self.movers:
            for sc in self.spills.items():
                mover.model_step_is_done(sc)

        for w in self.weatherers:
            w.model_step_is_done()

        for sc in self.spills.items():
            "removes elements with oil_status.to_be_removed"
            sc.model_step_is_done()

            # age remaining particles
            sc["age"][:] = sc["age"][:] + self.time_step

        for outputter in self.outputters:
            outputter.model_step_is_done()

    def write_output(self):
        # Collect output from every outputter into one dict keyed by step_num;
        # the last step is flagged so outputters can finalize their files.
        output_info = {"step_num": self.current_time_step}

        for outputter in self.outputters:
            if self.current_time_step == self.num_time_steps - 1:
                output = outputter.write_output(self.current_time_step, True)
            else:
                output = outputter.write_output(self.current_time_step)

            if output is not None:
                output_info.update(output)

        return output_info

    def step(self):
        """
        Steps the model forward (or backward) in time. Needs testing for
        hind casting.
        """
        for sc in self.spills.items():
            # Set the current time stamp only after current_time_step is
            # incremented and before the output is written. Set it to None here
            # just so we're not carrying around the old time_stamp
            sc.current_time_stamp = None

        # it gets incremented after this check
        if self.current_time_step >= self._num_time_steps - 1:
            raise StopIteration

        if self.current_time_step == -1:
            # that's all we need to do for the zeroth time step
            self.setup_model_run()
        else:
            self.setup_time_step()
            self.move_elements()
            self.weather_elements()
            self.step_is_done()

        self.current_time_step += 1

        # this is where the new step begins!
        # the elements released are during the time period:
        #    self.model_time + self.time_step
        # The else part of the loop computes values for data_arrays that
        # correspond with time_stamp:
        #    self.model_time + self.time_step
        # This is the current_time_stamp attribute of the SpillContainer
        #     [sc.current_time_stamp for sc in self.spills.items()]
        for sc in self.spills.items():
            sc.current_time_stamp = self.model_time

            # release particles for next step - these particles will be aged
            # in the next step
            sc.release_elements(self.time_step, self.model_time)

        # cache the results - current_time_step is incremented but the
        # current_time_stamp in spill_containers (self.spills) is not updated
        # till we go through the prepare_for_model_step
        self._cache.save_timestep(self.current_time_step, self.spills)
        output_info = self.write_output()
        return output_info

    def __iter__(self):
        """
        Rewinds the model and returns itself so it can be iterated over.
        """
        self.rewind()

        return self

    def next(self):
        """
        (This method satisfies Python's iterator and generator protocols)

        :return: the step number
        """
        # NOTE(review): Python 2 iterator protocol; Python 3 uses __next__.
        return self.step()

    def full_run(self, rewind=True, log=False):
        """
        Do a full run of the model.

        :param rewind=True: whether to rewind the model first
                            -- if set to false, model will be run from the
                               current step to the end
        :returns: list of outputter info dicts
        """
        if rewind:
            self.rewind()

        # run the model; step() raises StopIteration when done
        output_data = []
        while True:
            try:
                results = self.step()
                if log:
                    print results
                output_data.append(results)
            except StopIteration:
                print "Done with the model run"
                break

        return output_data

    def movers_to_dict(self):
        """
        Call to_dict method of OrderedCollection object
        """
        return self.movers.to_dict()

    def weatherers_to_dict(self):
        """
        Call to_dict method of OrderedCollection object
        """
        return self.weatherers.to_dict()

    def environment_to_dict(self):
        """
        Call to_dict method of OrderedCollection object
        """
        return self.environment.to_dict()

    def spills_to_dict(self):
        # serialize the certain/uncertain spill container pair
        return self.spills.to_dict()

    def outputters_to_dict(self):
        """
        Call to_dict method of OrderedCollection object
        """
        return self.outputters.to_dict()

    def map_to_dict(self):
        """
        returns the gnome object type as a string
        """
        return "{0}.{1}".format(self.map.__module__, self.map.__class__.__name__)

    def _callback_add_mover(self, obj_added):
        """
        Callback after a mover has been added or replaced.

        Wind/tide objects referenced by the mover are pulled into the
        environment collection so they get persisted alongside it.
        """
        if isinstance(obj_added, WindMover):
            if obj_added.wind.id not in self.environment:
                self.environment += obj_added.wind

        if isinstance(obj_added, CatsMover):
            if obj_added.tide is not None and obj_added.tide.id not in self.environment:
                self.environment += obj_added.tide

        self.rewind()  # rewind model if a new mover is added

    def _callback_add_weatherer(self, obj_added):
        "Callback after weatherer has been added"
        if isinstance(obj_added, Weatherer):
            # not sure what kind of dependencies we have just yet.
            pass

        self.rewind()  # rewind model if a new weatherer is added

    def __eq__(self, other):
        check = super(Model, self).__eq__(other)
        if check:
            # also check the data in spill_container object
            if type(self.spills) != type(other.spills):
                return False

            if self.spills != other.spills:
                return False

        return check

    def __ne__(self, other):
        "Compare inequality (!=) of two objects"
        if self == other:
            return False
        else:
            return True

    """
    Following methods are for saving a Model instance or creating a new
    model instance from a saved location
    """

    def save(self, saveloc):
        """
        save model in json format to user specified saveloc

        :param saveloc: A valid directory. Model files are either persisted
                        here or a new model is re-created from the files
                        stored here. The files are clobbered when save() is
                        called.
        :type saveloc: A path as a string or unicode
        """
        path_, savedir = os.path.split(saveloc)
        if path_ == "":
            path_ = "."

        if not os.path.exists(path_):
            raise ValueError('"{0}" does not exist. \nCannot create "{1}"'.format(path_, savedir))

        if not os.path.exists(saveloc):
            os.mkdir(saveloc)

        # clobber any previous save, then write model + map json
        self._empty_save_dir(saveloc)
        json_ = self.serialize("create")
        self._save_json_to_file(saveloc, json_, "{0}.json".format(self.__class__.__name__))

        json_ = self.map.serialize("create")
        self._save_json_to_file(saveloc, json_, "{0}.json".format(self.map.__class__.__name__))

        self._save_collection(saveloc, self.movers)
        self._save_collection(saveloc, self.weatherers)
        self._save_collection(saveloc, self.environment)
        self._save_collection(saveloc, self.outputters)

        for sc in self.spills.items():
            self._save_collection(saveloc, sc.spills)

        # persist model _state since middle of run
        if self.current_time_step > -1:
            self._save_spill_data(os.path.join(saveloc, "spills_data_arrays.nc"))

    def _save_collection(self, saveloc, coll_):
        """
        Function loops over an orderedcollection or any other iterable
        containing a list of objects. It calls the to_dict method for each
        object, then converts it o valid json (dict_to_json),
        and finally saves it to file (_save_json_to_file)

        :param OrderedCollection coll_: ordered collection to be saved

        Note: The movers and weatherer objects reference the environment
        collection. If a field is saved as reference (field.save_reference is
        True), then this function adds json_[field.name] = index where
        index is the index into the environment array for the reference
        object. Currently, only objects in the environment collection are
        referenced by movers.
        """
        for count, obj in enumerate(coll_):
            json_ = obj.serialize("create")
            for field in obj._state:
                if field.save_reference:
                    "attribute is stored as a reference to environment list"
                    if getattr(obj, field.name) is not None:
                        obj_id = getattr(obj, field.name).id
                        index = self.environment.index(obj_id)
                        json_[field.name] = index

            self._save_json_to_file(saveloc, json_, "{0}_{1}.json".format(obj.__class__.__name__, count))

    def _save_json_to_file(self, saveloc, data, name):
        """
        write json data to file

        :param dict data: JSON data to be saved
        :param obj: gnome object corresponding w/ data
        """

        fname = os.path.join(saveloc, name)
        # relocate any referenced data files next to the json first
        data = self._move_data_file(saveloc, data)  # if there is a

        with open(fname, "w") as outfile:
            json.dump(data, outfile, indent=True)

    def _move_data_file(self, saveloc, json_):
        """
        Look at _state attribute of object. Find all fields with 'isdatafile'
        attribute as True. If there is a key in to_json corresponding with
        'name' of the fields with True 'isdatafile' attribute then move that
        datafile and update the key in the to_json to point to new location

        todo: maybe this belongs in serializable base class? Revisit this
        """
        # NOTE(review): eval on a persisted obj_type string — unsafe if the
        # saved files are untrusted; consider importlib-based lookup instead.
        _state = eval("{0}._state".format(json_["obj_type"]))
        fields = _state.get_field_by_attribute("isdatafile")

        for field in fields:
            if field.name not in json_:
                continue

            value = json_[field.name]

            if os.path.exists(value) and os.path.isfile(value):
                shutil.copy(value, saveloc)
                json_[field.name] = os.path.split(json_[field.name])[1]

        return json_

    def _save_spill_data(self, datafile):
        """ save the data arrays for current timestep to NetCDF """
        nc_out = NetCDFOutput(datafile, which_data="all", cache=self._cache)
        nc_out.prepare_for_model_run(model_start_time=self.start_time, uncertain=self.uncertain, spills=self.spills)
        nc_out.write_output(self.current_time_step)

    def _empty_save_dir(self, saveloc):
        """
        Remove all files, directories under saveloc

        First clean out directory, then add new save files
        This should only be called by self.save()
        """
        # NOTE(review): .next() is Python 2 only; Python 3 needs next(...).
        (dirpath, dirnames, filenames) = os.walk(saveloc).next()

        if dirnames:
            for dir_ in dirnames:
                shutil.rmtree(os.path.join(dirpath, dir_))

        if filenames:
            for file_ in filenames:
                os.remove(os.path.join(dirpath, file_))
Esempio n. 48
0
 def test_delitem_byindex(self):
     "deleting by positional index; out-of-range index raises IndexError"
     coll = OrderedCollection([1, 2, 3, 4, 5])
     with raises(IndexError):
         del coll[5]
     del coll[3]
     assert list(coll) == [1, 2, 3, 5]
Esempio n. 49
0
 def test_delitem(self):
     "deleting by s_id key; a missing key raises KeyError"
     coll = OrderedCollection([1, 2, 3, 4, 5])
     with raises(KeyError):
         del coll[s_id(6)]
     del coll[s_id(4)]
     assert list(coll) == [1, 2, 3, 5]
Esempio n. 50
0
 def test_contains(self):
     "membership (in) works on collection values"
     coll = OrderedCollection([1, 2, 3, 4, 5])
     assert 5 in coll
Esempio n. 51
0
class SpillContainer(SpillContainerData):
    """
    Container class for all spills -- it takes care of capturing the released
    LEs from all the spills, putting them all in a single set of arrays.

    Many of the "fields" associated with a collection of elements are optional,
    or used only by some movers, so only the ones required will be requested
    by each mover.

    The data for the elements is stored in the _data_arrays dict. They can be
    accessed by indexing. For example:

    positions = spill_container['positions'] : returns a (num_LEs, 3) array of
    world_point_types
    """

    def __init__(self, uncertain=False):
        """
        :param uncertain: flags this container as holding the uncertain
            copies of the spills (default False)
        """
        super(SpillContainer, self).__init__(uncertain=uncertain)
        self.spills = OrderedCollection(dtype=gnome.spill.Spill)
        self.rewind()

        # don't want user to add to array_types in middle of run. Since its
        # not possible to throw an error in this case, let's just make it a
        # bit difficult to do.
        # dict must be updated via prepare_for_model_run() only at beginning
        # of run. Make self._array_types an instance variable
        self._reset_arrays()

    def __setitem__(self, data_name, array):
        """
        Invoke baseclass __setitem__ method so the _data_array is set correctly

        In addition, create the appropriate ArrayType if it wasn't created by
        the user.
        """
        super(SpillContainer, self).__setitem__(data_name, array)
        if data_name not in self._array_types:
            # infer the per-element shape/dtype from the array just stored
            shape = self._data_arrays[data_name].shape[1:]
            dtype = self._data_arrays[data_name].dtype.type
            self._array_types[data_name] = gnome.array_types.ArrayType(shape, dtype)

    def _reset_arrays(self):
        """
        reset _array_types dict so it contains default keys/values
        and purge the stored data arrays
        """
        gnome.array_types.reset_to_defaults(["spill_num", "id"])

        self._array_types = {
            "positions": gnome.array_types.positions,
            "next_positions": gnome.array_types.next_positions,
            "last_water_positions": gnome.array_types.last_water_positions,
            "status_codes": gnome.array_types.status_codes,
            "spill_num": gnome.array_types.spill_num,
            "id": gnome.array_types.id,
            "mass": gnome.array_types.mass,
        }
        self._data_arrays = {}

    @property
    def array_types(self):
        """
        user can modify ArrayType initial_value in middle of run. Changing
        the shape should throw an error. Change the dtype at your own risk.
        This returns a new dict so user cannot add/delete an ArrayType in
        middle of run. Use prepare_for_model_run() to do add an ArrayType.
        """
        return dict(self._array_types)

    def rewind(self):
        """
        In the rewind operation, we:
        - rewind all the spills
        - restore _array_types to contain only defaults
          - movers/weatherers could have been deleted and we don't want to
            carry associated data_arrays
          - prepare_for_model_run() will be called before the next run and
            new arrays can be given

        - purge the data arrays
          - we gather data arrays for each contained spill
          - the stored arrays are cleared, then replaced with appropriate
            empty arrays
        """
        for spill in self.spills:
            spill.rewind()
        # create a full set of zero-sized arrays. If we rewound, something
        # must have changed so let's get back to default _array_types
        self._reset_arrays()
        self.initialize_data_arrays()

    def get_spill_mask(self, spill):
        """
        Return a boolean mask selecting the elements released by `spill`
        """
        return self["spill_num"] == self.spills.index(spill.id)

    def uncertain_copy(self):
        """
        Returns a copy of the spill_container suitable for uncertainty

        It has all the same spills, with the same ids, and the uncertain
        flag set to True
        """
        u_sc = SpillContainer(uncertain=True)
        for sp in self.spills:
            u_sc.spills += sp.uncertain_copy()
        return u_sc

    def prepare_for_model_run(self, array_types=None):
        """
        called when setting up the model prior to 1st time step
        This is considered 0th timestep by model

        The data_arrays are initialized here so they are ready before the
        first call to write_output()

        :param array_types: a dict of additional array_types to append to
            standard array_types attribute. The data_arrays are initialized
            and appended based on the values of array_types attribute.
            Defaults to None (no extra array_types).
        """
        # NOTE: the default was changed from a shared mutable dict ({}) to
        # None to avoid the mutable-default-argument pitfall; passing no
        # argument behaves exactly as before.

        # Question - should we purge any new arrays that were added in previous
        # call to prepare_for_model_run()?
        # No! If user made modifications to _array_types before running model,
        # let's keep those. A rewind will reset data_arrays.
        if array_types:
            self._array_types.update(array_types)
        self.initialize_data_arrays()

    def initialize_data_arrays(self):
        """
        initialize_data_arrays() is called without input data during rewind
        and prepare_for_model_run to define all data arrays. At this time the
        arrays are empty.
        """
        # .items() (not the Python-2-only .iteritems()) works on both
        # Python 2 and 3
        for name, elem in self._array_types.items():
            # Initialize data_arrays with 0 elements
            self._data_arrays[name] = elem.initialize_null()

    def _append_data_arrays(self, num_released):
        """
        initialize data arrays once spill has spawned particles
        Data arrays are set to their initial_values

        :param num_released: number of particles released
        :type num_released: int

        """
        for name, array_type in self._array_types.items():
            # initialize all arrays even if 0 length
            self._data_arrays[name] = np.r_[self._data_arrays[name], array_type.initialize(num_released)]

    def release_elements(self, time_step, model_time):
        """
        Called at the end of a time step

        This calls release_elements on all of the contained spills, and adds
        the elements to the data arrays

        :param time_step: length of the time step (passed through to spills)
        :param model_time: current model time (passed through to spills)
        """
        for spill in self.spills:
            if spill.on:
                num_released = spill.num_elements_to_release(model_time, time_step)
                if num_released > 0:
                    # update 'spill_num' ArrayType's initial_value so it
                    # corresponds with spill number for this set of released
                    # particles - just another way to set value of spill_num
                    # correctly
                    self._array_types["spill_num"].initial_value = self.spills.index(spill.id, renumber=False)

                    if len(self["spill_num"]) > 0:
                        # unique identifier for each new element released
                        # this adjusts the _array_types initial_value since the
                        # initialize function just calls:
                        #  range(initial_value, num_released + initial_value)
                        self._array_types["id"].initial_value = self["id"][-1] + 1
                    # else:
                    #    self._array_types['id'].initial_value = 0

                    # append to data arrays
                    self._append_data_arrays(num_released)
                    spill.set_newparticle_values(num_released, model_time, time_step, self._data_arrays)

    def model_step_is_done(self):
        """
        Called at the end of a time step
        Need to remove particles marked as to_be_removed...
        """
        if len(self._data_arrays) == 0:
            return  # nothing to do - arrays are not yet defined.
        to_be_removed = np.where(self["status_codes"] == oil_status.to_be_removed)[0]
        if len(to_be_removed) > 0:
            # drop the flagged rows from every data array
            for key in self._array_types:
                self._data_arrays[key] = np.delete(self[key], to_be_removed, axis=0)

    def __str__(self):
        msg = "gnome.spill_container.SpillContainer\nspill LE attributes: %s" % self._data_arrays.keys()
        return msg

    __repr__ = __str__
Esempio n. 52
0
 def test_iter(self):
     """iteration yields the items in insertion order"""
     collection = OrderedCollection([1, 2, 3, 4, 5])
     assert list(collection) == [1, 2, 3, 4, 5]
Esempio n. 53
0
 def test_len(self):
     """__len__ reports the number of stored items"""
     collection = OrderedCollection([1, 2, 3, 4, 5])
     assert len(collection) == 5
Esempio n. 54
0
 def test_add(self):
     """add() appends a matching item; a wrong dtype raises TypeError"""
     collection = OrderedCollection([1, 2, 3, 4, 5])
     collection.add(6)
     assert list(collection) == [1, 2, 3, 4, 5, 6]
     with raises(TypeError):
         collection.add('not an int')
Esempio n. 55
0
    def test_eq(self):
        'Test comparison operator __eq__'

        lhs = OrderedCollection([1, 2, 3, 4, 5])
        rhs = OrderedCollection([1, 2, 3, 4, 5])
        assert lhs == rhs
Esempio n. 56
0
 def test_int_to_dict(self, json_):
     '''exercise the added to_dict() method for an int dtype collection;
     confirms the internal try/except handling works correctly'''
     values = range(5)
     collection = OrderedCollection(values)
     self._to_dict_assert(collection, values, json_)