Example #1
import pandas
from numpy.testing import assert_allclose
from pywr.core import Model, Input, Output, Scenario
from pywr.parameters import FunctionParameter
from pywr.recorders import NumpyArrayNodeRecorder, MeanFlowRecorder


def test_mean_flow_recorder(solver):
    model = Model(solver=solver)
    model.timestepper.start = pandas.to_datetime("2016-01-01")
    model.timestepper.end = pandas.to_datetime("2016-01-04")

    inpt = Input(model, "input")
    otpt = Output(model, "output")
    inpt.connect(otpt)

    rec_flow = NumpyArrayNodeRecorder(model, inpt)
    rec_mean = MeanFlowRecorder(model, node=inpt, timesteps=3)

    scenario = Scenario(model, "dummy", size=2)

    # flow is 2 + timestep index, i.e. 2, 3, 4, 5 over the four days
    inpt.max_flow = inpt.min_flow = FunctionParameter(model, inpt, lambda model, t, si: 2 + t.index)
    model.run()

    expected = [
        2.0,
        (2.0 + 3.0) / 2,
        (2.0 + 3.0 + 4.0) / 3,
        (3.0 + 4.0 + 5.0) / 3,  # zeroth day forgotten
    ]

    for value, expected_value in zip(rec_mean.data[:, 0], expected):
        assert_allclose(value, expected_value)
Example #2
from numpy.testing import assert_allclose
from pywr.core import Model, Input, Output, Storage, Scenario
from pywr.parameters import ConstantScenarioParameter
from pywr.recorders import NumpyArrayStorageRecorder


def test_scenario_storage():
    """Test the behaviour of Storage nodes with multiple scenarios

    The model defined has two inflow scenarios: 5 and 10. It is expected that
    the volume in the storage node should increase at different rates in the
    two scenarios.
    """
    model = Model()

    i = Input(model, 'input', max_flow=999)
    s = Storage(model, 'storage', num_inputs=1, num_outputs=1, max_volume=1000, initial_volume=500)
    o = Output(model, 'output', max_flow=999)

    scenario_input = Scenario(model, 'Inflow', size=2)
    i.min_flow = ConstantScenarioParameter(model, scenario_input, [5.0, 10.0])

    i.connect(s)
    s.connect(o)

    s_rec = NumpyArrayStorageRecorder(model, s)

    model.run()

    # each scenario sees its own inflow
    assert_allclose(i.flow, [5, 10])
    # volume grows by the scenario inflow on each daily timestep
    assert_allclose(s_rec.data[0], [505, 510])
    assert_allclose(s_rec.data[1], [510, 520])
Example #3
from pywr.core import Model, Input, Output
from pywr.recorders import MeanFlowRecorder


def test_mean_flow_recorder_days(solver):
    model = Model(solver=solver)
    model.timestepper.delta = 7

    inpt = Input(model, "input")
    otpt = Output(model, "output")
    inpt.connect(otpt)

    rec_mean = MeanFlowRecorder(model, node=inpt, days=31)

    model.run()
    # 31 days at a 7-day timestep covers four whole timesteps (31 // 7 == 4)
    assert rec_mean.timesteps == 4
Example #4
from numpy.testing import assert_allclose
from pywr.core import Model, Output
from pywr.domains.river import Catchment


def test_catchment_many_successors(solver):
    """Test if node with fixed flow can have multiple successors. See #225"""
    model = Model(solver=solver)
    catchment = Catchment(model, "catchment", flow=100)
    out1 = Output(model, "out1", max_flow=10, cost=-100)
    out2 = Output(model, "out2", max_flow=15, cost=-50)
    out3 = Output(model, "out3")
    catchment.connect(out1)
    catchment.connect(out2)
    catchment.connect(out3)
    model.check()
    model.run()
    assert_allclose(out1.flow, 10)
    assert_allclose(out2.flow, 15)
    assert_allclose(out3.flow, 75)
Example #5
import pandas
from pywr.core import Model, Input, Output
from pywr.recorders import NumpyArrayNodeRecorder


def test_reset_timestepper_recorder(solver):
    model = Model(
        solver=solver,
        start=pandas.to_datetime('2016-01-01'),
        end=pandas.to_datetime('2016-01-01')
    )

    inpt = Input(model, "input", max_flow=10)
    otpt = Output(model, "output", max_flow=50, cost=-10)
    inpt.connect(otpt)

    rec = NumpyArrayNodeRecorder(model, otpt)

    model.run()

    model.timestepper.end = pandas.to_datetime("2016-01-02")

    model.run()
Example #6
                nodes=list(nodes.values()),
                name="{0}".format(node_set))

# Numpy array recorder (to dataframe)

# def assign_recorders(lookup, resource_class, recorder, pywr_type):
#     for resource_id in lookup:
#         if lookup[resource_id]['type'] in resource_class:
#             lookup[resource_id]['recorder'] = recorder(model, pywr_type[resource_id])
#
# assign_recorders(node_lookup_id, non_storage_types, NumpyArrayNodeRecorder, non_storage)
# assign_recorders(link_lookup_id, link_types, NumpyArrayNodeRecorder, pywr_links)
# assign_recorders(node_lookup_id, storage_types, NumpyArrayStorageRecorder, storage)

# --------------------- RUN MODEL -----------------------------
model.run()
# -------------------- ORGANIZE RESULTS --------------------

# Numpy array to dataframe results
# def get_results(lookup, resource_class):
#     dataframes = []
#     for resource_id in lookup:
#         if lookup[resource_id]['type'] in resource_class:
#             dataframe = lookup[resource_id]['recorder'].to_dataframe()
#             dataframe.columns = [lookup[resource_id]['name']]
#             dataframes.append(dataframe)
#     return pd.concat(dataframes, axis=1)
#
# delivery_results = get_results(node_lookup_id, 'Urban Demand')
# storage_results = get_results(node_lookup_id, storage_types)
# outflow_results = get_results(node_lookup_id, 'Outflow Node')
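
The commented-out helpers above outline a common pywr reporting pattern: attach one NumpyArray*Recorder per node of interest before model.run(), then call to_dataframe() on each recorder afterwards and concatenate the results into a single table. The following is a minimal self-contained sketch of that pattern; the model, the node names 'supply' and 'demand', and the flow values are illustrative assumptions, not part of the original script.

import pandas as pd
from pywr.core import Model, Input, Output
from pywr.recorders import NumpyArrayNodeRecorder

# Build a trivial one-link model (illustrative names and values).
model = Model()
supply = Input(model, 'supply', max_flow=10.0)
demand = Output(model, 'demand', max_flow=8.0, cost=-10)
supply.connect(demand)

# Attach a recorder per node of interest before running.
recorders = {node.name: NumpyArrayNodeRecorder(model, node)
             for node in (supply, demand)}

model.run()

# After the run, each recorder converts to a dataframe; rename the
# column to the node name and concatenate into one results table.
frames = []
for name, rec in recorders.items():
    df = rec.to_dataframe()
    df.columns = [name]
    frames.append(df)
results = pd.concat(frames, axis=1)
print(results.head())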
Example #7
import numpy as np
import pandas
import pytest
from pywr.core import Model, Input, Output
from pywr.domains.groundwater import KeatingAquifer
from pywr.recorders import (NumpyArrayLevelRecorder, NumpyArrayNodeRecorder,
                            NumpyArrayStorageRecorder)

# num_streams, num_additional_inputs, stream_flow_levels, transmissivity,
# coefficient, levels, area and storativity are module-level constants
# defined elsewhere in the original test module.


def test_keating_aquifer(solver):
    model = Model(
        solver=solver,
        start=pandas.to_datetime('2016-01-01'),
        end=pandas.to_datetime('2016-01-01'),
    )

    aqfer = KeatingAquifer(
        model,
        'keating',
        num_streams,
        num_additional_inputs,
        stream_flow_levels,
        transmissivity,
        coefficient,
        levels,
        area=area,
        storativity=storativity,
    )

    catchment = Input(model, 'catchment', max_flow=0)
    stream = Output(model, 'stream', max_flow=np.inf, cost=0)
    abstraction = Output(model, 'abstraction', max_flow=15, cost=-999)

    catchment.connect(aqfer)
    aqfer.connect(stream, from_slot=0)
    aqfer.connect(abstraction, from_slot=1)

    rec_level = NumpyArrayLevelRecorder(model, aqfer)
    rec_volume = NumpyArrayStorageRecorder(model, aqfer)
    rec_stream = NumpyArrayNodeRecorder(model, stream)
    rec_abstraction = NumpyArrayNodeRecorder(model, abstraction)

    model.check()

    assert len(aqfer.inputs) == (num_streams + num_additional_inputs)

    for initial_level in (50, 100, 110, 150):
        # set the initial aquifer level and therefore the initial volume
        aqfer.initial_level = initial_level
        initial_volume = aqfer.initial_volume
        assert initial_volume == (area * storativity[0] * initial_level * 0.001)
        # run the model (for one timestep only)
        model.run()
        # manually calculate keating streamflow and check model flows are OK
        Qp = 2 * transmissivity[0] * max(initial_level - stream_flow_levels[0][0], 0) * coefficient
        Qe = 2 * transmissivity[1] * max(initial_level - stream_flow_levels[0][1], 0) * coefficient
        delta_storage = initial_volume - rec_volume.data[0, 0]
        abs_flow = rec_abstraction.data[0, 0]
        stream_flow = rec_stream.data[0, 0]
        assert delta_storage == (stream_flow + abs_flow)
        assert stream_flow == (Qp + Qe)

    A_VERY_LARGE_NUMBER = 9999999999999
    model.timestepper.end = pandas.to_datetime('2016-01-02')

    # fill the aquifer completely
    # there is no spill for the storage so it should find no feasible solution
    with pytest.raises(RuntimeError):
        catchment.max_flow = A_VERY_LARGE_NUMBER
        catchment.min_flow = A_VERY_LARGE_NUMBER
        model.run()

    # drain the aquifer completely
    catchment.min_flow = 0
    catchment.max_flow = 0
    abstraction.max_flow = A_VERY_LARGE_NUMBER
    model.run()
    assert rec_volume.data[1, 0] == 0
    abs_flow = rec_abstraction.data[1, 0]
    stream_flow = rec_stream.data[1, 0]
    assert stream_flow == 0
    assert abs_flow == 0
Example #8
from pywr.core import Model, Input, Output

model = Model()
a = Input(model, 'a', max_flow=10.0)
b = Output(model, 'b', max_flow=5.0, cost=-1)
a.connect(b)
model.run()
# flow is limited by the output's max_flow, not the input's
assert b.flow[0] == 5.0