Example No. 1
def test_mean_flow_recorder(solver):
    model = Model(solver=solver)
    model.timestepper.start = pandas.to_datetime("2016-01-01")
    model.timestepper.end = pandas.to_datetime("2016-01-04")

    inpt = Input(model, "input")
    otpt = Output(model, "output")
    inpt.connect(otpt)

    rec_flow = NumpyArrayNodeRecorder(model, inpt)
    rec_mean = MeanFlowRecorder(model, node=inpt, timesteps=3)

    scenario = Scenario(model, "dummy", size=2)

    inpt.max_flow = inpt.min_flow = FunctionParameter(model, inpt, lambda model, t, si: 2 + t.index)
    model.run()

    expected = [
        2.0,
        (2.0 + 3.0) / 2,
        (2.0 + 3.0 + 4.0) / 3,
        (3.0 + 4.0 + 5.0) / 3,  # zeroth day forgotten
    ]

    for value, expected_value in zip(rec_mean.data[:, 0], expected):
        assert_allclose(value, expected_value)
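For reference, a minimal standalone sketch (plain Python, not pywr) of the trailing mean that the expected list above encodes, assuming the flows 2, 3, 4, 5 produced by the FunctionParameter:

# Hypothetical helper: mean over at most the last `timesteps` flows,
# mirroring the values asserted in the test above.
def trailing_mean(flows, timesteps=3):
    means = []
    for i in range(len(flows)):
        window = flows[max(0, i - timesteps + 1):i + 1]
        means.append(sum(window) / len(window))
    return means

assert trailing_mean([2.0, 3.0, 4.0, 5.0]) == [2.0, 2.5, 3.0, 4.0]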
Example No. 2
def test_scenario_storage():
    """Test the behaviour of Storage nodes with multiple scenarios

    The model defined has two inflow scenarios: 5 and 10. It is expected that
    the volume in the storage node should increase at different rates in the
    two scenarios.
    """
    model = Model()

    i = Input(model, 'input', max_flow=999)
    s = Storage(model, 'storage', num_inputs=1, num_outputs=1, max_volume=1000, initial_volume=500)
    o = Output(model, 'output', max_flow=999)

    scenario_input = Scenario(model, 'Inflow', size=2)
    i.min_flow = ConstantScenarioParameter(model, scenario_input, [5.0, 10.0])

    i.connect(s)
    s.connect(o)

    s_rec = NumpyArrayStorageRecorder(model, s)

    model.run()

    assert_allclose(i.flow, [5, 10])
    assert_allclose(s_rec.data[0], [505, 510])
    assert_allclose(s_rec.data[1], [510, 520])
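The asserted volumes follow from a simple mass balance: the output carries no cost, so nothing is released and the storage gains the full scenario inflow each day. A quick arithmetic sketch (plain Python, assumptions as stated):

# Expected storage trajectory per scenario: the volume grows from 500
# by the scenario inflow (5 or 10) on each of the first two days.
initial_volume = 500
for inflow in (5.0, 10.0):
    volumes = [initial_volume + inflow * day for day in (1, 2)]
    print(inflow, volumes)  # 5.0 -> [505.0, 510.0]; 10.0 -> [510.0, 520.0]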
Example No. 3
def test_mean_flow_recorder_days(solver):
    model = Model(solver=solver)
    model.timestepper.delta = 7

    inpt = Input(model, "input")
    otpt = Output(model, "output")
    inpt.connect(otpt)

    rec_mean = MeanFlowRecorder(model, node=inpt, days=31)

    model.run()
    assert(rec_mean.timesteps == 4)
Example No. 4
def test_scenario_collection():
    """ Basic test of Scenario and ScenarioCollection API """

    model = Model()

    # There is 1 combination when there are no Scenarios
    model.scenarios.setup()
    assert(len(model.scenarios.combinations) == 1)
    assert(len(model.scenarios) == 0)
    scA = Scenario(model, 'Scenario A', size=3)
    model.scenarios.setup()
    assert(len(model.scenarios.combinations) == 3)
    assert(len(model.scenarios) == 1)
    scB = Scenario(model, 'Scenario B', size=2)
    model.scenarios.setup()
    assert(len(model.scenarios.combinations) == 6)
    assert(len(model.scenarios) == 2)

    assert_equal([comb.indices for comb in model.scenarios.combinations],
                 [[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]])

    names = model.scenarios.combination_names
    for n, (ia, ib) in zip(names, [[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]]):
        assert n == 'Scenario A.{:03d}-Scenario B.{:03d}'.format(ia, ib)

    index = model.scenarios.multiindex
    assert_equal(index.tolist(),
                 [[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]])
    assert_equal(index.names, ['Scenario A', 'Scenario B'])
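The ordering asserted above is simply the Cartesian product of the two scenario index ranges, with the first scenario varying slowest. A one-line check with itertools:

# Cartesian product of range(3) x range(2) reproduces the asserted indices.
from itertools import product
combos = [list(idx) for idx in product(range(3), range(2))]
assert combos == [[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]]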
Example No. 5
def model(solver):
    model = Model(
        solver=solver,
        start=pandas.to_datetime("2016-01-01"),
        end=pandas.to_datetime("2016-01-10"),
    )
    return model
Example No. 6
    def test_demand_saving_with_indexed_array(self, solver, tmpdir):
        """Test recording various items from demand saving example.

        This time the TablesRecorder is defined in JSON.
        """
        import os, json, tables
        filename = "demand_saving_with_tables_recorder.json"
        # This is a bit horrible, but need to edit the JSON dynamically
        # so that the output.h5 is written in the temporary directory
        path = os.path.join(os.path.dirname(__file__), 'models')
        with open(os.path.join(path, filename), 'r') as f:
            data = f.read()
        data = json.loads(data)

        # Make an absolute, but temporary, path for the recorder
        url = data['recorders']['database']['url']
        data['recorders']['database']['url'] = str(tmpdir.join(url))

        model = Model.load(data, path=path, solver=solver)

        model.timestepper.end = "2016-01-31"
        model.check()

        # run model
        model.run()

        # run model again (to test reset behaviour)
        model.run()
        max_volume = model.nodes["Reservoir"].max_volume

        h5file = tmpdir.join('output.h5')
        with tables.open_file(str(h5file), 'r') as h5f:
            assert model.metadata['title'] == h5f.title
            # Check metadata on root node
            assert h5f.root._v_attrs.author == 'pytest'
            assert h5f.root._v_attrs.run_number == 0

            rec_demand = h5f.get_node('/outputs/demand').read()
            rec_storage = h5f.get_node('/storage/reservoir').read()

            # model starts with no demand saving
            demand_baseline = 50.0
            demand_factor = 0.9  # jan-apr
            demand_saving = 1.0
            assert_allclose(rec_demand[0, 0], demand_baseline * demand_factor * demand_saving)

            # first control curve breached
            demand_saving = 0.95
            assert (rec_storage[4, 0] < (0.8 * max_volume))
            assert_allclose(rec_demand[5, 0], demand_baseline * demand_factor * demand_saving)

            # second control curve breached
            demand_saving = 0.5
            assert (rec_storage[11, 0] < (0.5 * max_volume))
            assert_allclose(rec_demand[12, 0], demand_baseline * demand_factor * demand_saving)
Example No. 7
    def load_model(self,
                   root_dir,
                   model_path,
                   bucket=None,
                   network_key=None,
                   check_graph=False):

        os.chdir(root_dir)

        # needed when loading JSON file
        root_path = 's3://{}/{}/'.format(bucket, network_key)
        os.environ['ROOT_S3_PATH'] = root_path

        # Step 1: Load and register policies
        sys.path.insert(0, os.getcwd())
        policy_folder = '_parameters'
        for filename in os.listdir(policy_folder):
            if '__init__' in filename:
                continue
            policy_name = os.path.splitext(filename)[0]
            policy_module = '.{policy_name}'.format(policy_name=policy_name)
            # package = '.{}'.format(policy_folder)
            import_module(policy_module, policy_folder)

        # from domains import Hydropower, InstreamFlowRequirement

        modules = [('.IFRS', 'policies'), ('.domains', 'domains')]
        for name, package in modules:
            try:
                import_module(name, package)
            except Exception as err:
                logger.warning("""{name} could not be imported from {package}
                Here's the real error:
                {error_type}
                {err}
                """.format(name=name,
                           package=package,
                           error_type=type(err),
                           err=err))

        # Step 2: Load and run model
        self.model = Model.load(model_path, path=model_path)

        # check network graph
        if check_graph:
            try:
                self.model.check_graph()
            except Exception as err:
                raise Exception('Pywr error: {}'.format(err))

        self.setup()

        return
Example No. 8
def create_planning_model(model_path):
    root, filename = os.path.split(model_path)
    base, ext = os.path.splitext(filename)
    new_filename = '{}_monthly'.format(base) + ext
    monthly_model_path = os.path.join(root, new_filename)
    prepare_planning_model(model_path, monthly_model_path)
    # monthly_model = load_model(root_dir, monthly_model_path, bucket=bucket, network_key=network_key, mode='planning')
    monthly_model = Model.load(monthly_model_path, path=monthly_model_path)
    setattr(monthly_model, 'mode', 'planning')
    monthly_model.setup()
    print('Monthly model setup complete')
    return monthly_model
Example No. 9
def load_model(filename=None, data=None, solver=None):
    '''Load a test model and check it'''
    if data is None:
        path = os.path.join(os.path.dirname(__file__), 'models')
        with open(os.path.join(path, filename), 'r') as f:
            data = f.read()
    else:
        path = None

    model = Model.loads(data, path=path, solver=solver)
    model.check()
    return model
Example No. 10
def create_model():
    # create a model
    model = Model(start="2016-01-01", end="2019-12-31", timestep=7)

    # create three nodes (an input, a link, and an output)
    A = Input(model, name="A", max_flow=10.0)
    B = Link(model, name="B", cost=1.0)
    C = Output(model, name="C", max_flow=5.0, cost=-2.0)

    # connect nodes
    A.connect(B)
    B.connect(C)

    return model
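A minimal usage sketch for the model returned above (not part of the original snippet): with A able to supply 10 and C demanding at most 5 at negative cost, the delivered flow at C should be demand-limited at 5.

# Hypothetical usage of create_model(): run it and inspect the delivered flow.
from numpy.testing import assert_allclose

model = create_model()
model.run()
assert_allclose(model.nodes["C"].flow, 5.0)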
Example No. 11
def simple_linear_model(request, solver):
    """
    Make a simple model with a single Input and Output.

    Input -> Link -> Output

    """
    model = Model(solver=solver)
    inpt = Input(model, name="Input")
    lnk = Link(model, name="Link", cost=1.0)
    inpt.connect(lnk)
    otpt = Output(model, name="Output")
    lnk.connect(otpt)

    return model
Example No. 12
def test_invalid_parameter_values():
    """
    Test that `load_parameter_values` raises a ValueError rather than a KeyError.

    This is useful to catch and give useful messages when no valid reference to
    a data location is given.

    Regression test for Issue #247 (https://github.com/pywr/pywr/issues/247)
    """

    from pywr.parameters._parameters import load_parameter_values

    m = Model()
    data = {'name': 'my_parameter', 'type': 'AParameterThatShouldHaveValues'}
    with pytest.raises(ValueError):
        load_parameter_values(m, data)
Example No. 13
def test_catchment_many_successors(solver):
    """Test if node with fixed flow can have multiple successors. See #225"""
    model = Model(solver=solver)
    catchment = Catchment(model, "catchment", flow=100)
    out1 = Output(model, "out1", max_flow=10, cost=-100)
    out2 = Output(model, "out2", max_flow=15, cost=-50)
    out3 = Output(model, "out3")
    catchment.connect(out1)
    catchment.connect(out2)
    catchment.connect(out3)
    model.check()
    model.run()
    assert_allclose(out1.flow, 10)
    assert_allclose(out2.flow, 15)
    assert_allclose(out3.flow, 75)
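The asserted split of the 100 units of catchment flow can be reproduced with a greedy fill in cost order. This is only a back-of-the-envelope sketch of why the solution looks like this, not how the LP solver actually works:

# Fill outputs from the most negative cost upwards until the inflow runs out.
supply = 100
allocation = {}
for name, max_flow, cost in sorted(
        [("out1", 10, -100), ("out2", 15, -50), ("out3", float("inf"), 0)],
        key=lambda item: item[2]):
    allocation[name] = min(max_flow, supply)
    supply -= allocation[name]
assert allocation == {"out1": 10, "out2": 15, "out3": 75}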
Example No. 14
def test_reset_timestepper_recorder(solver):
    model = Model(
        solver=solver,
        start=pandas.to_datetime('2016-01-01'),
        end=pandas.to_datetime('2016-01-01')
    )

    inpt = Input(model, "input", max_flow=10)
    otpt = Output(model, "output", max_flow=50, cost=-10)
    inpt.connect(otpt)

    rec = NumpyArrayNodeRecorder(model, otpt)

    model.run()

    model.timestepper.end = pandas.to_datetime("2016-01-02")

    model.run()
Example No. 15
def test_loading_csv_recorder_from_json(solver, tmpdir):
    """
    Test the CSV Recorder which is loaded from json
    """

    filename = 'csv_recorder.json'

    # This is a bit horrible, but need to edit the JSON dynamically
    # so that the output CSV is written in the temporary directory
    path = os.path.join(os.path.dirname(__file__), 'models')
    with open(os.path.join(path, filename), 'r') as f:
        data = f.read()
    data = json.loads(data)

    # Make an absolute, but temporary, path for the recorder
    url = data['recorders']['model_out']['url']
    data['recorders']['model_out']['url'] = str(tmpdir.join(url))

    model = Model.load(data, path=path, solver=solver)

    csvfile = tmpdir.join('output.csv')
    model.run()
    import csv
    with open(str(csvfile), 'r') as fh:
        dialect = csv.Sniffer().sniff(fh.read(1024))
        fh.seek(0)
        reader = csv.reader(fh, dialect)
        for irow, row in enumerate(reader):
            if irow == 0:
                expected = ['Datetime', 'inpt', 'otpt']
                actual = row
            else:
                dt = model.timestepper.start+(irow-1)*model.timestepper.delta
                expected = [dt.isoformat()]
                actual = [row[0]]
                assert np.all((np.array([float(v) for v in row[1:]]) - 10.0) < 1e-12)
            assert expected == actual
Example No. 16
from pywr.core import Model, Input, Output
model = Model()
a = Input(model, 'a', max_flow=10.0)
b = Output(model, 'b', max_flow=5.0, cost=-1)
a.connect(b)
model.run()
assert(b.flow[0] == 5.0)
Example No. 17
def model(request):
    model = Model()
    return model
Example No. 18
    A.connect(Z)
    B.connect(Z)

    agg = AggregatedNode(model, "agg", [A, B])
    agg.flow_weights = flow_weights
    agg.max_flow = 30.0

    model.run()

    assert_allclose(agg.flow, expected_agg_flow)
    assert_allclose(A.flow, expected_A_flow)
    assert_allclose(B.flow, expected_B_flow)


@pytest.mark.skipif(Model().solver.name == "lpsolve", reason="Not supported in lpsolve.")
def test_aggregated_node_max_flow_parameter(model):
    """Nodes constrained by the max_flow of their AggregatedNode using a Parameter """
    A = Input(model, "A", max_flow=20.0, cost=1)
    B = Input(model, "B", max_flow=20.0, cost=2)
    Z = Output(model, "Z", max_flow=100, cost=-10)

    A.connect(Z)
    B.connect(Z)

    agg = AggregatedNode(model, "agg", [A, B])
    agg.max_flow = ConstantParameter(model, 30.0)

    model.run()

    assert_allclose(agg.flow, 30.0)
Example No. 19
    def create_model(self, network, template, initial_volumes=None):

        model = Model(solver='glpk-edge')

        # -----------------GENERATE NETWORK STRUCTURE -----------------------

        output_ids = []
        input_ids = []

        non_storage_types = list(output_types.keys()) + list(
            input_types.keys()) + list(node_types.keys())

        # create node dictionaries by name and id
        node_lookup = {}
        for node in network['nodes']:
            name = '{} (node)'.format(node['name'])
            types = [
                t for t in node['types'] if t['template_id'] == template['id']
            ]
            if not types:
                continue
            if len(types) > 1:
                msg = "Type is ambiguous for {}. Please remove extra types.".format(
                    name)
                raise Exception(msg)
            type_name = types[-1]['name']
            node_lookup[node.get("id")] = {
                'type': type_name,
                'name': name,
                'connect_in': 0,
                'connect_out': 0,
            }
            if type_name in output_types:
                output_ids.append(node['id'])
            elif type_name in input_types:
                input_ids.append(node['id'])

        # create link lookups and pywr links
        link_lookup = {}
        for link in network['links']:
            name = '{} (link)'.format(link['name'])
            types = [
                t for t in link['types'] if t['template_id'] == template['id']
            ]
            if not types:
                continue
            type_name = types[-1]['name']
            link_id = link['id']
            node_1_id = link['node_1_id']
            node_2_id = link['node_2_id']
            node_lookup[node_1_id]['connect_out'] += 1
            node_lookup[node_2_id]['connect_in'] += 1
            link_lookup[link_id] = {
                'name': name,
                'type': type_name,
                'node_1_id': node_1_id,
                'node_2_id': node_2_id,
                'from_slot': node_lookup[node_1_id]['connect_out'] - 1,
                'to_slot': node_lookup[node_2_id]['connect_in'] - 1,
            }

            if node_1_id in output_ids:
                node = node_lookup[node_1_id]
                msg = 'Topology error: Output {} appears to be upstream of {}'.format(
                    node['name'], name)
                raise Exception(msg)
            elif node_2_id in input_ids:
                node = node_lookup[node_2_id]
                msg = 'Topology error: Input {} appears to be downstream of {}'.format(
                    node['name'], name)
                raise Exception(msg)

            LinkType = link_types.get(type_name, Link)
            self.non_storage[('link', link_id)] = LinkType(model, name=name)

        # Q/C

        # remove unconnected links
        d = []
        for link_id, link in link_lookup.items():
            if link['node_1_id'] not in node_lookup or link[
                    'node_2_id'] not in node_lookup:
                d.append(link_id)
        for link_id in d:
            del link_lookup[link_id]

        connected_nodes = []
        for link_id, link in link_lookup.items():
            connected_nodes.append(link['node_1_id'])
            connected_nodes.append(link['node_2_id'])

        # remove unconnected nodes
        d = []
        for node_id in node_lookup:
            if node_id not in connected_nodes:
                d.append(node_id)
        for node_id in d:
            del node_lookup[node_id]

        # create the pywr node objects (storage and non-storage), keyed by node id

        # TODO: change looping variable notation
        for node_id, node in node_lookup.items():
            type_name = node['type']
            name = node['name']
            connect_in = node.get('connect_in', 0)
            connect_out = node.get('connect_out', 0)
            if (type_name in storage_types
                    or connect_out > 1) and type_name not in non_storage_types:
                initial_volume = initial_volumes.get(
                    node_id, 0.0) if initial_volumes is not None else 0.0
                self.storage[node_id] = Storage(model,
                                                name=name,
                                                num_outputs=connect_in,
                                                num_inputs=connect_out,
                                                initial_volume=initial_volume)
                if type_name not in storage_types:
                    self.storage[node_id].max_volume = 0.0
            else:

                if type_name in input_types:
                    NodeType = input_types[type_name]
                elif type_name in output_types:
                    NodeType = output_types[type_name]
                elif type_name in node_types:
                    NodeType = node_types[type_name]
                elif connect_in > 1:
                    NodeType = River
                else:
                    NodeType = Link

                self.non_storage[('node', node_id)] = NodeType(model,
                                                               name=name)

        # create network connections
        # must assign connection slots for storage
        # TODO: change looping variable notation
        for link_id, link in link_lookup.items():
            node_1_id = link['node_1_id']
            node_2_id = link['node_2_id']

            _link = self.non_storage[('link', link_id)]
            up_storage = self.storage.get(node_1_id)
            up_node = self.non_storage.get(('node', node_1_id))
            down_storage = self.storage.get(node_2_id)
            down_node = self.non_storage.get(('node', node_2_id))

            if up_storage:
                up_storage.connect(_link, from_slot=link['from_slot'])
            else:
                up_node.connect(_link)

            if down_storage:
                _link.connect(down_storage, to_slot=link['to_slot'])
            else:
                _link.connect(down_node)

        self.model = model
Example No. 20
def model(request, solver):
    model = Model(solver=solver)
    return model
Example No. 21
def run_model(basin, network_key):
    # ========================
    # Set up model environment
    # ========================

    root_dir = os.path.join(os.getcwd(), basin)
    bucket = 'openagua-networks'
    model_path = os.path.join(root_dir, 'pywr_model.json')

    # setup_model(root_dir, model_path, bucket=bucket, network_key=network_key)
    os.chdir(root_dir)

    # needed when loading JSON file
    # root_path = 's3://{}/{}/'.format(bucket, network_key)
    root_path = '../data'
    os.environ['ROOT_S3_PATH'] = root_path

    # =========================================
    # Load and register custom model parameters
    # =========================================

    sys.path.insert(0, os.getcwd())
    policy_folder = '_parameters'
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = '.{policy_name}'.format(policy_name=policy_name)
        # package = '.{}'.format(policy_folder)
        import_module(policy_module, policy_folder)

    modules = [('.IFRS', 'policies'), ('.domains', 'domains')]
    for name, package in modules:
        try:
            import_module(name, package)
        except Exception as err:
            print(' [-] WARNING: {} could not be imported from {}'.format(
                name, package))
            print(type(err))
            print(err)

    # ==================
    # Create daily model
    # ==================
    include_monthly = False
    daily_model = Model.load(model_path, path=model_path)
    print('Daily model loaded')
    daily_model.setup()
    print('Daily model setup completed')
    # =====================
    # Create planning model
    # =====================

    # create and initialize monthly model
    if include_monthly:
        monthly_model = create_planning_model(model_path)

    timesteps = range(len(daily_model.timestepper))

    # run model
    # note that tqdm + step adds a little bit of overhead.
    # use model.run() instead if seeing progress is not important

    for step in tqdm(timesteps, ncols=80):

        today = daily_model.timestepper.current if step else daily_model.timestepper.start

        try:

            # Step 1: run planning model & update daily model

            if include_monthly and today.day == 1:

                # Step 1a: update planning model

                # ...update time steps
                monthly_model.timestepper.start = today
                monthly_model.timestepper.end = today

                # ...update initial conditions (not needed for the first step)
                if step > 0:
                    for node in monthly_model.nodes:
                        if node['type'] != 'Storage':
                            continue

                        node['initial_volume'] = daily_model.node[
                            node['name']].volume

                # Step 1b: run planning model
                print('Running planning model')
                monthly_model.step()  # redundant with run, since only one timestep

                # Step 1c: update daily model with planning model results
                print('Updating daily model')

            # Step 3: run daily model
            daily_model.step()
        except Exception as err:
            print('\nFailed at step {}'.format(today))
            print(err)
            # continue
            break

    # save results to CSV

    results = daily_model.to_dataframe()
    results_path = './results'
    results.columns = results.columns.droplevel(1)
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    results.to_csv(os.path.join(results_path, 'system.csv'))
    attributes = {}
    for c in results.columns:
        attribute = c.split('/')[-1]
        if attribute in attributes:
            attributes[attribute].append(c)
        else:
            attributes[attribute] = [c]
    for attribute in attributes:
        path = os.path.join(results_path, '{}.csv'.format(attribute))
        df = results[attributes[attribute]]
        df.columns = [c.split('/')[-2] for c in df.columns]
        df.to_csv(path)

        if attribute == 'flow':
            df2 = df[[c for c in df.columns if c[-3:] == ' PH']]
            path2 = os.path.join(results_path, 'powerhouse flow.csv')
            df2.to_csv(path2)
Example No. 22
def model():
    return Model()
Example No. 23
def model(solver):
    return Model(solver=solver)
Example No. 24
        sc_index = self.model.scenarios.multiindex

        return pandas.DataFrame(data=np.array(self._data), index=index, columns=sc_index)

    @classmethod
    def load(cls, model, data):
        node = model._get_node_from_ref(model, data.pop("node"))
        if "storage_node" in data:
            storage_node = model._get_node_from_ref(model, data.pop("storage_node"))
        else:
            storage_node = None

        return cls(model, node, storage_node=storage_node, **data)
HydroPowerRecorder.register()


if __name__ == '__main__':

    m = Model.load('hydropower_example.json')
    stats = m.run()
    print(stats)

    print(m.recorders["turbine1_energy"].values())

    df = m.to_dataframe()
    print(df.head())

    from matplotlib import pyplot as plt
    df.plot(subplots=True)
    plt.show()
Example No. 25
def test_keating_aquifer(solver):
    model = Model(
        solver=solver,
        start=pandas.to_datetime('2016-01-01'),
        end=pandas.to_datetime('2016-01-01'),
    )

    aqfer = KeatingAquifer(
        model,
        'keating',
        num_streams,
        num_additional_inputs,
        stream_flow_levels,
        transmissivity,
        coefficient,
        levels,
        area=area,
        storativity=storativity,
    )

    catchment = Input(model, 'catchment', max_flow=0)
    stream = Output(model, 'stream', max_flow=np.inf, cost=0)
    abstraction = Output(model, 'abstraction', max_flow=15, cost=-999)

    catchment.connect(aqfer)
    aqfer.connect(stream, from_slot=0)
    aqfer.connect(abstraction, from_slot=1)

    rec_level = NumpyArrayLevelRecorder(model, aqfer)
    rec_volume = NumpyArrayStorageRecorder(model, aqfer)
    rec_stream = NumpyArrayNodeRecorder(model, stream)
    rec_abstraction = NumpyArrayNodeRecorder(model, abstraction)

    model.check()

    assert(len(aqfer.inputs) == (num_streams + num_additional_inputs))

    for initial_level in (50, 100, 110, 150):
        # set the initial aquifer level and therefore the initial volume
        aqfer.initial_level = initial_level
        initial_volume = aqfer.initial_volume
        assert(initial_volume == (area * storativity[0] * initial_level * 0.001))
        # run the model (for one timestep only)
        model.run()
        # manually calculate keating streamflow and check model flows are OK
        Qp = 2 * transmissivity[0] * max(initial_level - stream_flow_levels[0][0], 0) * coefficient
        Qe = 2 * transmissivity[1] * max(initial_level - stream_flow_levels[0][1], 0) * coefficient
        delta_storage = initial_volume - rec_volume.data[0, 0]
        abs_flow = rec_abstraction.data[0, 0]
        stream_flow = rec_stream.data[0, 0]
        assert(delta_storage == (stream_flow + abs_flow))
        assert(stream_flow == (Qp+Qe))

    A_VERY_LARGE_NUMBER = 9999999999999
    model.timestepper.end = pandas.to_datetime('2016-01-02')

    # fill the aquifer completely
    # there is no spill for the storage so it should find no feasible solution
    with pytest.raises(RuntimeError):
        catchment.max_flow = A_VERY_LARGE_NUMBER
        catchment.min_flow = A_VERY_LARGE_NUMBER
        model.run()

    # drain the aquifer completely
    catchment.min_flow = 0
    catchment.max_flow = 0
    abstraction.max_flow = A_VERY_LARGE_NUMBER
    model.run()
    assert(rec_volume.data[1, 0] == 0)
    abs_flow = rec_abstraction.data[1, 0]
    stream_flow = rec_stream.data[1, 0]
    assert(stream_flow == 0)
    assert(abs_flow == 0)
Example No. 26
results_folder = 'C:/Users/Josh Soper/Documents/Mexico/Summer 2018/Analysis'

# ----------------- CREATE MODEL -----------------------

# TODO: End time needs to be last day of final model year, not first day of next year
for scenarios in data['network']['scenarios']:
    if scenarios['name'] == option:
        meta = scenarios
        start = datetime.strptime(meta['start_time'],
                                  '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')
        # end = datetime.strptime(meta['end_time'], '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')
        end = '2015-12-31'
        if meta['time_step'] == 'day':
            ts = 1

model = Model(start=start, end=end, timestep=ts, solver='glpk')

# -----------------GENERATE NETWORK STRUCTURE -----------------------

# create node dictionaries by name and id

node_lookup_name = {}
node_lookup_id = {}

for node in nodes_list:
    types = [
        t for t in node['types']
        if abs(t['template_id']) == abs(template['id'])
    ]
    node_lookup_name[node.get('name')] = {
        'type': types[0]['name'] if types else None,
Example No. 27
    def create_model(self, network, template, constants=None, variables=None, policies=None, initial_volumes=None):

        model = Model(solver='glpk-edge')

        # -----------------GENERATE NETWORK STRUCTURE -----------------------
        # ...and add initial parameter values

        output_ids = []
        input_ids = []

        non_storage_types = list(output_types.keys()) + list(input_types.keys()) + list(node_types.keys())

        def add_value_to_node(res_attr_idx, type_name, attr_name):
            pywr_param = None
            constant = constants.pop(res_attr_idx, None)
            if constant:
                pywr_param = ConstantParameter(model, constant)
            elif variables:
                variable = variables.pop(res_attr_idx, None)
                if variable:
                    values = list(variable['values'].values())
                    pywr_param = ArrayIndexedParameter(model, values)
            elif policies:
                policy = policies.pop(res_attr_idx, None)
                if policy:
                    pywr_param = self.create_register_policy(policy)

            if pywr_param is not None:
                type_name = type_name.lower()
                attr_name = attr_name.lower()
                (resource_type, resource_id, attr_id) = res_attr_idx
                try:
                    self.update_param(resource_type, resource_id, type_name, attr_name, value=pywr_param)
                except:
                    raise

        # create node dictionaries by name and id
        node_lookup = {}
        for node in network['nodes']:
            name = '{} (node)'.format(node['name'])
            types = [t for t in node['types'] if t['template_id'] == template['id']]
            if not types:
                continue
            if len(types) > 1:
                msg = "Type is ambiguous for {}. Please remove extra types.".format(name)
                raise Exception(msg)
            type_name = types[-1]['name']
            node_lookup[node.get("id")] = {
                'type': type_name,
                'name': name,
                'connect_in': 0,
                'connect_out': 0,
                'attributes': node['attributes']
            }
            if type_name in output_types:
                output_ids.append(node['id'])
            elif type_name in input_types:
                input_ids.append(node['id'])

        # create link lookups and pywr links
        link_lookup = {}
        for link in network['links']:
            residx = ('link', link['id'])
            name = '{} (link)'.format(link['name'])
            types = [t for t in link['types'] if t['template_id'] == template['id']]
            if not types:
                continue
            type_name = types[-1]['name']
            link_id = link['id']
            node_1_id = link['node_1_id']
            node_2_id = link['node_2_id']
            try:
                node_lookup[node_1_id]['connect_out'] += 1
            except:
                raise Exception('Topology error for {}. Upstream node ID {} not found.'.format(name, node_1_id))
            try:
                node_lookup[node_2_id]['connect_in'] += 1
            except:
                raise Exception('Topology error for {}. Downstream node ID {} not found.'.format(name, node_2_id))
            link_lookup[link_id] = {
                'name': name,
                'type': type_name,
                'node_1_id': node_1_id,
                'node_2_id': node_2_id,
                'from_slot': node_lookup[node_1_id]['connect_out'] - 1,
                'to_slot': node_lookup[node_2_id]['connect_in'] - 1,
            }

            if node_1_id in output_ids:
                node = node_lookup[node_1_id]
                msg = 'Topology error: Output {} appears to be upstream of {}'.format(node['name'], name)
                raise Exception(msg)
            elif node_2_id in input_ids:
                node = node_lookup[node_2_id]
                msg = 'Topology error: Input {} appears to be downstream of {}'.format(node['name'], name)
                raise Exception(msg)

            LinkType = link_types.get(type_name, Link)

            self.non_storage[residx] = LinkType(model, name=name)

            for ra in link['attributes']:
                res_attr_idx = ('link', link['id'], ra['attr_id'])
                add_value_to_node(res_attr_idx, type_name, ra['attr_name'])

        # Q/C

        # remove unconnected links
        d = []
        for link_id, link in link_lookup.items():
            if link['node_1_id'] not in node_lookup or link['node_2_id'] not in node_lookup:
                d.append(link_id)
        for link_id in d:
            del link_lookup[link_id]

        connected_nodes = []
        for link_id, link in link_lookup.items():
            connected_nodes.append(link['node_1_id'])
            connected_nodes.append(link['node_2_id'])

        # remove unconnected nodes
        d = []
        for node_id in node_lookup:
            if node_id not in connected_nodes:
                d.append(node_id)
        for node_id in d:
            del node_lookup[node_id]

        # create the pywr node objects (storage and non-storage), keyed by node id

        for node_id, node in node_lookup.items():
            residx = ('node', node_id)
            type_name = node['type']
            name = node['name']
            connect_in = node.get('connect_in', 0)
            connect_out = node.get('connect_out', 0)
            if (type_name in storage_types or connect_out > 1) and type_name not in non_storage_types:
                self.storage[node_id] = Storage(
                    model,
                    name=name,
                    num_outputs=connect_in,
                    num_inputs=connect_out,
                    initial_volume=initial_volumes.get(node_id, 0.0) if initial_volumes is not None else 0.0
                )
                if type_name not in storage_types:
                    self.storage[node_id].max_volume = 0.0
            else:

                if type_name in input_types:
                    NodeType = input_types[type_name]
                elif type_name in output_types:
                    NodeType = output_types[type_name]
                elif type_name in node_types:
                    NodeType = node_types[type_name]
                elif connect_in > 1:
                    NodeType = River
                else:
                    NodeType = Link

                self.non_storage[residx] = NodeType(model, name=name)

            for ra in node['attributes']:
                res_attr_idx = ('node', node_id, ra['attr_id'])
                try:
                    add_value_to_node(res_attr_idx, type_name, ra['attr_name'])
                except Exception as err:
                    print(err)
                    raise

        # create network connections
        # must assign connection slots for storage
        # TODO: change looping variable notation
        for link_id, link in link_lookup.items():
            node_1_id = link['node_1_id']
            node_2_id = link['node_2_id']

            _link = self.non_storage[('link', link_id)]
            up_storage = self.storage.get(node_1_id)
            up_node = self.non_storage.get(('node', node_1_id))
            down_storage = self.storage.get(node_2_id)
            down_node = self.non_storage.get(('node', node_2_id))

            if up_storage:
                up_storage.connect(_link, from_slot=link['from_slot'])
            else:
                up_node.connect(_link)

            if down_storage:
                _link.connect(down_storage, to_slot=link['to_slot'])
            else:
                _link.connect(down_node)

        self.model = model
Example No. 28
from pywr.core import Model


if __name__ == '__main__':
    import sys

    m = Model.load(sys.argv[1])
    m.run()

    for node in m.nodes:
        print(node, node.flow)

Example No. 29
def _run_model(climate,
               basin,
               start=None,
               end=None,
               years=None,
               run_name="default",
               include_planning=False,
               simplify=True,
               use_multiprocessing=False,
               debug=False,
               planning_months=12,
               scenarios=None,
               show_progress=False,
               data_path=None,
               file_suffix=None):
    logger.info("Running \"{}\" scenario for {} basin, {} climate".format(
        run_name, basin.upper(), climate.upper()))

    climate_set, climate_scenario = climate.split('/')

    if debug:
        from sierra.utilities import check_nan
        basin_path = os.path.join(data_path,
                                  basin.replace('_', ' ').title() + ' River')
        total_nan = check_nan(basin_path, climate)

        try:
            assert (total_nan == 0)
            logger.info('No NaNs found in data files')
        except AssertionError:
            logger.warning('{} NaNs found in data files.'.format(total_nan))

    # if debug:
    #     from sierra import create_schematic

    # Some adjustments
    if basin in ['merced', 'tuolumne']:
        include_planning = False

    # Set up dates

    if start is None or end is None:
        # TODO: get start and end years from outside, not hard coded
        if climate_scenario == 'Livneh':
            start_year = 1950
            end_year = 2012
        elif climate_set == 'gcms':
            start_year = 2030
            end_year = 2060
        elif climate_set == 'sequences':
            # name format is N01_S01, where N01 refers to the number of drought years
            # the total number of years is 1 + N + 2 (1 year at the end as a buffer)
            N = int(climate_scenario.split('Y')[1].split('_')[0])
            start_year = 2000
            end_year = start_year + N
        else:
            raise Exception("Climate scenario unknown")
        start = '{}-10-01'.format(start_year)
        end = '{}-09-30'.format(end_year)

    # ========================
    # Set up model environment
    # ========================

    here = os.path.dirname(os.path.realpath(__file__))
    os.chdir(here)

    root_dir = os.path.join(here, 'models', basin)
    temp_dir = os.path.join(root_dir, 'temp')
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)

    bucket = 'openagua-networks'
    base_filename = 'pywr_model.json'
    model_filename_base = 'pywr_model_{}'.format(climate_scenario)
    model_filename = model_filename_base + '.json'

    base_path = os.path.join(root_dir, base_filename)
    model_path = os.path.join(temp_dir, model_filename)

    # first order of business: update file paths in json file
    with open(base_path) as f:
        base_model = json.load(f)

    # update model with scenarios, if any
    def update_model(scenario_path):
        if os.path.exists(scenario_path):
            with open(scenario_path) as f:
                scenario_model = json.load(f)
            for key, scenario_items in scenario_model.items():
                if key in base_model:
                    if type(scenario_items) == dict:
                        base_model[key].update(scenario_items)
                    else:
                        base_model[key].extend(scenario_items)
                elif key in ['scenarios', 'nodes']:
                    items = {
                        item['name']: item
                        for item in base_model.get(key, [])
                    }
                    new_items = {item['name']: item for item in scenario_items}
                    items.update(new_items)
                    base_model[key] = list(items.values())
        else:
            raise Exception(
                'Scenario path {} does not exist.'.format(scenario_path))

    if scenarios is not None:
        for s in scenarios:
            # update from scenarios folder
            scenario_path = os.path.join(data_path, 'metadata',
                                         'scenario_definitions',
                                         '{}.json'.format(s))
            update_model(scenario_path)

    new_model_parts = {}
    for model_part in ['tables', 'parameters']:
        if model_part not in base_model:
            continue
        new_model_parts[model_part] = {}
        for pname, param in base_model[model_part].items():
            if 'observed' in pname.lower():
                continue
            url = param.get('url')
            if url:
                if data_path:
                    url = url.replace('../data', data_path)
                url = url.replace('historical/Livneh', climate)
                param['url'] = url
            new_model_parts[model_part][pname] = param

    base_model.update(new_model_parts)
    base_model['timestepper']['start'] = start
    base_model['timestepper']['end'] = end
    with open(model_path, 'w') as f:
        json.dump(base_model, f, indent=4)

    # =========================================
    # Load and register global model parameters
    # =========================================

    # sys.path.insert(0, os.getcwd())
    policy_folder = 'parameters'
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = 'sierra.parameters.{policy_name}'.format(
            policy_name=policy_name)
        import_module(policy_module, policy_folder)

    # =========================================
    # Load and register custom model parameters
    # =========================================

    sys.path.insert(0, os.getcwd())
    policy_folder = os.path.join('models', basin, '_parameters')
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = 'models.{basin}._parameters.{policy_name}'.format(
            basin=basin, policy_name=policy_name)
        import_module(policy_module, policy_folder)

    # import domains
    import_module('.domains', 'domains')
    if debug:
        logger.info("Domains imported")

    # import custom policies
    try:
        import_module('{}.policies'.format(basin))
    except:
        pass

    # =========================================
    # Load and register custom model recorders
    # =========================================

    from recorders.hydropower import HydropowerEnergyRecorder
    HydropowerEnergyRecorder.register()

    # prepare the model files
    if simplify or include_planning:
        with open(model_path, 'r') as f:
            model_json = json.load(f)

    if simplify:
        # simplify model
        simplified_filename = model_filename_base + '_simplified.json'
        simplified_model_path = os.path.join(temp_dir, simplified_filename)

        model_json = simplify_network(model_json,
                                      basin=basin,
                                      climate=climate,
                                      delete_gauges=True,
                                      delete_observed=True)
        with open(simplified_model_path, 'w') as f:
            f.write(json.dumps(model_json, indent=4))

        if debug:
            try:
                create_schematic(basin, 'simplified')
            except FileNotFoundError as err:
                logger.warning('Could not create schematic from Livneh model.')
            except ExecutableNotFound:
                logger.warning(
                    'Could not create daily schematic from Livneh model.')

        model_path = simplified_model_path

    # Area for testing monthly model
    save_results = debug
    planning_model = None
    df_planning = None

    if include_planning:

        logger.info('Creating planning model (this may take a minute or two)')

        # create filenames, etc.
        monthly_filename = model_filename_base + '_monthly.json'
        planning_model_path = os.path.join(temp_dir, monthly_filename)

        prepare_planning_model(model_json,
                               basin,
                               climate,
                               planning_model_path,
                               steps=planning_months,
                               debug=debug,
                               remove_rim_dams=True)

        if debug:
            try:
                create_schematic(basin, 'monthly')
            except ExecutableNotFound:
                logger.warning(
                    'Graphviz executable not found. Monthly schematic not created.'
                )

        # create pywr model
        try:
            planning_model = Model.load(planning_model_path,
                                        path=planning_model_path)
        except Exception as err:
            logger.error("Planning model failed to load")
            # logger.error(err)
            raise

        # set model mode to planning
        planning_model.mode = 'planning'
        planning_model.blocks = {}

        # set time steps
        # start = planning_model.timestepper.start
        end = planning_model.timestepper.end
        end -= relativedelta(months=planning_months)

        planning_model.setup()

        # if debug == 'm':
        #     test_planning_model(planning_model, months=planning_months, save_results=save_results)
        #     return

    # ==================
    # Create daily model
    # ==================
    logger.info('Loading daily model')
    try:
        model = Model.load(model_path, path=model_path)
    except Exception as err:
        logger.error(err)
        raise

    model.blocks = {}
    model.setup()

    # run model
    # note that tqdm + step adds a little bit of overhead.
    # use model.run() instead if seeing progress is not important

    # IMPORTANT: The following can be embedded into the scheduling model via
    # the 'before' and 'after' functions.
    days_to_omit = 0
    if include_planning:
        end = model.timestepper.end
        new_end = end + relativedelta(months=-planning_months)
        model.timestepper.end = new_end
    step = -1
    now = datetime.now()
    monthly_seconds = 0
    model.mode = 'scheduling'
    model.planning = None
    if include_planning:
        model.planning = planning_model
        model.planning.scheduling = model

    disable_progress_bar = not debug and not show_progress
    n_timesteps = len(model.timestepper.datetime_index)
    for date in tqdm(model.timestepper.datetime_index,
                     ncols=60,
                     disable=disable_progress_bar):
        step += 1
        if disable_progress_bar and date.month == 9 and date.day == 30:
            logger.info('{}% complete (finished year {})'.format(
                round(step / n_timesteps * 100), date.year))
        try:

            # Step 1: run planning model
            if include_planning and date.day == 1:

                # update planning model
                model.planning.reset(start=date.to_timestamp())

                # run planning model (intial conditions are set within the model step)
                model.planning.step()

                if debug and save_results:
                    df_month = get_planning_dataframe(model.planning)
                    if df_planning is None:
                        df_planning = df_month
                    else:
                        df_planning = pd.concat([df_planning, df_month])

            # Step 2: run daily model
            model.step()
        except Exception as err:
            traceback.print_exc()
            logger.error('Failed at step {}'.format(date))
            raise

    if debug:
        total_seconds = (datetime.now() - now).total_seconds()
        logger.debug('Total run: {} seconds'.format(total_seconds))
        monthly_pct = monthly_seconds / total_seconds * 100
        logger.debug('Monthly overhead: {} seconds ({:02}% of total)'.format(
            monthly_seconds, monthly_pct))

    # save results to CSV
    # results_path = os.path.join('./results', run_name, basin, climate)
    if debug:
        base_results_path = '../results'
    else:
        base_results_path = os.environ.get('SIERRA_RESULTS_PATH', '../results')

    suffix = ' - {}'.format(file_suffix) if file_suffix else ''
    run_folder = run_name + suffix
    results_path = os.path.join(base_results_path, run_folder, basin, climate)
    save_model_results(model, results_path, file_suffix, debug=debug)
Example No. 30
def model(solver):
    model = Model(solver=solver)
    model.timestepper.start = Timestamp("2016-01-01")
    model.timestepper.end = Timestamp("2016-01-02")
    return model
Example No. 31
    A.connect(Z)
    B.connect(Z)

    agg = AggregatedNode(model, "agg", [A, B])
    agg.flow_weights = flow_weights
    agg.max_flow = 30.0

    model.run()

    assert_allclose(agg.flow, expected_agg_flow)
    assert_allclose(A.flow, expected_A_flow)
    assert_allclose(B.flow, expected_B_flow)


@pytest.mark.skipif(Model().solver.name == "lpsolve",
                    reason="Not supported in lpsolve.")
def test_aggregated_node_max_flow_parameter(model):
    """Nodes constrained by the max_flow of their AggregatedNode using a Parameter """
    A = Input(model, "A", max_flow=20.0, cost=1)
    B = Input(model, "B", max_flow=20.0, cost=2)
    Z = Output(model, "Z", max_flow=100, cost=-10)

    A.connect(Z)
    B.connect(Z)

    agg = AggregatedNode(model, "agg", [A, B])
    agg.max_flow = ConstantParameter(model, 30.0)

    model.run()

    assert_allclose(agg.flow, 30.0)