Example #1
    def test_demand_saving_with_indexed_array(self, solver, tmpdir):
        """Test recording various items from demand saving example.

        This time the TablesRecorder is defined in JSON.
        """
        import os
        import json
        import tables
        filename = "demand_saving_with_tables_recorder.json"
        # This is a bit horrible, but need to edit the JSON dynamically
        # so that the output.h5 is written in the temporary directory
        path = os.path.join(os.path.dirname(__file__), 'models')
        with open(os.path.join(path, filename), 'r') as f:
            data = f.read()
        data = json.loads(data)

        # Make an absolute, but temporary, path for the recorder
        url = data['recorders']['database']['url']
        data['recorders']['database']['url'] = str(tmpdir.join(url))

        model = Model.load(data, path=path, solver=solver)

        model.timestepper.end = "2016-01-31"
        model.check()

        # run model
        model.run()

        # run model again (to test reset behaviour)
        model.run()
        max_volume = model.nodes["Reservoir"].max_volume

        h5file = tmpdir.join('output.h5')
        with tables.open_file(str(h5file), 'r') as h5f:
            assert model.metadata['title'] == h5f.title
            # Check metadata on root node
            assert h5f.root._v_attrs.author == 'pytest'
            assert h5f.root._v_attrs.run_number == 0

            rec_demand = h5f.get_node('/outputs/demand').read()
            rec_storage = h5f.get_node('/storage/reservoir').read()

            # model starts with no demand saving
            demand_baseline = 50.0
            demand_factor = 0.9  # jan-apr
            demand_saving = 1.0
            assert_allclose(rec_demand[0, 0], demand_baseline * demand_factor * demand_saving)

            # first control curve breached
            demand_saving = 0.95
            assert (rec_storage[4, 0] < (0.8 * max_volume))
            assert_allclose(rec_demand[5, 0], demand_baseline * demand_factor * demand_saving)

            # second control curve breached
            demand_saving = 0.5
            assert (rec_storage[11, 0] < (0.5 * max_volume))
            assert_allclose(rec_demand[12, 0], demand_baseline * demand_factor * demand_saving)
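For reference, the recorder section of demand_saving_with_tables_recorder.json probably looks something like the sketch below, written here as the equivalent Python dict. This is inferred from the HDF5 paths and attributes the test asserts, not copied from the actual file:

# Hypothetical sketch of the "database" recorder the test rewrites;
# inferred from the assertions above, not from the real JSON file.
recorders_sketch = {
    "database": {
        "type": "tables",    # a TablesRecorder defined in JSON
        "url": "output.h5",  # replaced with an absolute tmpdir path by the test
        "nodes": [
            ["/outputs/demand", "Demand"],        # read back as rec_demand
            ["/storage/reservoir", "Reservoir"],  # read back as rec_storage
        ],
        "metadata": {"author": "pytest", "run_number": 0},
    },
}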
Example #2
    def load_model(self,
                   root_dir,
                   model_path,
                   bucket=None,
                   network_key=None,
                   check_graph=False):

        os.chdir(root_dir)

        # needed when loading JSON file
        root_path = 's3://{}/{}/'.format(bucket, network_key)
        os.environ['ROOT_S3_PATH'] = root_path

        # Step 1: Load and register policies
        sys.path.insert(0, os.getcwd())
        policy_folder = '_parameters'
        for filename in os.listdir(policy_folder):
            if '__init__' in filename:
                continue
            policy_name = os.path.splitext(filename)[0]
            policy_module = '.{policy_name}'.format(policy_name=policy_name)
            # package = '.{}'.format(policy_folder)
            import_module(policy_module, policy_folder)

        # from domains import Hydropower, InstreamFlowRequirement

        modules = [('.IFRS', 'policies'), ('.domains', 'domains')]
        for name, package in modules:
            try:
                import_module(name, package)
            except Exception as err:
                logger.warning("""{name} could not be imported from {package}
                Here's the real error:
                {error_type}
                {err}
                """.format(name=name,
                           package=package,
                           error_type=type(err),
                           err=err))

        # Step 2: Load and run model
        self.model = Model.load(model_path, path=model_path)

        # check network graph
        if check_graph:
            try:
                self.model.check_graph()
            except Exception as err:
                raise Exception('Pywr error: {}'.format(err))

        self.setup()

        return
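A call might look like the following; the host object and all argument values are placeholders, not taken from the example:

# Hypothetical usage of load_model(); 'runner' stands in for whatever
# class defines the method, and the paths/keys are illustrative only.
runner.load_model(
    root_dir='/projects/merced',
    model_path='pywr_model.json',
    bucket='openagua-networks',
    network_key='my-network-key',
    check_graph=True,
)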
Example #3
def create_planning_model(model_path):
    root, filename = os.path.split(model_path)
    base, ext = os.path.splitext(filename)
    new_filename = '{}_monthly'.format(base) + ext
    monthly_model_path = os.path.join(root, new_filename)
    prepare_planning_model(model_path, monthly_model_path)
    # monthly_model = load_model(root_dir, monthly_model_path, bucket=bucket, network_key=network_key, mode='planning')
    monthly_model = Model.load(monthly_model_path, path=monthly_model_path)
    setattr(monthly_model, 'mode', 'planning')
    monthly_model.setup()
    print('Monthly model setup complete')
    return monthly_model
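Given a daily model at, say, models/merced/pywr_model.json, this writes pywr_model_monthly.json alongside it and returns the loaded, set-up planning model. Usage is roughly (the path is a placeholder):

# Illustrative only: the input path is a placeholder.
monthly_model = create_planning_model('models/merced/pywr_model.json')
monthly_model.step()  # planning models here are advanced one monthly step at a time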
Example #4
def test_loading_csv_recorder_from_json(solver, tmpdir):
    """
    Test the CSV Recorder which is loaded from json
    """

    filename = 'csv_recorder.json'

    # This is a bit horrible, but need to edit the JSON dynamically
    # so that the output.h5 is written in the temporary directory
    path = os.path.join(os.path.dirname(__file__), 'models')
    with open(os.path.join(path, filename), 'r') as f:
        data = f.read()
    data = json.loads(data)

    # Make an absolute, but temporary, path for the recorder
    url = data['recorders']['model_out']['url']
    data['recorders']['model_out']['url'] = str(tmpdir.join(url))

    model = Model.load(data, path=path, solver=solver)

    csvfile = tmpdir.join('output.csv')
    model.run()
    import csv
    with open(str(csvfile), 'r') as fh:
        dialect = csv.Sniffer().sniff(fh.read(1024))
        fh.seek(0)
        reader = csv.reader(fh, dialect)
        for irow, row in enumerate(reader):
            if irow == 0:
                expected = ['Datetime', 'inpt', 'otpt']
                actual = row
            else:
                dt = model.timestepper.start + (irow - 1) * model.timestepper.delta
                expected = [dt.isoformat()]
                actual = [row[0]]
                # every flow value should equal 10.0 to within floating-point tolerance
                assert np.all(np.abs(np.array([float(v) for v in row[1:]]) - 10.0) < 1e-12)
            assert expected == actual
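The recorder section of csv_recorder.json presumably looks something like the sketch below (again written as a Python dict); the header names 'inpt' and 'otpt' come from the model's node names, and everything else is inferred from the assertions:

# Hypothetical sketch of the "model_out" recorder; inferred from the
# columns and values the test asserts, not from the real JSON file.
recorders_sketch = {
    "model_out": {
        "type": "csv",        # a CSVRecorder defined in JSON
        "url": "output.csv",  # replaced with an absolute tmpdir path by the test
    },
}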
Example #5
        sc_index = self.model.scenarios.multiindex

        return pandas.DataFrame(data=np.array(self._data), index=index, columns=sc_index)

    @classmethod
    def load(cls, model, data):
        node = model._get_node_from_ref(model, data.pop("node"))
        if "storage_node" in data:
            storage_node = model._get_node_from_ref(model, data.pop("storage_node"))
        else:
            storage_node = None

        return cls(model, node, storage_node=storage_node, **data)
HydroPowerRecorder.register()


if __name__ == '__main__':

    m = Model.load('hydropower_example.json')
    stats = m.run()
    print(stats)

    print(m.recorders["turbine1_energy"].values())

    df = m.to_dataframe()
    print(df.head())

    from matplotlib import pyplot as plt
    df.plot(subplots=True)
    plt.show()
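Given the load() classmethod above, the recorder can be declared in the model JSON by referencing nodes by name. A sketch (as a Python dict; the type string and node names are illustrative):

# Hypothetical JSON declaration matching HydroPowerRecorder.load() above.
recorder_sketch = {
    "turbine1_energy": {
        "type": "hydropowerrecorder",  # the registered recorder type
        "node": "turbine1",            # popped and resolved via _get_node_from_ref
        "storage_node": "reservoir1",  # optional, per the branch in load()
    },
}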
Example #6
from pywr.core import Model


if __name__ == '__main__':
    import sys

    m = Model.load(sys.argv[1])
    m.run()

    for node in m.nodes:
        print(node, node.flow)
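Usage is simply (the script filename is a placeholder):

    python run.py model.json

After the run, each node and its final-timestep flow array are printed.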

Example #7
def _run_model(climate,
               basin,
               start=None,
               end=None,
               years=None,
               run_name="default",
               include_planning=False,
               simplify=True,
               use_multiprocessing=False,
               debug=False,
               planning_months=12,
               scenarios=None,
               show_progress=False,
               data_path=None,
               file_suffix=None):
    logger.info("Running \"{}\" scenario for {} basin, {} climate".format(
        run_name, basin.upper(), climate.upper()))

    climate_set, climate_scenario = climate.split('/')

    if debug:
        from sierra.utilities import check_nan
        basin_path = os.path.join(data_path,
                                  basin.replace('_', ' ').title() + ' River')
        total_nan = check_nan(basin_path, climate)

        try:
            assert (total_nan == 0)
            logger.info('No NaNs found in data files')
        except AssertionError:
            logger.warning('{} NaNs found in data files.'.format(total_nan))

    # if debug:
    #     from sierra import create_schematic

    # Some adjustments
    if basin in ['merced', 'tuolumne']:
        include_planning = False

    # Set up dates

    if start is None or end is None:
        # TODO: get start and end years from outside, not hard coded
        if climate_scenario == 'Livneh':
            start_year = 1950
            end_year = 2012
        elif climate_set == 'gcms':
            start_year = 2030
            end_year = 2060
        elif climate_set == 'sequences':
            # scenario names encode the number of drought years after 'Y',
            # e.g. 'Y05_S01'; N sets the length of the run in water years
            N = int(climate_scenario.split('Y')[1].split('_')[0])
            start_year = 2000
            end_year = start_year + N
        else:
            raise Exception("Climate scenario unknown")
        start = '{}-10-01'.format(start_year)
        end = '{}-09-30'.format(end_year)

    # ========================
    # Set up model environment
    # ========================

    here = os.path.dirname(os.path.realpath(__file__))
    os.chdir(here)

    root_dir = os.path.join(here, 'models', basin)
    temp_dir = os.path.join(root_dir, 'temp')
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)

    bucket = 'openagua-networks'
    base_filename = 'pywr_model.json'
    model_filename_base = 'pywr_model_{}'.format(climate_scenario)
    model_filename = model_filename_base + '.json'

    base_path = os.path.join(root_dir, base_filename)
    model_path = os.path.join(temp_dir, model_filename)

    # first order of business: update file paths in json file
    with open(base_path) as f:
        base_model = json.load(f)

    # update model with scenarios, if any
    def update_model(scenario_path):
        if os.path.exists(scenario_path):
            with open(scenario_path) as f:
                scenario_model = json.load(f)
            for key, scenario_items in scenario_model.items():
                if key in base_model:
                    if isinstance(scenario_items, dict):
                        base_model[key].update(scenario_items)
                    else:
                        base_model[key].extend(scenario_items)
                elif key in ['scenarios', 'nodes']:
                    items = {
                        item['name']: item
                        for item in base_model.get(key, [])
                    }
                    new_items = {item['name']: item for item in scenario_items}
                    items.update(new_items)
                    base_model[key] = list(items.values())
        else:
            raise Exception(
                'Scenario path {} does not exist.'.format(scenario_path))

    if scenarios is not None:
        for s in scenarios:
            # update from scenarios folder
            scenario_path = os.path.join(data_path, 'metadata',
                                         'scenario_definitions',
                                         '{}.json'.format(s))
            update_model(scenario_path)

    new_model_parts = {}
    for model_part in ['tables', 'parameters']:
        if model_part not in base_model:
            continue
        new_model_parts[model_part] = {}
        for pname, param in base_model[model_part].items():
            if 'observed' in pname.lower():
                continue
            url = param.get('url')
            if url:
                if data_path:
                    url = url.replace('../data', data_path)
                url = url.replace('historical/Livneh', climate)
                param['url'] = url
            new_model_parts[model_part][pname] = param

    base_model.update(new_model_parts)
    base_model['timestepper']['start'] = start
    base_model['timestepper']['end'] = end
    with open(model_path, 'w') as f:
        json.dump(base_model, f, indent=4)

    # =========================================
    # Load and register global model parameters
    # =========================================

    # sys.path.insert(0, os.getcwd())
    policy_folder = 'parameters'
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = 'sierra.parameters.{policy_name}'.format(
            policy_name=policy_name)
        import_module(policy_module, policy_folder)

    # =========================================
    # Load and register custom model parameters
    # =========================================

    sys.path.insert(0, os.getcwd())
    policy_folder = os.path.join('models', basin, '_parameters')
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = 'models.{basin}._parameters.{policy_name}'.format(
            basin=basin, policy_name=policy_name)
        import_module(policy_module, policy_folder)

    # import domains
    import_module('.domains', 'domains')
    if debug:
        logger.info("Domains imported")

    # import custom policies (not every basin defines a policies module)
    try:
        import_module('{}.policies'.format(basin))
    except ImportError:
        pass

    # =========================================
    # Load and register custom model recorders
    # =========================================

    from recorders.hydropower import HydropowerEnergyRecorder
    HydropowerEnergyRecorder.register()

    # prepare the model files
    if simplify or include_planning:
        with open(model_path, 'r') as f:
            model_json = json.load(f)

    if simplify:
        # simplify model
        simplified_filename = model_filename_base + '_simplified.json'
        simplified_model_path = os.path.join(temp_dir, simplified_filename)

        model_json = simplify_network(model_json,
                                      basin=basin,
                                      climate=climate,
                                      delete_gauges=True,
                                      delete_observed=True)
        with open(simplified_model_path, 'w') as f:
            f.write(json.dumps(model_json, indent=4))

        if debug:
            try:
                create_schematic(basin, 'simplified')
            except FileNotFoundError as err:
                logger.warning('Could not create schematic from Livneh model.')
            except ExecutableNotFound:
                logger.warning(
                    'Could not create daily schematic from Livneh model.')

        model_path = simplified_model_path

    # Area for testing monthly model
    save_results = debug
    planning_model = None
    df_planning = None

    if include_planning:

        logger.info('Creating planning model (this may take a minute or two)')

        # create filenames, etc.
        monthly_filename = model_filename_base + '_monthly.json'
        planning_model_path = os.path.join(temp_dir, monthly_filename)

        prepare_planning_model(model_json,
                               basin,
                               climate,
                               planning_model_path,
                               steps=planning_months,
                               debug=debug,
                               remove_rim_dams=True)

        if debug:
            try:
                create_schematic(basin, 'monthly')
            except ExecutableNotFound:
                logger.warning(
                    'Graphviz executable not found. Monthly schematic not created.'
                )

        # create pywr model
        try:
            planning_model = Model.load(planning_model_path,
                                        path=planning_model_path)
        except Exception as err:
            logger.error("Planning model failed to load")
            # logger.error(err)
            raise

        # set model mode to planning
        planning_model.mode = 'planning'
        planning_model.blocks = {}

        # set time steps
        # start = planning_model.timestepper.start
        end = planning_model.timestepper.end
        end -= relativedelta(months=planning_months)

        planning_model.setup()

        # if debug == 'm':
        #     test_planning_model(planning_model, months=planning_months, save_results=save_results)
        #     return

    # ==================
    # Create daily model
    # ==================
    logger.info('Loading daily model')
    try:
        model = Model.load(model_path, path=model_path)
    except Exception as err:
        logger.error(err)
        raise

    model.blocks = {}
    model.setup()

    # run model
    # note that tqdm + step adds a little bit of overhead.
    # use model.run() instead if seeing progress is not important

    # IMPORTANT: The following can be embedded into the scheduling model via
    # the 'before' and 'after' functions.
    days_to_omit = 0
    if include_planning:
        end = model.timestepper.end
        new_end = end + relativedelta(months=-planning_months)
        model.timestepper.end = new_end
    step = -1
    now = datetime.now()
    monthly_seconds = 0
    model.mode = 'scheduling'
    model.planning = None
    if include_planning:
        model.planning = planning_model
        model.planning.scheduling = model

    disable_progress_bar = not debug and not show_progress
    n_timesteps = len(model.timestepper.datetime_index)
    for date in tqdm(model.timestepper.datetime_index,
                     ncols=60,
                     disable=disable_progress_bar):
        step += 1
        if disable_progress_bar and date.month == 9 and date.day == 30:
            logger.info('{}% complete (finished year {})'.format(
                round(step / n_timesteps * 100), date.year))
        try:

            # Step 1: run planning model
            if include_planning and date.day == 1:

                # update planning model
                model.planning.reset(start=date.to_timestamp())

                # run planning model (initial conditions are set within the model step)
                # time the planning step so monthly_seconds reflects its overhead
                monthly_start = datetime.now()
                model.planning.step()
                monthly_seconds += (datetime.now() - monthly_start).total_seconds()

                if debug and save_results:
                    df_month = get_planning_dataframe(model.planning)
                    if df_planning is None:
                        df_planning = df_month
                    else:
                        df_planning = pd.concat([df_planning, df_month])

            # Step 2: run daily model
            model.step()
        except Exception as err:
            traceback.print_exc()
            logger.error('Failed at step {}'.format(date))
            raise

    if debug:
        total_seconds = (datetime.now() - now).total_seconds()
        logger.debug('Total run: {} seconds'.format(total_seconds))
        monthly_pct = monthly_seconds / total_seconds * 100
        logger.debug('Monthly overhead: {:.1f} seconds ({:.1f}% of total)'.format(
            monthly_seconds, monthly_pct))

    # save results to CSV
    # results_path = os.path.join('./results', run_name, basin, climate)
    if debug:
        base_results_path = '../results'
    else:
        base_results_path = os.environ.get('SIERRA_RESULTS_PATH', '../results')

    suffix = ' - {}'.format(file_suffix) if file_suffix else ''
    run_folder = run_name + suffix
    results_path = os.path.join(base_results_path, run_folder, basin, climate)
    save_model_results(model, results_path, file_suffix, debug=debug)
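An invocation might look like this; every argument value below is a placeholder, since the real entry point presumably builds them from command-line options:

# Hypothetical call to _run_model(); climate, basin and paths are illustrative.
_run_model(
    climate='gcms/CanESM2_rcp85',
    basin='stanislaus',
    run_name='baseline',
    include_planning=True,
    planning_months=12,
    data_path='../data',
)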
Example #8
def run_model(basin, network_key):
    # ========================
    # Set up model environment
    # ========================

    root_dir = os.path.join(os.getcwd(), basin)
    bucket = 'openagua-networks'
    model_path = os.path.join(root_dir, 'pywr_model.json')

    # setup_model(root_dir, model_path, bucket=bucket, network_key=network_key)
    os.chdir(root_dir)

    # needed when loading JSON file
    # root_path = 's3://{}/{}/'.format(bucket, network_key)
    root_path = '../data'
    os.environ['ROOT_S3_PATH'] = root_path

    # =========================================
    # Load and register custom model parameters
    # =========================================

    sys.path.insert(0, os.getcwd())
    policy_folder = '_parameters'
    for filename in os.listdir(policy_folder):
        if '__init__' in filename:
            continue
        policy_name = os.path.splitext(filename)[0]
        policy_module = '.{policy_name}'.format(policy_name=policy_name)
        # package = '.{}'.format(policy_folder)
        import_module(policy_module, policy_folder)

    modules = [('.IFRS', 'policies'), ('.domains', 'domains')]
    for name, package in modules:
        try:
            import_module(name, package)
        except Exception as err:
            print(' [-] WARNING: {} could not be imported from {}'.format(
                name, package))
            print(type(err))
            print(err)

    # ==================
    # Create daily model
    # ==================
    include_monthly = False
    daily_model = Model.load(model_path, path=model_path)
    print('Daily model loaded')
    daily_model.setup()
    print('Daily model setup completed')
    # =====================
    # Create planning model
    # =====================

    # create and initialize monthly model
    if include_monthly:
        monthly_model = create_planning_model(model_path)

    timesteps = range(len(daily_model.timestepper))

    # run model
    # note that tqdm + step adds a little bit of overhead.
    # use model.run() instead if seeing progress is not important

    for step in tqdm(timesteps, ncols=80):

        today = daily_model.timestepper.current if step else daily_model.timestepper.start

        try:

            # Step 1: run planning model & update daily model

            if include_monthly and today.day == 1:

                # Step 1a: update planning model

                # ...update time steps
                monthly_model.timestepper.start = today
                monthly_model.timestepper.end = today

                # ...update initial conditions (not needed for the first step);
                # nodes are objects, so use attribute access rather than dict keys
                if step > 0:
                    for node in monthly_model.nodes:
                        if node.__class__.__name__ != 'Storage':
                            continue
                        node.initial_volume = daily_model.nodes[node.name].volume

                # Step 1b: run planning model
                print('Running planning model')
                monthly_model.step()  # equivalent to run(), since there is only one timestep

                # Step 1c: update daily model with planning model results
                print('Updating daily model')

            # Step 2: run daily model
            daily_model.step()
        except Exception as err:
            print('\nFailed at step {}'.format(today))
            print(err)
            # continue
            break

    # save results to CSV

    results = daily_model.to_dataframe()
    results_path = './results'
    results.columns = results.columns.droplevel(1)
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    results.to_csv(os.path.join(results_path, 'system.csv'))
    attributes = {}
    for c in results.columns:
        attribute = c.split('/')[-1]
        if attribute in attributes:
            attributes[attribute].append(c)
        else:
            attributes[attribute] = [c]
    for attribute in attributes:
        path = os.path.join(results_path, '{}.csv'.format(attribute))
        df = results[attributes[attribute]]
        df.columns = [c.split('/')[-2] for c in df.columns]
        df.to_csv(path)

        if attribute == 'flow':
            df2 = df[[c for c in df.columns if c.endswith(' PH')]]
            path2 = os.path.join(results_path, 'powerhouse flow.csv')
            df2.to_csv(path2)
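As with the other entry points, running a basin is a single call (both arguments are placeholders):

# Hypothetical invocation; basin and network_key values are illustrative.
run_model('merced', 'my-network-key')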
Example #9
from pywr.core import Model
from pywr.recorders import Recorder
from pywr.recorders._recorders import NodeRecorder
import pandas
import numpy as np
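
# Recorder, NodeRecorder, pandas and numpy are presumably used by custom
# recorder definitions elided from this excerpt; only Model is used below.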

if __name__ == "__main__":

    m = Model.load("hydropower_example.json")
    stats = m.run()
    print(stats)

    print(m.recorders["turbine1_energy"].values())

    df = m.to_dataframe()
    print(df.head())

    from matplotlib import pyplot as plt

    df.plot(subplots=True)
    plt.show()