def historic_run():
    """Run the model."""
    logger.info(f'Version: {VERSION}')

    logger.info('Initialising cloud storage client ...')
    client = init_azure_storage()
    download_hydrology(client)

    logger.info('Starting model run ...')
    # Ensure the output directory exists
    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    model = Model.load(MODEL_FILENAME)

    # Add a storage recorder
    TablesRecorder(model, os.path.join(OUT_DIR, 'thames_output.h5'),
                   parameters=[p for p in model.parameters])
    ProgressRecorder(model)

    # Run the model
    stats = model.run()
    logger.info(stats)

    # Upload the results
    logger.info('Uploading outputs ...')
    upload_outputs(client)

    # Print stats
    stats_df = stats.to_dataframe()
    logger.info(stats_df)

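# The storage helpers used by historic_run() are defined elsewhere. Below is a
# hypothetical sketch of init_azure_storage() and download_hydrology() using
# the azure-storage-blob v12 API; the container name and environment variable
# are placeholders, not the project's actual configuration.
import os
from azure.storage.blob import BlobServiceClient

def init_azure_storage():
    # Assumes the connection string is supplied via the environment.
    return BlobServiceClient.from_connection_string(
        os.environ['AZURE_STORAGE_CONNECTION_STRING'])

def download_hydrology(client, container='hydrology'):
    # Download every blob in the container to the working directory.
    container_client = client.get_container_client(container)
    for blob in container_client.list_blobs():
        with open(blob.name, 'wb') as fh:
            fh.write(container_client.download_blob(blob.name).readall())
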
def load_pywr_model(self, solver=None):
    """Create a Pywr model from the exported data."""
    pywr_data = self.get_pywr_data()
    model = Model.load(pywr_data, solver=solver)
    self.model = model
    return pywr_data

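# A minimal usage sketch for load_pywr_model(), assuming the method lives on
# the same exporter class used in test_export below; the client and ids are
# placeholders.
exporter = PywrHydraExporter.from_network_id(client, pywr_network_id, pywr_scenario_id)
exporter.load_pywr_model(solver='glpk')
exporter.model.run()
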
def main(filename):
    base, ext = os.path.splitext(filename)

    m = Model.load(filename, solver='glpk-dcopf')

    gen1 = NumpyArrayNodeRecorder(m, m.nodes['gen1'])
    pv2 = NumpyArrayNodeRecorder(m, m.nodes['pv2'])
    ProgressRecorder(m)
    CSVRecorder(m, f'{base}.csv')

    m.setup()
    stats = m.run()
    print(stats.to_dataframe())

    df = pandas.concat({'gen1': gen1.to_dataframe(), 'pv2': pv2.to_dataframe()}, axis=1)

    fig, ax = plt.subplots(figsize=(8, 4))
    df.plot(ax=ax)
    df.resample('D').mean().plot(ax=ax, color='black')
    ax.set_ylabel('MW')
    fig.savefig(f'{base}.png', dpi=300)

    fig, ax = plt.subplots(figsize=(8, 4))
    df.resample('M').sum().plot(ax=ax)
    ax.set_ylabel('MWh per month')
    fig.savefig(f'{base}-monthly.png', dpi=300)

    plt.show()

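# main() is defined but never invoked here. A typical entry point for the
# script would look like the sketch below (the original file's actual CLI
# handling is not shown, so this is an assumption):
if __name__ == '__main__':
    import sys
    main(sys.argv[1])
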
def test_basic_losses():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'basic-losses.json'), solver='glpk-dcopf')
    m.setup()
    m.run()

    np.testing.assert_allclose(m.nodes['gen1'].flow, [50])
    np.testing.assert_allclose(m.nodes['gen2'].flow, [100])
    np.testing.assert_allclose(m.nodes['load1'].flow, [135])

def test_export(db_with_pywr_network, logged_in_client):
    client = logged_in_client
    pywr_network_id, pywr_scenario_id, pywr_json_filename = db_with_pywr_network

    exporter = PywrHydraExporter.from_network_id(client, pywr_network_id, pywr_scenario_id)
    pywr_data_exported = exporter.get_pywr_data()

    # Check transformed data is about right
    with open(pywr_json_filename) as fh:
        pywr_data = json.load(fh)

    assert_identical_pywr_data(pywr_data, pywr_data_exported)

    m = Model.load(pywr_data)
    m.run()

def test_simple():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'simple.json'), solver='glpk-dcopf')
    m.setup()
    m.run()

    np.testing.assert_allclose(m.nodes['gen1'].flow, [100.0])
    np.testing.assert_allclose(m.nodes['gen2'].flow, [50.0])
    np.testing.assert_allclose(m.nodes['load3'].flow, [150.0])
    np.testing.assert_allclose(m.nodes['line12'].flow, [50 / 3])
    np.testing.assert_allclose(m.nodes['line13'].flow, [100.0 - 50 / 3])
    np.testing.assert_allclose(m.nodes['line23'].flow, [50.0 + 50 / 3])

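# The expected flows in test_simple follow from a DC power flow with equal
# susceptance on all three lines (the convention the expected values imply;
# simple.json itself is not reproduced here). A standalone sketch of that
# check, taking node 3 as the slack bus with unit susceptances:
import numpy as np

def _check_simple_flows():
    # Reduced nodal susceptance matrix for nodes 1 and 2 (node 3 is slack).
    B = np.array([[2.0, -1.0],
                  [-1.0, 2.0]])
    # Injections: gen1 = 100 at node 1, gen2 = 50 at node 2.
    theta = np.linalg.solve(B, [100.0, 50.0])  # angles; theta3 = 0
    line12 = theta[0] - theta[1]
    line13 = theta[0] - 0.0
    line23 = theta[1] - 0.0
    np.testing.assert_allclose([line12, line13, line23],
                               [50 / 3, 100.0 - 50 / 3, 50.0 + 50 / 3])
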
def test_simple_line_constraints():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'simple-line-constraints.json'), solver='glpk-dcopf')
    m.setup()
    m.run()

    np.testing.assert_allclose(m.nodes['gen1'].flow, [25.0])
    np.testing.assert_allclose(m.nodes['gen2'].flow, [100.0])
    np.testing.assert_allclose(m.nodes['load3'].flow, [125.0])
    np.testing.assert_allclose(m.nodes['line12'].flow, [-25.0])
    np.testing.assert_allclose(m.nodes['line13'].flow, [50.0])
    np.testing.assert_allclose(m.nodes['line23'].flow, [75.0])

def test_from_model():
    json_path = os.path.join(os.path.dirname(__file__), "models", "river1.json")
    model = Model.load(json_path)
    json_dict = pywr_model_to_d3_json(model, attributes=True)

    assert "nodes" in json_dict.keys()
    assert "links" in json_dict.keys()

    node_names = ["catchment1", "river1", "abs1", "link1", "term1", "demand1"]
    for node in json_dict["nodes"]:
        assert node["name"] in node_names

    catchment = get_node(json_dict["nodes"], "catchment1")
    catchment_max_flow = get_node_attribute(catchment, "max_flow")
    assert catchment_max_flow["value"] == "5.0"

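# get_node and get_node_attribute are helpers assumed by test_from_model but
# not defined in this file. A minimal sketch of what they might look like,
# assuming each node dict carries an "attributes" list of
# {"name": ..., "value": ...} entries (the exact shape of the
# pywr_model_to_d3_json output is an assumption here):
def get_node(nodes, name):
    """Return the first node dict with the given name, or None."""
    for node in nodes:
        if node["name"] == name:
            return node
    return None

def get_node_attribute(node, attribute_name):
    """Return the node's attribute dict with the given name, or None."""
    for attribute in node.get("attributes", []):
        if attribute["name"] == attribute_name:
            return attribute
    return None
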
def test_pv_generator():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'pv-generator.json'), solver='glpk-dcopf')

    gen1 = NumpyArrayNodeRecorder(m, m.nodes['gen1'])
    pv2 = NumpyArrayNodeRecorder(m, m.nodes['pv2'])

    m.setup()
    m.run()

    df = pandas.concat({'gen1': gen1.to_dataframe(), 'pv2': pv2.to_dataframe()}, axis=1)
    assert df.shape[0] == 745

def test_simple_battery():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'simple-battery.json'), solver='glpk-dcopf')

    gen1 = NumpyArrayNodeRecorder(m, m.nodes['gen1'])
    pv2 = NumpyArrayNodeRecorder(m, m.nodes['pv2'])
    battery1 = NumpyArrayStorageRecorder(m, m.nodes['battery1'])

    m.setup()
    m.run()

    df = pandas.concat({
        'gen1': gen1.to_dataframe(),
        'pv2': pv2.to_dataframe(),
        'battery1': battery1.to_dataframe()
    }, axis=1)
    assert df.shape[0] == 745

def test_simple_losses():
    m = Model.load(os.path.join(TEST_FOLDER, 'models', 'simple-losses.json'), solver='glpk-dcopf')
    m.setup()
    m.run()

    # Gen1 losses
    line12_losses = 0.1 * 100 / 9
    line13_losses = 0.1 * (100 - 100 / 9)
    line23_losses = 0.1 * (100 / 9 - line12_losses)
    gen1_losses = line12_losses + line13_losses + line23_losses
    gen2 = (150 - 100 + gen1_losses) / (1 - 0.1)

    np.testing.assert_allclose(m.nodes['gen1'].flow, [100.0])
    np.testing.assert_allclose(m.nodes['gen2'].flow, [gen2])
    np.testing.assert_allclose(m.nodes['load3'].flow, [150.0])
    np.testing.assert_allclose(m.nodes['line12'].flow, [100 / 9])
    np.testing.assert_allclose(m.nodes['line13'].flow, [100.0 - 100 / 9])
    np.testing.assert_allclose(m.nodes['line23'].flow, [gen2 + 100 / 9 - line12_losses])

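# A standalone sanity check of the loss bookkeeping in test_simple_losses,
# assuming the uniform 10% per-line loss factor that the figures above imply:
# generation net of all line losses must exactly meet the 150 MW load.
import numpy as np

def _check_loss_balance():
    line12 = 100 / 9
    line13 = 100 - line12
    # Gen1's power loses 10% on line12 and line13, and the power arriving at
    # node 2 via line12 loses a further 10% on line23.
    gen1_losses = 0.1 * line12 + 0.1 * line13 + 0.1 * (0.9 * line12)
    # Gen2 covers the shortfall, grossed up for its own 10% loss on line23.
    gen2 = (150 - 100 + gen1_losses) / (1 - 0.1)
    delivered = (100 - gen1_losses) + (1 - 0.1) * gen2
    np.testing.assert_allclose(delivered, 150.0)
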
def load_pywr_model(self):
    """Create a Pywr model from the exported data."""
    pywr_data = self.get_pywr_data()
    model = Model.load(pywr_data)
    self.model = model

def run():
    # Run the model
    model = Model.load(MODEL_FILENAME)

    # Add a storage recorder
    TablesRecorder(model, 'thames_output.h5', parameters=[p for p in model.parameters])

    # Run the model
    stats = model.run()
    print(stats)

    stats_df = stats.to_dataframe()
    print(stats_df)

    keys_to_plot = (
        'time_taken_before',
        'solver_stats.bounds_update_nonstorage',
        'solver_stats.bounds_update_storage',
        'solver_stats.objective_update',
        'solver_stats.lp_solve',
        'solver_stats.result_update',
        'time_taken_after',
    )

    keys_to_tabulate = (
        'timesteps',
        'time_taken',
        'solver',
        'num_scenarios',
        'speed',
        'solver_name',
        'solver_stats.total',
        'solver_stats.number_of_rows',
        'solver_stats.number_of_cols',
        'solver_stats.number_of_nonzero',
        'solver_stats.number_of_routes',
        'solver_stats.number_of_nodes',
    )

    values = []
    labels = []
    explode = []
    solver_sub_total = 0.0
    for k in keys_to_plot:
        v = stats_df.loc[k][0]
        values.append(v)
        label = k.split('.', 1)[-1].replace('_', ' ').capitalize()
        explode.append(0.0)
        if k.startswith('solver_stats'):
            labels.append('Solver - {}'.format(label))
            solver_sub_total += v
        else:
            labels.append(label)

    values.append(stats_df.loc['solver_stats.total'][0] - solver_sub_total)
    labels.append('Solver - Other')
    explode.append(0.0)

    values.append(stats_df.loc['time_taken'][0] - sum(values))
    values = np.array(values) / sum(values)
    labels.append('Other')
    explode.append(0.0)

    fig, (ax1, ax2) = plt.subplots(figsize=(12, 4), ncols=2, sharey='row',
                                   gridspec_kw={'width_ratios': [2, 1]})
    print(values, labels)
    ax1.pie(values, explode=explode, labels=labels, autopct='%1.1f%%', startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.

    cell_text = []
    for index, value in stats_df.iterrows():
        if index not in keys_to_tabulate:
            continue
        v = value[0]
        if isinstance(v, (float, np.float64, np.float32)):
            v = f'{v:.2f}'
        cell_text.append([index, v])

    tbl = ax2.table(cellText=cell_text, colLabels=['Statistic', 'Value'], loc='center')
    tbl.scale(1.5, 1.5)  # may help
    tbl.set_fontsize(14)
    ax2.axis('off')

    fig.savefig('run_statistics_w_tables.png', dpi=300)
    fig.savefig('run_statistics_w_tables.eps')
    plt.show()