def historic_run():
    """Run the historic model and upload the results to cloud storage."""
    logger.info(f'Version: {VERSION}')

    logger.info('Initialising cloud storage client ...')
    storage_client = init_azure_storage()
    download_hydrology(storage_client)

    logger.info('Starting model run ...')
    # Ensure the output directory exists before the recorder writes to it.
    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    model = Model.load(MODEL_FILENAME)

    # Record every node and every parameter to an HDF5 file, and report progress.
    TablesRecorder(model, os.path.join(OUT_DIR, 'thames_output.h5'),
                   parameters=list(model.parameters))
    ProgressRecorder(model)

    run_stats = model.run()
    logger.info(run_stats)

    # Push the generated outputs back to cloud storage.
    logger.info('Uploading outputs ...')
    upload_outputs(storage_client)

    # Log the run statistics as a dataframe.
    logger.info(run_stats.to_dataframe())
def test_multiple_scenarios(self, simple_linear_model, tmpdir):
    """Test the TablesRecorder with two scenario dimensions."""
    from pywr.parameters import ConstantScenarioParameter

    model = simple_linear_model
    scenario_a = Scenario(model, name='A', size=4)
    scenario_b = Scenario(model, name='B', size=2)

    output_node = model.nodes['Output']
    input_node = model.nodes['Input']
    input_node.max_flow = ConstantScenarioParameter(model, scenario_a, [10, 20, 30, 40])
    output_node.max_flow = ConstantScenarioParameter(model, scenario_b, [20, 40])
    output_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f)
        model.run()

        # Each node array has shape (timesteps, |A|, |B|).
        for name in model.nodes.keys():
            node_array = h5f.get_node('/', name)
            assert node_array.shape == (365, 4, 2)
            np.testing.assert_allclose(
                node_array[0, ...],
                [[10, 10], [20, 20], [20, 30], [20, 40]])

        # The scenarios table records name and size for each scenario.
        scenario_table = h5f.get_node('/scenarios')
        for idx, scenario in enumerate(model.scenarios.scenarios):
            record = scenario_table[idx]
            assert record['name'] == scenario.name.encode('utf-8')
            assert record['size'] == scenario.size
def plot_res(ext, show):
    """Plot the central scenario: reservoir volume, demand saving level and demand."""
    end_year = "2105"

    frames = {}
    for name, frame in TablesRecorder.generate_dataframes("thames_output.h5"):
        frame.columns = ["Very low", "Low", "Central", "High", "Very high"]
        frames[name] = frame

    # (dataframe key, y-axis label, output filename stem)
    panels = (
        ("reservoir1", "Volume [$Mm^3$]", "Reservoir"),
        ("demand_saving_level", "Demand saving level", "Demand saving level"),
        ("demand_max_flow", "Demand [$Mm^3/day$]", "Demand"),
    )

    figures = []
    for key, ylabel, stem in panels:
        fig, ax = plt.subplots(figsize=(16, 5), dpi=300)
        frames[key].loc[:end_year, "Central"].plot(ax=ax)
        ax.set_ylabel(ylabel)
        plt.tight_layout()
        ax.grid(True)
        figures.append((fig, stem))

    if ext is not None:
        for fig, stem in figures:
            fig.savefig(f"{stem}.{ext}", dpi=300)

    if show:
        plt.show()
def test_nodes_with_str(self, simple_linear_model, tmpdir):
    """Test the TablesRecorder with string node names and a custom `where` group."""
    from pywr.parameters import ConstantParameter

    model = simple_linear_model
    output_node = model.nodes['Output']
    input_node = model.nodes['Input']
    # Register an aggregated node so its flow is recorded too.
    sum_node = AggregatedNode(model, 'Sum', [output_node, input_node])
    max_flow_param = ConstantParameter(model, 10.0, name='max_flow')
    input_node.max_flow = max_flow_param
    output_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f,
                       nodes=['Output', 'Input', 'Sum'],
                       parameters=[max_flow_param],
                       where="/agroup")

        model.run()

        # All arrays are created beneath the requested group.
        for recorded_name in ('Output', 'Input', 'Sum', 'max_flow'):
            arr = h5f.get_node("/agroup/" + recorded_name)
            assert arr.shape == (365, 1)
            expected = 20.0 if recorded_name == 'Sum' else 10.0
            np.testing.assert_allclose(arr, expected)
def test_parameters(self, simple_linear_model, tmpdir):
    """
    Test the TablesRecorder with an explicit parameter list.
    """
    from pywr.parameters import ConstantParameter

    model = simple_linear_model
    otpt = model.nodes['Output']
    inpt = model.nodes['Input']

    # BUG FIX: ConstantParameter requires the model as its first positional
    # argument (consistent with the other test_parameters in this module);
    # it was previously constructed as ConstantParameter(10.0, ...).
    p = ConstantParameter(model, 10.0, name='max_flow')
    inpt.max_flow = p

    agg_node = AggregatedNode(model, 'Sum', [otpt, inpt])
    # NOTE(review): this overwrites the parameter above with a plain scalar,
    # as in the original test; `p` is still recorded via `parameters=`.
    inpt.max_flow = 10.0
    otpt.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        rec = TablesRecorder(model, h5f, parameters=[p, ])

        model.run()

        for node_name in model.nodes.keys():
            ca = h5f.get_node('/', node_name)
            assert ca.shape == (365, 1)
            if node_name == 'Sum':
                np.testing.assert_allclose(ca, 20.0)
            else:
                np.testing.assert_allclose(ca, 10.0)
def plot_res(ext, show):
    """Plot the central-scenario reservoir volume, demand saving level and demand."""
    end_year = '2105'

    data = {}
    for key, frame in TablesRecorder.generate_dataframes('thames_output.h5'):
        frame.columns = ['Very low', 'Low', 'Central', 'High', 'Very high']
        data[key] = frame

    # Reservoir volume.
    fig1, ax1 = plt.subplots(figsize=(16, 5), dpi=300)
    data['reservoir1'].loc[:end_year, 'Central'].plot(ax=ax1)
    ax1.set_ylabel('Volume [$Mm^3$]')
    plt.tight_layout()

    # Demand saving level.
    fig2, ax2 = plt.subplots(figsize=(16, 5), dpi=300)
    data['demand_saving_level'].loc[:end_year, 'Central'].plot(ax=ax2)
    ax2.set_ylabel('Demand saving level')
    plt.tight_layout()

    # Demand.
    fig3, ax3 = plt.subplots(figsize=(16, 5), dpi=300)
    data['demand_max_flow'].loc[:end_year, 'Central'].plot(ax=ax3)
    ax3.set_ylabel('Demand [$Mm^3/day$]')
    plt.tight_layout()

    for axis in (ax1, ax2, ax3):
        axis.grid(True)

    if ext is not None:
        fig1.savefig(f'Reservoir.{ext}', dpi=300)
        fig2.savefig(f'Demand saving level.{ext}', dpi=300)
        fig3.savefig(f'Demand.{ext}', dpi=300)

    if show:
        plt.show()
def figures(ext, show):
    """Plot each recorded dataframe: a 2100-2125 window alongside its quantiles."""
    for name, frame in TablesRecorder.generate_dataframes('thames_output.h5'):
        frame.columns = ['Very low', 'Low', 'Central', 'High', 'Very high']

        fig, (ax1, ax2) = plt.subplots(figsize=(12, 4), ncols=2, sharey='row',
                                       gridspec_kw={'width_ratios': [3, 1]})
        frame['2100':'2125'].plot(ax=ax1)
        frame.quantile(np.linspace(0, 1)).plot(ax=ax2)

        # Reservoir nodes record volumes; everything else records flows.
        if name.startswith('reservoir'):
            ax1.set_ylabel('Volume [$Mm^3$]')
        else:
            ax1.set_ylabel('Flow [$Mm^3/day$]')

        for axis in (ax1, ax2):
            axis.set_title(name)
            axis.grid(True)
        plt.tight_layout()

        if ext is not None:
            fig.savefig(f'{name}.{ext}', dpi=300)

    if show:
        plt.show()
def test_routes_multiple_scenarios(self, simple_linear_model, tmpdir):
    """Test TablesRecorder route flows with two scenario dimensions."""
    from pywr.parameters import ConstantScenarioParameter

    model = simple_linear_model
    scen_a = Scenario(model, name='A', size=4)
    scen_b = Scenario(model, name='B', size=2)

    out_node = model.nodes['Output']
    in_node = model.nodes['Input']
    in_node.max_flow = ConstantScenarioParameter(model, scen_a, [10, 20, 30, 40])
    out_node.max_flow = ConstantScenarioParameter(model, scen_b, [20, 40])
    out_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f, routes_flows='flows')
        model.run()

        flow_array = h5f.get_node('/flows')
        # Shape is (timesteps, routes, |A|, |B|).
        assert flow_array.shape == (365, 1, 4, 2)
        np.testing.assert_allclose(
            flow_array[0, 0], [[10, 10], [20, 20], [20, 30], [20, 40]])
def figures(ext, show):
    """Plot every recorded dataframe: time-series window plus quantile panel."""
    for name, frame in TablesRecorder.generate_dataframes("thames_output.h5"):
        frame.columns = ["Very low", "Low", "Central", "High", "Very high"]

        fig, (ax1, ax2) = plt.subplots(
            figsize=(12, 4), ncols=2, sharey="row",
            gridspec_kw={"width_ratios": [3, 1]},
        )
        frame["2100":"2125"].plot(ax=ax1)
        frame.quantile(np.linspace(0, 1)).plot(ax=ax2)

        # Reservoirs record volume; other nodes record flow.
        ylabel = "Volume [$Mm^3$]" if name.startswith("reservoir") else "Flow [$Mm^3/day$]"
        ax1.set_ylabel(ylabel)

        for axis in (ax1, ax2):
            axis.set_title(name)
            axis.grid(True)
        plt.tight_layout()

        if ext is not None:
            fig.savefig(f"{name}.{ext}", dpi=300)

    if show:
        plt.show()
def test_routes(self, simple_linear_model, tmpdir):
    """
    Test the TablesRecorder route flows, routes, time and scenarios tables.
    """
    model = simple_linear_model
    otpt = model.nodes['Output']
    inpt = model.nodes['Input']
    agg_node = AggregatedNode(model, 'Sum', [otpt, inpt])
    inpt.max_flow = 10.0
    otpt.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        rec = TablesRecorder(model, h5f, routes_flows='flows')

        model.run()

        flows = h5f.get_node('/flows')
        assert flows.shape == (365, 1, 1)
        np.testing.assert_allclose(flows.read(), np.ones((365, 1, 1)) * 10)

        routes = h5f.get_node('/routes')
        assert routes.shape[0] == 1
        row = routes[0]
        # BUG FIX: these were assignments (`row['start'] = "Input"`), so the
        # test silently checked nothing. PyTables string columns read back as
        # bytes, hence the encoded comparison.
        assert row['start'] == b"Input"
        assert row['end'] == b"Output"

        from datetime import date, timedelta
        d = date(2015, 1, 1)
        time = h5f.get_node('/time')
        for i in range(len(model.timestepper)):
            row = time[i]
            assert row['year'] == d.year
            assert row['month'] == d.month
            assert row['day'] == d.day
            d += timedelta(1)

        scenarios = h5f.get_node('/scenarios')
        # BUG FIX: the loop previously indexed with the stale `i` left over
        # from the time loop; enumerate each scenario instead, and compare
        # against the encoded name as the other tests do.
        for i, s in enumerate(model.scenarios.scenarios):
            row = scenarios[i]
            assert row['name'] == s.name.encode('utf-8')
            assert row['size'] == s.size

        # A reset + rerun must not grow the time table.
        model.reset()
        model.run()

        time = h5f.get_node('/time')
        assert len(time) == len(model.timestepper)
def test_nodes(self, simple_linear_model, tmpdir):
    """Test TablesRecorder node output plus the /time and /scenarios tables."""
    model = simple_linear_model
    out_node = model.nodes['Output']
    in_node = model.nodes['Input']
    total_node = AggregatedNode(model, 'Sum', [out_node, in_node])
    in_node.max_flow = 10.0
    out_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f)
        model.run()

        # Every node gets a (timesteps, scenarios) array.
        for name in model.nodes.keys():
            arr = h5f.get_node('/', name)
            assert arr.shape == (365, 1)
            expected = 20.0 if name == 'Sum' else 10.0
            np.testing.assert_allclose(arr, expected)

        from datetime import date, timedelta
        expected_date = date(2015, 1, 1)
        time_table = h5f.get_node('/time')
        for step in range(len(model.timestepper)):
            record = time_table[step]
            assert record['year'] == expected_date.year
            assert record['month'] == expected_date.month
            assert record['day'] == expected_date.day
            expected_date += timedelta(1)

        scenario_table = h5f.get_node('/scenarios')
        for idx, scenario in enumerate(model.scenarios.scenarios):
            record = scenario_table[idx]
            assert record['name'] == scenario.name.encode('utf-8')
            assert record['size'] == scenario.size

        # Re-running after a reset must not grow the time table.
        model.reset()
        model.run()

        time_table = h5f.get_node('/time')
        assert len(time_table) == len(model.timestepper)
def test_parameters(self, simple_linear_model, tmpdir):
    """Test recording parameters, including one whose name contains a '/'."""
    from pywr.parameters import ConstantParameter

    model = simple_linear_model
    out_node = model.nodes['Output']
    in_node = model.nodes['Input']

    max_flow_param = ConstantParameter(model, 10.0, name='max_flow')
    in_node.max_flow = max_flow_param

    # ensure TablesRecorder can handle parameters with a / in the name
    slash_param = ConstantParameter(model, 0.0, name='name with a / in it')
    in_node.min_flow = slash_param

    AggregatedNode(model, 'Sum', [out_node, in_node])
    in_node.max_flow = 10.0
    out_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        with pytest.warns(ParameterNameWarning):
            rec = TablesRecorder(model, h5f,
                                 parameters=[max_flow_param, slash_param])

        # check parameters have been added to the component tree
        # this is particularly important for parameters which update their
        # values in `after`, e.g. DeficitParameter (see #465)
        assert not model.find_orphaned_parameters()
        assert max_flow_param in rec.children
        assert slash_param in rec.children

        with pytest.warns(tables.NaturalNameWarning):
            model.run()

        for name in model.nodes.keys():
            arr = h5f.get_node('/', name)
            assert arr.shape == (365, 1)
            if name == 'Sum':
                np.testing.assert_allclose(arr, 20.0)
            elif "name with a" in name:
                # the '/' is replaced before the array is created
                assert name == "name with a _ in it"
                np.testing.assert_allclose(arr, 0.0)
            else:
                np.testing.assert_allclose(arr, 10.0)
def test_demand_saving_with_indexed_array(self, solver, tmpdir):
    """Test recording various items from demand saving example"""
    model = load_model("demand_saving2.json", solver=solver)
    model.timestepper.end = "2016-01-31"
    model.check()

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(
            model, h5f,
            nodes=[
                ('/outputs/demand', 'Demand'),
                ('/storage/reservoir', 'Reservoir'),
            ],
            parameters=[
                ('/parameters/demand_saving_level', 'demand_saving_level'),
            ],
        )

        model.run()

        max_volume = model.nodes["Reservoir"].max_volume

        rec_demand = h5f.get_node('/outputs/demand', 'Demand').read()
        rec_storage = h5f.get_node('/storage/reservoir', 'Reservoir').read()

        demand_baseline = 50.0
        demand_factor = 0.9  # jan-apr

        # Model starts with no demand saving.
        assert_allclose(rec_demand[0, 0],
                        demand_baseline * demand_factor * 1.0)

        # First control curve breached -> 5% saving.
        assert rec_storage[4, 0] < (0.8 * max_volume)
        assert_allclose(rec_demand[5, 0],
                        demand_baseline * demand_factor * 0.95)

        # Second control curve breached -> 50% saving.
        assert rec_storage[11, 0] < (0.5 * max_volume)
        assert_allclose(rec_demand[12, 0],
                        demand_baseline * demand_factor * 0.5)
def test_create_directory(self, simple_linear_model, tmpdir):
    """Test that TablesRecorder creates missing output directories."""
    model = simple_linear_model
    out_node = model.nodes['Output']
    in_node = model.nodes['Input']
    AggregatedNode(model, 'Sum', [out_node, in_node])
    in_node.max_flow = 10.0
    out_node.cost = -2.0

    # Point the recorder at a file inside a directory that does not exist yet.
    folder = tmpdir.join('outputs')
    h5file = folder.join('output.h5')
    assert not folder.exists()

    TablesRecorder(model, str(h5file), create_directories=True)
    model.run()

    # Both the directory and the file must have been created.
    assert folder.exists()
    assert h5file.exists()
def plot_res2(ext, show):
    """Plot all scenarios: reservoir traces, volume quantiles and saving-level counts."""
    end_year = "2105"

    frames = {}
    for name, frame in TablesRecorder.generate_dataframes("thames_output.h5"):
        frame.columns = ["Very low", "Low", "Central", "High", "Very high"]
        frames[name] = frame

    # Reservoir volume time series for every scenario.
    fig1, ax1 = plt.subplots(figsize=(16, 5), dpi=300)
    frames["reservoir1"].loc[:end_year].plot(ax=ax1)
    ax1.set_ylabel("Volume [$Mm^3$]")
    plt.legend()
    plt.tight_layout()

    # Storage-duration curve (volume quantiles).
    fig2, ax2 = plt.subplots(figsize=(16, 5), dpi=300)
    frames["reservoir1"].quantile(np.linspace(0, 1)).plot(ax=ax2)
    ax2.set_ylabel("Volume [$Mm^3$]")
    ax2.set_xlabel("Quantile")
    plt.tight_layout()

    # Proportion of time spent at each demand saving level.
    fig3, ax3 = plt.subplots(figsize=(16, 5), dpi=300)
    level_counts = frames["demand_saving_level"].apply(pandas.Series.value_counts)
    level_counts /= level_counts.sum(axis=0)
    level_counts.plot.bar(ax=ax3)
    ax3.set_ylabel("Proportion of time.")
    ax3.set_xlabel("Demand saving level")
    plt.tight_layout()

    for axis in (ax1, ax2, ax3):
        axis.grid(True)

    if ext is not None:
        fig1.savefig(f"Reservoir (scenarios).{ext}", dpi=300)
        fig2.savefig(f"Reservoir SDC (scenarios).{ext}", dpi=300)
        fig3.savefig(f"Demand saving level count (scenarios).{ext}", dpi=300)

    if show:
        plt.show()
def plot_res2(ext, show):
    """Plot scenario results: reservoir traces, quantiles and demand-saving counts."""
    end_year = '2105'

    data = {}
    for key, frame in TablesRecorder.generate_dataframes('thames_output.h5'):
        frame.columns = ['Very low', 'Low', 'Central', 'High', 'Very high']
        data[key] = frame

    # All-scenario reservoir volume.
    fig1, ax1 = plt.subplots(figsize=(16, 5), dpi=300)
    data['reservoir1'].loc[:end_year].plot(ax=ax1)
    ax1.set_ylabel('Volume [$Mm^3$]')
    plt.legend()
    plt.tight_layout()

    # Quantiles of reservoir volume.
    fig2, ax2 = plt.subplots(figsize=(16, 5), dpi=300)
    data['reservoir1'].quantile(np.linspace(0, 1)).plot(ax=ax2)
    ax2.set_ylabel('Volume [$Mm^3$]')
    ax2.set_xlabel('Quantile')
    plt.tight_layout()

    # Normalised counts of each demand saving level.
    fig3, ax3 = plt.subplots(figsize=(16, 5), dpi=300)
    counts = data['demand_saving_level'].apply(pandas.Series.value_counts)
    counts /= counts.sum(axis=0)
    counts.plot.bar(ax=ax3)
    ax3.set_ylabel('Proportion of time.')
    ax3.set_xlabel('Demand saving level')
    plt.tight_layout()

    for axis in (ax1, ax2, ax3):
        axis.grid(True)

    if ext is not None:
        fig1.savefig(f'Reservoir (scenarios).{ext}', dpi=300)
        fig2.savefig(f'Reservoir SDC (scenarios).{ext}', dpi=300)
        fig3.savefig(f'Demand saving level count (scenarios).{ext}', dpi=300)

    if show:
        plt.show()
def test_user_scenarios(self, simple_linear_model, tmpdir):
    """Test the TablesRecorder with a user-defined scenario subset."""
    from pywr.parameters import ConstantScenarioParameter

    model = simple_linear_model
    scen_a = Scenario(model, name='A', size=4)
    scen_b = Scenario(model, name='B', size=2)
    # Run only the first and last of the 4x2 combinations.
    model.scenarios.user_combinations = [[0, 0], [3, 1]]

    out_node = model.nodes['Output']
    in_node = model.nodes['Input']
    in_node.max_flow = ConstantScenarioParameter(model, scen_a, [10, 20, 30, 40])
    out_node.max_flow = ConstantScenarioParameter(model, scen_b, [20, 40])
    out_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f)
        model.run()

        # Arrays are sized by the number of user combinations, not |A|*|B|.
        for name in model.nodes.keys():
            arr = h5f.get_node('/', name)
            assert arr.shape == (365, 2)
            np.testing.assert_allclose(arr[0, ...], [10, 40])

        # The combinations table records the subset that was run.
        combo_table = h5f.get_node('/scenario_combinations')
        for idx, combo in enumerate(model.scenarios.user_combinations):
            record = combo_table[idx]
            assert record['A'] == combo[0]
            assert record['B'] == combo[1]
def test_routes_user_scenarios(self, simple_linear_model, tmpdir):
    """Test TablesRecorder route flows with a user-defined scenario subset."""
    from pywr.parameters import ConstantScenarioParameter

    model = simple_linear_model
    scen_a = Scenario(model, name='A', size=4)
    scen_b = Scenario(model, name='B', size=2)
    # Run only the first and last of the 4x2 combinations.
    model.scenarios.user_combinations = [[0, 0], [3, 1]]

    out_node = model.nodes['Output']
    in_node = model.nodes['Input']
    in_node.max_flow = ConstantScenarioParameter(model, scen_a, [10, 20, 30, 40])
    out_node.max_flow = ConstantScenarioParameter(model, scen_b, [20, 40])
    out_node.cost = -2.0

    h5file = tmpdir.join('output.h5')
    import tables
    with tables.open_file(str(h5file), 'w') as h5f:
        TablesRecorder(model, h5f, routes_flows='flows')
        model.run()

        flow_array = h5f.get_node('/flows')
        assert flow_array.shape == (365, 1, 2)
        np.testing.assert_allclose(flow_array[0, 0], [10, 40])

        # The combinations table records the subset that was run.
        combo_table = h5f.get_node('/scenario_combinations')
        for idx, combo in enumerate(model.scenarios.user_combinations):
            record = combo_table[idx]
            assert record['A'] == combo[0]
            assert record['B'] == combo[1]

    # This part of the test requires IPython (see `pywr.notebook`)
    pytest.importorskip("IPython")  # triggers a skip of the test if IPython not found.
    from pywr.notebook.sankey import routes_to_sankey_links

    # Mean over all timesteps and both scenarios: (10 + 40) / 2.
    link = routes_to_sankey_links(str(h5file), 'flows')[0]
    assert link['source'] == 'Input'
    assert link['target'] == 'Output'
    np.testing.assert_allclose(link['value'], 25.0)

    # First scenario only.
    link = routes_to_sankey_links(str(h5file), 'flows', scenario_slice=0)[0]
    assert link['source'] == 'Input'
    assert link['target'] == 'Output'
    np.testing.assert_allclose(link['value'], 10.0)

    # Second scenario, first timestep only.
    link = routes_to_sankey_links(str(h5file), 'flows',
                                  scenario_slice=1, time_slice=0)[0]
    assert link['source'] == 'Input'
    assert link['target'] == 'Output'
    np.testing.assert_allclose(link['value'], 40.0)
def run():
    """Run the model, print run statistics and plot them as a pie chart and table."""
    model = Model.load(MODEL_FILENAME)

    # Add a storage recorder capturing every parameter.
    TablesRecorder(model, "thames_output.h5", parameters=[p for p in model.parameters])

    # Run the model
    stats = model.run()
    print(stats)

    stats_df = stats.to_dataframe()
    print(stats_df)

    # Timing components shown as pie slices.
    keys_to_plot = (
        "time_taken_before",
        "solver_stats.bounds_update_nonstorage",
        "solver_stats.bounds_update_storage",
        "solver_stats.objective_update",
        "solver_stats.lp_solve",
        "solver_stats.result_update",
        "time_taken_after",
    )

    keys_to_tabulate = (
        "timesteps",
        "time_taken",
        "solver",
        "num_scenarios",
        "speed",
        # BUG FIX: a missing comma after "solver_name" implicitly concatenated
        # it with "solver_stats.total" into one bogus key, so neither row was
        # ever tabulated.
        "solver_name",
        "solver_stats.total",
        "solver_stats.number_of_rows",
        "solver_stats.number_of_cols",
        "solver_stats.number_of_nonzero",
        "solver_stats.number_of_routes",
        "solver_stats.number_of_nodes",
    )

    values = []
    labels = []
    explode = []
    solver_sub_total = 0.0
    for k in keys_to_plot:
        v = stats_df.loc[k][0]
        values.append(v)
        label = k.split(".", 1)[-1].replace("_", " ").capitalize()
        explode.append(0.0)
        if k.startswith("solver_stats"):
            labels.append("Solver - {}".format(label))
            solver_sub_total += v
        else:
            labels.append(label)

    # Residual solver time not covered by the individual solver keys.
    values.append(stats_df.loc["solver_stats.total"][0] - solver_sub_total)
    labels.append("Solver - Other")
    explode.append(0.0)

    # Everything else, then normalise to proportions.
    values.append(stats_df.loc["time_taken"][0] - sum(values))
    values = np.array(values) / sum(values)
    labels.append("Other")
    explode.append(0.0)

    fig, (ax1, ax2) = plt.subplots(
        figsize=(12, 4), ncols=2, sharey="row", gridspec_kw={"width_ratios": [2, 1]}
    )
    print(values, labels)
    ax1.pie(values, explode=explode, labels=labels, autopct="%1.1f%%", startangle=90)
    ax1.axis("equal")  # Equal aspect ratio ensures that pie is drawn as a circle.

    cell_text = []
    for index, value in stats_df.iterrows():
        if index not in keys_to_tabulate:
            continue
        v = value[0]
        if isinstance(v, (float, np.float64, np.float32)):
            v = f"{v:.2f}"
        cell_text.append([index, v])

    tbl = ax2.table(cellText=cell_text, colLabels=["Statistic", "Value"], loc="center")
    tbl.scale(1.5, 1.5)  # may help
    tbl.set_fontsize(14)
    ax2.axis("off")

    fig.savefig("run_statistics_w_tables.png", dpi=300)
    fig.savefig("run_statistics_w_tables.eps")
    plt.show()
def run():
    """Run the model, print run statistics and plot them as a pie chart and table."""
    model = Model.load(MODEL_FILENAME)

    # Add a storage recorder capturing every parameter.
    TablesRecorder(model, 'thames_output.h5', parameters=[p for p in model.parameters])

    # Run the model
    stats = model.run()
    print(stats)

    stats_df = stats.to_dataframe()
    print(stats_df)

    # Timing components shown as pie slices.
    keys_to_plot = (
        'time_taken_before',
        'solver_stats.bounds_update_nonstorage',
        'solver_stats.bounds_update_storage',
        'solver_stats.objective_update',
        'solver_stats.lp_solve',
        'solver_stats.result_update',
        'time_taken_after',
    )

    keys_to_tabulate = (
        'timesteps',
        'time_taken',
        'solver',
        'num_scenarios',
        'speed',
        # BUG FIX: a missing comma after 'solver_name' implicitly concatenated
        # it with 'solver_stats.total' into one bogus key, so neither row was
        # ever tabulated.
        'solver_name',
        'solver_stats.total',
        'solver_stats.number_of_rows',
        'solver_stats.number_of_cols',
        'solver_stats.number_of_nonzero',
        'solver_stats.number_of_routes',
        'solver_stats.number_of_nodes',
    )

    values = []
    labels = []
    explode = []
    solver_sub_total = 0.0
    for k in keys_to_plot:
        v = stats_df.loc[k][0]
        values.append(v)
        label = k.split('.', 1)[-1].replace('_', ' ').capitalize()
        explode.append(0.0)
        if k.startswith('solver_stats'):
            labels.append('Solver - {}'.format(label))
            solver_sub_total += v
        else:
            labels.append(label)

    # Residual solver time not covered by the individual solver keys.
    values.append(stats_df.loc['solver_stats.total'][0] - solver_sub_total)
    labels.append('Solver - Other')
    explode.append(0.0)

    # Everything else, then normalise to proportions.
    values.append(stats_df.loc['time_taken'][0] - sum(values))
    values = np.array(values) / sum(values)
    labels.append('Other')
    explode.append(0.0)

    fig, (ax1, ax2) = plt.subplots(figsize=(12, 4), ncols=2, sharey='row',
                                   gridspec_kw={'width_ratios': [2, 1]})
    print(values, labels)
    ax1.pie(values, explode=explode, labels=labels, autopct='%1.1f%%', startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.

    cell_text = []
    for index, value in stats_df.iterrows():
        if index not in keys_to_tabulate:
            continue
        v = value[0]
        if isinstance(v, (float, np.float64, np.float32)):
            v = f'{v:.2f}'
        cell_text.append([index, v])

    tbl = ax2.table(cellText=cell_text, colLabels=['Statistic', 'Value'], loc='center')
    tbl.scale(1.5, 1.5)  # may help
    tbl.set_fontsize(14)
    ax2.axis('off')

    fig.savefig('run_statistics_w_tables.png', dpi=300)
    fig.savefig('run_statistics_w_tables.eps')
    plt.show()