def calc_caloric_production_on_uri_list(input_uri_list, output_csv_uri):
    """Write a CSV summarizing caloric production for each LULC raster.

    Each row holds: file root, total calories, and (for every raster after
    the first) the difference from the first raster's total. The first
    raster is treated as the baseline and gets no difference column.
    """
    rows = []
    baseline_calories = 0
    for index, uri in enumerate(input_uri_list):
        file_root = nd.explode_uri(uri)['file_root']
        nd.ArrayFrame(uri)  # loaded as in the original; value not used here
        total = calc_caloric_production_from_lulc(uri)
        row = [file_root, str(total)]
        if index == 0:
            baseline_calories = total
        else:
            row.append(str(total - baseline_calories))
        rows.append(row)
    hb.python_object_to_csv(rows, output_csv_uri)
def plot_global_production(**kw):
    """Render the per-cell meat-production raster on a basemap.

    Requires kw['production_per_cell_uri'] pointing at the production raster.
    """
    production_frame = nd.ArrayFrame(kw['production_per_cell_uri'])
    production_frame.show(
        vmin=0,
        vmax=1500,
        use_basemap=True,
        title='Meat Production',
        cbar_label='Tons per cell',
    )
def calc_caloric_production_on_uri_list(input_uri_list, output_csv_uri):
    """Write a caloric-production summary CSV and return the result frames.

    Like the earlier variant, but expects calc_caloric_production_from_lulc
    to return (sum_calories, ArrayFrame) and collects the frames into a list
    that is returned to the caller.
    """
    frames = []
    rows = []
    baseline_calories = 0
    for index, uri in enumerate(input_uri_list):
        file_root = nd.explode_uri(uri)['file_root']
        nd.ArrayFrame(uri)  # loaded as in the original; replaced just below
        total, frame = calc_caloric_production_from_lulc(uri)
        row = [file_root, str(total)]
        frames.append(frame)
        if index == 0:
            baseline_calories = total
        else:
            row.append(str(total - baseline_calories))
        rows.append(row)
    hb.python_object_to_csv(rows, output_csv_uri)
    return frames
def calc_caloric_production_from_lulc(input_lulc_uri):
    """Estimate total caloric production implied by a LULC raster.

    Allocates the 5-arcmin caloric-production map (Johnson et al. 2016) onto
    agricultural LULC classes (12 = full cropland at full weight, 14 = mosaic
    at half weight), rescales so the clipped-region total matches the source
    data, writes a per-cell calorie raster under project_dir, and returns the
    summed calories.

    NOTE(review): relies on module-level ``ag_dir`` and ``project_dir`` and on
    hard-coded 'input/Baseline/...' paths — confirm the working directory
    before reuse. Intermediate rasters are cached on disk between runs.
    """
    # Data from Johnson et al 2016.
    calories_per_cell_uri = os.path.join(ag_dir, 'calories_per_cell.tif')
    calories_per_cell = nd.ArrayFrame(calories_per_cell_uri)
    ndv = calories_per_cell.no_data_value

    # Project the full global map to Robinson (to match the Volta projection
    # and to allow clip_by_shape). Reused from disk when already present.
    calories_per_cell_projected_uri = os.path.join(
        project_dir, 'calories_per_cell_projected.tif')
    if not os.path.exists(calories_per_cell_projected_uri):
        calories_per_cell_projected = nd.reproject(
            calories_per_cell,
            calories_per_cell_projected_uri,
            epsg_code=54030,  # world Robinson
            no_data_value=ndv)
    else:
        calories_per_cell_projected = nd.ArrayFrame(
            calories_per_cell_projected_uri)

    # Polygon of the Volta (also in Robinson projection)
    aoi_uri = 'input/Baseline/PASOS_cuencas_robinson.shp'

    # Clip the global data to the AOI, but keep at 5 min resolution so the
    # region's true calorie total can be summed exactly later.
    clipped_calories_per_5m_cell_uri = 'input/Baseline/clipped_calories_per_5m_cell.tif'
    if not os.path.exists(clipped_calories_per_5m_cell_uri):
        clipped_calories_per_5m_cell = calories_per_cell_projected.clip_by_shape(
            aoi_uri,
            output_uri=clipped_calories_per_5m_cell_uri,
            no_data_value=ndv)
    else:
        clipped_calories_per_5m_cell = nd.ArrayFrame(
            clipped_calories_per_5m_cell_uri)

    # Load the baseline lulc for adjustment-factor calculation and as a match_af.
    lulc_uri = 'input/Baseline/lulc.tif'
    lulc = nd.ArrayFrame(lulc_uri)
    lulc_ndv = lulc.no_data_value  # NOTE(review): unused below

    # Resample to the LULC's resolution. Note that this will change the sum of
    # calories; the adjustment factor below corrects for that.
    calories_resampled_uri = 'input/Baseline/calories_resampled.tif'
    if not os.path.exists(calories_resampled_uri):
        calories_resampled = clipped_calories_per_5m_cell.resample(
            lulc, output_uri=calories_resampled_uri, no_data_value=ndv)
    else:
        calories_resampled = nd.ArrayFrame(calories_resampled_uri)

    # Based on the assumption that full ag (class 12) contains twice the
    # calories of mosaic ag (class 14), allocate the caloric presence to these
    # two ag locations. These values are still unscaled, but they are correct
    # relative to each other. This simplification means we are doing the
    # equivalent of the InVEST crop model because the cells to allocate are
    # lower res than the target.
    unscaled_calories_baseline = np.where(lulc.data == 12, calories_resampled.data, 0)
    unscaled_calories_baseline = np.where(lulc.data == 14, 0.5 * calories_resampled.data, unscaled_calories_baseline)

    # Multiply the unscaled calories by this adjustment factor, which is the
    # ratio between the actual calories present (calculated from the 5 min
    # resolution data) and the unscaled allocation.
    # NOTE(review): np.sum is applied to the ArrayFrame itself here (not
    # .data) — assumes ArrayFrame supports array reduction; confirm.
    n_calories_present = np.sum(clipped_calories_per_5m_cell)
    n_unscaled_calories_in_baseline = np.sum(unscaled_calories_baseline)
    adjustment_factor = n_calories_present / n_unscaled_calories_in_baseline

    # Optional debug output: write the scaled baseline-calorie raster.
    calc_baseline_calories = False
    if calc_baseline_calories:
        baseline_calories = unscaled_calories_baseline * adjustment_factor
        baseline_calories_uri = 'input/Baseline/baseline_calories.tif'
        # NOTE, this uses numdal in a weird way because it has THREE inputs
        # (of which the last is jammed into kwargs).
        baseline_calories_af = nd.ArrayFrame(baseline_calories, lulc, output_uri=baseline_calories_uri, data_type=6, no_data_value=ndv)

    # Allocate and scale calories on the input (scenario) LULC the same way.
    input_lulc = nd.ArrayFrame(input_lulc_uri)
    unscaled_calories_input_lulc = np.where(input_lulc.data == 12, calories_resampled.data, 0)
    unscaled_calories_input_lulc = np.where(input_lulc.data == 14, 0.5 * calories_resampled.data, unscaled_calories_input_lulc)
    output_calories = unscaled_calories_input_lulc * adjustment_factor
    output_calories_uri = os.path.join(
        project_dir,
        'calories_in_' + nd.explode_uri(input_lulc_uri)['file_root'] + '.tif')
    output_calories_af = nd.ArrayFrame(output_calories, lulc, data_type=7, no_data_value=ndv, output_uri=output_calories_uri)
    # output_calories_af.save(output_uri=output_calories_uri)
    sum_calories = output_calories_af.sum()
    print('Calories from ' + input_lulc_uri + ': ' + str(sum_calories))
    return sum_calories
def create_scenario_calorie_csv(input_dir, output_uri):
    """Summarize per-scenario ecosystem-service totals into one CSV.

    For each scenario in the module-level ``scenarios`` list, sums six
    model-output rasters (carbon, water yield, N/P export, sediment export,
    caloric production) found under ``runs_folder/run_name/<scenario>/``, and
    emits one row per scenario with: raw totals, differences from the
    Baseline scenario, percent differences from Baseline, and (for scenarios
    other than Baseline/BAU/Trend) the same two groups relative to Trend
    (treated as BAU).

    Refactor notes (output is byte-identical to the original):
    - each raster's .sum() is now computed exactly once per scenario instead
      of up to four times (raster sums are expensive);
    - the six near-identical append stanzas are folded into one helper.

    Args:
        input_dir: unused here (the caloric raster path is built from
            runs_folder/run_name, matching the original behavior).
        output_uri: destination CSV path.
    """
    # NOTE(review): the third header group repeats the '_diff_from_baseline'
    # names although those columns hold diffs from BAU — kept as-is so the
    # emitted CSV stays identical; confirm with downstream consumers.
    scenario_results = [[
        '', 'carbon', 'wy', 'n_export', 'p_export', 'sed_export',
        'caloric_production', 'carbon_diff_from_baseline',
        'wy_diff_from_baseline', 'n_export_diff_from_baseline',
        'p_export_diff_from_baseline', 'sed_export_diff_from_baseline',
        'caloric_production_diff_from_baseline',
        'carbon_percent_diff_from_baseline', 'wy_percent_diff_from_baseline',
        'n_export_percent_diff_from_baseline',
        'p_export_percent_diff_from_baseline',
        'sed_export_percent_diff_from_baseline',
        'caloric_production_percent_diff_from_baseline',
        'carbon_diff_from_baseline', 'wy_diff_from_baseline',
        'n_export_diff_from_baseline', 'p_export_diff_from_baseline',
        'sed_export_diff_from_baseline',
        'caloric_production_diff_from_baseline', 'carbon_percent_diff_from_bau',
        'wy_percent_diff_from_bau', 'n_export_percent_diff_from_bau',
        'p_export_percent_diff_from_bau', 'sed_export_percent_diff_from_bau',
        'caloric_production_percent_diff_from_bau',
    ]]

    # Relative raster paths, in column order.
    service_rel_paths = (
        'carbon/tot_c_cur.tif',
        'hydropower_water_yield/output/per_pixel/wyield.tif',
        'ndr/n_export.tif',
        'ndr/p_export.tif',
        'sdr/sed_export.tif',
        'nutritional_adequacy/caloric_production.tif',
    )
    # -1 flips the export services so a positive value reads as retention.
    signs = (1.0, 1.0, -1.0, -1.0, -1.0, 1.0)

    def append_diffs(row, sums, ref_sums):
        # Raw difference columns (scaled by 100, preserving original output).
        for s, r, sign in zip(sums, ref_sums, signs):
            row.append(str(float(s - r) * sign * 100))
        # Percent-difference columns relative to the reference totals.
        for s, r, sign in zip(sums, ref_sums, signs):
            row.append(str(float((s - r) / r) * sign * 100))

    baseline_sums = None
    bau_sums = None
    for scenario in scenarios:
        scenario_result = [scenario]
        scenario_results.append(scenario_result)

        # Sum each service raster exactly once.
        sums = [
            nd.ArrayFrame(
                os.path.join(runs_folder, run_name, scenario, rel)).sum()
            for rel in service_rel_paths
        ]
        if scenario == 'Baseline':
            baseline_sums = sums
        elif scenario == 'Trend':  # TODO Generalize BAU
            bau_sums = sums

        scenario_result.extend(str(float(s)) for s in sums)
        if scenario not in ['Baseline']:
            append_diffs(scenario_result, sums, baseline_sums)
        if scenario not in ['Baseline', 'BAU', 'Trend']:
            append_diffs(scenario_result, sums, bau_sums)

    nd.pp(scenario_results)
    hazelbean.python_object_to_csv(scenario_results, os.path.join(output_uri))
]  # NOTE(review): closes a list literal begun before this excerpt.
# --- Run configuration for the 'r3' run ---
run_name = 'r3'
scenarios = [
    "Baseline",
    "Trend",
    "ILM",
    "TAR",
]
overall_results_dir = os.path.join(runs_folder, run_name)
# Match raster: the Baseline carbon output defines the reference grid.
match_uri = os.path.join(runs_folder, run_name, 'Baseline', 'carbon/tot_c_cur.tif')
match = nd.ArrayFrame(match_uri)
# results_csv_uri = os.path.join(runs_folder, run_name, 'calories_results.csv')
# NOTE(review): the assignment of results_csv_uri is commented out above yet
# the name is used below — this raises NameError unless results_csv_uri is
# defined elsewhere in the file; confirm and re-enable if needed.
do_create_scenario_calorie_csv = 1  # truthy int flags toggle pipeline steps
if do_create_scenario_calorie_csv:
    create_scenario_calorie_csv(input_dir, results_csv_uri)
do_create_percent_difference_from_baseline_bar_chart = 1
if do_create_percent_difference_from_baseline_bar_chart:
    # NOTE(review): difference_from_baseline_csv_uri is assigned but never
    # used below — possibly intended as an input to the chart call.
    difference_from_baseline_csv_uri = os.path.join(
        runs_folder, run_name, 'proportion_differences_from_bau.csv')
    difference_from_baseline_barchart_uri = os.path.join(
        runs_folder, run_name, 'difference_from_baseline.png')
    create_percent_difference_from_baseline_bar_chart(
        results_csv_uri, difference_from_baseline_barchart_uri)
def write_pasture_production_to_raster(**kw):
    """Distribute national pasture-production totals onto per-cell rasters.

    Reads a country-id raster and a per-country production CSV (columns 'id'
    and 'Value'), then writes two rasters:
      1. kw['production_by_country_uri'] — each country's total production
         painted uniformly over its cells;
      2. kw['production_per_cell_uri'] — each country's total spread across
         its cells proportionally to the pasture fraction raster
         kw['proportion_pasture_uri'].

    Required kw keys: 'country_ids_raster_uri', 'pasture_csv_uri',
    'production_by_country_uri', 'proportion_pasture_uri',
    'production_per_cell_uri'.

    Returns:
        kw unchanged (pipeline pass-through convention).
    """
    L.info('Running write_pasture_production_to_raster')
    ids = nd.ArrayFrame(kw['country_ids_raster_uri'])
    ids = ids.set_data_type(7)
    ids = ids.set_no_data_value(-9999.0)
    ids_present = nd.get_value_count_odict_from_array(ids.data)

    df = pd.read_csv(kw['pasture_csv_uri'])
    rules = dict(zip(df['id'], df['Value']))

    # Countries missing from the database get zero production rather than
    # letting their raw id value pass through the reclassification.
    for country_id in ids_present:
        if country_id not in rules:
            rules[country_id] = 0

    # Write the production totals (tons) onto each country's cells.
    # BUG FIX: np.int was removed in NumPy 1.24; the alias was exactly the
    # builtin int, so astype(int) preserves the original behavior.
    production_by_country_array = nd.reclassify_int_array_by_dict_to_floats(
        ids.data.astype(int), rules).astype(np.float64)
    production_by_country = nd.ArrayFrame(
        production_by_country_array,
        ids,
        data_type=7,
        output_uri=kw['production_by_country_uri'])

    proportion_pasture = nd.ArrayFrame(kw['proportion_pasture_uri'])
    global_production = np.zeros(proportion_pasture.shape)
    for country_id, production_total in rules.items():
        L.info(str(country_id) + ', ' + str(production_total))
        if production_total > 0:
            proportion_in_country = np.where(
                ids.data == country_id, proportion_pasture.data, 0)
            total_proportion_in_country = np.sum(proportion_in_country)
            if total_proportion_in_country > 0:
                L.info('production_total ' + str(production_total))
                L.info('total_proportion_in_country '
                       + str(total_proportion_in_country))
                # Tons per unit of pasture proportion within this country.
                production_per_proportion = production_total / total_proportion_in_country
                L.info('production_per_proportion '
                       + str(production_per_proportion))
                production = np.where(
                    ids.data == country_id,
                    production_per_proportion * proportion_pasture.data,
                    0)
                L.info('production ' + str(np.sum(production)))
                global_production += production
    L.info('global_production ' + str(np.sum(global_production)))
    global_production_af = nd.ArrayFrame(
        global_production,
        proportion_pasture,
        output_uri=kw['production_per_cell_uri'])
    return kw
def calc_caloric_production_from_lulc_uri(input_lulc_uri, aoi_uri, output_uri):
    """Write a per-cell caloric-production raster for a scenario LULC.

    Generalized variant of ``calc_caloric_production_from_lulc``: the AOI and
    output path are parameters and the reprojection targets the input LULC's
    own projection rather than hard-coded Robinson. Directory layout is
    inferred from input_lulc_uri (<working_dir>/<scenario_dir>/lulc file,
    with a sibling 'Baseline' directory).

    NOTE(review): returns None (the result is only written to output_uri),
    unlike calc_caloric_production_from_lulc which returns the sum. Also
    depends on module-level ``ag_dir`` and on the hard-coded
    'input/Baseline/clipped_calories_per_5m_cell.tif' path.
    """
    # First check that the required files exist, creating them if not.
    # Data from Johnson et al 2016.
    working_dir = os.path.split(os.path.split(input_lulc_uri)[0])[0]
    baseline_dir = os.path.join(working_dir, 'Baseline')
    scenario_dir = os.path.split(input_lulc_uri)[0]
    calories_resampled_uri = os.path.join(scenario_dir, 'calories_per_ha_2000.tif')
    if not os.path.exists(calories_resampled_uri):
        # Get global 5m calories map from base data.
        calories_per_cell_uri = os.path.join(ag_dir, 'calories_per_cell.tif')
        calories_per_cell = nd.ArrayFrame(calories_per_cell_uri)
        ndv = calories_per_cell.no_data_value

        # Project the full global map to the projection of the input lulc.
        calories_per_cell_projected_uri = os.path.join(baseline_dir, 'calories_per_cell_projected.tif')
        output_wkt = nd.get_projection_from_uri(input_lulc_uri)
        # print('projection', projection)
        # output_wkt = projection.ExportToWkt()
        calories_per_cell_projected = nd.reproject(calories_per_cell, calories_per_cell_projected_uri, output_wkt=output_wkt, no_data_value=ndv)

        # Clip the global data to the project aoi, but keep at 5 min
        # resolution for exact summation later.
        clipped_calories_per_5m_cell_uri = 'input/Baseline/clipped_calories_per_5m_cell.tif'
        clipped_calories_per_5m_cell = calories_per_cell_projected.clip_by_shape(aoi_uri, output_uri=clipped_calories_per_5m_cell_uri, no_data_value=ndv)

        # Load the baseline lulc for adjustment-factor calculation and as a match_af.
        baseline_lulc_uri = os.path.join(baseline_dir, 'lulc.tif')
        baseline_lulc = nd.ArrayFrame(baseline_lulc_uri)
        input_lulc = nd.ArrayFrame(input_lulc_uri)

        # Resample baseline lulc to the input_lulc (a slight size change
        # happens with the scenario generator).
        baseline_resampled_lulc = baseline_lulc.resample(input_lulc, discard_at_exit=True)

        # Resample calories to lulc resolution (cached for later runs).
        calories_resampled = clipped_calories_per_5m_cell.resample(input_lulc, output_uri=calories_resampled_uri, no_data_value=ndv)

    # Reload everything from disk so both the cached branch and the
    # freshly-created branch proceed from identical state.
    calories_resampled = None
    input_lulc = nd.ArrayFrame(input_lulc_uri)
    ndv = input_lulc.no_data_value
    calories_resampled = nd.ArrayFrame(calories_resampled_uri)
    baseline_lulc_uri = os.path.join(baseline_dir, 'lulc.tif')
    baseline_lulc = nd.ArrayFrame(baseline_lulc_uri)
    baseline_resampled_lulc = baseline_lulc.resample(input_lulc, discard_at_exit=True)
    # baseline_lulc_uri = os.path.join(baseline_dir, 'lulc.tif')
    # print('baseline_lulc_uri', baseline_lulc_uri)
    # baseline_lulc = nd.ArrayFrame(baseline_lulc_uri)
    clipped_calories_per_5m_cell_uri = 'input/Baseline/clipped_calories_per_5m_cell.tif'
    clipped_calories_per_5m_cell = nd.ArrayFrame(clipped_calories_per_5m_cell_uri)

    # Based on the assumption that full ag (class 12) contains twice the
    # calories of mosaic ag (class 14), allocate the caloric presence to these
    # two ag locations. Note that these are still not scaled, but they are
    # correct relative to each other. This simplification means we are doing
    # the equivalent of the InVEST crop model because the cells to allocate
    # are lower res than the target.
    unscaled_calories_baseline = np.where(baseline_resampled_lulc.data == 12, calories_resampled.data, 0)
    unscaled_calories_baseline = np.where(baseline_resampled_lulc.data == 14, 0.5 * calories_resampled.data, unscaled_calories_baseline)

    # Multiply the unscaled calories by this adjustment factor, which is the
    # ratio between the actual calories present (calculated from the 5 min
    # resolution data) and the unscaled allocation.
    # NOTE(review): np.sum is applied to the ArrayFrame itself (not .data) —
    # assumes ArrayFrame supports array reduction; confirm.
    n_calories_present = np.sum(clipped_calories_per_5m_cell)
    n_unscaled_calories_in_baseline = np.sum(unscaled_calories_baseline)
    adjustment_factor = n_calories_present / n_unscaled_calories_in_baseline

    # Allocate and scale calories on the input (scenario) LULC the same way.
    unscaled_calories_input_lulc = np.where(input_lulc.data == 12, calories_resampled.data, 0)
    unscaled_calories_input_lulc = np.where(input_lulc.data == 14, 0.5 * calories_resampled.data, unscaled_calories_input_lulc)
    output_calories = unscaled_calories_input_lulc * adjustment_factor
    output_calories_af = nd.ArrayFrame(output_calories, input_lulc, data_type=6, no_data_value=ndv, output_uri=output_uri)
    print('Sum of ' + output_calories_af.uri + ': ' + str(output_calories_af.sum()))
def create_scenario_outputs_csv(**kw):
    """Summarize per-scenario ecosystem-service totals into the outputs CSV.

    For each scenario in kw['scenario_names'], sums six model-output rasters
    (carbon, water yield, N/P export, sediment export, caloric production)
    and emits one row with: raw totals, differences and percent differences
    from the baseline scenario, and (for scenarios other than baseline/BAU)
    the same two groups relative to the BAU scenario.

    Required kw keys: 'input_dir', 'runs_dir', 'run_name',
    'scenario_outputs_csv_uri', 'scenario_names', 'baseline_scenario_name',
    'bau_scenario_name'. ('current_run_dir' is read but unused, kept for
    caller compatibility.)

    BUG FIX: the original ended with a bare ``scenario_result.to_file``
    (AttributeError — lists have no ``to_file``) and never wrote the CSV;
    the table is now written to kw['scenario_outputs_csv_uri'].
    Also refactored so each raster's .sum() is computed exactly once per
    scenario (the original re-summed each raster up to four times); the
    emitted values are unchanged.

    Returns:
        kw unchanged (pipeline pass-through convention).
    """
    input_dir = kw.get('input_dir')
    runs_dir = kw.get('runs_dir')
    run_name = kw.get('run_name')
    current_run_dir = kw.get('current_run_dir')  # unused; kept for parity
    scenario_outputs_csv_uri = kw.get('scenario_outputs_csv_uri')
    scenarios = kw.get('scenario_names')
    baseline_name = kw['baseline_scenario_name']
    bau_name = kw['bau_scenario_name']

    # TODO Generalize
    # NOTE(review): the third header group repeats the '_diff_from_baseline'
    # names although those columns hold diffs from BAU — kept as-is so the
    # emitted CSV stays identical; confirm with downstream consumers.
    scenario_results = [[
        '', 'carbon', 'wy', 'n_export', 'p_export', 'sed_export',
        'caloric_production', 'carbon_diff_from_baseline',
        'wy_diff_from_baseline', 'n_export_diff_from_baseline',
        'p_export_diff_from_baseline', 'sed_export_diff_from_baseline',
        'caloric_production_diff_from_baseline',
        'carbon_percent_diff_from_baseline', 'wy_percent_diff_from_baseline',
        'n_export_percent_diff_from_baseline',
        'p_export_percent_diff_from_baseline',
        'sed_export_percent_diff_from_baseline',
        'caloric_production_percent_diff_from_baseline',
        'carbon_diff_from_baseline', 'wy_diff_from_baseline',
        'n_export_diff_from_baseline', 'p_export_diff_from_baseline',
        'sed_export_diff_from_baseline',
        'caloric_production_diff_from_baseline', 'carbon_percent_diff_from_bau',
        'wy_percent_diff_from_bau', 'n_export_percent_diff_from_bau',
        'p_export_percent_diff_from_bau', 'sed_export_percent_diff_from_bau',
        'caloric_production_percent_diff_from_bau',
    ]]

    # -1 flips the export services so a positive value reads as retention.
    signs = (1.0, 1.0, -1.0, -1.0, -1.0, 1.0)

    def append_diffs(row, sums, ref_sums):
        # Raw difference columns (scaled by 100, preserving original output).
        for s, r, sign in zip(sums, ref_sums, signs):
            row.append(str(float(s - r) * sign * 100))
        # Percent-difference columns relative to the reference totals.
        for s, r, sign in zip(sums, ref_sums, signs):
            row.append(str(float((s - r) / r) * sign * 100))

    baseline_sums = None
    bau_sums = None
    for scenario in scenarios:
        print('Adding ', scenario, ' to output csv.')
        scenario_result = [scenario]
        scenario_results.append(scenario_result)

        service_uris = [
            os.path.join(runs_dir, run_name, scenario, 'carbon/tot_c_cur.tif'),
            os.path.join(runs_dir, run_name, scenario,
                         'hydropower_water_yield/output/per_pixel/wyield.tif'),
            os.path.join(runs_dir, run_name, scenario, 'ndr/n_export.tif'),
            os.path.join(runs_dir, run_name, scenario, 'ndr/p_export.tif'),
            os.path.join(runs_dir, run_name, scenario, 'sdr/sed_export.tif'),
            os.path.join(input_dir, scenario, 'caloric_production.tif'),
        ]
        # Sum each service raster exactly once.
        sums = [nd.ArrayFrame(uri).sum() for uri in service_uris]
        if scenario == baseline_name:
            baseline_sums = sums
        elif scenario == bau_name:
            bau_sums = sums

        scenario_result.extend(str(float(s)) for s in sums)
        if scenario not in [baseline_name]:
            append_diffs(scenario_result, sums, baseline_sums)
        if scenario not in [baseline_name, bau_name]:
            append_diffs(scenario_result, sums, bau_sums)

    hb.python_object_to_csv(scenario_results, scenario_outputs_csv_uri)
    return kw
# --- Run configuration for the 'full2' run ---
runs_folder = os.path.join(output_folder, 'runs')
results_names = ['carbon', 'wy', 'n_export', 'p_export', 'sed_export']
run_name = 'full2'
scenarios = ['Baseline', 'Trend2030', 'ILM2030']
overall_results_dir = os.path.join(runs_folder, run_name)

do_marginal_ranking = False  # toggle for the raster loading below
if do_marginal_ranking:
    # Load the Baseline model-output rasters (the commented .show calls
    # rendered each one to a PNG).
    carbon_result_uri = os.path.join(runs_folder, run_name, 'Baseline',
                                     'carbon/tot_c_cur.tif')
    carbon = nd.ArrayFrame(carbon_result_uri)
    # carbon.show(output_uri=os.path.join(runs_folder, run_name, 'Baseline', 'tot_c_cur.png'))
    wy = nd.ArrayFrame(
        os.path.join(runs_folder, run_name, 'Baseline',
                     'hydropower_water_yield/output/per_pixel/wyield.tif'))
    # wy.show(output_uri=os.path.join(runs_folder, run_name, 'Baseline', 'wyield.png'))
    n_export = nd.ArrayFrame(
        os.path.join(runs_folder, run_name, 'Baseline', 'ndr/n_export.tif'))
    # n_export.show(output_uri=os.path.join(runs_folder, run_name, 'Baseline', 'n_export.png'))
    p_export = nd.ArrayFrame(
        os.path.join(runs_folder, run_name, 'Baseline', 'ndr/p_export.tif'))
    # p_export.show(output_uri=os.path.join(runs_folder, run_name, 'Baseline', 'p_export.png'))
    # NOTE(review): this block appears to continue beyond this excerpt.