# Example #1
# 0
def calc_caloric_production_on_uri_list(input_uri_list, output_csv_uri):
    """Write a CSV of total caloric production for each LULC raster.

    Each output row is ``[file_root, total_calories]`` and, for every
    raster after the first, an additional ``difference_from_baseline``
    column.  The first URI in ``input_uri_list`` defines the baseline.

    Args:
        input_uri_list: iterable of LULC raster URIs, processed in order.
        output_csv_uri: path of the CSV file to write.
    """
    first_lulc = True
    baseline_calories = 0

    output = []

    for uri in input_uri_list:
        row = [nd.explode_uri(uri)['file_root']]

        # NOTE(review): the old code also built nd.ArrayFrame(uri) here but
        # never used it (only a commented-out debug line referenced it), so
        # that redundant raster load has been removed.
        sum_calories = calc_caloric_production_from_lulc(uri)
        row.append(str(sum_calories))

        if first_lulc:
            # First raster is the baseline; it gets no difference column.
            baseline_calories = sum_calories
            first_lulc = False
        else:
            row.append(str(sum_calories - baseline_calories))

        output.append(row)

    hb.python_object_to_csv(output, output_csv_uri)
def calc_caloric_production_on_uri_list(input_uri_list, output_csv_uri):
    """Write a CSV of total caloric production per LULC raster.

    Same CSV layout as the variant above (file_root, total calories,
    and — for every raster after the first — difference from the first
    raster's total), but this version also collects the ArrayFrame
    produced for each raster.

    Args:
        input_uri_list: iterable of LULC raster URIs, processed in order.
        output_csv_uri: path of the CSV file to write.

    Returns:
        list of ArrayFrames, one per input URI, in input order.
    """
    first_lulc = True
    baseline_calories = 0

    output = []
    af_list = []

    for uri in input_uri_list:
        row = [nd.explode_uri(uri)['file_root']]

        # NOTE(review): calc_caloric_production_from_lulc already returns
        # the ArrayFrame here, so the separate nd.ArrayFrame(uri) the old
        # code constructed was immediately overwritten — removed.
        sum_calories, af = calc_caloric_production_from_lulc(uri)
        row.append(str(sum_calories))

        af_list.append(af)

        if first_lulc:
            # First raster is the baseline; it gets no difference column.
            baseline_calories = sum_calories
            first_lulc = False
        else:
            row.append(str(sum_calories - baseline_calories))

        output.append(row)

    hb.python_object_to_csv(output, output_csv_uri)

    return af_list
# Example #3
# 0
def create_scenario_calorie_csv(input_dir, output_uri):
    """Summarize per-scenario ecosystem-service totals into a CSV.

    For each scenario in the module-level ``scenarios`` list, sums six
    service rasters (carbon, water yield, N/P/sediment export, caloric
    production) found under ``runs_folder/run_name/<scenario>/`` and
    writes, per scenario: the raw totals, the (scaled) differences and
    percent differences from the 'Baseline' scenario, and the same
    relative to the 'Trend' (BAU) scenario.

    Args:
        input_dir: unused here; kept for interface compatibility (the
            commented-out caloric path in the old code referenced it).
        output_uri: path of the results CSV to write.

    NOTE(review): the original had a syntactically invalid duplicated
    tail (loop-indented statements after the final write) which has been
    removed, and the second set of ``*_diff_from_baseline`` header
    columns was mislabeled — those values are diffs from BAU.
    """
    service_names = [
        'carbon',
        'wy',
        'n_export',
        'p_export',
        'sed_export',
        'caloric_production',
    ]
    # Export services are sign-flipped so that a drop in export reads as
    # a gain (interpret as retention) — matches the old inline -1.0 factors.
    signs = [1.0, 1.0, -1.0, -1.0, -1.0, 1.0]
    # Raster path for each service, relative to the scenario directory.
    service_paths = [
        'carbon/tot_c_cur.tif',
        'hydropower_water_yield/output/per_pixel/wyield.tif',
        'ndr/n_export.tif',
        'ndr/p_export.tif',
        'sdr/sed_export.tif',
        'nutritional_adequacy/caloric_production.tif',
    ]

    header = ['']
    header += service_names
    header += [s + '_diff_from_baseline' for s in service_names]
    header += [s + '_percent_diff_from_baseline' for s in service_names]
    header += [s + '_diff_from_bau' for s in service_names]
    header += [s + '_percent_diff_from_bau' for s in service_names]
    scenario_results = [header]

    baseline_results = []
    bau_results = []
    for scenario in scenarios:
        scenario_result = [scenario]
        scenario_results.append(scenario_result)

        # Sum each service raster once per scenario.
        sums = [
            nd.ArrayFrame(
                os.path.join(runs_folder, run_name, scenario, rel_path)).sum()
            for rel_path in service_paths
        ]

        if scenario == 'Baseline':
            baseline_results = list(sums)
        # TODO Generalize BAU
        elif scenario == 'Trend':
            bau_results = list(sums)

        scenario_result.extend(str(float(s)) for s in sums)

        if scenario not in ['Baseline']:
            # Scaled differences and percent differences vs. Baseline.
            scenario_result.extend(
                str(float(s - b) * sign * 100)
                for s, b, sign in zip(sums, baseline_results, signs))
            scenario_result.extend(
                str(float((s - b) / b) * sign * 100)
                for s, b, sign in zip(sums, baseline_results, signs))

        if scenario not in ['Baseline', 'BAU', 'Trend']:
            # Scaled differences and percent differences vs. BAU ('Trend').
            scenario_result.extend(
                str(float(s - b) * sign * 100)
                for s, b, sign in zip(sums, bau_results, signs))
            scenario_result.extend(
                str(float((s - b) / b) * sign * 100)
                for s, b, sign in zip(sums, bau_results, signs))

    nd.pp(scenario_results)

    hazelbean.python_object_to_csv(scenario_results, os.path.join(output_uri))
def zonal_statistics_flex(input_raster,
                          zone_vector_path,
                          zone_ids_raster_path=None,
                          id_column_label=None,
                          data_type=None,
                          ndv=None,
                          zones_ndv=None,
                          values_ndv=None,
                          all_touched=None,
                          assert_projections_same=True,
                          use_iterblocks=True,
                          unique_zone_ids=None,
                          csv_output_path=None,
                          verbose=True,
                          rewrite_zone_ids_raster=True):
    """Compute zonal statistics of input_raster over zone_vector_path.

    If zone_ids_raster_path is set, a rasterized-zones approach is used
    (the id raster is created and/or reused for later processing
    speed-ups) and a pandas DataFrame of per-zone sums is returned.
    Otherwise falls back to pgp.zonal_statistics and returns its dict.

    Still todo, in the case where there is a STR labeled column, generate
    some ids. For now it HAS to be ints.

    Args:
        input_raster: raster path (or flexible input resolvable by
            hb.get_flex_as_path).
        zone_vector_path: polygon vector defining the zones.
        zone_ids_raster_path: optional path of the rasterized zone ids;
            created if missing and rewrite_zone_ids_raster allows it.
        id_column_label, data_type, all_touched: forwarded to
            hb.convert_polygons_to_id_raster.
        ndv / values_ndv: nodata of the value raster (ndv overrides;
            falls back to the raster's own nodata, then -9999.0).
        zones_ndv: nodata for the zone id raster (default -9999).
        assert_projections_same: if True, raise when projections differ;
            otherwise only log a critical message.
        use_iterblocks, unique_zone_ids, verbose: forwarded to
            hb.zonal_statistics_rasterized.
        csv_output_path: if given, results are also written here.
        rewrite_zone_ids_raster: if False and no id raster path is given,
            use the pgp fallback.

    Returns:
        pandas DataFrame indexed by zone id with a 'sum' column, or the
        pgp.zonal_statistics dict on the fallback path.
    """

    input_path = hb.get_flex_as_path(input_raster)
    base_raster_path_band = (input_path, 1)

    # Test that input_raster and shapefile are in the same projection. Sillyness results if not.
    if assert_projections_same:
        hb.assert_gdal_paths_in_same_projection([input_raster, zone_vector_path])
    else:
        a = hb.assert_gdal_paths_in_same_projection([input_raster, zone_vector_path], return_result=True)
        if not a:
            L.critical('Ran zonal_statistics_flex but the inputs werent in identical projections.')

    # If no zone_ids_raster_path was given, use the PGP version, which doesn't
    # use a rasterized approach.
    # NOTE(review): the old condition also required rewrite_zone_ids_raster to
    # be False, which made zone_ids_raster_path=None with the default
    # rewrite_zone_ids_raster=True crash in os.path.exists(None) below; that
    # case now takes this fallback instead.
    if not zone_ids_raster_path:
        to_return = pgp.zonal_statistics(
            base_raster_path_band, zone_vector_path,
            aggregate_layer_name=None, ignore_nodata=True,
            polygons_might_overlap=True, working_dir=None)
        if csv_output_path is not None:
            hb.python_object_to_csv(to_return, csv_output_path)
        return to_return

    # if zone_ids_raster_path is defined, then we are using a rasterized approach.
    # NOTE that by construction, this type of zonal statistics cannot handle overlapping polygons (each polygon is just represented by its id int value in the raster).
    else:
        if zones_ndv is None:
            zones_ndv = -9999

        # ndv takes precedence; otherwise fall back to the raster's own
        # nodata, and finally to -9999.0 if the raster declares none.
        if ndv is not None:
            values_ndv = ndv
        if values_ndv is None:
            values_ndv = hb.get_raster_info_hb(input_raster)['nodata'][0]

            if values_ndv is None:
                values_ndv = -9999.0

        # if zone_ids_raster_path is given, use it to speed up processing (creating it first if it doesnt exist)
        if not os.path.exists(zone_ids_raster_path) and rewrite_zone_ids_raster is not False:
            # Calculate the id raster and save it
            if verbose:
                L.info('Creating id_raster with convert_polygons_to_id_raster')
            hb.convert_polygons_to_id_raster(zone_vector_path, zone_ids_raster_path, input_raster, id_column_label=id_column_label, data_type=data_type,
                                             ndv=zones_ndv, all_touched=all_touched)
        else:
            if verbose:
                L.info('Zone_ids_raster_path existed, so not creating it.')
            # hb.assert_gdal_paths_have_same_geotransform([zone_ids_raster_path, input_raster])
        if verbose:
            L.info('Starting zonal_statistics_rasterized using zone_ids_raster_path at ' + str(zone_ids_raster_path))

        # Call zonal_statistics_rasterized to parse vars into cython-format and go from there.
        unique_ids, sums, counts = hb.zonal_statistics_rasterized(zone_ids_raster_path, input_raster, zones_ndv=zones_ndv, values_ndv=values_ndv,
                                                                  use_iterblocks=use_iterblocks, unique_zone_ids=unique_zone_ids, verbose=verbose)

        df = pd.DataFrame(index=unique_ids, data={'sum': sums})
        # NOTE(review): removed a stray debug print(df) here.
        # Drop zones whose sum is exactly 0 — presumably these are ids that
        # never appeared in the raster; TODO confirm zero is never a valid sum.
        df[df == 0] = np.nan
        df.dropna(inplace=True)
        if csv_output_path is not None:
            df.to_csv(csv_output_path)

        return df