def test_aggregate_raster_values_all_polys(self):
        """PyGeoprocessing: test aggregate raster values as a lump."""
        import pygeoprocessing

        base_raster_path = os.path.join(TEST_DATA,
                                        'aggregate_raster_values_data',
                                        'base_raster.tif')
        shapefile_path = os.path.join(TEST_DATA,
                                      'aggregate_raster_values_data',
                                      'watershed.shp')

        result = pygeoprocessing.aggregate_raster_values_uri(
            base_raster_path, shapefile_path)

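        # When no shapefile_field is given, results are keyed under the
        # sentinel FID 9999, representing all polygons aggregated as a lump.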
        expected_result = pygeoprocessing.AggregatedValues(
            total={9999: 398428.0},
            pixel_mean={9999: 1.0},
            hectare_mean={9999: 11.111194773692587},
            n_pixels={9999: 398428.0},
            pixel_min={9999: 1.0},
            pixel_max={9999: 1.0})

        for metric in [
                'total', 'pixel_mean', 'hectare_mean', 'n_pixels', 'pixel_min',
                'pixel_max'
        ]:
            _assert_deep_almost_equal(self,
                                      getattr(expected_result, metric),
                                      getattr(result, metric),
                                      places=6)

    def test_aggregate_raster_values_include_nodata(self):
        """PyGeoprocessing: test aggregate raster values, include nodata."""
        import pygeoprocessing

        base_raster_path = os.path.join(TEST_DATA,
                                        'aggregate_raster_values_data',
                                        'base_raster.tif')

        shapefile_path = os.path.join(TEST_DATA,
                                      'aggregate_raster_values_data',
                                      'overlap_watershed.shp')

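        # With ignore_nodata=False, nodata pixels inside a polygon still count
        # toward n_pixels, which can pull pixel_mean below 1.0 (see FID 3).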
        result = pygeoprocessing.aggregate_raster_values_uri(
            base_raster_path,
            shapefile_path,
            shapefile_field='DN',
            all_touched=False,
            polygons_might_overlap=True,
            ignore_nodata=False)

        expected_result = pygeoprocessing.AggregatedValues(
            total={
                1: 3.0,
                2: 398425.0,
                3: 5.0
            },
            pixel_mean={
                1: 1.0,
                2: 1.0,
                3: 0.41666666666666669
            },
            hectare_mean={
                1: 11.111111110950143,
                2: 11.111111110937156,
                3: 3.6282805923682009
            },
            n_pixels={
                1: 3.0,
                2: 398425.0,
                3: 12.0
            },
            pixel_min={
                1: 1.0,
                2: 1.0,
                3: 1.0
            },
            pixel_max={
                1: 1.0,
                2: 1.0,
                3: 1.0
            })

        for metric in [
                'total', 'pixel_mean', 'hectare_mean', 'n_pixels', 'pixel_min',
                'pixel_max'
        ]:
            _assert_deep_almost_equal(self,
                                      getattr(expected_result, metric),
                                      getattr(result, metric),
                                      places=6)

    def test_aggregate_raster_bad_fid_type(self):
        """PyGeoprocessing: test aggregate raster bad fieldtype."""
        import pygeoprocessing

        base_raster_path = os.path.join(TEST_DATA,
                                        'aggregate_raster_values_data',
                                        'base_raster.tif')

        shapefile_path = os.path.join(TEST_DATA,
                                      'aggregate_raster_values_data',
                                      'watershed.shp')

        with self.assertRaises(TypeError):
            pygeoprocessing.aggregate_raster_values_uri(
                base_raster_path,
                shapefile_path,
                shapefile_field='stringfiel',
                all_touched=True,
                polygons_might_overlap=True)

    def test_aggregate_raster_values_missing_fid(self):
        """PyGeoprocessing: test aggregate raster field id incorrect."""
        import pygeoprocessing

        base_raster_path = os.path.join(TEST_DATA,
                                        'aggregate_raster_values_data',
                                        'base_raster.tif')

        shapefile_path = os.path.join(TEST_DATA,
                                      'aggregate_raster_values_data',
                                      'watershed.shp')

        with self.assertRaises(AttributeError):
            pygeoprocessing.aggregate_raster_values_uri(
                base_raster_path,
                shapefile_path,
                shapefile_field='badname',
                all_touched=True,
                polygons_might_overlap=True)
Example #5
def execute(args):
    """Main entry point for GLOBIO model.

        The model operates in two modes.  Mode (a) generates a landcover map
            based on a base landcover map and information about crop yields,
            infrastructure, and more.  Mode (b) assumes the GLOBIO landcover
            map has already been generated.  These modes are used below to
            describe input parameters.

        args['workspace_dir'] - (string) output directory for intermediate,
            temporary, and final files
        args['predefined_globio'] - (boolean) if True then "mode (b)" else
            "mode (a)"
        args['results_suffix'] - (optional) (string) string to append to any
            output files
        args['lulc_uri'] - (string) used in "mode (a)" path to a base landcover
            map with integer codes
        args['lulc_to_globio_table_uri'] - (string) used in "mode (a)" path to
            table that translates the land-cover args['lulc_uri'] to
            intermediate GLOBIO classes, from which they will be further
            differentiated using the additional data in the model.

                'lucode': Land use and land cover class code of the dataset
                    used. LULC codes match the 'values' column in the LULC
                    raster of mode (b) and must be numeric and unique.
                'globio_lucode': The LULC code corresponding to the GLOBIO class
                    to which it should be converted, using intermediate codes
                    described in the example below.

        args['infrastructure_dir'] - (string) used in "mode (a)" a path to a
            folder containing maps of any forms of infrastructure to
            consider in the calculation of MSAI. These data may be in either
            raster or vector format.
        args['pasture_uri'] - (string) used in "mode (a)" path to pasture raster
        args['potential_vegetation_uri'] - (string) used in "mode (a)" path to
            potential vegetation raster
        args['intensification_uri'] - (string) used in "mode (a)" a path to
            intensification raster
        args['pasture_threshold'] - (float) used in "mode (a)"
        args['intensification_threshold'] - (float) used in "mode (a)"
        args['primary_threshold'] - (float) used in "mode (a)"
        args['msa_parameters_uri'] - (string) path to MSA classification
            parameters
        args['aoi_uri'] - (string) (optional) if it exists then final MSA raster
            is summarized by AOI
        args['globio_lulc_uri'] - (string) used in "mode (b)" path to predefined
            globio raster.
    """

    msa_parameter_table = load_msa_parameter_table(args['msa_parameters_uri'])

    #append a _ to the suffix if it's not empty and doesn't already have one
    try:
        file_suffix = args['results_suffix']
        if file_suffix != "" and not file_suffix.startswith('_'):
            file_suffix = '_' + file_suffix
    except KeyError:
        file_suffix = ''

    #create working directories
    output_dir = os.path.join(args['workspace_dir'], 'output')
    intermediate_dir = os.path.join(args['workspace_dir'], 'intermediate')
    tmp_dir = os.path.join(args['workspace_dir'], 'tmp')

    pygeoprocessing.geoprocessing.create_directories(
        [output_dir, intermediate_dir, tmp_dir])

    #the cell size should be based on the landcover map
    if not args['predefined_globio']:
        out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
            args['lulc_uri'])
        globio_lulc_uri = _calculate_globio_lulc_map(args, file_suffix,
                                                     intermediate_dir, tmp_dir,
                                                     out_pixel_size)
    else:
        out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
            args['globio_lulc_uri'])
        LOGGER.info('no need to calculate GLOBIO LULC because it is passed in')
        globio_lulc_uri = args['globio_lulc_uri']

    globio_nodata = pygeoprocessing.get_nodata_from_uri(globio_lulc_uri)

    #load the infrastructure layers from disk
    infrastructure_filenames = []
    infrastructure_nodata_list = []
    for root_directory, _, filename_list in os.walk(
            args['infrastructure_dir']):

        for filename in filename_list:
            if filename.lower().endswith(".tif"):
                infrastructure_filenames.append(
                    os.path.join(root_directory, filename))
                infrastructure_nodata_list.append(
                    pygeoprocessing.geoprocessing.get_nodata_from_uri(
                        infrastructure_filenames[-1]))
            if filename.lower().endswith(".shp"):
                infrastructure_tmp_raster = (os.path.join(
                    tmp_dir, os.path.basename(filename.lower() + ".tif")))
                pygeoprocessing.geoprocessing.new_raster_from_base_uri(
                    globio_lulc_uri,
                    infrastructure_tmp_raster,
                    'GTiff',
                    -1.0,
                    gdal.GDT_Int32,
                    fill_value=0)
                pygeoprocessing.geoprocessing.rasterize_layer_uri(
                    infrastructure_tmp_raster,
                    os.path.join(root_directory, filename),
                    burn_values=[1],
                    option_list=["ALL_TOUCHED=TRUE"])
                infrastructure_filenames.append(infrastructure_tmp_raster)
                infrastructure_nodata_list.append(
                    pygeoprocessing.geoprocessing.get_nodata_from_uri(
                        infrastructure_filenames[-1]))

    if len(infrastructure_filenames) == 0:
        raise ValueError(
            "infrastructure directory didn't have any GeoTIFFs or "
            "Shapefiles at %s" % args['infrastructure_dir'])

    infrastructure_nodata = -1
    infrastructure_uri = os.path.join(
        intermediate_dir, 'combined_infrastructure%s.tif' % file_suffix)

    def _collapse_infrastructure_op(*infrastructure_array_list):
        """Combines all input infrastructure into a single map where if any
            pixel on the stack is 1 gets passed through, any nodata pixel
            masks out all of them"""
        nodata_mask = (
            infrastructure_array_list[0] == infrastructure_nodata_list[0])
        infrastructure_result = infrastructure_array_list[0] > 0
        for index in range(1, len(infrastructure_array_list)):
            current_nodata = (infrastructure_array_list[index] ==
                              infrastructure_nodata_list[index])

            infrastructure_result = (infrastructure_result | (
                (infrastructure_array_list[index] > 0) & ~current_nodata))

            nodata_mask = (nodata_mask & current_nodata)

        return numpy.where(nodata_mask, infrastructure_nodata,
                           infrastructure_result)

    LOGGER.info('collapse infrastructure into one raster')
    pygeoprocessing.geoprocessing.vectorize_datasets(
        infrastructure_filenames,
        _collapse_infrastructure_op,
        infrastructure_uri,
        gdal.GDT_Byte,
        infrastructure_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    #calc_msa_f
    primary_veg_mask_uri = os.path.join(tmp_dir,
                                        'primary_veg_mask%s.tif' % file_suffix)
    primary_veg_mask_nodata = -1

    def _primary_veg_mask_op(lulc_array):
        """masking out natural areas"""
        nodata_mask = lulc_array == globio_nodata
        result = (lulc_array == 1)
        return numpy.where(nodata_mask, primary_veg_mask_nodata, result)

    LOGGER.info("create mask of primary veg areas")
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [globio_lulc_uri],
        _primary_veg_mask_op,
        primary_veg_mask_uri,
        gdal.GDT_Int32,
        primary_veg_mask_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    LOGGER.info('gaussian filter primary veg')
    sigma = 9.0
    gaussian_kernel_uri = os.path.join(tmp_dir,
                                       'gaussian_kernel%s.tif' % file_suffix)
    make_gaussian_kernel_uri(sigma, gaussian_kernel_uri)
    smoothed_primary_veg_mask_uri = os.path.join(
        tmp_dir, 'smoothed_primary_veg_mask%s.tif' % file_suffix)
    pygeoprocessing.geoprocessing.convolve_2d_uri(
        primary_veg_mask_uri, gaussian_kernel_uri,
        smoothed_primary_veg_mask_uri)

    primary_veg_smooth_uri = os.path.join(
        intermediate_dir, 'primary_veg_smooth%s.tif' % file_suffix)

    def _primary_veg_smooth_op(primary_veg_mask_array,
                               smoothed_primary_veg_mask):
        """mask out ffqi only where there's an ffqi"""
        return numpy.where(primary_veg_mask_array != primary_veg_mask_nodata,
                           primary_veg_mask_array * smoothed_primary_veg_mask,
                           primary_veg_mask_nodata)

    LOGGER.info('calculate primary_veg_smooth')
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [primary_veg_mask_uri, smoothed_primary_veg_mask_uri],
        _primary_veg_smooth_op,
        primary_veg_smooth_uri,
        gdal.GDT_Float32,
        primary_veg_mask_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    msa_nodata = -1

    msa_f_table = msa_parameter_table['msa_f']
    msa_f_values = sorted(msa_f_table)

    def _msa_f_op(primary_veg_smooth):
        """calcualte msa fragmentation"""
        nodata_mask = primary_veg_mask_nodata == primary_veg_smooth

        msa_f = numpy.empty(primary_veg_smooth.shape)

        for value in reversed(msa_f_values):
            #special case if it's a > or < value
            if value == '>':
                msa_f[primary_veg_smooth > msa_f_table['>'][0]] = (
                    msa_f_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_f[primary_veg_smooth <= value] = msa_f_table[value]

        if '<' in msa_f_table:
            msa_f[primary_veg_smooth < msa_f_table['<'][0]] = (
                msa_f_table['<'][1])

        msa_f[nodata_mask] = msa_nodata

        return msa_f

    LOGGER.info('calculate msa_f')
    msa_f_uri = os.path.join(output_dir, 'msa_f%s.tif' % file_suffix)
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [primary_veg_smooth_uri],
        _msa_f_op,
        msa_f_uri,
        gdal.GDT_Float32,
        msa_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    #calc_msa_i
    msa_f_values = sorted(msa_f_table)
    msa_i_other_table = msa_parameter_table['msa_i_other']
    msa_i_primary_table = msa_parameter_table['msa_i_primary']
    msa_i_other_values = sorted(msa_i_other_table)
    msa_i_primary_values = sorted(msa_i_primary_table)

    def _msa_i_op(lulc_array, distance_to_infrastructure):
        """calculate msa infrastructure"""

        distance_to_infrastructure *= out_pixel_size  #convert to meters
        msa_i_primary = numpy.empty(lulc_array.shape)
        msa_i_other = numpy.empty(lulc_array.shape)

        for value in reversed(msa_i_primary_values):
            #special case if it's a > or < value
            if value == '>':
                msa_i_primary[distance_to_infrastructure >
                              msa_i_primary_table['>'][0]] = (
                                  msa_i_primary_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_i_primary[distance_to_infrastructure <= value] = (
                    msa_i_primary_table[value])

        if '<' in msa_i_primary_table:
            msa_i_primary[distance_to_infrastructure <
                          msa_i_primary_table['<'][0]] = (
                              msa_i_primary_table['<'][1])

        for value in reversed(msa_i_other_values):
            #special case if it's a > or < value
            if value == '>':
                msa_i_other[distance_to_infrastructure >
                            msa_i_other_table['>'][0]] = (
                                msa_i_other_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_i_other[distance_to_infrastructure <= value] = (
                    msa_i_other_table[value])

        if '<' in msa_i_other_table:
            msa_i_other[distance_to_infrastructure <
                        msa_i_other_table['<'][0]] = (
                            msa_i_other_table['<'][1])

        msa_i = numpy.where((lulc_array >= 1) & (lulc_array <= 5),
                            msa_i_primary, 1.0)
        msa_i = numpy.where((lulc_array >= 6) & (lulc_array <= 12),
                            msa_i_other, msa_i)
        return msa_i

    LOGGER.info('calculate msa_i')
    distance_to_infrastructure_uri = os.path.join(
        intermediate_dir, 'distance_to_infrastructure%s.tif' % file_suffix)
    pygeoprocessing.geoprocessing.distance_transform_edt(
        infrastructure_uri, distance_to_infrastructure_uri)
    msa_i_uri = os.path.join(output_dir, 'msa_i%s.tif' % file_suffix)
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [globio_lulc_uri, distance_to_infrastructure_uri],
        _msa_i_op,
        msa_i_uri,
        gdal.GDT_Float32,
        msa_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    #calc_msa_lu
    msa_lu_uri = os.path.join(output_dir, 'msa_lu%s.tif' % file_suffix)
    LOGGER.info('calculate msa_lu')
    pygeoprocessing.geoprocessing.reclassify_dataset_uri(
        globio_lulc_uri,
        msa_parameter_table['msa_lu'],
        msa_lu_uri,
        gdal.GDT_Float32,
        globio_nodata,
        exception_flag='values_required')

    LOGGER.info('calculate msa')
    msa_uri = os.path.join(output_dir, 'msa%s.tif' % file_suffix)

    def _msa_op(msa_f, msa_lu, msa_i):
        """Calculate the MSA which is the product of the sub msas"""
        return numpy.where(msa_f != globio_nodata, msa_f * msa_lu * msa_i,
                           globio_nodata)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [msa_f_uri, msa_lu_uri, msa_i_uri],
        _msa_op,
        msa_uri,
        gdal.GDT_Float32,
        msa_nodata,
        out_pixel_size,
        "intersection",
        dataset_to_align_index=0,
        assert_datasets_projected=False,
        vectorize_op=False)

    if 'aoi_uri' in args:
        #copy the aoi to an output shapefile
        original_datasource = ogr.Open(args['aoi_uri'])
        summary_aoi_uri = os.path.join(output_dir,
                                       'aoi_summary%s.shp' % file_suffix)
        #If there is already an existing shapefile with the same name and path,
        # delete it
        if os.path.isfile(summary_aoi_uri):
            os.remove(summary_aoi_uri)
        #Copy the input shapefile into the designated output folder
        esri_driver = ogr.GetDriverByName('ESRI Shapefile')
        datasource_copy = esri_driver.CopyDataSource(original_datasource,
                                                     summary_aoi_uri)
        layer = datasource_copy.GetLayer()
        msa_summary_field_def = ogr.FieldDefn('msa_mean', ogr.OFTReal)
        layer.CreateField(msa_summary_field_def)

        #make an identifying id per polygon that can be used for aggregation
        layer_defn = layer.GetLayerDefn()
        while True:
            #use the last 8 characters; shapefile field names max out at 10
            poly_id_field = str(uuid.uuid4())[-8:]
            if layer_defn.GetFieldIndex(poly_id_field) == -1:
                break
        layer_id_field = ogr.FieldDefn(poly_id_field, ogr.OFTInteger)
        layer.CreateField(layer_id_field)
        for poly_index, poly_feat in enumerate(layer):
            poly_feat.SetField(poly_id_field, poly_index)
            layer.SetFeature(poly_feat)
        layer.SyncToDisk()

        #aggregate by ID
        msa_summary = pygeoprocessing.aggregate_raster_values_uri(
            msa_uri, summary_aoi_uri, shapefile_field=poly_id_field)

        #add new column to output file
        for feature_id in xrange(layer.GetFeatureCount()):
            feature = layer.GetFeature(feature_id)
            key_value = feature.GetFieldAsInteger(poly_id_field)
            feature.SetField('msa_mean',
                             float(msa_summary.pixel_mean[key_value]))
            layer.SetFeature(feature)

        # don't need a random poly id anymore
        layer.DeleteField(layer_defn.GetFieldIndex(poly_id_field))
Example #6
out_path = '/shared/mean_elevation_exercise/matching_pixels.tif'
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

dem_path = '/shared/grasslands_demo/joined_dem.tif'
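
# The local operation passed to vectorize_datasets below is not defined in
# this snippet.  A minimal sketch, assuming evergreen forest is landcover
# code 42 and -1 is the output nodata value (both illustrative), might be:
import numpy

def _dem_values_under_evergreen_forest(lulc_block, dem_block):
    """Pass DEM values through where the landcover is evergreen forest."""
    return numpy.where(lulc_block == 42, dem_block, -1)
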
pygeoprocessing.vectorize_datasets(
    dataset_uri_list=['/data/landcover.tif', dem_path],
    dataset_pixel_op=_dem_values_under_evergreen_forest,
    dataset_out_uri=out_path,
    datatype_out=gdal.GDT_Int16,
    nodata_out=-1,
    pixel_size_out=pygeoprocessing.get_cell_size_from_uri(dem_path),
    bounding_box_mode='intersection')

stats = pygeoprocessing.aggregate_raster_values_uri(out_path,
                                                    '/data/yosemite.shp')

print stats.pixel_mean[9999]

################

aligned_lulc = os.path.join(out_dir, 'aligned_lulc.tif')
aligned_dem = os.path.join(out_dir, 'aligned_dem.tif')
pygeoprocessing.align_dataset_list(
    dataset_uri_list=['/data/landcover.tif', dem_path],
    dataset_out_uri_list=[aligned_lulc, aligned_dem],
    resample_method_list=['nearest', 'nearest'],
    out_pixel_size=pygeoprocessing.get_cell_size_from_uri(dem_path),
    mode='intersection',
    dataset_to_align_index=0)
Example #7
 def sum(self):
     vector = Vector.from_shapely(self.get_aoi(), self.get_projection())
     t = pygeo.aggregate_raster_values_uri(self.uri, vector.uri)
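     # 9999 is the sentinel FID key used when no shapefile_field is given,
     # i.e. the aggregate over the entire vector.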
     return t.total[9999]
Example #8
def _aggregate_carbon_map(aoi_uri, carbon_map_uri, aoi_datasource_filename):
    """Helper function to aggregate carbon values for the given serviceshed.
    Generates a new shapefile at 'aoi_datasource_filename' that is a copy of
    'aoi_uri' with mean and sum values aggregated from the raster at
    'carbon_map_uri'.

    Parameters:
        aoi_uri (string): path to shapefile that will be used to
            aggregate the raster at 'carbon_map_uri'
        carbon_map_uri (string): path to raster that will be aggregated by
            the given serviceshed polygons
        aoi_datasource_filename (string): path to an ESRI shapefile that
            will be created by this function as the aggregating output.

    Returns:
        None"""

    esri_driver = ogr.GetDriverByName('ESRI Shapefile')
    original_serviceshed_datasource = ogr.Open(aoi_uri)
    if (os.path.normpath(aoi_uri) == os.path.normpath(aoi_datasource_filename)
        ):
        raise ValueError(
            "The input and output serviceshed filenames are the same, "
            "please choose a different workspace or move the serviceshed "
            "out of the current workspace %s" % aoi_datasource_filename)

    if os.path.exists(aoi_datasource_filename):
        os.remove(aoi_datasource_filename)
    serviceshed_result = esri_driver.CopyDataSource(
        original_serviceshed_datasource, aoi_datasource_filename)
    original_serviceshed_datasource = None
    serviceshed_layer = serviceshed_result.GetLayer()

    # make an identifying id per polygon that can be used for aggregation
    while True:
        serviceshed_defn = serviceshed_layer.GetLayerDefn()
        poly_id_field = str(uuid.uuid4())[-8:]
        if serviceshed_defn.GetFieldIndex(poly_id_field) == -1:
            break
    layer_id_field = ogr.FieldDefn(poly_id_field, ogr.OFTInteger)
    serviceshed_layer.CreateField(layer_id_field)
    for poly_index, poly_feat in enumerate(serviceshed_layer):
        poly_feat.SetField(poly_id_field, poly_index)
        serviceshed_layer.SetFeature(poly_feat)
    serviceshed_layer.SyncToDisk()

    # aggregate carbon stocks by the new ID field
    serviceshed_stats = pygeoprocessing.aggregate_raster_values_uri(
        carbon_map_uri,
        aoi_datasource_filename,
        shapefile_field=poly_id_field,
        ignore_nodata=True,
        threshold_amount_lookup=None,
        ignore_value_list=[],
        process_pool=None,
        all_touched=False)

    # don't need a random poly id anymore
    serviceshed_layer.DeleteField(
        serviceshed_defn.GetFieldIndex(poly_id_field))

    carbon_sum_field = ogr.FieldDefn('c_sum', ogr.OFTReal)
    carbon_mean_field = ogr.FieldDefn('c_ha_mean', ogr.OFTReal)
    serviceshed_layer.CreateField(carbon_sum_field)
    serviceshed_layer.CreateField(carbon_mean_field)

    serviceshed_layer.ResetReading()
    for poly_index, poly_feat in enumerate(serviceshed_layer):
        poly_feat.SetField('c_sum', serviceshed_stats.total[poly_index])
        poly_feat.SetField('c_ha_mean',
                           serviceshed_stats.hectare_mean[poly_index])
        serviceshed_layer.SetFeature(poly_feat)
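
# A hypothetical invocation (file names are illustrative, not from the
# source) might look like:
#
#     _aggregate_carbon_map(
#         'watersheds.shp', 'carbon_map.tif', 'aggregated_carbon.shp')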
Example #9
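# This snippet begins mid-script.  A minimal sketch of the setup it assumes
# (an iterblocks counting loop, with -1 as the DEM nodata value; the path and
# the iterblocks call shown here are illustrative) might look like:
#
#     import time
#     import pygeoprocessing
#     from osgeo import gdal
#
#     dem = '/shared/grasslands_demo/joined_dem.tif'
#     start_time = time.time()
#     num_park_pixels = 0
#     num_3500_pixels = 0
#     for block_info, dem_block in pygeoprocessing.iterblocks(dem):
#         num_park_pixels += (dem_block != -1).sum()
#         num_3500_pixels += (dem_block >= 3500).sum()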
print 'Iterblocks took %ss' % (time.time() - start_time)
print 'Park pixels: %s' % num_park_pixels
print 'High-elevation pixels %s' % num_3500_pixels
print 'Percentage of park land above 3500m: %s%%' % round(
    (float(num_3500_pixels) / num_park_pixels) * 100, 2)

# Compare iterblocks time with pure-numpy approach.
# Expected: This will be slightly faster than loading into memory, but not by
# much.  Real gains come when iterating over dataset too large to fit into main
# memory.
start_time = time.time()
dem_raster = gdal.Open(dem)
dem_array = dem_raster.ReadAsArray()
num_park_pixels = len(dem_array[dem_array != -1])
num_3500_pixels = len(dem_array[dem_array >= 3500])
print 'numpy took %ss' % (time.time() - start_time)
print 'Park pixels: %s' % num_park_pixels
print 'High-elevation pixels %s' % num_3500_pixels
print 'Percentage of park land above 3500m: %s%%' % round(
    (float(num_3500_pixels) / num_park_pixels) * 100, 2)

# Compare with aggregate_raster_values_uri
yosemite_vector = '/data/yosemite.shp'
stats = pygeoprocessing.aggregate_raster_values_uri(
    raster_uri=dem, shapefile_uri=yosemite_vector)

# Print the mean height across the park
# 9999 is used as a Feature ID when we aggregate across the whole vector
print stats.pixel_mean[9999]