Example #1
def add_elk_to_large_land_mammals():
    """Add the elk raster to the sum of other large land mammals."""
    extract_folder = "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Unzip"
    with tempfile.TemporaryDirectory(dir=extract_folder) as temp_spp_dir:
        raster_list = [
            "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Processed/Large_Land_mammals.tif",
            "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Processed/Elk_AHMG.tif"
        ]
        input_nodata = pygeoprocessing.get_raster_info(
            raster_list[0])['nodata'][0]
        pixel_size = pygeoprocessing.get_raster_info(
            raster_list[0])['pixel_size']
        for raster_path in raster_list:
            test_nodata = pygeoprocessing.get_raster_info(
                raster_path)['nodata'][0]
            if test_nodata != input_nodata:
                reclassify_nodata(raster_path, input_nodata)

        aligned_path_list = [
            os.path.join(temp_spp_dir, os.path.basename(f))
            for f in raster_list
        ]
        pygeoprocessing.align_and_resize_raster_stack(
            raster_list, aligned_path_list, ['near'] * len(raster_list),
            pixel_size, 'union')

        destination_path = "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Processed/Large_Land_Mammals_with_Elk.tif"
        raster_list_sum(aligned_path_list,
                        input_nodata,
                        destination_path,
                        _TARGET_NODATA,
                        _TARGET_DATATYPE,
                        nodata_remove=True)
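# Several of these examples call helpers like raster_list_sum() and
# reclassify_nodata(), plus module constants _TARGET_NODATA and
# _TARGET_DATATYPE, that are defined elsewhere in their source modules. A
# minimal sketch of raster_list_sum(), with the signature inferred from the
# call sites (the real helper may differ):
def raster_list_sum(raster_list, input_nodata, target_path, target_nodata,
                    target_datatype=gdal.GDT_Float32, nodata_remove=False):
    """Sum rasters pixel-wise; optionally treat nodata pixels as zero."""
    def sum_op(*arrays):
        # assumes input_nodata is not None
        sum_of_rasters = numpy.zeros(arrays[0].shape, dtype=numpy.float32)
        if nodata_remove:
            # a pixel is valid if any input has data there
            valid_mask = numpy.zeros(arrays[0].shape, dtype=bool)
            for array in arrays:
                local_valid = ~numpy.isclose(array, input_nodata)
                valid_mask |= local_valid
                sum_of_rasters[local_valid] += array[local_valid]
        else:
            # a pixel is valid only if every input has data there
            valid_mask = numpy.ones(arrays[0].shape, dtype=bool)
            for array in arrays:
                valid_mask &= ~numpy.isclose(array, input_nodata)
            for array in arrays:
                sum_of_rasters[valid_mask] += array[valid_mask]
        result = numpy.full(
            arrays[0].shape, target_nodata, dtype=numpy.float32)
        result[valid_mask] = sum_of_rasters[valid_mask]
        return result

    pygeoprocessing.raster_calculator(
        [(path, 1) for path in raster_list], sum_op, target_path,
        target_datatype, target_nodata)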
Example #2
def threshold_by_raster(base_raster_path, threshold_raster_path,
                        target_raster_path):
    """If base <= threshold, then 1, otherwise 0."""
    base_raster_info = pygeoprocessing.get_raster_info(base_raster_path)
    threshold_raster_info = pygeoprocessing.get_raster_info(
        threshold_raster_path)
    target_nodata = 255

    align_id = (
        f'{os.path.splitext(os.path.basename(base_raster_path))[0]}_'
        f'{os.path.splitext(os.path.basename(threshold_raster_path))[0]}')

    # align rasters
    align_raster_path_list = [
        os.path.join(
            CHURN_DIR, f'aligned_{align_id}_'
            f'{os.path.basename(os.path.splitext(path)[0])}.tif')
        for path in [base_raster_path, threshold_raster_path]
    ]

    pygeoprocessing.align_and_resize_raster_stack(
        [base_raster_path, threshold_raster_path], align_raster_path_list,
        ['near'] * 2, base_raster_info['pixel_size'], 'intersection')

    pygeoprocessing.raster_calculator(
        [(align_raster_path_list[0], 1), (align_raster_path_list[1], 1),
         (base_raster_info['nodata'][0], 'raw'),
         (threshold_raster_info['nodata'][0], 'raw'), (target_nodata, 'raw')],
        threshold_array_op, target_raster_path, gdal.GDT_Byte, target_nodata)
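# threshold_by_raster() delegates the per-block math to threshold_array_op(),
# which this excerpt does not show. A plausible sketch (not the original
# implementation), matching the argument order of the raster_calculator call:
def threshold_array_op(base_array, threshold_array, base_nodata,
                       threshold_nodata, target_nodata):
    """Return 1 where base <= threshold, 0 elsewhere, nodata where invalid."""
    result = numpy.full(base_array.shape, target_nodata, dtype=numpy.uint8)
    valid_mask = numpy.ones(base_array.shape, dtype=bool)
    if base_nodata is not None:
        valid_mask &= ~numpy.isclose(base_array, base_nodata)
    if threshold_nodata is not None:
        valid_mask &= ~numpy.isclose(threshold_array, threshold_nodata)
    result[valid_mask] = (
        base_array[valid_mask] <= threshold_array[valid_mask]).astype(
            numpy.uint8)
    return result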
Example #3
def sum_of_masked_op(mask_path, value_raster_path, churn_dir):
    """Sum the values in ``value_raster_path`` where ``mask_path`` is 1."""
    temp_dir = tempfile.mkdtemp(dir=churn_dir)
    mask_align_path = os.path.join(temp_dir, 'align_mask.tif')
    value_align_path = os.path.join(temp_dir, 'value_align.tif')
    target_pixel_size = pygeoprocessing.get_raster_info(
        value_raster_path)['pixel_size']

    pygeoprocessing.align_and_resize_raster_stack(
        [mask_path, value_raster_path], [mask_align_path, value_align_path],
        ['near'] * 2, target_pixel_size, 'intersection')

    mask_raster = gdal.OpenEx(mask_align_path, gdal.OF_RASTER)
    value_raster = gdal.OpenEx(value_align_path, gdal.OF_RASTER)
    mask_band = mask_raster.GetRasterBand(1)
    value_band = value_raster.GetRasterBand(1)

    sum_val = 0.0
    for offset_dict in pygeoprocessing.iterblocks((mask_align_path, 1),
                                                  offset_only=True):
        mask_array = mask_band.ReadAsArray(**offset_dict)
        value_array = value_band.ReadAsArray(**offset_dict)
        sum_val += numpy.sum(value_array[mask_array == 1])

    mask_band = None
    value_band = None
    mask_raster = None
    value_raster = None
    shutil.rmtree(temp_dir)
    return sum_val
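# A hypothetical call (paths invented); churn_dir must be an existing
# directory that can hold the temporary aligned rasters.
total = sum_of_masked_op(
    'protected_areas_mask.tif',  # pixels equal to 1 are counted
    'habitat_value.tif',         # values summed under the mask
    'churn_workspace')
print('sum of masked values:', total)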
Example #4
def _align_rasters(lulc_raster_filepath, ref_et_raster_filepaths,
                   t_raster_filepaths, dst_lulc_raster_filepath,
                   dst_ref_et_raster_filepaths, dst_t_raster_filepaths):
    """Align LULC, reference ET, and temperature rasters to a common grid."""
    with rio.open(lulc_raster_filepath) as src:
        pygeoprocessing.align_and_resize_raster_stack(
            [lulc_raster_filepath] + ref_et_raster_filepaths +
            t_raster_filepaths, [dst_lulc_raster_filepath] +
            dst_ref_et_raster_filepaths + dst_t_raster_filepaths,
            ['near'] + ['bilinear'] *
            (len(ref_et_raster_filepaths) + len(t_raster_filepaths)), src.res,
            'intersection')

    # get the intersection mask
    with rio.open(dst_lulc_raster_filepath) as src:
        meta = src.meta.copy()
        data_mask = src.dataset_mask()
    for dst_raster_filepath in dst_ref_et_raster_filepaths + \
            dst_t_raster_filepaths:
        with rio.open(dst_raster_filepath) as src:
            data_mask &= src.dataset_mask()

    for dst_raster_filepath in [dst_lulc_raster_filepath] + \
            dst_ref_et_raster_filepaths + dst_t_raster_filepaths:
        with rio.open(dst_raster_filepath, 'r+') as ds:
            ds.write(
                np.where(data_mask, ds.read(1),
                         ds.nodata).astype(ds.dtypes[0]), 1)

    return (meta, data_mask.astype(bool), dst_lulc_raster_filepath,
            dst_ref_et_raster_filepaths, dst_t_raster_filepaths)
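# This example assumes module-level imports of ``rasterio as rio`` and
# ``numpy as np``. A hypothetical invocation with invented paths:
meta, data_mask, lulc_fp, ref_et_fps, t_fps = _align_rasters(
    'lulc.tif',
    ['ref_et_jan.tif', 'ref_et_feb.tif'],
    ['t_jan.tif', 't_feb.tif'],
    'aligned/lulc.tif',
    ['aligned/ref_et_jan.tif', 'aligned/ref_et_feb.tif'],
    ['aligned/t_jan.tif', 'aligned/t_feb.tif'])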
Example #5
def cropped_dem(ctx, dem_fp, watershed_fp, cropped_dem_fp):
    """Crop a DEM to the extent of a watershed vector."""
    logger = ctx.obj['LOGGER']
    logger.info("Cropping DEM to watershed extent")
    with rasterio.open(dem_fp) as dataset:
        pygeoprocessing.align_and_resize_raster_stack(
            [dem_fp], [cropped_dem_fp], ['near'],
            dataset.res,
            bounding_box_mode='intersection',
            base_vector_path_list=[watershed_fp])
    logger.info("DONE")
Example #6
def aligned_raster(ctx, input_raster_fp, reference_raster_fp,
                   output_raster_fp):
    """Align an input raster to a reference raster's grid and bounds."""
    logger = ctx.obj['LOGGER']
    logger.info("Aligning {} to {}".format(input_raster_fp,
                                           reference_raster_fp))
    with rasterio.open(reference_raster_fp) as reference_src:
        pygeoprocessing.align_and_resize_raster_stack(
            [input_raster_fp], [output_raster_fp], ['near'],
            reference_src.res,
            bounding_box_mode=reference_src.bounds,
            target_sr_wkt=reference_src.crs.to_string())
    logger.info("DONE")
Example #7
def AK_terrestrial_index_sum():
    """Sum up inputs to the AK terrestrial index v2."""
    # TODO modify with species groups
    # TODO modify to use the new raster_list_sum()
    with tempfile.TemporaryDirectory() as temp_spp_dir:
        raster_list = [
            "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data/AK_IBA.tif",
            "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data24/Amphibians_Habitat_v1.tif",
            "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data25/Sea_Birds_Habitat_v1.tif",
            "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data26/Terrestrial_Mammals_Habitats_Final.tif",
            "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data22/Land_Birds_Habitat_v1.tif"
        ]
        pixel_size = pygeoprocessing.get_raster_info(
            raster_list[0])['pixel_size']
        for raster_path in raster_list:
            test_nodata = pygeoprocessing.get_raster_info(
                raster_path)['nodata'][0]
            if test_nodata != _TARGET_NODATA:
                reclassify_nodata(raster_path, _TARGET_NODATA)

        print("aligning input rasters")
        aligned_path_list = [
            os.path.join(temp_spp_dir, os.path.basename(f))
            for f in raster_list
        ]
        pygeoprocessing.align_and_resize_raster_stack(
            raster_list, aligned_path_list, ['near'] * len(raster_list),
            pixel_size, 'union')

        print("calculating sum")
        intermediate_path = os.path.join(temp_spp_dir, 'terr_unclip.tif')
        raster_list_sum(aligned_path_list,
                        _TARGET_NODATA,
                        intermediate_path,
                        _TARGET_NODATA,
                        _TARGET_DATATYPE,
                        nodata_remove=True)

        print("clipping to region")
        destination_path = "E:/Packages/AK_Wildlife_012721_4c7e75/commondata/raster_data26/AK_Terrestrial_Index_all_v2.tif"
        pygeoprocessing.align_and_resize_raster_stack(
            [intermediate_path], [destination_path], ['near'],
            pixel_size,
            'intersection',
            base_vector_path_list=[_BOUNDARY_PATH],
            raster_align_index=0,
            vector_mask_options={'mask_vector_path': _BOUNDARY_PATH})
Example #8
def resample_guam_rasters():
    """Align a few bad rasters to identical pixel size."""
    template_raster = "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Threat_Index_10class_v2.tif"
    pixel_size = pygeoprocessing.get_raster_info(template_raster)['pixel_size']
    with tempfile.NamedTemporaryFile(
            prefix='template_aligned', suffix='.tif',
            delete=False) as template_aligned_file:
        template_aligned_path = template_aligned_file.name
    raw_input_list = [
        template_raster,
        "D:/NFWF_PhaseIII/Guam/FOR CREST/raw_rasters_from_mxd/GU_Slope_v1.tif",
        "D:/NFWF_PhaseIII/Guam/FOR CREST/raw_rasters_from_mxd/GU_Terrestrial_index_4class_v3.tif",
        "D:/NFWF_PhaseIII/Guam/FOR CREST/raw_rasters_from_mxd/wave_exposure_5class_proj.tif"]
    aligned_path_list = [
        template_aligned_path,
        "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Slope_v1.tif",
        "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Terrestrial_index_4class_v3.tif",
        "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/wave_exposure_5class_proj.tif"]
    pygeoprocessing.align_and_resize_raster_stack(
        raw_input_list, aligned_path_list, ['near'] * len(raw_input_list),
        pixel_size, 'union')
    check_guam_rasters()
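# check_guam_rasters() is not shown in this excerpt; a plausible sketch of
# such a verification step (hypothetical, not the original) is:
def check_guam_rasters():
    """Verify the resampled rasters share the template's pixel size."""
    template_raster = "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Threat_Index_10class_v2.tif"
    expected_size = pygeoprocessing.get_raster_info(
        template_raster)['pixel_size']
    for path in [
            "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Slope_v1.tif",
            "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/GU_Terrestrial_index_4class_v3.tif",
            "D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload/wave_exposure_5class_proj.tif"]:
        pixel_size = pygeoprocessing.get_raster_info(path)['pixel_size']
        assert pixel_size == expected_size, (
            '%s: pixel size %s != %s' % (path, pixel_size, expected_size))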
Example #9
def align_raster_list(raster_path_list,
                      target_directory,
                      target_sr_wkt=None,
                      align_index=0):
    """Aligns all the raster paths.

    Rasters are aligned using the pixel size of the first raster and use
    the intersection and near interpolation methods.

    Parameters:
        raster_path_list (list): list of str paths to rasters.
        target_directory (str): path to a directory to hold the aligned
            rasters.
        target_sr_wkt (str): if not None this is the projection.
        align_index (int): this index is used to indicate which raster should
            be used for aligned pixel size.

    Returns:
        list of raster paths that are aligned with intersection and near
            interpolation algorithm.

    """
    LOGGER.debug('aligning %s', raster_path_list)
    aligned_path_list = [
        os.path.join(target_directory, os.path.basename(path))
        for path in raster_path_list
    ]
    target_pixel_size = pygeoprocessing.get_raster_info(
        raster_path_list[align_index])['pixel_size']
    LOGGER.debug('about to align: %s', str(raster_path_list))
    pygeoprocessing.align_and_resize_raster_stack(
        raster_path_list, aligned_path_list,
        ['near'] * len(raster_path_list), target_pixel_size,
        'intersection', target_sr_wkt=target_sr_wkt)
    return aligned_path_list
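# A hypothetical call with invented paths, reprojecting both rasters to the
# first raster's spatial reference (depending on the pygeoprocessing version,
# the raster info key is 'projection_wkt' or 'projection'):
dem_wkt = pygeoprocessing.get_raster_info('dem.tif')['projection_wkt']
aligned = align_raster_list(
    ['dem.tif', 'landcover.tif'], 'scratch_dir',
    target_sr_wkt=dem_wkt, align_index=0)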
Example #10
def execute(args):
    """Habitat Quality.

    Open and process the files necessary for the habitat_quality
    model.

    Args:
        workspace_dir (string): a path to the directory into which output
            and other temporary files will be written (required)
        lulc_cur_path (string): a path to an input land use/land cover raster
            (required)
        lulc_fut_path (string): a path to an input land use/land cover raster
            (optional)
        lulc_bas_path (string): a path to an input land use/land cover raster
            (optional, but required for rarity calculations)
        threat_raster_folder (string): a path to the directory that contains
            all threat rasters (required)
        threats_table_path (string): a path to an input CSV containing data
            of all the considered threats. Each row is a degradation source
            and each column a different attribute of the source with the
            following names: 'THREAT', 'MAX_DIST', 'WEIGHT', 'DECAY'
            (required).
        access_vector_path (string): a path to an input polygon shapefile
            containing data on the relative protection against threats
            (optional)
        sensitivity_table_path (string): a path to an input CSV file of LULC
            types, whether they are considered habitat, and their sensitivity
            to each threat (required)
        half_saturation_constant (float): a python float that determines
            the spread and central tendency of habitat quality scores
            (required)
        suffix (string): a python string that will be inserted into all
            raster paths just before the file extension.

    Example Args Dictionary::

        {
            'workspace_dir': 'path/to/workspace_dir',
            'lulc_cur_path': 'path/to/lulc_cur_raster',
            'lulc_fut_path': 'path/to/lulc_fut_raster',
            'lulc_bas_path': 'path/to/lulc_bas_raster',
            'threat_raster_folder': 'path/to/threat_rasters/',
            'threats_table_path': 'path/to/threats_csv',
            'access_vector_path': 'path/to/access_shapefile',
            'sensitivity_table_path': 'path/to/sensitivity_csv',
            'half_saturation_constant': 0.5,
            'suffix': '_results',
        }

    Returns:
        None
    """
    workspace = args['workspace_dir']

    # Append a _ to the suffix if it's not empty and doesn't already have one
    suffix = utils.make_suffix_string(args, 'suffix')

    # Check to see if each of the workspace folders exists.  If not, create the
    # folder in the filesystem.
    inter_dir = os.path.join(workspace, 'intermediate')
    out_dir = os.path.join(workspace, 'output')
    kernel_dir = os.path.join(inter_dir, 'kernels')
    utils.make_directories([inter_dir, out_dir, kernel_dir])

    # get a handle on the folder with the threat rasters
    threat_raster_dir = args['threat_raster_folder']

    threat_dict = utils.build_lookup_from_csv(args['threats_table_path'],
                                              'THREAT',
                                              to_lower=False)
    sensitivity_dict = utils.build_lookup_from_csv(
        args['sensitivity_table_path'], 'LULC', to_lower=False)

    # check that the required headers exist in the sensitivity table.
    # Raise exception if they don't.
    sens_header_list = list(next(iter(sensitivity_dict.values())).keys())
    required_sens_header_list = ['LULC', 'NAME', 'HABITAT']
    missing_sens_header_list = [
        h for h in required_sens_header_list if h not in sens_header_list
    ]
    if missing_sens_header_list:
        raise ValueError('Column(s) %s are missing in the sensitivity table' %
                         (', '.join(missing_sens_header_list)))

    # check that the threat names in the threats table match with the threats
    # columns in the sensitivity table. Raise exception if they don't.
    for threat in threat_dict:
        if 'L_' + threat not in sens_header_list:
            missing_threat_header_list = (set(sens_header_list) -
                                          set(required_sens_header_list))
            raise ValueError(
                'Threat "%s" does not match any column in the sensitivity '
                'table. Possible columns: %s' %
                (threat, missing_threat_header_list))

    # get the half saturation constant
    try:
        half_saturation = float(args['half_saturation_constant'])
    except ValueError:
        raise ValueError('Half-saturation constant is not a number. '
                         'It is: %s' % args['half_saturation_constant'])

    # declare dictionaries to store the land cover and the threat rasters
    # pertaining to the different threats
    lulc_path_dict = {}
    threat_path_dict = {}
    # also store land cover and threat rasters in a list
    lulc_and_threat_raster_list = []
    aligned_raster_list = []
    # declare a set to store unique codes from lulc rasters
    raster_unique_lucodes = set()

    # compile all the threat rasters associated with the land cover
    for lulc_key, lulc_args in (('_c', 'lulc_cur_path'),
                                ('_f', 'lulc_fut_path'),
                                ('_b', 'lulc_bas_path')):
        if lulc_args in args:
            lulc_path = args[lulc_args]
            lulc_path_dict[lulc_key] = lulc_path
            # save land cover paths in a list for alignment and resize
            lulc_and_threat_raster_list.append(lulc_path)
            aligned_raster_list.append(
                os.path.join(
                    inter_dir,
                    os.path.basename(lulc_path).replace(
                        '.tif', '_aligned.tif')))

            # save unique codes to check if it's missing in sensitivity table
            for _, lulc_block in pygeoprocessing.iterblocks((lulc_path, 1)):
                raster_unique_lucodes.update(numpy.unique(lulc_block))

            # Remove the nodata value from the set of land use codes.
            nodata = pygeoprocessing.get_raster_info(lulc_path)['nodata'][0]
            try:
                raster_unique_lucodes.remove(nodata)
            except KeyError:
                # KeyError when the nodata value was not encountered in the
                # raster's pixel values.  Same result when nodata value is
                # None.
                pass

            # add a key to the threat dictionary that associates all threat
            # rasters with this land cover
            threat_path_dict['threat' + lulc_key] = {}

            # for each threat given in the CSV file try opening the associated
            # raster which should be found in threat_raster_folder
            for threat in threat_dict:
                # it's okay to have no threat raster for baseline scenario
                threat_path_dict['threat' + lulc_key][threat] = (
                    resolve_ambiguous_raster_path(
                        os.path.join(threat_raster_dir, threat + lulc_key),
                        raise_error=(lulc_key != '_b')))

                # save threat paths in a list for alignment and resize
                threat_path = threat_path_dict['threat' + lulc_key][threat]
                if threat_path:
                    lulc_and_threat_raster_list.append(threat_path)
                    aligned_raster_list.append(
                        os.path.join(
                            inter_dir,
                            os.path.basename(threat_path).replace(
                                '.tif', '_aligned.tif')))
    # check if there's any lucode from the LULC rasters missing in the
    # sensitivity table
    table_unique_lucodes = set(sensitivity_dict.keys())
    missing_lucodes = raster_unique_lucodes.difference(table_unique_lucodes)
    if missing_lucodes:
        raise ValueError(
            'The following land cover codes were found in your landcover rasters '
            'but not in your sensitivity table. Check your sensitivity table '
            'to see if they are missing: %s. \n\n' %
            ', '.join([str(x) for x in sorted(missing_lucodes)]))

    # Align and resize all the land cover and threat rasters,
    # and store them in the intermediate folder
    LOGGER.info('Starting to align and resize land cover and threat rasters')

    lulc_pixel_size = (pygeoprocessing.get_raster_info(
        args['lulc_cur_path']))['pixel_size']

    aligned_raster_list = [
        os.path.join(inter_dir,
                     os.path.basename(path).replace('.tif', '_aligned.tif'))
        for path in lulc_and_threat_raster_list
    ]

    pygeoprocessing.align_and_resize_raster_stack(
        lulc_and_threat_raster_list, aligned_raster_list,
        ['near'] * len(lulc_and_threat_raster_list), lulc_pixel_size,
        'intersection')

    LOGGER.info('Finished aligning and resizing land cover and threat rasters')

    # Modify paths in lulc_path_dict and threat_path_dict to be aligned rasters
    for lulc_key, lulc_path in lulc_path_dict.items():
        lulc_path_dict[lulc_key] = os.path.join(
            inter_dir,
            os.path.basename(lulc_path).replace('.tif', '_aligned.tif'))
        for threat in threat_dict:
            threat_path = threat_path_dict['threat' + lulc_key][threat]
            if threat_path in lulc_and_threat_raster_list:
                threat_path_dict['threat' + lulc_key][threat] = os.path.join(
                    inter_dir,
                    os.path.basename(threat_path).replace(
                        '.tif', '_aligned.tif'))

    LOGGER.info('Starting habitat_quality biophysical calculations')

    # Rasterize access vector, if value is null set to 1 (fully accessible),
    # else set to the value according to the ACCESS attribute
    cur_lulc_path = lulc_path_dict['_c']
    fill_value = 1.0
    try:
        LOGGER.info('Handling Access Shape')
        access_raster_path = os.path.join(inter_dir,
                                          'access_layer%s.tif' % suffix)
        # create a new raster based on the raster info of current land cover
        pygeoprocessing.new_raster_from_base(cur_lulc_path,
                                             access_raster_path,
                                             gdal.GDT_Float32, [_OUT_NODATA],
                                             fill_value_list=[fill_value])
        pygeoprocessing.rasterize(args['access_vector_path'],
                                  access_raster_path,
                                  burn_values=None,
                                  option_list=['ATTRIBUTE=ACCESS'])

    except KeyError:
        LOGGER.info('No Access Shape Provided, access raster filled with 1s.')

    # calculate the weight sum which is the sum of all the threats' weights
    weight_sum = 0.0
    for threat_data in threat_dict.values():
        # Sum weight of threats
        weight_sum = weight_sum + threat_data['WEIGHT']

    LOGGER.debug('lulc_path_dict : %s', lulc_path_dict)

    # for each land cover raster provided compute habitat quality
    for lulc_key, lulc_path in lulc_path_dict.items():
        LOGGER.info('Calculating habitat quality for landuse: %s', lulc_path)

        # Create raster of habitat based on habitat field
        habitat_raster_path = os.path.join(
            inter_dir, 'habitat%s%s.tif' % (lulc_key, suffix))
        map_raster_to_dict_values(lulc_path,
                                  habitat_raster_path,
                                  sensitivity_dict,
                                  'HABITAT',
                                  _OUT_NODATA,
                                  values_required=False)

        # initialize a list that will store all the threat/threat rasters
        # after they have been adjusted for distance, weight, and access
        deg_raster_list = []

        # a list to keep track of the normalized weight for each threat
        weight_list = numpy.array([])

        # variable to indicate whether we should break out of calculations
        # for a land cover because a threat raster was not found
        exit_landcover = False

        # adjust each threat/threat raster for distance, weight, and access
        for threat, threat_data in threat_dict.items():
            LOGGER.info('Calculating threat: %s.\nThreat data: %s' %
                        (threat, threat_data))

            # get the threat raster for the specific threat
            threat_raster_path = threat_path_dict['threat' + lulc_key][threat]
            LOGGER.info('threat_raster_path %s', threat_raster_path)
            if threat_raster_path is None:
                LOGGER.info(
                    'The threat raster for %s could not be found for the land '
                    'cover %s. Skipping Habitat Quality calculation for this '
                    'land cover.' % (threat, lulc_key))
                exit_landcover = True
                break

            # need the pixel size for the threat raster so we can create
            # an appropriate kernel for convolution
            threat_pixel_size = pygeoprocessing.get_raster_info(
                threat_raster_path)['pixel_size']
            # pixel size tuple could have negative value
            mean_threat_pixel_size = (abs(threat_pixel_size[0]) +
                                      abs(threat_pixel_size[1])) / 2.0

            # convert max distance (given in KM) to meters
            max_dist_m = threat_data['MAX_DIST'] * 1000.0

            # convert max distance from meters to the number of pixels that
            # represents on the raster
            max_dist_pixel = max_dist_m / mean_threat_pixel_size
            LOGGER.debug('Max distance in pixels: %f', max_dist_pixel)

            # blur the threat raster based on the effect of the threat over
            # distance
            decay_type = threat_data['DECAY']
            kernel_path = os.path.join(
                kernel_dir, 'kernel_%s%s%s.tif' % (threat, lulc_key, suffix))
            if decay_type == 'linear':
                make_linear_decay_kernel_path(max_dist_pixel, kernel_path)
            elif decay_type == 'exponential':
                utils.exponential_decay_kernel_raster(max_dist_pixel,
                                                      kernel_path)
            else:
                raise ValueError(
                    "Unknown type of decay in biophysical table, should be "
                    "either 'linear' or 'exponential'. Input was %s for threat"
                    " %s." % (decay_type, threat))

            filtered_threat_raster_path = os.path.join(
                inter_dir, 'filtered_%s%s%s.tif' % (threat, lulc_key, suffix))
            pygeoprocessing.convolve_2d((threat_raster_path, 1),
                                        (kernel_path, 1),
                                        filtered_threat_raster_path)

            # create sensitivity raster based on threat
            sens_raster_path = os.path.join(
                inter_dir, 'sens_%s%s%s.tif' % (threat, lulc_key, suffix))
            map_raster_to_dict_values(lulc_path,
                                      sens_raster_path,
                                      sensitivity_dict,
                                      'L_' + threat,
                                      _OUT_NODATA,
                                      values_required=True)

            # get the normalized weight for each threat
            weight_avg = threat_data['WEIGHT'] / weight_sum

            # add the threat raster adjusted by distance and the raster
            # representing sensitivity to the list to be passed to
            # raster_calculator below
            deg_raster_list.append(filtered_threat_raster_path)
            deg_raster_list.append(sens_raster_path)

            # store the normalized weight for each threat in a list that
            # will be used below in total_degradation
            weight_list = numpy.append(weight_list, weight_avg)

        # check to see if we got here because a threat raster was missing
        # and if so then we want to skip to the next landcover
        if exit_landcover:
            continue

        def total_degradation(*raster):
            """A vectorized function that computes the degradation value for
                each pixel based on each threat and then sums them together

                *raster - a list of numpy arrays of the adjusted threat
                    values per pixel based on distance and sensitivity.
                    The values are in pairs so that the values for each threat
                    can be tracked:
                    [filtered_val_threat1, sens_val_threat1,
                     filtered_val_threat2, sens_val_threat2, ...]
                    There is an optional last value in the list which is the
                    access_raster value, but it is only present if
                    access_raster is not None.

                returns - the total degradation score for the pixel"""

            # we can not be certain how many threats the user will enter,
            # so we handle each filtered threat and sensitivity raster
            # in pairs
            sum_degradation = numpy.zeros(raster[0].shape)
            for index in range(len(raster) // 2):
                step = index * 2
                sum_degradation += (raster[step] * raster[step + 1] *
                                    weight_list[index])

            nodata_mask = numpy.empty(raster[0].shape, dtype=numpy.int8)
            nodata_mask[:] = 0
            for array in raster:
                nodata_mask = nodata_mask | (array == _OUT_NODATA)

            # the last element in raster is access
            return numpy.where(nodata_mask, _OUT_NODATA,
                               sum_degradation * raster[-1])

        # add the access_raster onto the end of the collected raster list. The
        # access_raster will be values from the shapefile if provided or a
        # raster filled with all 1's if not
        deg_raster_list.append(access_raster_path)

        deg_sum_raster_path = os.path.join(
            out_dir, 'deg_sum' + lulc_key + suffix + '.tif')

        LOGGER.info('Starting raster calculation on total_degradation')

        deg_raster_band_list = [(path, 1) for path in deg_raster_list]
        pygeoprocessing.raster_calculator(deg_raster_band_list,
                                          total_degradation,
                                          deg_sum_raster_path,
                                          gdal.GDT_Float32, _OUT_NODATA)

        LOGGER.info('Finished raster calculation on total_degradation')

        # Compute habitat quality
        # ksq: a term used below to compute habitat quality
        ksq = half_saturation**_SCALING_PARAM

        def quality_op(degradation, habitat):
            """Vectorized function that computes habitat quality given
                a degradation and habitat value.

                degradation - a float from the created degradation
                    raster above.
                habitat - a float indicating habitat suitability from
                    the habitat raster created above.

                returns - a float representing the habitat quality
                    score for a pixel
            """
            degradation_clamped = numpy.where(degradation < 0, 0, degradation)

            return numpy.where(
                (degradation == _OUT_NODATA) | (habitat == _OUT_NODATA),
                _OUT_NODATA,
                (habitat * (1.0 -
                            ((degradation_clamped**_SCALING_PARAM) /
                             (degradation_clamped**_SCALING_PARAM + ksq)))))

        quality_path = os.path.join(out_dir,
                                    'quality' + lulc_key + suffix + '.tif')

        LOGGER.info('Starting raster calculation on quality_op')

        deg_hab_raster_list = [deg_sum_raster_path, habitat_raster_path]

        deg_hab_raster_band_list = [(path, 1) for path in deg_hab_raster_list]
        pygeoprocessing.raster_calculator(deg_hab_raster_band_list, quality_op,
                                          quality_path, gdal.GDT_Float32,
                                          _OUT_NODATA)

        LOGGER.info('Finished raster calculation on quality_op')

    # Compute Rarity if user supplied baseline raster
    if '_b' not in lulc_path_dict:
        LOGGER.info('Baseline not provided to compute Rarity')
    else:
        lulc_base_path = lulc_path_dict['_b']

        # get the area of a base pixel to use for computing rarity where the
        # pixel sizes are different between base and cur/fut rasters
        base_pixel_size = pygeoprocessing.get_raster_info(
            lulc_base_path)['pixel_size']
        base_area = float(abs(base_pixel_size[0]) * abs(base_pixel_size[1]))
        base_nodata = pygeoprocessing.get_raster_info(
            lulc_base_path)['nodata'][0]

        lulc_code_count_b = raster_pixel_count(lulc_base_path)

        # compute rarity for current landscape and future (if provided)
        for lulc_key in ['_c', '_f']:
            if lulc_key not in lulc_path_dict:
                continue
            lulc_path = lulc_path_dict[lulc_key]
            lulc_time = 'current' if lulc_key == '_c' else 'future'

            # get the area of a cur/fut pixel
            lulc_pixel_size = pygeoprocessing.get_raster_info(
                lulc_path)['pixel_size']
            lulc_area = float(
                abs(lulc_pixel_size[0]) * abs(lulc_pixel_size[1]))
            lulc_nodata = pygeoprocessing.get_raster_info(
                lulc_path)['nodata'][0]

            def trim_op(base, cover_x):
                """Trim cover_x to the mask of base.

                Parameters:
                    base (numpy.ndarray): base raster from 'lulc_base'
                    cover_x (numpy.ndarray): either future or current land
                        cover raster from 'lulc_path' above

                Returns:
                    _OUT_NODATA where either array has nodata, otherwise
                    cover_x.
                """
                return numpy.where(
                    (base == base_nodata) | (cover_x == lulc_nodata),
                    base_nodata, cover_x)

            LOGGER.info('Create new cover for %s', lulc_path)

            new_cover_path = os.path.join(
                inter_dir, 'new_cover' + lulc_key + suffix + '.tif')

            LOGGER.info('Starting masking %s land cover to base land cover.' %
                        lulc_time)

            pygeoprocessing.raster_calculator([(lulc_base_path, 1),
                                               (lulc_path, 1)], trim_op,
                                              new_cover_path, gdal.GDT_Float32,
                                              _OUT_NODATA)

            LOGGER.info('Finished masking %s land cover to base land cover.' %
                        lulc_time)

            LOGGER.info('Starting rarity computation on %s land cover.' %
                        lulc_time)

            lulc_code_count_x = raster_pixel_count(new_cover_path)

            # a dictionary to map LULC types to a number that depicts how
            # rare they are considered
            code_index = {}

            # compute rarity index for each lulc code
            # define 0.0 if an lulc code is found in the cur/fut landcover
            # but not the baseline
            for code in lulc_code_count_x:
                if code in lulc_code_count_b:
                    numerator = lulc_code_count_x[code] * lulc_area
                    denominator = lulc_code_count_b[code] * base_area
                    ratio = 1.0 - (numerator / denominator)
                    code_index[code] = ratio
                else:
                    code_index[code] = 0.0

            rarity_path = os.path.join(out_dir,
                                       'rarity' + lulc_key + suffix + '.tif')

            pygeoprocessing.reclassify_raster((new_cover_path, 1), code_index,
                                              rarity_path, gdal.GDT_Float32,
                                              _RARITY_NODATA)

            LOGGER.info('Finished rarity computation on %s land cover.' %
                        lulc_time)
    LOGGER.info('Finished habitat_quality biophysical calculations')
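# resolve_ambiguous_raster_path() is referenced above but not defined in this
# excerpt. From its usage it locates a raster file whose extension is unknown;
# a rough, hypothetical reconstruction:
def resolve_ambiguous_raster_path(path, raise_error=True):
    """Return ``path`` plus a known raster extension if such a file exists."""
    for suffix in ('', '.tif', '.img'):  # the real helper may try others
        if os.path.exists(path + suffix):
            return path + suffix
    if raise_error:
        raise ValueError('no raster found for %s' % path)
    return None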
Example #11
def execute(args):
    """Coastal Blue Carbon Preprocessor.

    The preprocessor accepts a list of rasters and checks for cell-transitions
    across the rasters.  The preprocessor outputs a CSV file representing a
    matrix of land cover transitions, each cell pre-filled with a string
    indicating whether carbon accumulates or is disturbed as a result of the
    transition, if a transition occurs.

    Args:
        workspace_dir (string): directory path to workspace
        results_suffix (string): append to outputs directory name if provided
        lulc_lookup_uri (string): filepath of lulc lookup table
        lulc_snapshot_list (list): a list of filepaths to lulc rasters

    Example Args::

        args = {
            'workspace_dir': 'path/to/workspace_dir/',
            'results_suffix': '',
            'lulc_lookup_uri': 'path/to/lookup.csv',
            'lulc_snapshot_list': ['path/to/raster1', 'path/to/raster2', ...]
        }
    """
    LOGGER.info('Starting Coastal Blue Carbon Preprocessor run...')

    # Inputs
    vars_dict = _get_inputs(args)

    base_file_path_list = [(_OUTPUT, vars_dict['output_dir'])]
    reg = utils.build_file_registry(base_file_path_list,
                                    vars_dict['results_suffix'])

    aligned_lulcs = [
        reg['aligned_lulc_template'] % index
        for index in range(len(args['lulc_snapshot_list']))
    ]
    min_pixel_raster_info = min(
        (pygeoprocessing.get_raster_info(path)
         for path in vars_dict['lulc_snapshot_list']),
        key=lambda info: utils.mean_pixel_size_and_area(info['pixel_size'])[0])
    pygeoprocessing.align_and_resize_raster_stack(
        vars_dict['lulc_snapshot_list'], aligned_lulcs,
        ['near'] * len(aligned_lulcs), min_pixel_raster_info['pixel_size'],
        'intersection')

    # Run Preprocessor
    vars_dict['transition_matrix_dict'] = _preprocess_data(
        vars_dict['lulc_lookup_dict'], aligned_lulcs)

    # Outputs
    _create_transition_table(reg['transitions'],
                             vars_dict['lulc_to_code_dict'].keys(),
                             vars_dict['transition_matrix_dict'],
                             vars_dict['code_to_lulc_dict'])

    _create_carbon_pool_initial_table_template(
        reg['carbon_pool_initial_template'], vars_dict['code_to_lulc_dict'])

    _create_carbon_pool_transient_table_template(
        reg['carbon_pool_transient_template'], vars_dict['code_to_lulc_dict'])

    LOGGER.info('...Coastal Blue Carbon Preprocessor run complete.')
Example #12
def alternative_index_workflow(workspace_dir,
                               raster_input_dict,
                               aoi_path,
                               index_path,
                               polygon_input_list=None):
    """Compute the alternative index from raw inputs.

    All inputs, including the AOI, must share a coordinate reference system
    and must have roughly equivalent extents. It is recommended that inputs
    be clipped and projected in Arc prior to running this script.

    Args:
        workspace_dir (string): path to workspace where intermediate results
            should be created/stored
        raster_input_dict (dict): a nested python dictionary containing info
            about raster-based inputs that should be combined. The keys in the
            dictionary should be the labels for each input; values in the
            dictionary should be dictionaries containing the keys 'path' (path
            to the raster input) and 'weight' (weighting value that is applied
            to the normalized values in this input relative to others). EACH
            INDEX IS INTERPRETED AS HIGH VALUE = GOOD.
        aoi_path (string): path to boundary of the study area
        index_path (string): path to location where the index should be saved
        polygon_input_list (list): list of paths to polygon inputs that should
            be included. Each of these is assigned a weight of 1.

    Side effects:
        creates or modifies a raster at the location ``index_path``

    Returns:
        None
    
    """
    # ensure that each new input shares spatial reference
    vector_info = pygeoprocessing.get_vector_info(aoi_path)
    destination_proj = osr.SpatialReference()
    destination_proj.ImportFromWkt(vector_info['projection_wkt'])
    problem_list = []
    for new_input in raster_input_dict:
        new_proj = osr.SpatialReference()
        new_proj.ImportFromWkt(
            pygeoprocessing.get_raster_info(
                raster_input_dict[new_input]['path'])['projection_wkt'])
        if (new_proj.IsSame(destination_proj) == 0):
            problem_list.append(new_input)
    if problem_list:
        raise ValueError(
            "Project these to match the AOI: {}".format(problem_list))

    intermediate_dir = os.path.join(workspace_dir, 'intermediate')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)

    normalized_dir = os.path.join(intermediate_dir, 'normalized')
    if not os.path.exists(normalized_dir):
        os.makedirs(normalized_dir)

    aligned_dir = os.path.join(intermediate_dir, 'aligned')
    if not os.path.exists(aligned_dir):
        os.makedirs(aligned_dir)

    # normalize all raster-based inputs within AOI
    base_raster_path_list = []
    aligned_raster_path_list = []
    for new_input in raster_input_dict:
        value_raster_path = raster_input_dict[new_input]['path']
        try:
            weight = raster_input_dict[new_input]['weight']
        except KeyError:
            weight = 1
        bn = os.path.basename(value_raster_path)
        normalized_path = os.path.join(normalized_dir, bn)
        aligned_path = os.path.join(aligned_dir, bn)
        base_raster_path_list.append(normalized_path)
        aligned_raster_path_list.append(aligned_path)
        if not os.path.exists(normalized_path):
            with tempfile.NamedTemporaryFile(
                    prefix='mask_raster',
                    delete=False,
                    suffix='.tif',
                    dir=normalized_dir) as clipped_raster_file:
                clipped_raster_path = clipped_raster_file.name
            pygeoprocessing.mask_raster((value_raster_path, 1), aoi_path,
                                        clipped_raster_path)
            normalize(clipped_raster_path, normalized_path, aoi_path, weight)
            os.remove(clipped_raster_path)

    # align and resample normalized rasters, using minimum pixel size of inputs
    pixel_size_list = []
    for new_input in raster_input_dict:
        value_raster_path = raster_input_dict[new_input]['path']
        raster_info = pygeoprocessing.get_raster_info(value_raster_path)
        pixel_size_list.append(raster_info['pixel_size'])
    target_pixel_size = min(pixel_size_list)
    min_pixel_index = pixel_size_list.index(min(pixel_size_list))

    if not all([os.path.exists(f) for f in aligned_raster_path_list]):
        pygeoprocessing.align_and_resize_raster_stack(
            base_raster_path_list,
            aligned_raster_path_list, ['near'] * len(base_raster_path_list),
            target_pixel_size,
            'intersection',
            raster_align_index=min_pixel_index)

    # rasterize polygon inputs
    template_raster_path = aligned_raster_path_list[0]
    if polygon_input_list:
        for vec_path in polygon_input_list:
            target_raster_path = os.path.join(
                aligned_dir, '{}.tif'.format(os.path.basename(vec_path)[:-4]))
            aligned_raster_path_list.append(target_raster_path)
            if not os.path.exists(target_raster_path):
                pygeoprocessing.new_raster_from_base(
                    template_raster_path,
                    target_raster_path,
                    gdal.GDT_Int16, [_TARGET_NODATA],
                    fill_value_list=[_TARGET_NODATA])
                pygeoprocessing.rasterize(vec_path,
                                          target_raster_path,
                                          burn_values=[100])

    # add together
    raster_list_sum(aligned_raster_path_list,
                    _TARGET_NODATA,
                    index_path,
                    _TARGET_NODATA,
                    nodata_remove=True)
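# normalize() is not shown in this excerpt. From context it rescales the
# clipped raster to a common range and applies the input's weight; one
# possible sketch, assuming min-max scaling and module-level gdal and
# _TARGET_NODATA definitions:
def normalize(base_raster_path, target_path, aoi_path, weight):
    """Hypothetical: rescale valid values to [0, 1] and multiply by weight."""
    # aoi_path is unused here because the caller already masked to the AOI;
    # assumes the input raster has a defined nodata value
    nodata = pygeoprocessing.get_raster_info(base_raster_path)['nodata'][0]
    raster_min, raster_max = numpy.inf, -numpy.inf
    for _, block in pygeoprocessing.iterblocks((base_raster_path, 1)):
        valid_values = block[~numpy.isclose(block, nodata)]
        if valid_values.size:
            raster_min = min(raster_min, valid_values.min())
            raster_max = max(raster_max, valid_values.max())

    def norm_op(array):
        result = numpy.full(array.shape, _TARGET_NODATA, dtype=numpy.float32)
        valid_mask = ~numpy.isclose(array, nodata)
        result[valid_mask] = weight * (
            (array[valid_mask] - raster_min) / (raster_max - raster_min))
        return result

    pygeoprocessing.raster_calculator(
        [(base_raster_path, 1)], norm_op, target_path,
        gdal.GDT_Float32, _TARGET_NODATA)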
Example #13
def gap_data_workflow():
    """Process GAP data for Alaska subsistence species.

    Unzip folders downloaded from the Alaska GAP analysis project. Within
    species groups, sum up the distribution rasters for individual species.
    Clip the sum of the species distribution rasters inside the species group
    to the study area boundary.

    Returns:
        None

    """
    # species groups
    spp_group_dict = {
        'Fowl': [
            'AmericanWigeon_BreedingDistribution',
            'ArcticTern_BreedingDistribution',
            'BlueWingedTeal_BreedingDistribution',
            'Brant_BreedingDistribution', 'Bufflehead_BreedingDistribution',
            'Canvasback_BreedingDistribution',
            'CommonGoldeneye_BreedingDistribution',
            'CommonMerganser_BreedingDistribution',
            'CommonSnipe_BreedingDistribution', 'Gadwall_AnnualDistribution',
            'GreenWingedTeal_BreedingDistribution',
            'HarlequinDuck_BreedingDistribution',
            'LeastAuklet_BreedingDistribution',
            'LongTailedDuck_BreedingDistribution',
            'Mallard_BreedingDistribution',
            'MarbledMurrelet_BreedingDistribution',
            'NorthernPintail_BreedingDistribution',
            'NorthernShoveler_BreedingDistribution',
            'RedBreastedMerganser_BreedingDistribution',
            'RockPtarmigan_AnnualDistribution',
            'SandhillCrane_BreedingDistribution',
            'SharpTailedGrouse_AnnualDistribution',
            'SnowGoose_BreedingDistribution',
            'SpruceGrouse_AnnualDistribution',
            'SurfScoter_BreedingDistribution',
            'TundraSwan_BreedingDistribution',
            'WillowPtarmigan_AnnualDistribution'
        ],
        'Furbearers': [
            'AmericanBeaver_AnnualDistribution',
            'AmericanMarten_AnnualDistribution',
            'AmericanMink_AnnualDistribution',
            'CanadianLynx_AnnualDistribution', 'Ermine_AnnualDistribution',
            'LeastWeasel_AnnualDistribution', 'Muskrat_AnnualDistribution',
            'NorthAmericanPorcupine_AnnualDistribution',
            'RedFox_AnnualDistribution', 'SnowshoeHare_AnnualDistribution',
            'Wolverine_AnnualDistribution', 'Woodchuck_AnnualDistribution'
        ],
        'Large_Land_Mammals': [
            'AmericanBlackBear_AnnualDistribution',
            'BrownBear_AnnualDistribution', 'Coyote_AnnualDistribution',
            'DallsSheep_AnnualDistribution', 'Moose_AnnualDistribution',
            'MountainGoat_AnnualDistribution', 'Muskox_AnnualDistribution',
            'Wolf_AnnualDistribution'
        ],
    }
    extract_folder = "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Unzip"
    destination_folder = "E:/Datasets/AK_GAP_Analysis/Other_Subsistence_Spp/Processed"

    for sp_key in spp_group_dict:
        print("** working on species group {} **".format(sp_key))
        with tempfile.TemporaryDirectory(prefix=sp_key,
                                         dir=extract_folder) as temp_spp_dir:
            for sp in spp_group_dict[sp_key]:
                zip_path = os.path.join(_GAP_FOLDER, '{}.zip'.format(sp))
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall(extract_folder)

            print("testing nodata values")
            raster_list = [
                os.path.join(extract_folder, '{}.img'.format(sp))
                for sp in spp_group_dict[sp_key]
            ]
            input_nodata = pygeoprocessing.get_raster_info(
                raster_list[0])['nodata'][0]
            pixel_size = pygeoprocessing.get_raster_info(
                raster_list[0])['pixel_size']
            for raster_path in raster_list:
                test_nodata = pygeoprocessing.get_raster_info(
                    raster_path)['nodata'][0]
                if test_nodata != input_nodata:
                    reclassify_nodata(raster_path, input_nodata)

            print("aligning input rasters")
            aligned_path_list = [
                os.path.join(temp_spp_dir, '{}.tif'.format(sp))
                for sp in spp_group_dict[sp_key]
            ]
            pygeoprocessing.align_and_resize_raster_stack(
                raster_list, aligned_path_list, ['near'] * len(raster_list),
                pixel_size, 'union')

            print("calculating sum")
            intermediate_path = os.path.join(temp_spp_dir,
                                             '{}.tif'.format(sp_key))
            raster_list_sum(aligned_path_list,
                            input_nodata,
                            intermediate_path,
                            _TARGET_NODATA,
                            _TARGET_DATATYPE,
                            nodata_remove=True)

            print("clipping to region")
            target_path = os.path.join(destination_folder,
                                       '{}.tif'.format(sp_key))
            pygeoprocessing.align_and_resize_raster_stack(
                [intermediate_path], [target_path], ['near'],
                pixel_size,
                'intersection',
                base_vector_path_list=[_BOUNDARY_PATH],
                raster_align_index=0,
                vector_mask_options={'mask_vector_path': _BOUNDARY_PATH})
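# Several examples call reclassify_nodata() to force inputs onto a common
# nodata value before summing. A minimal sketch consistent with those call
# sites (the real helper may differ):
def reclassify_nodata(target_path, new_nodata):
    """Rewrite ``target_path`` in place so its nodata value is ``new_nodata``."""
    raster_info = pygeoprocessing.get_raster_info(target_path)
    old_nodata = raster_info['nodata'][0]

    def reclassify_op(array):
        result = array.copy()
        if old_nodata is not None:
            result[numpy.isclose(result, old_nodata)] = new_nodata
        return result

    # raster_calculator cannot write over its own input, so work from a copy
    with tempfile.NamedTemporaryFile(suffix='.tif', delete=False) as tmp_file:
        tmp_path = tmp_file.name
    shutil.copyfile(target_path, tmp_path)
    pygeoprocessing.raster_calculator(
        [(tmp_path, 1)], reclassify_op, target_path,
        raster_info['datatype'], new_nodata)
    os.remove(tmp_path)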
Example #14
def align_inputs(align_dir):
    """Align all inputs to the threat index raster.

    Align all inputs to the threat index raster, storing aligned rasters in
    `align_dir`. Reclassify nodata areas to _TARGET_NODATA.

    Return:
        a dictionary where keys identify inputs and values are paths to
            aligned rasters for each input

    """
    template_raster_info = pygeoprocessing.get_raster_info(_TEMPLATE_PATH)
    base_path_id_map = {
        'low_lying':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/commondata/raster_data9/AK_Low_Lying_Areas_v1.tif",
        'erodibility':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/AK_Erosion_STA_Add_reclass_revised.tif",
        'permafrost':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/AK_Permafrost_STA_Add_revised.tif",
        'tsunami':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/commondata/raster_data7/AK_Tsunami_v1.tif",
        'floodprone_no_sta':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/commondata/raster_data3/AK_Floodprone_Inputs_v1.tif",
        # revised to include only communities not in group 3 according to STA
        'sta':
        "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9/commondata/floodprone/AK_STA_Flooding_revised.tif",
        'dem':
        "D:/Current/Alaska/Data/ifsar_dem_resample_30m/ifsar_30m_proj.tif",  # ifsar DEM resampled to 30 m
    }
    base_input_path_list = [
        base_path_id_map[k] for k in sorted(base_path_id_map.keys())
    ]
    base_input_path_list.insert(0, _TEMPLATE_PATH)
    aligned_inputs = dict([
        (key, os.path.join(align_dir, 'aligned_%s' % os.path.basename(path)))
        for key, path in base_path_id_map.items()
    ])
    aligned_path_list = [
        aligned_inputs[k] for k in sorted(aligned_inputs.keys())
    ]
    aligned_path_list.insert(0, os.path.join(align_dir, 'template.tif'))

    # ensure all inputs match projection of template raster
    for input_path in base_input_path_list:
        input_wkt = pygeoprocessing.get_raster_info(
            input_path)['projection_wkt']
        if input_wkt != template_raster_info['projection_wkt']:
            raise ValueError("Inputs must share projection")

    if not all([os.path.isfile(p) for p in aligned_path_list]):
        print("\nAligning inputs .....")
        pygeoprocessing.align_and_resize_raster_stack(
            base_input_path_list,
            aligned_path_list, ['near'] * len(aligned_path_list),
            template_raster_info['pixel_size'],
            bounding_box_mode=template_raster_info['bounding_box'],
            raster_align_index=0)
        for key in [
                'floodprone_no_sta', 'low_lying', 'erodibility', 'permafrost',
                'tsunami'
        ]:
            print("\nReclassifying nodata .....")
            reclassify_nodata(aligned_inputs[key], _TARGET_NODATA)
    return aligned_inputs
Example #15
# The original snippet is truncated here and begins mid-function; a plausible
# signature for the function whose tail follows is:
def calc_ndvi(red, nir, qa):
    ndvi = (nir - red) / (nir + red)
    ndvi[qa == 1] = NODATA
    return ndvi


# # calculate change in NDVI between 2013 and 2016!

import pygeoprocessing

aligned_files = [
    'al_red_2013.tif', 'al_nir_2013.tif', 'al_qa_2013.tif', 'al_red_2016.tif',
    'al_nir_2016.tif', 'al_qa_2016.tif'
]

pygeoprocessing.align_and_resize_raster_stack(L8_2013_FILES + L8_2016_FILES,
                                              aligned_files, ['near'] * 6,
                                              (120, -120), 'intersection')


def diff_ndvi(red_2013, nir_2013, qa_2013, red_2016, nir_2016, qa_2016):
    valid_pixels = (qa_2013 != 1) & (qa_2016 != 1)

    def _calc_ndvi(red, nir):
        """Calculate NDVI from red and near-infrared landsat bands."""
        red = red[valid_pixels].astype(numpy.float32)
        nir = nir[valid_pixels].astype(numpy.float32)

        ndvi = (nir - red) / (nir + red)
        return ndvi

    ndvi_2013 = _calc_ndvi(red_2013, nir_2013)
    ndvi_2016 = _calc_ndvi(red_2016, nir_2016)

    # Reconstructed ending (the original snippet is truncated here): write the
    # difference back into a full-size block, NODATA where either year is
    # invalid.
    diff = numpy.full(red_2013.shape, NODATA, dtype=numpy.float32)
    diff[valid_pixels] = ndvi_2016 - ndvi_2013
    return diff
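# Assuming NODATA is defined and gdal is imported from osgeo, the aligned
# stack could then be fed to raster_calculator (a sketch, not part of the
# original snippet):
pygeoprocessing.raster_calculator(
    [(path, 1) for path in aligned_files], diff_ndvi,
    'ndvi_change_2013_2016.tif', gdal.GDT_Float32, NODATA)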
Example #16
def _preprocess_rasters(base_raster_path_list,
                        churn_dir,
                        target_processed_raster_list_file_path,
                        target_projection_wkt=None,
                        target_pixel_size=None,
                        resample_method='near',
                        bounding_box_mode='intersection'):
    """Process base raster path list so it can be used in raster calcs.

    Parameters:
        base_raster_path_list (list): list of arbitrary rasters.
        churn_dir (str): path to a directory that can be used to write
            temporary files that could be used later for
            caching/reproducibility.
        target_processed_raster_list_file_path (str): path to a pickle file
            for processed output list that contains the list of raster paths
            that can be used in raster calcs, note this may be the original
            list of rasters or they may have been created by this call.
        target_projection_wkt (string): if not None, this is the desired
            projection of the target rasters in Well Known Text format. If
            None and all symbol rasters have the same projection, that
            projection will be used. Otherwise a ValueError is raised
            indicating that the rasters are in different projections with
            no guidance to resolve.
        target_pixel_size (tuple): if not None, desired output target pixel
            size. A ValueError is raised if symbol rasters are different
            pixel sizes and this value is None.
        resample_method (str): if the symbol rasters need to be resized for
            any reason, this method is used. The value can be one of:
            "near|bilinear|cubic|cubicspline|lanczos|average|mode|max".
        bounding_box_mode (string): one of "union", "intersection", or
            a sequence of floats of the form [minx, miny, maxx, maxy] in the
            target projection coordinate system.  Depending
            on the value, output extents are defined as the union,
            intersection, or the explicit bounding box.

    Return:
        ``None``
    """
    resample_inputs = False

    base_info_list = [
        pygeoprocessing.get_raster_info(path) for path in base_raster_path_list
    ]
    base_projection_list = [info['projection_wkt'] for info in base_info_list]
    base_pixel_list = [info['pixel_size'] for info in base_info_list]
    base_raster_shape_list = [info['raster_size'] for info in base_info_list]

    if target_pixel_size is not None:
        same_pixel_sizes = True
        pixel_sizes = [info['pixel_size']
                       for info in base_info_list] + [target_pixel_size]
        for pixel_size_a in pixel_sizes[0:-1]:
            for pixel_size_b in pixel_sizes[1:]:
                if not all(numpy.isclose(pixel_size_a, pixel_size_b)):
                    same_pixel_sizes = False
    else:
        same_pixel_sizes = True

    same_raster_sizes = (len(
        set([info['raster_size'] for info in base_info_list])) == 1)

    if (len(set(base_raster_shape_list)) == 1 and same_pixel_sizes
            and resample_method != 'near'):
        raise ValueError(
            f"a resample method of '{resample_method}' was requested, but "
            "all the pixel sizes are the same; you probably meant to leave "
            "off 'resample_method' as an argument.")

    if len(set(base_projection_list)) != 1:
        if target_projection_wkt is None:
            raise ValueError(
                "Projections of base rasters are not equal and there "
                "is no `target_projection_wkt` defined.\nprojection "
                "list: %s" % str(base_projection_list))
        else:
            LOGGER.info('projections are different')
            resample_inputs = True

    if len(set(base_pixel_list)) != 1:
        if target_pixel_size is None:
            raise ValueError(
                "base and reference pixel sizes are different and no target "
                "is defined.\nbase pixel sizes: %s" % str(base_pixel_list))
        LOGGER.info('pixel sizes are different')
        resample_inputs = True
    else:
        # else use the pixel size they all have
        target_pixel_size = base_pixel_list[0]

    if len(set(base_raster_shape_list)) != 1:
        LOGGER.info('raster shapes different')
        resample_inputs = True

    if resample_inputs:
        LOGGER.info("need to align/reproject inputs to apply calculation")
        try:
            os.makedirs(churn_dir)
        except OSError:
            LOGGER.debug('churn dir %s already exists', churn_dir)

        operand_raster_path_list = [
            os.path.join(churn_dir, os.path.basename(path))
            for path in base_raster_path_list
        ]
        if not same_pixel_sizes or not same_raster_sizes:
            pygeoprocessing.align_and_resize_raster_stack(
                base_raster_path_list,
                operand_raster_path_list,
                [resample_method] * len(base_raster_path_list),
                target_pixel_size,
                bounding_box_mode,
                target_projection_wkt=target_projection_wkt,
            )
        else:
            # no need to realign, just hard link it
            for base_path, target_path in zip(base_raster_path_list,
                                              operand_raster_path_list):
                if os.path.exists(target_path):
                    os.remove(target_path)
                os.link(base_path, target_path)
        result = operand_raster_path_list
    else:
        result = base_raster_path_list
    with open(target_processed_raster_list_file_path, 'wb') as result_file:
        pickle.dump(result, result_file)
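A minimal usage sketch for `_preprocess_rasters` (the raster and churn paths below are hypothetical, and the two inputs are assumed to already share a projection and pixel size): run the preprocessing, then unpickle the list of analysis-ready raster paths it wrote.

import pickle

processed_list_path = 'churn/processed_rasters.pickle'  # hypothetical path
_preprocess_rasters(
    ['lulc.tif', 'slope.tif'],  # hypothetical input rasters
    'churn', processed_list_path,
    resample_method='near', bounding_box_mode='intersection')
with open(processed_list_path, 'rb') as result_file:
    analysis_ready_paths = pickle.load(result_file)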
Example #17
    parser.add_argument('landcover_raster', help='Path to landcover raster.')
    parser.add_argument('other_raster',
                        help='Path to another raster to calculate stats over.')
    args = parser.parse_args()

    working_dir = tempfile.mkdtemp("lulc_raster_stats_workspace", dir='.')

    base_raster_path_list = [args.landcover_raster, args.other_raster]
    aligned_raster_path_list = [
        os.path.join(working_dir, os.path.basename(path))
        for path in base_raster_path_list
    ]
    other_raster_info = pygeoprocessing.get_raster_info(args.other_raster)
    pygeoprocessing.align_and_resize_raster_stack(
        base_raster_path_list,
        aligned_raster_path_list, ['mode', 'near'],
        other_raster_info['pixel_size'],
        'intersection',
        target_sr_wkt=other_raster_info['projection'])
    lulc_nodata = pygeoprocessing.get_raster_info(
        args.landcover_raster)['nodata'][0]
    unique_values = get_unique_values(args.landcover_raster)
    LOGGER.debug(unique_values)
    stats_table = open('stats_table.csv', 'w')
    stats_table.write('lucode,min,max,mean,stdev\n')
    for mask_code in sorted(unique_values):
        mask_raster_path = os.path.join(working_dir, '%d.tif' % mask_code)
        pygeoprocessing.raster_calculator(
            [(aligned_raster_path_list[0], 1),
             (aligned_raster_path_list[1], 1), (mask_code, 'raw'),
             (other_raster_info['nodata'][0], 'raw')], mask_out_op,
            mask_raster_path, gdal.GDT_Float32, other_raster_info['nodata'][0])
Example #18
def extract_feature_checked(align_lock, vector_path, field_name, field_value,
                            base_raster_path, target_vector_path,
                            target_raster_path):
    """Extract single feature into separate vector and check for no error.

    No coordinate transform is done since everything is in WGS84.

    Parameters:
        align_lock (multiprocessing.Lock): lock to only allow one align at
            a time.
        vector_path (str): base vector in WGS84 coordinates.
        field_name (str): field to search for
        field_value (str): field value to isolate
        base_raster_path (str): path to raster to clip out.
        target_vector_path (str): path to new GPKG vector that will
            contain only that feature.
        target_raster_path (str): path to clipped out raster

    Returns:
        True if no error, False otherwise.

    """
    attempt_number = 0
    if not os.path.exists(base_raster_path):
        raise ValueError("%s does not exist" % base_raster_path)
    while True:
        try:
            LOGGER.debug('opening vector: %s', vector_path)
            base_vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)
            LOGGER.debug('getting layer')
            base_layer = base_vector.GetLayer()
            feature = None
            LOGGER.debug('iterating over features')
            for base_feature in base_layer:
                if base_feature.GetField(field_name) == field_value:
                    feature = base_feature
                    break
            if feature is None:
                raise ValueError(
                    'no feature found where %s == %s' % (
                        field_name, field_value))
            LOGGER.debug('extracting feature %s', feature.GetField(field_name))

            geom = feature.GetGeometryRef()
            base_srs = base_layer.GetSpatialRef()

            base_raster_info = pygeoprocessing.get_raster_info(
                base_raster_path)

            base_layer = None
            base_vector = None

            # create a new shapefile
            if os.path.exists(target_vector_path):
                os.remove(target_vector_path)
            driver = ogr.GetDriverByName('GPKG')
            target_vector = driver.CreateDataSource(target_vector_path)
            target_layer = target_vector.CreateLayer(
                os.path.splitext(os.path.basename(target_vector_path))[0],
                base_srs, ogr.wkbMultiPolygon)
            layer_defn = target_layer.GetLayerDefn()
            feature_geometry = geom.Clone()
            base_feature = ogr.Feature(layer_defn)
            base_feature.SetGeometry(feature_geometry)
            target_layer.CreateFeature(base_feature)
            target_layer.SyncToDisk()
            geom = None
            feature_geometry = None
            base_feature = None
            target_layer = None
            target_vector = None

            with align_lock:
                pygeoprocessing.align_and_resize_raster_stack(
                    [base_raster_path], [target_raster_path], ['near'],
                    base_raster_info['pixel_size'],
                    'intersection',
                    base_vector_path_list=[target_vector_path],
                    vector_mask_options={
                        'mask_vector_path': target_vector_path,
                    })
            return True
        except Exception:
            LOGGER.exception('exception when extracting %s %s %s' %
                             (field_name, field_value, vector_path))
            attempt_number += 1
            if attempt_number == 20:
                return False
            time.sleep(min(1, 0.1 * 2**attempt_number))
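A hedged usage sketch (the vector, field, and raster paths are made up): a single shared lock serializes the align step across worker processes.

import logging
import multiprocessing

LOGGER = logging.getLogger(__name__)

align_lock = multiprocessing.Lock()
success = extract_feature_checked(
    align_lock, 'countries.gpkg', 'iso3', 'USA',  # hypothetical inputs
    'global_index.tif', 'usa.gpkg', 'usa_index.tif')
if not success:
    LOGGER.error('feature extraction failed after repeated attempts')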
Example #19
def mosaic_zonal_mean_with_surface():
    """Mosaic community footprints with continuous index.

    Where community footprints have values >0, they should supersede values in
    the continuous index.

    Returns:
        None

    """
    def mosaic_op(footprint_ar, index_ar):
        """Mosaic footprints into continuous index."""
        mosaic_mask = (footprint_ar > 0)
        result = numpy.copy(index_ar)
        result[mosaic_mask] = footprint_ar[mosaic_mask]
        return result

    # outer_dir = "D:/Current/Packages/AK_Threat_Inputs_012721_9522b9"
    outer_dir = "D:/Current/Packages/AK_Asset_Inputs_012721_2fd5ef"

    # align index and community footprints together
    align_dir = tempfile.mkdtemp()
    base_path_id_map = {
        # 'footprints': os.path.join(
        #     outer_dir, "AK_Communities_Subtract3_Threat_mean_int.tif"),
        # 'threat_index': os.path.join(
        #     outer_dir, "AK_Threat_Index_10class_revised.tif"),
        'footprints':
        os.path.join(outer_dir, 'AK_STA_Communities_Exposure_mean_int.tif'),
        'index':
        os.path.join(outer_dir, 'AK_Exposure_Index_10class_revised1.tif'),
    }
    base_input_path_list = [
        base_path_id_map[k] for k in sorted(base_path_id_map.keys())
    ]
    aligned_inputs = dict([
        (key, os.path.join(align_dir, 'aligned_%s' % os.path.basename(path)))
        for key, path in base_path_id_map.items()
    ])
    aligned_path_list = [
        aligned_inputs[k] for k in sorted(aligned_inputs.keys())
    ]

    template_raster_info = pygeoprocessing.get_raster_info(
        base_path_id_map['index'])
    pygeoprocessing.align_and_resize_raster_stack(
        base_input_path_list,
        aligned_path_list, ['near'] * len(aligned_path_list),
        template_raster_info['pixel_size'],
        bounding_box_mode=template_raster_info['bounding_box'])

    # mosaic community footprints into index raster
    # target_path = os.path.join(
    #     outer_dir, 'AK_Threat_Index_10class_revised_footprint_mean.tif')
    target_path = os.path.join(
        outer_dir, 'AK_Exposure_Index_10class_revised1_footprint_mean.tif')
    pygeoprocessing.raster_calculator(
        [(path, 1)
         for path in [aligned_inputs['footprints'], aligned_inputs['index']]],
        mosaic_op, target_path, gdal.GDT_Byte,
        template_raster_info['nodata'][0])

    # clean up
    shutil.rmtree(align_dir)
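The mosaic rule in `mosaic_op` is easy to check on a toy array; restating it standalone for illustration (the values here are made up):

import numpy

def mosaic_rule(footprint_ar, index_ar):
    """Standalone restatement of mosaic_op above, for illustration."""
    result = numpy.copy(index_ar)
    mosaic_mask = footprint_ar > 0
    result[mosaic_mask] = footprint_ar[mosaic_mask]
    return result

footprint = numpy.array([[0, 7], [3, 0]])
index = numpy.array([[1, 2], [4, 5]])
print(mosaic_rule(footprint, index))  # [[1 7] [3 5]]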
Example #20
def _collapse_infrastructure_layers(infrastructure_dir, base_raster_path,
                                    infrastructure_path, tmp_dir):
    """Collapse all GIS infrastructure layers to one raster.

    Gathers all the GIS layers in the given directory and collapses them
    to a single byte raster mask where 1 indicates a pixel overlapping
    one of the original infrastructure layers, 0 indicates no overlap, and
    nodata marks regions inside the bounding box that no layer covers.

    Parameters:
        infrastructure_dir (string): path to a directory containing maps of
            either gdal compatible rasters or OGR compatible shapefiles.
        base_raster_path (string): a path to a file that has the dimensions and
            projection of the desired output infrastructure file.
        infrastructure_path (string): (output) path to a file that will be a
            byte raster with 1s everywhere there was a GIS layer present in
            the GIS layers in `infrastructure_dir`.
        tmp_dir (string): path to folder to store intermediate datasets such
            as aligned versions of infrastructure rasters.

    Returns:
        None

    """
    # load the infrastructure layers from disk
    infrastructure_filenames = []
    infrastructure_nodata_list = []
    infrastructure_tmp_filenames = []
    # in case we need to rasterize some vector inputs:
    tmp_rasterize_dir = os.path.join(tmp_dir, 'rasterized')
    for root_directory, _, filename_list in os.walk(infrastructure_dir):
        for filename in filename_list:
            if filename.lower().endswith(".tif"):
                infrastructure_filenames.append(
                    os.path.join(root_directory, filename))
                infrastructure_nodata_list.append(
                    pygeoprocessing.get_raster_info(
                        infrastructure_filenames[-1])['nodata'][0])

            if filename.lower().endswith(".shp"):
                utils.make_directories([tmp_rasterize_dir])
                file_handle, tmp_raster_path = tempfile.mkstemp(
                    dir=tmp_rasterize_dir, suffix='.tif')
                os.close(file_handle)

                pygeoprocessing.new_raster_from_base(base_raster_path,
                                                     tmp_raster_path,
                                                     gdal.GDT_Int32, [-1.0],
                                                     fill_value_list=[0])
                pygeoprocessing.rasterize(os.path.join(root_directory,
                                                       filename),
                                          tmp_raster_path,
                                          burn_values=[1],
                                          option_list=["ALL_TOUCHED=TRUE"])

                infrastructure_filenames.append(tmp_raster_path)
                infrastructure_tmp_filenames.append(tmp_raster_path)
                infrastructure_nodata_list.append(
                    pygeoprocessing.get_raster_info(
                        infrastructure_filenames[-1])['nodata'][0])

    if len(infrastructure_filenames) == 0:
        raise ValueError(
            "infrastructure directory didn't have any rasters or "
            "vectors at %s" % infrastructure_dir)

    infrastructure_nodata = -1

    def _collapse_infrastructure_op(*infrastructure_array_list):
        """Set 1 where any layer is present, nodata where all are nodata."""
        nodata_mask = (numpy.isclose(infrastructure_array_list[0],
                                     infrastructure_nodata_list[0]))
        infrastructure_result = infrastructure_array_list[0] > 0
        for index in range(1, len(infrastructure_array_list)):
            current_nodata = numpy.isclose(infrastructure_array_list[index],
                                           infrastructure_nodata_list[index])

            infrastructure_result = (infrastructure_result | (
                (infrastructure_array_list[index] > 0) & ~current_nodata))

            nodata_mask = (nodata_mask & current_nodata)

        infrastructure_result[nodata_mask] = infrastructure_nodata
        return infrastructure_result

    LOGGER.info('collapse infrastructure into one raster')
    aligned_infrastructure_target_list = [
        os.path.join(tmp_dir, os.path.basename(x))
        for x in infrastructure_filenames
    ]
    base_raster_info = pygeoprocessing.get_raster_info(base_raster_path)

    pygeoprocessing.align_and_resize_raster_stack(
        infrastructure_filenames, aligned_infrastructure_target_list,
        ['near'] * len(infrastructure_filenames),
        base_raster_info['pixel_size'], base_raster_info['bounding_box'])
    infra_filename_band_list = [(x, 1)
                                for x in aligned_infrastructure_target_list]
    pygeoprocessing.raster_calculator(infra_filename_band_list,
                                      _collapse_infrastructure_op,
                                      infrastructure_path, gdal.GDT_Byte,
                                      infrastructure_nodata)

    # clean up the temporary filenames
    if os.path.isdir(tmp_rasterize_dir):
        for filename in infrastructure_tmp_filenames:
            os.remove(filename)
        os.rmdir(tmp_rasterize_dir)
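The semantics of `_collapse_infrastructure_op` (1 where any layer is present, nodata only where every input is nodata) can be verified with a small array example; the values here are made up:

import numpy

nodata = -1
layer_a = numpy.array([1, 0, nodata, nodata], dtype=numpy.float32)
layer_b = numpy.array([0, 0, 5, nodata], dtype=numpy.float32)
presence = (((layer_a > 0) & ~numpy.isclose(layer_a, nodata)) |
            ((layer_b > 0) & ~numpy.isclose(layer_b, nodata)))
all_nodata = numpy.isclose(layer_a, nodata) & numpy.isclose(layer_b, nodata)
result = presence.astype(numpy.int16)
result[all_nodata] = nodata
print(result)  # [ 1  0  1 -1]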
Example #21
def _build_file_registry(C_prior_raster, transition_rasters, snapshot_years,
                         results_suffix, do_economic_analysis, outputs_dir,
                         intermediate_dir):
    """Build an output file registry.

    Args:
        C_prior_raster (str): template raster
        transition_rasters (list): A list of GDAL-supported rasters
            representing the landcover at transition years.  May be an
            empty list.
        snapshot_years (list): years of provided snapshots to help with
            filenames
        results_suffix (str): the results file suffix
        do_economic_analysis (bool): whether or not to create an NPV raster
        outputs_dir (str): path to output directory

    Returns:
        File_Registry (dict): map to collections of output files.
    """
    template_raster = C_prior_raster

    T_s_rasters = []
    A_r_rasters = []
    E_r_rasters = []
    N_r_rasters = []

    for snapshot_idx in range(len(snapshot_years)):
        snapshot_year = snapshot_years[snapshot_idx]
        T_s_rasters.append(_OUTPUT['carbon_stock'] % (snapshot_year))
        if snapshot_idx < len(snapshot_years)-1:
            next_snapshot_year = snapshot_years[snapshot_idx + 1]
            A_r_rasters.append(_OUTPUT['carbon_accumulation'] % (
                snapshot_year, next_snapshot_year))
            E_r_rasters.append(_OUTPUT['cabon_emissions'] % (
                snapshot_year, next_snapshot_year))
            N_r_rasters.append(_OUTPUT['carbon_net_sequestration'] % (
                snapshot_year, next_snapshot_year))

    # Total Net Sequestration
    N_total_raster = 'total_net_carbon_sequestration.tif'

    raster_registry_dict = {
        'T_s_rasters': T_s_rasters,
        'A_r_rasters': A_r_rasters,
        'E_r_rasters': E_r_rasters,
        'N_r_rasters': N_r_rasters,
        'N_total_raster': N_total_raster,
    }

    # Net Sequestration from Base Year to Analysis Year
    if do_economic_analysis:
        raster_registry_dict['NPV_raster'] = 'net_present_value.tif'

    file_registry = utils.build_file_registry(
        [(raster_registry_dict, outputs_dir),
         (_INTERMEDIATE, intermediate_dir)], results_suffix)

    LOGGER.info('Aligning and clipping incoming datasets')
    incoming_rasters = [C_prior_raster] + transition_rasters
    # If an analysis year is defined, it's appended to the snapshot_years list,
    # but won't have a corresponding raster.
    aligned_lulc_files = [file_registry['aligned_lulc_template'] % year
                          for year in snapshot_years[:len(incoming_rasters)]]
    baseline_pixel_size = pygeoprocessing.get_raster_info(
        C_prior_raster)['pixel_size']

    pygeoprocessing.align_and_resize_raster_stack(
        [C_prior_raster] + transition_rasters,
        aligned_lulc_files,
        ['near'] * len(aligned_lulc_files),
        baseline_pixel_size,
        'intersection')

    raster_lists = ['T_s_rasters', 'A_r_rasters', 'E_r_rasters', 'N_r_rasters']
    num_temporal_rasters = sum(
        [len(file_registry[key]) for key in raster_lists])
    LOGGER.info('Creating %s temporal rasters', num_temporal_rasters)
    for index, raster_filepath in enumerate(itertools.chain(
            *[file_registry[key] for key in raster_lists])):
        LOGGER.info('Setting up temporal raster %s of %s at %s', index+1,
                    num_temporal_rasters, os.path.basename(raster_filepath))
        pygeoprocessing.new_raster_from_base(
            template_raster,
            raster_filepath,
            gdal.GDT_Float32,
            [NODATA_FLOAT])

    for raster_key in ['N_total_raster', 'NPV_raster']:
        try:
            filepath = file_registry[raster_key]
            LOGGER.info('Setting up valuation raster %s',
                        os.path.basename(filepath))
            pygeoprocessing.new_raster_from_base(
                template_raster,
                filepath,
                gdal.GDT_Float32,
                [NODATA_FLOAT])
        except KeyError:
            # KeyError raised when ``raster_key`` is not in the file registry.
            pass

    return file_registry
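`_OUTPUT` and `_INTERMEDIATE` are module-level registries that this excerpt does not show. The `%`-substitutions above imply filename templates roughly like the following hypothetical sketch (key names match the code above, but the actual filenames may differ):

_OUTPUT = {
    'carbon_stock': 'carbon_stock_at_%s.tif',
    'carbon_accumulation': 'carbon_accumulation_between_%s_and_%s.tif',
    'cabon_emissions': 'carbon_emissions_between_%s_and_%s.tif',  # key spelled as referenced above
    'carbon_net_sequestration': (
        'net_carbon_sequestration_between_%s_and_%s.tif'),
}
_INTERMEDIATE = {
    'aligned_lulc_template': 'aligned_lulc_%s.tif',
}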
Example #22
def execute(args):
    """Sediment Delivery Ratio.

    This function calculates the sediment export and retention of a landscape
    using the sediment delivery ratio model described in the InVEST user's
    guide.

    Parameters:
        args['workspace_dir'] (string): output directory for intermediate,
            temporary, and final files
        args['results_suffix'] (string): (optional) string to append to any
            output file names
        args['dem_path'] (string): path to a digital elevation raster
        args['erosivity_path'] (string): path to rainfall erosivity index
            raster
        args['erodibility_path'] (string): a path to soil erodibility raster
        args['lulc_path'] (string): path to land use/land cover raster
        args['watersheds_path'] (string): path to vector of the watersheds
        args['biophysical_table_path'] (string): path to CSV file with
            biophysical information for each land use class.  Must contain
            the fields 'usle_c' and 'usle_p'
        args['threshold_flow_accumulation'] (number): number of upstream pixels
            on the dem to threshold to a stream.
        args['k_param'] (number): k calibration parameter
        args['sdr_max'] (number): maximum value of the SDR
        args['ic_0_param'] (number): ic_0 calibration parameter
        args['drainage_path'] (string): (optional) path to drainage raster that
            is used to add additional drainage areas to the internally
            calculated stream layer

    Returns:
        None.
    """
    file_suffix = utils.make_suffix_string(args, 'results_suffix')

    biophysical_table = utils.build_lookup_from_csv(
        args['biophysical_table_path'], 'lucode')

    # Test to see if c or p values are outside of 0..1
    for table_key in ['usle_c', 'usle_p']:
        for (lulc_code, table) in biophysical_table.items():
            try:
                float_value = float(table[table_key])
            except ValueError:
                raise ValueError(
                    'Value is not a floating point value within range 0..1 '
                    'offending value table %s, lulc_code %s, value %s' % (
                        table_key, str(lulc_code), table[table_key]))
            if float_value < 0 or float_value > 1:
                raise ValueError(
                    'Value should be within range 0..1 offending value '
                    'table %s, lulc_code %s, value %s' % (
                        table_key, str(lulc_code), str(float_value)))

    intermediate_output_dir = os.path.join(
        args['workspace_dir'], 'intermediate_outputs')
    output_dir = os.path.join(args['workspace_dir'])
    utils.make_directories([output_dir, intermediate_output_dir])

    f_reg = utils.build_file_registry(
        [(_OUTPUT_BASE_FILES, output_dir),
         (_INTERMEDIATE_BASE_FILES, intermediate_output_dir),
         (_TMP_BASE_FILES, output_dir)], file_suffix)

    base_list = []
    aligned_list = []
    for file_key in ['dem', 'lulc', 'erosivity', 'erodibility']:
        base_list.append(args[file_key + "_path"])
        aligned_list.append(f_reg["aligned_" + file_key + "_path"])

    drainage_present = False
    if 'drainage_path' in args and args['drainage_path'] != '':
        drainage_present = True
        base_list.append(args['drainage_path'])
        aligned_list.append(f_reg['aligned_drainage_path'])

    dem_pixel_size = pygeoprocessing.get_raster_info(
        args['dem_path'])['pixel_size']
    pygeoprocessing.align_and_resize_raster_stack(
        base_list, aligned_list, ['nearest'] * len(base_list),
        dem_pixel_size, 'intersection',
        base_vector_path_list=[args['watersheds_path']],
        raster_align_index=0)

    LOGGER.info("calculating slope")
    natcap.invest.pygeoprocessing_0_3_3.calculate_slope(
        f_reg['aligned_dem_path'], f_reg['slope_path'])
    _threshold_slope(f_reg['slope_path'], f_reg['thresholded_slope_path'])

    LOGGER.info("calculating flow direction")
    natcap.invest.pygeoprocessing_0_3_3.routing.flow_direction_d_inf(
        f_reg['aligned_dem_path'], f_reg['flow_direction_path'])

    LOGGER.info("calculating flow accumulation")
    natcap.invest.pygeoprocessing_0_3_3.routing.flow_accumulation(
        f_reg['flow_direction_path'], f_reg['aligned_dem_path'],
        f_reg['flow_accumulation_path'])

    LOGGER.info('calculate ls term')

    _calculate_ls_factor(
        f_reg['flow_accumulation_path'], f_reg['slope_path'],
        f_reg['flow_direction_path'], f_reg['ls_path'])

    LOGGER.info("classifying streams from flow accumulation raster")
    natcap.invest.pygeoprocessing_0_3_3.routing.stream_threshold(
        f_reg['flow_accumulation_path'],
        float(args['threshold_flow_accumulation']),
        f_reg['stream_path'])

    if drainage_present:
        _add_drainage(
            f_reg['stream_path'],
            f_reg['aligned_drainage_path'],
            f_reg['stream_and_drainage_path'])
        f_reg['drainage_raster_path'] = (
            f_reg['stream_and_drainage_path'])
    else:
        f_reg['drainage_raster_path'] = (
            f_reg['stream_path'])

    LOGGER.info('calculate per pixel W')
    _calculate_w(
        biophysical_table, f_reg['aligned_lulc_path'], f_reg['w_path'],
        f_reg['thresholded_w_path'])

    LOGGER.info('calculate CP raster')
    _calculate_cp(
        biophysical_table, f_reg['aligned_lulc_path'],
        f_reg['cp_factor_path'])

    LOGGER.info('calculating RKLS')
    _calculate_rkls(*[f_reg[key] for key in [
        'ls_path', 'aligned_erosivity_path', 'aligned_erodibility_path',
        'drainage_raster_path', 'rkls_path']])

    LOGGER.info('calculating USLE')
    _calculate_usle(*[f_reg[key] for key in [
        'rkls_path', 'cp_factor_path', 'drainage_raster_path', 'usle_path']])

    LOGGER.info('calculating w_bar')
    for factor_path, accumulation_path, out_bar_path in [
            (f_reg['thresholded_w_path'], f_reg['w_accumulation_path'],
             f_reg['w_bar_path']),
            (f_reg['thresholded_slope_path'], f_reg['s_accumulation_path'],
             f_reg['s_bar_path'])]:
        _calculate_bar_factor(
            f_reg['aligned_dem_path'], factor_path,
            f_reg['flow_accumulation_path'], f_reg['flow_direction_path'],
            f_reg['zero_absorption_source_path'], f_reg['loss_path'],
            accumulation_path, out_bar_path)

    LOGGER.info('calculating d_up')
    _calculate_d_up(
        *[f_reg[key] for key in [
            'w_bar_path', 's_bar_path', 'flow_accumulation_path',
            'd_up_path']])

    LOGGER.info('calculate WS factor')
    _calculate_inverse_ws_factor(
        f_reg['thresholded_slope_path'], f_reg['thresholded_w_path'],
        f_reg['ws_inverse_path'])

    LOGGER.info('calculating d_dn')
    natcap.invest.pygeoprocessing_0_3_3.routing.routing_core.distance_to_stream(
        f_reg['flow_direction_path'], f_reg['drainage_raster_path'],
        f_reg['d_dn_path'], factor_uri=f_reg['ws_inverse_path'])

    LOGGER.info('calculate ic')
    _calculate_ic(
        f_reg['d_up_path'], f_reg['d_dn_path'], f_reg['ic_path'])

    LOGGER.info('calculate sdr')
    _calculate_sdr(
        float(args['k_param']), float(args['ic_0_param']),
        float(args['sdr_max']), f_reg['ic_path'],
        f_reg['drainage_raster_path'], f_reg['sdr_path'])

    LOGGER.info('calculate sed export')
    _calculate_sed_export(
        f_reg['usle_path'], f_reg['sdr_path'], f_reg['sed_export_path'])

    LOGGER.info('calculate sediment retention index')
    _calculate_sed_retention_index(
        f_reg['rkls_path'], f_reg['usle_path'], f_reg['sdr_path'],
        float(args['sdr_max']), f_reg['sed_retention_index_path'])

    LOGGER.info('calculate sediment retention')
    LOGGER.info('calculate S factor')
    _calculate_inverse_s_factor(
        f_reg['thresholded_slope_path'], f_reg['s_inverse_path'])

    LOGGER.info('calculating d_dn bare soil')
    natcap.invest.pygeoprocessing_0_3_3.routing.routing_core.distance_to_stream(
        f_reg['flow_direction_path'], f_reg['drainage_raster_path'],
        f_reg['d_dn_bare_soil_path'], factor_uri=f_reg['s_inverse_path'])

    LOGGER.info('calculating d_up bare soil')
    _calculate_d_up_bare(
        f_reg['s_bar_path'], f_reg['flow_accumulation_path'],
        f_reg['d_up_bare_soil_path'])

    LOGGER.info('calculate ic')
    _calculate_ic(
        f_reg['d_up_bare_soil_path'], f_reg['d_dn_bare_soil_path'],
        f_reg['ic_bare_soil_path'])

    _calculate_sdr(
        float(args['k_param']), float(args['ic_0_param']),
        float(args['sdr_max']), f_reg['ic_bare_soil_path'],
        f_reg['drainage_raster_path'], f_reg['sdr_bare_soil_path'])

    _calculate_sed_retention(
        f_reg['rkls_path'], f_reg['usle_path'], f_reg['drainage_raster_path'],
        f_reg['sdr_path'], f_reg['sdr_bare_soil_path'],
        f_reg['sed_retention_path'])

    LOGGER.info('generating report')
    _generate_report(
        args['watersheds_path'], f_reg['usle_path'],
        f_reg['sed_export_path'], f_reg['sed_retention_path'],
        f_reg['watershed_results_sdr_path'])

    for tmp_filename_key in _TMP_BASE_FILES:
        if os.path.exists(f_reg[tmp_filename_key]):
            os.remove(f_reg[tmp_filename_key])
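A hedged example of the `args` dictionary that `execute` expects, with made-up paths and parameter values:

args = {
    'workspace_dir': 'sdr_workspace',  # hypothetical values throughout
    'results_suffix': 'test',
    'dem_path': 'dem.tif',
    'erosivity_path': 'erosivity.tif',
    'erodibility_path': 'erodibility.tif',
    'lulc_path': 'lulc.tif',
    'watersheds_path': 'watersheds.shp',
    'biophysical_table_path': 'biophysical.csv',
    'threshold_flow_accumulation': 1000,
    'k_param': 2.0,
    'sdr_max': 0.8,
    'ic_0_param': 0.5,
    # 'drainage_path' is optional and omitted here
}
execute(args)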
Example #23
def get_inputs(args):
    """Get Inputs.

    Parameters:
        workspace_dir (str): workspace directory
        results_suffix (str): optional suffix appended to results
        lulc_lookup_uri (str): lulc lookup table filepath
        lulc_transition_matrix_uri (str): lulc transition table filepath
        carbon_pool_initial_uri (str): initial conditions table filepath
        carbon_pool_transient_uri (str): transient conditions table filepath
        lulc_baseline_map_uri (str): baseline map filepath
        lulc_transition_maps_list (list): ordered list of transition map
            filepaths
        lulc_transition_years_list (list): ordered list of transition years
        analysis_year (int): optional final year to extend the analysis beyond
            the last transition year
        do_economic_analysis (bool): whether to run economic component of
            the analysis
        do_price_table (bool): whether to use the price table for the economic
            component of the analysis
        price (float): the price of net sequestered carbon
        inflation_rate (float): the interest rate on the price of carbon
        price_table_uri (str): price table filepath
        discount_rate (float): the discount rate on future valuations of carbon

    Returns:
        d (dict): data dictionary.

    Example Returns:
        d = {
            'do_economic_analysis': <bool>,
            'lulc_to_Sb': <dict>,
            'lulc_to_Ss': <dict>,
            'lulc_to_L': <dict>,
            'lulc_to_Yb': <dict>,
            'lulc_to_Ys': <dict>,
            'lulc_to_Hb': <dict>,
            'lulc_to_Hs': <dict>,
            'lulc_trans_to_Db': <dict>,
            'lulc_trans_to_Ds': <dict>,
            'C_r_rasters': <list>,
            'transition_years': <list>,
            'snapshot_years': <list>,
            'timesteps': <int>,
            'transitions': <list>,
            'price_t': <list>,
            'File_Registry': <dict>
        }

    """
    d = {
        'do_economic_analysis': False,
        'lulc_to_Sb': {'lulc': 'biomass'},
        'lulc_to_Ss': {'lulc': 'soil'},
        'lulc_to_L': {'lulc': 'litter'},
        'lulc_to_Yb': {'lulc': 'accum-bio'},
        'lulc_to_Ys': {'lulc': 'accum-soil'},
        'lulc_to_Hb': {'lulc': 'hl-bio'},
        'lulc_to_Hs': {'lulc': 'hl-soil'},
        'lulc_trans_to_Db': {('lulc1', 'lulc2'): 'dist-val'},
        'lulc_trans_to_Ds': {('lulc1', 'lulc2'): 'dist-val'},
        'C_r_rasters': [],
        'transition_years': [],
        'snapshot_years': [],
        'timesteps': None,
        'transitions': None,
        'price_t': None,
    }

    # Directories
    args['results_suffix'] = utils.make_suffix_string(
        args, 'results_suffix')
    outputs_dir = os.path.join(args['workspace_dir'], 'outputs_core')
    intermediate_dir = os.path.join(args['workspace_dir'], 'intermediate')
    utils.make_directories([args['workspace_dir'],
                            outputs_dir,
                            intermediate_dir])

    # Rasters
    try:
        d['transition_years'] = [int(i) for i in
                                 args['lulc_transition_years_list']]
    except KeyError:
        d['transition_years'] = []

    # Comparing the sorted version of this list handles the case where there
    # might not be any transition_years.
    if sorted(d['transition_years']) != d['transition_years']:
        raise ValueError(
            'LULC snapshot years must be provided in chronological order '
            'and in the same order as the LULC snapshot rasters.')

    d['transitions'] = len(d['transition_years']) + 1  # +1 for lulc baseline

    d['snapshot_years'] = [int(args['lulc_baseline_year'])] + d['transition_years'][:]
    if 'analysis_year' in args and args['analysis_year'] not in ['', None]:
        if int(args['analysis_year']) <= d['snapshot_years'][-1]:
            raise ValueError(
                'Analysis year must be greater than last transition year.')
        d['snapshot_years'].append(int(args['analysis_year']))

    d['timesteps'] = d['snapshot_years'][-1] - d['snapshot_years'][0]

    try:
        transition_raster_paths = args['lulc_transition_maps_list']
    except KeyError:
        transition_raster_paths = []

    aligned_baseline_lulc_path = os.path.join(
        intermediate_dir, 'aligned_lulc%s.tif' % args['results_suffix'])
    aligned_transition_raster_paths = [
        os.path.join(intermediate_dir, 'aligned_transition_%s%s.tif' % (
            year, args['results_suffix']))
        for year in d['transition_years']]
    baseline_info = pygeoprocessing.get_raster_info(
        args['lulc_baseline_map_uri'])

    pygeoprocessing.align_and_resize_raster_stack(
        [args['lulc_baseline_map_uri']] + transition_raster_paths,
        [aligned_baseline_lulc_path] + aligned_transition_raster_paths,
        ['near'] * (1 + len(aligned_transition_raster_paths)),
        baseline_info['pixel_size'], 'intersection')

    d['C_prior_raster'] = aligned_baseline_lulc_path
    d['C_r_rasters'] = aligned_transition_raster_paths

    # Reclass Dictionaries
    lulc_lookup_dict = utils.build_lookup_from_csv(
        args['lulc_lookup_uri'], 'lulc-class')
    lulc_to_code_dict = \
        dict((k.lower(), v['code']) for k, v in lulc_lookup_dict.items() if k)
    initial_dict = utils.build_lookup_from_csv(
            args['carbon_pool_initial_uri'], 'lulc-class')

    code_dict = dict((lulc_to_code_dict[k.lower()], s) for (k, s)
                     in initial_dict.items() if k)
    for args_key, col_name in [('lulc_to_Sb', 'biomass'),
                               ('lulc_to_Ss', 'soil'),
                               ('lulc_to_L', 'litter')]:
        d[args_key] = dict(
            (code, row[col_name]) for code, row in code_dict.items())

    # Transition Dictionaries
    biomass_transient_dict, soil_transient_dict = \
        _create_transient_dict(args['carbon_pool_transient_uri'])

    d['lulc_to_Yb'] = dict((key, sub['yearly-accumulation'])
                           for key, sub in biomass_transient_dict.items())
    d['lulc_to_Ys'] = dict((key, sub['yearly-accumulation'])
                           for key, sub in soil_transient_dict.items())
    d['lulc_to_Hb'] = dict((key, sub['half-life'])
                           for key, sub in biomass_transient_dict.items())
    d['lulc_to_Hs'] = dict((key, sub['half-life'])
                           for key, sub in soil_transient_dict.items())

    # Parse LULC Transition CSV (Carbon Direction and Relative Magnitude)
    d['lulc_trans_to_Db'], d['lulc_trans_to_Ds'] = _get_lulc_trans_to_D_dicts(
        args['lulc_transition_matrix_uri'],
        args['lulc_lookup_uri'],
        biomass_transient_dict,
        soil_transient_dict)

    # Economic Analysis
    d['do_economic_analysis'] = False
    if args['do_economic_analysis']:
        d['do_economic_analysis'] = True
        # convert percentage to decimal
        discount_rate = float(args['discount_rate']) * 0.01
        if args['do_price_table']:
            d['price_t'] = _get_price_table(
                args['price_table_uri'],
                d['snapshot_years'][0],
                d['snapshot_years'][-1])
        else:
            inflation_rate = float(args['inflation_rate']) * 0.01
            price = float(args['price'])
            d['price_t'] = (1 + inflation_rate) ** numpy.arange(
                0, float(d['timesteps'])+1) * price

        d['price_t'] /= (1 + discount_rate) ** numpy.arange(0, d['timesteps']+1)

    # Create Output Rasters
    d['File_Registry'] = _build_file_registry(
        d['C_prior_raster'],
        d['C_r_rasters'],
        d['snapshot_years'],
        args['results_suffix'],
        d['do_economic_analysis'],
        outputs_dir, intermediate_dir)

    return d
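The economic block above reduces to price_t = price * (1 + inflation_rate)**t / (1 + discount_rate)**t for t = 0..timesteps; a small numeric check with made-up rates:

import numpy

price, inflation_rate, discount_rate = 10.0, 0.02, 0.05
timesteps = 3
price_t = (1 + inflation_rate) ** numpy.arange(0, timesteps + 1) * price
price_t /= (1 + discount_rate) ** numpy.arange(0, timesteps + 1)
print(price_t)  # [10.      9.7143  9.4367  9.1671] (approximately)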
Example #24
def raster_list_sum(raster_list,
                    target_path,
                    target_datatype,
                    nodata_remove=False):
    """Calculate the sum per pixel across rasters in a list.

    Sum the rasters in `raster_list` element-wise, allowing nodata values
    in the rasters to propagate to the result or treating nodata as zero. If
    nodata is treated as zero, areas where all inputs are nodata will be nodata
    in the output.
    This function also checks for agreement among the inputs in terms of
    spatial reference, pixel size, and nodata values; it aligns all of the
    inputs; and it clips the sum of the inputs to the regional boundary.

    Parameters:
        raster_list (list): list of paths to rasters to sum
        target_path (string): path to location to store the result
        target_datatype (gdal data type enum): GDAL data type for result raster
        nodata_remove (bool): if true, treat nodata values in input
            rasters as zero. If false, the sum in a pixel where any input
            raster is nodata is nodata.

    Side effects:
        modifies or creates the raster indicated by `target_path`

    Returns:
        None

    """
    def raster_sum_op(*raster_list):
        """Add the rasters in raster_list without removing nodata values."""
        invalid_mask = numpy.any(numpy.isclose(numpy.array(raster_list),
                                               _TARGET_NODATA),
                                 axis=0)
        for r in raster_list:
            numpy.place(r, numpy.isclose(r, _TARGET_NODATA), [0])
        sum_of_rasters = numpy.sum(raster_list, axis=0)
        sum_of_rasters[invalid_mask] = _TARGET_NODATA
        return sum_of_rasters

    def raster_sum_op_nodata_remove(*raster_list):
        """Add the rasters in raster_list, treating nodata as zero."""
        invalid_mask = numpy.all(numpy.isclose(numpy.array(raster_list),
                                               _TARGET_NODATA),
                                 axis=0)
        for r in raster_list:
            numpy.place(r, numpy.isclose(r, _TARGET_NODATA), [0])
        sum_of_rasters = numpy.sum(raster_list, axis=0)
        sum_of_rasters[invalid_mask] = _TARGET_NODATA
        return sum_of_rasters

    # check that all inputs have same spatial reference, pixel size, & nodata
    raster_info = pygeoprocessing.get_raster_info(raster_list[0])
    destination_proj = osr.SpatialReference()
    destination_proj.ImportFromWkt(raster_info['projection_wkt'])
    srs_problems = []
    pixel_size_problems = []
    for other_path in raster_list:
        other_info = pygeoprocessing.get_raster_info(other_path)
        new_proj = osr.SpatialReference()
        new_proj.ImportFromWkt(other_info['projection_wkt'])
        if (new_proj.IsSame(destination_proj) == 0):
            srs_problems.append(other_path)
        if other_info['pixel_size'] != raster_info['pixel_size']:
            pixel_size_problems.append(other_path)
    if srs_problems:
        srs_problems.append(raster_list[0])
        raise ValueError("Project these to match: {}".format(srs_problems))
    if pixel_size_problems:
        pixel_size_problems.append(raster_list[0])
        # raise ValueError(
        #     "These do not have same pixel size: {}".format(
        #         pixel_size_problems))  # TODO put back in ?
    for other_path in raster_list:
        other_nodata = pygeoprocessing.get_raster_info(other_path)['nodata'][0]
        if other_nodata != _TARGET_NODATA:
            print("reclassifying nodata")
            reclassify_nodata(other_path, _TARGET_NODATA)

    # align all the rasters to be summed together
    print("aligning {} rasters".format(len(raster_list)))
    with tempfile.TemporaryDirectory() as align_dir:
        aligned_path_list = [
            os.path.join(align_dir, os.path.basename(p)) for p in raster_list
        ]
        pygeoprocessing.align_and_resize_raster_stack(
            raster_list, aligned_path_list, ['near'] * len(raster_list),
            raster_info['pixel_size'], 'union')

        print("calculating raster sum")
        unclip_path = os.path.join(align_dir, 'sum_unclip.tif')
        if nodata_remove:
            pygeoprocessing.raster_calculator([(path, 1)
                                               for path in aligned_path_list],
                                              raster_sum_op_nodata_remove,
                                              unclip_path, target_datatype,
                                              _TARGET_NODATA)
        else:
            pygeoprocessing.raster_calculator([(path, 1)
                                               for path in aligned_path_list],
                                              raster_sum_op, unclip_path,
                                              target_datatype, _TARGET_NODATA)

        # clip the sum to the regional boundary
        print("clipping to regional boundary")
        pygeoprocessing.mask_raster((unclip_path, 1), _BOUNDARY_PATH,
                                    target_path)
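The only difference between the two sum operators above is the mask reduction: `numpy.any` makes the strict sum nodata wherever any input is nodata, while `numpy.all` (the nodata_remove case) yields nodata only where every input is nodata. A toy check, assuming `_TARGET_NODATA` is -1:

import numpy

_TARGET_NODATA = -1  # assumed value for this illustration
a = numpy.array([1.0, _TARGET_NODATA, _TARGET_NODATA])
b = numpy.array([2.0, 3.0, _TARGET_NODATA])
stacked = numpy.isclose(numpy.array([a, b]), _TARGET_NODATA)
print(numpy.any(stacked, axis=0))  # [False  True  True] -> strict sum nodata
print(numpy.all(stacked, axis=0))  # [False False  True] -> nodata-as-zero sum nodata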
Example #25
def _preprocess_rasters(base_raster_path_list,
                        churn_dir,
                        target_processed_raster_list_file_path,
                        target_sr_wkt=None,
                        target_pixel_size=None,
                        resample_method='near'):
    """Process base raster path list so it can be used in raster calcs.

    Parameters:
        base_raster_path_list (list): list of arbitrary rasters.
        churn_dir (str): path to a directory that can be used to write
            temporary files that could be used later for
            caching/reproducibility.
        target_processed_raster_list_file_path (str): path to a pickle file
            for processed output list that contains the list of raster paths
            that can be used in raster calcs, note this may be the original
            list of rasters or they may have been created by this call.
        target_sr_wkt (string): if not None, this is the desired
            projection of the target rasters in Well Known Text format. If
            None and all symbol rasters have the same projection, that
            projection will be used. Otherwise a ValueError is raised
            indicating that the rasters are in different projections with
            no guidance to resolve.
        target_pixel_size (tuple): if not None, desired output target pixel
            size. A ValueError is raised if symbol rasters have different
            pixel sizes and this value is None.
        resample_method (str): if the symbol rasters need to be resized for
            any reason, this method is used. The value can be one of:
            "near|bilinear|cubic|cubicspline|lanczos|average|mode|max".

    Returns:
        ``None``

    """
    resample_inputs = False

    base_info_list = [
        pygeoprocessing.get_raster_info(path) for path in base_raster_path_list
    ]
    base_projection_list = [info['projection'] for info in base_info_list]
    base_pixel_list = [info['pixel_size'] for info in base_info_list]
    base_raster_shape_list = [info['raster_size'] for info in base_info_list]

    if len(set(base_projection_list)) != 1:
        if target_sr_wkt is None:
            raise ValueError(
                "Projections of base rasters are not equal and there "
                "is no `target_sr_wkt` defined.\nprojection list: %s"
                % str(base_projection_list))
        else:
            LOGGER.info('projections are different')
            resample_inputs = True

    if len(set(base_pixel_list)) != 1:
        if target_pixel_size is None:
            raise ValueError(
                "base and reference pixel sizes are different and no target "
                "is defined.\nbase pixel sizes: %s" % str(base_pixel_list))
        LOGGER.info('pixel sizes are different')
        resample_inputs = True
    else:
        # else use the pixel size they all have
        target_pixel_size = base_pixel_list[0]

    if len(set(base_raster_shape_list)) != 1:
        LOGGER.info('raster shapes different')
        resample_inputs = True

    if resample_inputs:
        LOGGER.info("need to align/reproject inputs to apply calculation")
        try:
            os.makedirs(churn_dir)
        except OSError:
            LOGGER.debug('churn dir %s already exists', churn_dir)

        operand_raster_path_list = [
            os.path.join(churn_dir, os.path.basename(path))
            for path in base_raster_path_list
        ]
        pygeoprocessing.align_and_resize_raster_stack(
            base_raster_path_list,
            operand_raster_path_list,
            [resample_method] * len(base_raster_path_list),
            target_pixel_size,
            'intersection',
            target_sr_wkt=target_sr_wkt)
        result = operand_raster_path_list
    else:
        result = base_raster_path_list
    with open(target_processed_raster_list_file_path, 'wb') as result_file:
        pickle.dump(result, result_file)
Example #26
    working_dir = tempfile.mkdtemp(dir='.', prefix='avg_raster_workspace')

    file_list = [
        path for pattern in args.raster_pattern for path in glob.glob(pattern)
    ]

    aligned_list = [
        os.path.join(working_dir, os.path.basename(path)) for path in file_list
    ]

    target_pixel_size = pygeoprocessing.get_raster_info(
        file_list[0])['pixel_size']

    pygeoprocessing.align_and_resize_raster_stack(file_list, aligned_list,
                                                  ['near'] * len(aligned_list),
                                                  target_pixel_size, 'union')

    nodata_list = [(pygeoprocessing.get_raster_info(path)['nodata'][0], 'raw')
                   for path in aligned_list]

    # count valid pixels
    pygeoprocessing.raster_calculator(
        [(path, 1) for path in aligned_list] + nodata_list, count_op,
        args.prefix + TARGET_VALID_COUNT_RASTER_PATH, gdal.GDT_Int32,
        COUNT_NODATA)

    # average valid pixels
    pygeoprocessing.raster_calculator([(path, 1)
                                       for path in aligned_list] + nodata_list,
                                      average_op,
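`count_op` (and the analogous `average_op`) are referenced above but not defined in this excerpt. A hypothetical reconstruction of `count_op`, consistent with `raster_calculator` passing the raster bands first and the raw nodata values after, might be:

import numpy

def count_op(*arrays_and_nodatas):
    """Count, per pixel, how many input rasters hold valid data."""
    # hypothetical sketch: first half are arrays, second half raw nodatas
    n_rasters = len(arrays_and_nodatas) // 2
    arrays = arrays_and_nodatas[:n_rasters]
    nodatas = arrays_and_nodatas[n_rasters:]
    count = numpy.zeros(arrays[0].shape, dtype=numpy.int32)
    for array, nodata in zip(arrays, nodatas):
        count += (~numpy.isclose(array, nodata)).astype(numpy.int32)
    return count

Presumably `average_op` sums valid values the same way and divides by this count wherever it is positive.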
Example #27
def _execute(args):
    """Execute the seasonal water yield model.

    Parameters:
        See the parameters for
        `natcap.invest.seasonal_water_yield.seasonal_wateryield.execute`.

    Returns:
        None
    """
    LOGGER.info('prepare and test inputs for common errors')

    # fail early on a missing required rain events table
    if (not args['user_defined_local_recharge'] and
            not args['user_defined_climate_zones']):
        rain_events_lookup = (
            utils.build_lookup_from_csv(
                args['rain_events_table_path'], 'month'))

    biophysical_table = utils.build_lookup_from_csv(
        args['biophysical_table_path'], 'lucode')

    if args['monthly_alpha']:
        # parse out the alpha lookup table of the form (month_id: alpha_val)
        alpha_month = dict(
            (key, val['alpha']) for key, val in
            utils.build_lookup_from_csv(
                args['monthly_alpha_path'], 'month').items())
    else:
        # make all 12 entries equal to args['alpha_m']
        alpha_m = float(fractions.Fraction(args['alpha_m']))
        alpha_month = dict(
            (month_index+1, alpha_m) for month_index in range(12))

    beta_i = float(fractions.Fraction(args['beta_i']))
    gamma = float(fractions.Fraction(args['gamma']))
    threshold_flow_accumulation = float(args['threshold_flow_accumulation'])
    pixel_size = pygeoprocessing.get_raster_info(
        args['dem_raster_path'])['pixel_size']
    file_suffix = utils.make_suffix_string(args, 'results_suffix')
    intermediate_output_dir = os.path.join(
        args['workspace_dir'], 'intermediate_outputs')
    output_dir = args['workspace_dir']
    utils.make_directories([intermediate_output_dir, output_dir])

    LOGGER.info('Building file registry')
    file_registry = utils.build_file_registry(
        [(_OUTPUT_BASE_FILES, output_dir),
         (_INTERMEDIATE_BASE_FILES, intermediate_output_dir),
         (_TMP_BASE_FILES, output_dir)], file_suffix)

    LOGGER.info('Checking that the AOI is not the output aggregate vector')
    if (os.path.normpath(args['aoi_path']) ==
            os.path.normpath(file_registry['aggregate_vector_path'])):
        raise ValueError(
            "The input AOI is the same as the output aggregate vector, "
            "please choose a different workspace or move the AOI file "
            "out of the current workspace %s" %
            file_registry['aggregate_vector_path'])

    LOGGER.info('Aligning and clipping dataset list')
    input_align_list = [args['lulc_raster_path'], args['dem_raster_path']]
    output_align_list = [
        file_registry['lulc_aligned_path'], file_registry['dem_aligned_path']]
    if not args['user_defined_local_recharge']:
        precip_path_list = []
        et0_path_list = []

        et0_dir_list = [
            os.path.join(args['et0_dir'], f) for f in os.listdir(
                args['et0_dir'])]
        precip_dir_list = [
            os.path.join(args['precip_dir'], f) for f in os.listdir(
                args['precip_dir'])]

        for month_index in range(1, N_MONTHS + 1):
            month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_index)

            for data_type, dir_list, path_list in [
                    ('et0', et0_dir_list, et0_path_list),
                    ('Precip', precip_dir_list, precip_path_list)]:
                file_list = [
                    month_file_path for month_file_path in dir_list
                    if month_file_match.match(month_file_path)]
                if len(file_list) == 0:
                    raise ValueError(
                        "No %s found for month %d" % (data_type, month_index))
                if len(file_list) > 1:
                    raise ValueError(
                        "Ambiguous set of files found for month %d: %s" %
                        (month_index, file_list))
                path_list.append(file_list[0])

        input_align_list = (
            precip_path_list + [args['soil_group_path']] + et0_path_list +
            input_align_list)
        output_align_list = (
            file_registry['precip_path_aligned_list'] +
            [file_registry['soil_group_aligned_path']] +
            file_registry['et0_path_aligned_list'] + output_align_list)

    align_index = len(input_align_list) - 1  # this aligns with the DEM
    if args['user_defined_local_recharge']:
        input_align_list.append(args['l_path'])
        output_align_list.append(file_registry['l_aligned_path'])
    elif args['user_defined_climate_zones']:
        input_align_list.append(args['climate_zone_raster_path'])
        output_align_list.append(
            file_registry['cz_aligned_raster_path'])
    interpolate_list = ['nearest'] * len(input_align_list)

    pygeoprocessing.align_and_resize_raster_stack(
        input_align_list, output_align_list, interpolate_list,
        pixel_size, 'intersection', base_vector_path_list=[args['aoi_path']],
        raster_align_index=align_index)

    LOGGER.info('flow direction')
    natcap.invest.pygeoprocessing_0_3_3.routing.flow_direction_d_inf(
        file_registry['dem_aligned_path'],
        file_registry['flow_dir_path'])

    LOGGER.info('flow weights')
    natcap.invest.pygeoprocessing_0_3_3.routing.routing_core.calculate_flow_weights(
        file_registry['flow_dir_path'],
        file_registry['outflow_weights_path'],
        file_registry['outflow_direction_path'])

    LOGGER.info('flow accumulation')
    natcap.invest.pygeoprocessing_0_3_3.routing.flow_accumulation(
        file_registry['flow_dir_path'],
        file_registry['dem_aligned_path'],
        file_registry['flow_accum_path'])

    LOGGER.info('stream thresholding')
    natcap.invest.pygeoprocessing_0_3_3.routing.stream_threshold(
        file_registry['flow_accum_path'],
        threshold_flow_accumulation,
        file_registry['stream_path'])

    LOGGER.info('quick flow')
    if args['user_defined_local_recharge']:
        file_registry['l_path'] = file_registry['l_aligned_path']
        li_nodata = pygeoprocessing.get_raster_info(
            file_registry['l_path'])['nodata'][0]

        def l_avail_op(l_array):
            """Calculate equation [8] L_avail = min(gamma*L, L)"""
            result = numpy.empty(l_array.shape)
            result[:] = li_nodata
            valid_mask = (l_array != li_nodata)
            result[valid_mask] = numpy.min(numpy.stack(
                (gamma*l_array[valid_mask], l_array[valid_mask])), axis=0)
            return result
        pygeoprocessing.raster_calculator(
            [(file_registry['l_path'], 1)], l_avail_op,
            file_registry['l_avail_path'], gdal.GDT_Float32, li_nodata)
    else:
        # user didn't predefine local recharge so calculate it
        LOGGER.info('loading number of monthly events')
        for month_id in range(N_MONTHS):
            if args['user_defined_climate_zones']:
                cz_rain_events_lookup = (
                    utils.build_lookup_from_csv(
                        args['climate_zone_table_path'], 'cz_id'))
                month_label = MONTH_ID_TO_LABEL[month_id]
                climate_zone_rain_events_month = dict([
                    (cz_id, cz_rain_events_lookup[cz_id][month_label]) for
                    cz_id in cz_rain_events_lookup])
                n_events_nodata = -1
                pygeoprocessing.reclassify_raster(
                    (file_registry['cz_aligned_raster_path'], 1),
                    climate_zone_rain_events_month,
                    file_registry['n_events_path_list'][month_id],
                    gdal.GDT_Float32, n_events_nodata, values_required=True)
            else:
                # rain_events_lookup defined near entry point of execute
                n_events = rain_events_lookup[month_id+1]['events']
                pygeoprocessing.new_raster_from_base(
                    file_registry['dem_aligned_path'],
                    file_registry['n_events_path_list'][month_id],
                    gdal.GDT_Float32, [TARGET_NODATA],
                    fill_value_list=[n_events])

        LOGGER.info('calculate curve number')
        _calculate_curve_number_raster(
            file_registry['lulc_aligned_path'],
            file_registry['soil_group_aligned_path'],
            biophysical_table, file_registry['cn_path'])

        LOGGER.info('calculate Si raster')
        _calculate_si_raster(
            file_registry['cn_path'], file_registry['stream_path'],
            file_registry['si_path'])

        for month_index in range(N_MONTHS):
            LOGGER.info('calculate quick flow for month %d', month_index+1)
            _calculate_monthly_quick_flow(
                file_registry['precip_path_aligned_list'][month_index],
                file_registry['lulc_aligned_path'], file_registry['cn_path'],
                file_registry['n_events_path_list'][month_index],
                file_registry['stream_path'],
                file_registry['qfm_path_list'][month_index],
                file_registry['si_path'])

        qf_nodata = -1
        LOGGER.info('calculate QFi')

        # TODO: lose this loop
        def qfi_sum_op(*qf_values):
            """Sum the monthly qfis."""
            qf_sum = numpy.zeros(qf_values[0].shape)
            valid_mask = qf_values[0] != qf_nodata
            valid_qf_sum = qf_sum[valid_mask]
            for index in range(len(qf_values)):
                valid_qf_sum += qf_values[index][valid_mask]
            qf_sum[:] = qf_nodata
            qf_sum[valid_mask] = valid_qf_sum
            return qf_sum

        pygeoprocessing.raster_calculator(
            [(path, 1) for path in file_registry['qfm_path_list']],
            qfi_sum_op, file_registry['qf_path'], gdal.GDT_Float32, qf_nodata)

        LOGGER.info('calculate local recharge')
        LOGGER.info('classify kc')
        for month_index in range(12):
            kc_lookup = {
                lucode: biophysical_table[lucode]['kc_%d' % (month_index + 1)]
                for lucode in biophysical_table}
            kc_nodata = -1  # a reasonable nodata value
            pygeoprocessing.reclassify_raster(
                (file_registry['lulc_aligned_path'], 1), kc_lookup,
                file_registry['kc_path_list'][month_index], gdal.GDT_Float32,
                kc_nodata)

        # call through to a cython function that does the necessary routing
        # between AET and L.sum.avail in equations [7], [4], and [3]
        seasonal_water_yield_core.calculate_local_recharge(
            file_registry['precip_path_aligned_list'],
            file_registry['et0_path_aligned_list'],
            file_registry['qfm_path_list'],
            file_registry['flow_dir_path'],
            file_registry['outflow_weights_path'],
            file_registry['outflow_direction_path'],
            file_registry['dem_aligned_path'],
            file_registry['lulc_aligned_path'], alpha_month,
            beta_i, gamma, file_registry['stream_path'],
            file_registry['l_path'],
            file_registry['l_avail_path'],
            file_registry['l_sum_avail_path'],
            file_registry['aet_path'], file_registry['kc_path_list'])

    # calculate Qb from the sum of local recharge (L) over the AOI, Eq [9]
    qb_sum, qb_valid_count = _sum_valid(file_registry['l_path'])
    qb_result = 0.0
    if qb_valid_count > 0:
        qb_result = qb_sum / qb_valid_count

    li_nodata = pygeoprocessing.get_raster_info(
        file_registry['l_path'])['nodata'][0]
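    # Eq. [10] normalizes each pixel's recharge by the AOI-wide sum, e.g.
    # qb_sum = 10.0 maps a pixel with L_i = 2.0 to vri = 0.2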

    def vri_op(li_array):
        """Calculate vri index [Eq 10]."""
        result = numpy.empty_like(li_array)
        result[:] = li_nodata
        if qb_sum > 0:
            valid_mask = li_array != li_nodata
            result[valid_mask] = li_array[valid_mask] / qb_sum
        return result
    pygeoprocessing.raster_calculator(
        [(file_registry['l_path'], 1)], vri_op, file_registry['vri_path'],
        gdal.GDT_Float32, li_nodata)

    _aggregate_recharge(
        args['aoi_path'], file_registry['l_path'],
        file_registry['vri_path'],
        file_registry['aggregate_vector_path'])

    LOGGER.info('calculate L_sum')  # Eq. [12]
    pygeoprocessing.new_raster_from_base(
        file_registry['dem_aligned_path'],
        file_registry['zero_absorption_source_path'],
        gdal.GDT_Float32, [TARGET_NODATA], fill_value_list=[0.0])
    natcap.invest.pygeoprocessing_0_3_3.routing.route_flux(
        file_registry['flow_dir_path'],
        file_registry['dem_aligned_path'],
        file_registry['l_path'],
        file_registry['zero_absorption_source_path'],
        file_registry['loss_path'],
        file_registry['l_sum_pre_clamp'], 'flux_only',
        stream_uri=file_registry['stream_path'])

    # The result of route_flux can be slightly negative due to roundoff
    # error (on the order of 1e-4). It is acceptable to clamp those values
    # to 0.0.
    l_sum_pre_clamp_nodata = pygeoprocessing.get_raster_info(
        file_registry['l_sum_pre_clamp'])['nodata'][0]

    def clamp_l_sum(l_sum_pre_clamp):
        """Clamp any negative values to 0.0."""
        result = l_sum_pre_clamp.copy()
        result[
            (l_sum_pre_clamp != l_sum_pre_clamp_nodata) &
            (l_sum_pre_clamp < 0.0)] = 0.0
        return result

    pygeoprocessing.raster_calculator(
        [(file_registry['l_sum_pre_clamp'], 1)], clamp_l_sum,
        file_registry['l_sum_path'], gdal.GDT_Float32, l_sum_pre_clamp_nodata)

    LOGGER.info('calculate B_sum')
    seasonal_water_yield_core.route_baseflow_sum(
        file_registry['dem_aligned_path'],
        file_registry['l_path'],
        file_registry['l_avail_path'],
        file_registry['l_sum_path'],
        file_registry['outflow_direction_path'],
        file_registry['outflow_weights_path'],
        file_registry['stream_path'],
        file_registry['b_sum_path'])

    LOGGER.info('calculate B')

    b_sum_nodata = li_nodata

    def op_b(b_sum, l_avail, l_sum):
        """Calculate B=max(B_sum*Lavail/L_sum, 0)."""
        valid_mask = (
            (b_sum != b_sum_nodata) & (l_avail != li_nodata) & (l_sum > 0) &
            (l_sum != l_sum_pre_clamp_nodata))
        result = numpy.empty(b_sum.shape)
        result[:] = b_sum_nodata
        result[valid_mask] = (
            b_sum[valid_mask] * l_avail[valid_mask] / l_sum[valid_mask])
        # if l_sum is zero it is acceptable to set B to zero (per Perrine,
        # via email)
        result[l_sum == 0] = 0.0
        result[(result < 0) & valid_mask] = 0
        return result
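    # e.g. b_sum = 10, l_avail = 2, l_sum = 4 gives B = 10 * 2 / 4 = 5;
    # pixels where l_sum == 0 are forced to 0 rather than left undefined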

    pygeoprocessing.raster_calculator(
        [(file_registry['b_sum_path'], 1),
         (file_registry['l_path'], 1),
         (file_registry['l_sum_path'], 1)], op_b, file_registry['b_path'],
        gdal.GDT_Float32, b_sum_nodata)

    LOGGER.info('deleting temporary files')
    for file_id in _TMP_BASE_FILES:
        try:
            if isinstance(file_registry[file_id], str):
                os.remove(file_registry[file_id])
            elif isinstance(file_registry[file_id], list):
                for tmp_path in file_registry[file_id]:
                    os.remove(tmp_path)
        except OSError:
            # Let it go.
            pass

    LOGGER.info('  (\\w/)  SWY Complete!')
    LOGGER.info('  (..  \\ ')
    LOGGER.info(' _/  )  \\______')
    LOGGER.info('(oo /\'\\        )`,')
    LOGGER.info(' `--\' (v  __( / ||')
    LOGGER.info('       |||  ||| ||')
    LOGGER.info('      //_| //_|')
Example #28
0
def generate_n_load():
    """
    The plan:

        * resample fertilizer map to match resolution of load_n
        * where load_n is -999, substitute the value from the fertilizer map
    """
    def ag_load_op(base_load_n_array, ag_load_array):
        """raster calculator replace _USE_AG_LOAD_ID with ag loads."""
        result = numpy.copy(base_load_n_array)
        if load_nodata is not None:
            nodata_load_mask = numpy.isclose(ag_load_array, load_nodata)
        else:
            nodata_load_mask = numpy.zeros(
                ag_load_array.shape, dtype=bool)
        ag_mask = (base_load_n_array == _USE_AG_LOAD_ID)
        valid_ag_mask = ag_mask & ~nodata_load_mask
        result[valid_ag_mask] = ag_load_array[valid_ag_mask]
        result[ag_mask & nodata_load_mask] = 0.0
        return result
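    # note: load_nodata used above is a closure variable; it is bound later
    # in this function, before raster_calculator invokes the op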

    if not os.path.exists(PROCESSING_DIR):
        os.makedirs(PROCESSING_DIR)

    rescaled_fertilizer_path = os.path.join(DATA_DIR, 'fertilizer_map',
                                            'align_to_load_n.tif')
    if not os.path.exists(rescaled_fertilizer_path):
        # align fertilizer map with load_n on natural veg
        with tempfile.NamedTemporaryFile(
                prefix='aligned_natveg',
                dir=PROCESSING_DIR) as natveg_load_temp_file:
            natveg_aligned_path = natveg_load_temp_file.name
            source_path_list = [
                base_data_dict['natveg_load'], base_data_dict['fertilizer_map']
            ]
            aligned_path_list = [natveg_aligned_path, rescaled_fertilizer_path]
            target_pixel_size = pygeoprocessing.get_raster_info(
                base_data_dict['natveg_load'])['pixel_size']
            bounding_box = pygeoprocessing.get_raster_info(
                base_data_dict['natveg_load'])['bounding_box']
            pygeoprocessing.align_and_resize_raster_stack(
                source_path_list,
                aligned_path_list, ['near'] * len(source_path_list),
                target_pixel_size,
                bounding_box,
                raster_align_index=0)
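            # only the rescaled fertilizer raster is kept; the aligned
            # natveg raster is written to the NamedTemporaryFile's path and
            # goes away when the context manager exits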
    ag_load_raster_path = rescaled_fertilizer_path
    load_n_per_ha_raster_path = base_data_dict['natveg_load']

    load_nodata = pygeoprocessing.get_raster_info(
        ag_load_raster_path)['nodata'][0]

    nodata = pygeoprocessing.get_raster_info(
        load_n_per_ha_raster_path)['nodata'][0]

    target_ag_load_path = os.path.join(DATA_DIR, 'n_load_natveg_ag.tif')
    pygeoprocessing.raster_calculator(
        [(load_n_per_ha_raster_path, 1), (ag_load_raster_path, 1)],
        ag_load_op, target_ag_load_path, gdal.GDT_Float32, nodata)
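As a quick sanity check of the substitution rule, a standalone copy of the nested op can be exercised on toy arrays. This is illustrative only: the -999 sentinel follows the docstring above, and the nodata value of -1.0 is arbitrary.

import numpy

_USE_AG_LOAD_ID = -999


def ag_load_op_check(base_load_n_array, ag_load_array, load_nodata):
    """Standalone copy of ag_load_op for a quick sanity check."""
    result = numpy.copy(base_load_n_array)
    if load_nodata is not None:
        nodata_load_mask = numpy.isclose(ag_load_array, load_nodata)
    else:
        nodata_load_mask = numpy.zeros(ag_load_array.shape, dtype=bool)
    ag_mask = (base_load_n_array == _USE_AG_LOAD_ID)
    valid_ag_mask = ag_mask & ~nodata_load_mask
    result[valid_ag_mask] = ag_load_array[valid_ag_mask]
    result[ag_mask & nodata_load_mask] = 0.0
    return result


print(ag_load_op_check(
    numpy.array([5.0, -999.0, -999.0]),
    numpy.array([2.0, 3.0, -1.0]),
    -1.0))  # -> [5. 3. 0.]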