def force_global_angular_data_to_equal_area_earth_grid(input_path, output_path):
    """Rewrite the metadata of a global angular (lat/lon) raster so it reads as
    an equal-area grid (EPSG:6933) without resampling the pixel values.

    The pixel data are copied unchanged; only the projection and geotransform
    are replaced, and a nodata value of -9999 is assigned if the input had none.
    """
    output_datatype = hb.get_datatype_from_uri(input_path)
    output_ndv = hb.get_ndv_from_path(input_path)

    # Target projection is always EPSG:6933 (equal-area). The original code
    # also read the input's WKT first, but that value was immediately
    # overwritten — dead call removed.
    match_wkt = hb.get_wkt_from_epsg_code(6933)

    input_geotransform = hb.get_geotransform_uri(input_path)

    # Start from the canonical 30-arcsecond equal-area geotransform, then scale
    # the pixel sizes from degrees to meters.
    output_geotransform = list(hb.common_geotransforms['wec_30s'])
    output_geotransform[1] = input_geotransform[1] * hb.size_of_one_arcdegree_at_equator_in_meters
    output_geotransform[5] = input_geotransform[5] * hb.size_of_one_arcdegree_at_equator_in_meters

    # Load the array, but use numpy to convert it to the new datatype
    input_array = hb.as_array(input_path).astype(hb.gdal_number_to_numpy_type[output_datatype])

    # BUGFIX: `if not output_ndv` also overrode a legitimate nodata value of 0;
    # only substitute the fallback when the input truly had no ndv.
    if output_ndv is None:
        output_ndv = -9999

    hb.save_array_as_geotiff(input_array, output_path,
                             data_type=output_datatype,
                             ndv=output_ndv,
                             geotransform_override=output_geotransform,
                             projection_override=match_wkt)
def force_geotiff_to_match_projection_ndv_and_datatype(input_path, match_path, output_path, output_datatype=None, output_ndv=None):
    """Rather than actually projecting, just change the metadata so it matches exactly. This only will be useful
    if there was a data error and something got a projection defined when the underlying data wasnt actually transofmred
    into that shape.

    NOTE that the output will keep the same geotransform as input, and only the projection, no data and datatype will change.

    Parameters
    ----------
    input_path : str - raster whose pixel values and geotransform are kept.
    match_path : str - raster supplying projection, and (by default) datatype and ndv.
    output_path : str - GeoTIFF to write.
    output_datatype : int, optional - GDAL datatype code; defaults to match_path's.
    output_ndv : number, optional - nodata value; defaults to match_path's, then -9999.
    """
    # BUGFIX: the original used `if not ...`, which discarded an explicit
    # ndv of 0 (and would mis-handle a falsy datatype); use `is None`.
    if output_datatype is None:
        output_datatype = hb.get_datatype_from_uri(match_path)

    if output_ndv is None:
        output_ndv = hb.get_ndv_from_path(match_path)

    match_wkt = hb.get_dataset_projection_wkt_uri(match_path)
    input_geotransform = hb.get_geotransform_uri(input_path)

    # Load the array, but use numpy to convert it to the new datatype
    input_array = hb.as_array(input_path).astype(hb.gdal_number_to_numpy_type[output_datatype])

    # Final fallback: match_path itself may have no ndv defined.
    if output_ndv is None:
        output_ndv = -9999

    hb.save_array_as_geotiff(input_array, output_path,
                             data_type=output_datatype,
                             ndv=output_ndv,
                             geotransform_override=input_geotransform,
                             projection_override=match_wkt)
def stitch_projections():
    """Stitch the per-zone projected LULC layers in p.layers_to_stitch into a
    single global GeoTIFF, then fill any cells SEALS did not generate (e.g.
    small islands) from the base input LULC.

    Operates on the module-global project object ``p``. Writes
    p.projected_lulc_stitched_path and, when a base map is available,
    p.projected_lulc_stitched_merged_path.
    """
    global p
    if p.run_this:
        p.projected_lulc_stitched_path = hb.ruri(
            os.path.join(p.cur_dir, 'projected_lulc.tif'))
        p.projected_lulc_stitched_merged_path = hb.ruri(
            os.path.join(p.cur_dir, 'projected_lulc_merged.tif'))
        p.original_lulc_stitched_path = hb.ruri(
            os.path.join(p.cur_dir, 'original_lulc.tif'))

        do_global_stitch = True
        if p.output_base_map_path and len(
                p.layers_to_stitch) > 0 and do_global_stitch:
            L.info(
                'Stamping generated lulcs with extent_shift_match_path of output_base_map_path '
                + str(p.output_base_map_path))
            # NOTE(review): the original assigned hb.get_datatype_from_uri(...)
            # to a variable named `ndv` and never used it — removed. The stitch
            # nodata value is hard-coded to 255 (uint8 LULC convention) below.
            hb.create_gdal_virtual_raster_using_file(
                p.layers_to_stitch,
                p.projected_lulc_stitched_path,
                p.output_base_map_path,
                dstnodata=255)

            base_raster_path_band_list = [(p.projected_lulc_stitched_path, 1),
                                          (p.output_base_map_path, 1)]

            def fill_where_missing(a, b):
                """Take b wherever a is nodata (255); otherwise keep a."""
                # NOTE == not work here because a.any() or a.all() error. Crappy workaround is inequalities.
                return np.where((a >= 255) & (b <= 255), b, a)

            # Because SEALS doesn't run for small islands, we fill in any missing values based on the base data input lulc.
            datatype_target = 1  # GDT_Byte
            nodata_target = 255
            opts = ['TILED=YES', 'BIGTIFF=IF_SAFER', 'COMPRESS=lzw']
            hb.raster_calculator(base_raster_path_band_list,
                                 fill_where_missing,
                                 p.projected_lulc_stitched_merged_path,
                                 datatype_target,
                                 nodata_target,
                                 gtiff_creation_options=opts)

            # Overviews are optional and geoecon may not be installed; this is
            # best-effort. BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit — narrowed to Exception.
            try:
                import geoecon as ge
                ge.add_geotiff_overview_file(
                    p.projected_lulc_stitched_merged_path)
            except Exception:
                pass
        else:
            L.info('Stitching together all of the generated LULCs.')
            if len(p.layers_to_stitch) > 0:
                hb.create_gdal_virtual_raster_using_file(
                    p.layers_to_stitch,
                    p.projected_lulc_stitched_path,
                    dstnodata=255)
# Exemple #4
def raster_calculator_flex(
    input_, op, output_path, **kwargs
):
    """Flexible wrapper around hb.raster_calculator.

    Parameters
    ----------
    input_ : str, hb.ArrayFrame, or list of either
        Raster path(s) to operate on (band 1 of each).
    op : callable
        Applied blockwise; must take one parameter per input raster.
    output_path : str
        GeoTIFF to create.

    KWARGS: datatype=None, ndv=None, gtiff_creation_options=None, compress=False

    Returns an hb.ArrayFrame wrapping output_path.
    """
    # Normalize input_ to a list of path strings.
    if isinstance(input_, str):
        input_ = [input_]
    elif isinstance(input_, hb.ArrayFrame):
        # BUGFIX: the original assigned input_ = input_.path (a bare string)
        # here, so the code below iterated the path character-by-character.
        input_ = [input_.path]

    if not isinstance(input_, list):
        raise NameError(
            'input_ given to raster_calculator_flex() not understood. Give a path or list of paths.'
        )

    input_ = [i.path if isinstance(i, hb.ArrayFrame) else i for i in input_]
    input_size = len(input_)

    # Check that files exist.
    for i in input_:
        if not os.path.exists(i):
            raise FileNotFoundError(
                str(input_) + ' not found by raster_calculator_flex()')

    # Output datatype: explicit kwarg wins, else first input's datatype.
    datatype = kwargs.get('datatype', None)
    if datatype is None:
        datatypes = [hb.get_datatype_from_uri(i) for i in input_]
        if len(set(datatypes)) > 1:
            L.info(
                'Rasters given to raster_calculator_flex() were not all of the same type. Defaulting to using first input datatype.'
            )
        datatype = datatypes[0]

    # Output ndv: explicit kwarg wins, else first input's ndv.
    # BUGFIX: use `is None` so an explicit ndv of 0 (or 0.0) is honored.
    ndv = kwargs.get('ndv', None)
    if ndv is None:
        ndvs = [hb.get_nodata_from_uri(i) for i in input_]
        if len(set(ndvs)) > 1:
            L.info(
                'NDVs used in rasters given to raster_calculator_flex() were not all the same. Defaulting to using first value.'
            )
        ndv = ndvs[0]

    gtiff_creation_options = kwargs.get('gtiff_creation_options', None)
    if not gtiff_creation_options:
        gtiff_creation_options = ['TILED=YES',
                                  'BIGTIFF=IF_SAFER']  #, 'COMPRESS=lzw']

    if kwargs.get('compress', None):
        gtiff_creation_options.append('COMPRESS=lzw')

    # All entries are path strings at this point; band 1 of each.
    input_tuples_list = [(i, 1) for i in input_]

    # Check that the op matches the number of rasters.
    if len(inspect.signature(op).parameters) != input_size:
        raise NameError(
            'op given to raster_calculator_flex() did not have the same number of parameters as the number of rasters given.'
        )

    hb.raster_calculator(input_tuples_list,
                         op,
                         output_path,
                         datatype,
                         ndv,
                         gtiff_creation_options=gtiff_creation_options)

    output_af = hb.ArrayFrame(output_path)
    return output_af
def raster_calculator_af_flex(
    input_, op, output_path, **kwargs
):
    """Flexible, ArrayFrame-aware wrapper around hb.raster_calculator_hb.

    KWARGS:
    datatype=None,
    ndv=None,
    gtiff_creation_options=None,
    compress=False,
    add_overviews=False

    In HB, a flex input is one of [string that points to a file, an array frame,
    or a suitably formatted list of the above]. Float entries are passed to the
    calculator as 'raw' scalar arguments.

    Returns an hb.ArrayFrame wrapping output_path.
    """
    # Normalize input_ to a list of path strings / raw floats.
    # (Leftover debug print statements from the original were removed.)
    if isinstance(input_, str):
        input_ = [input_]
    elif isinstance(input_, hb.ArrayFrame):
        # BUGFIX: the original assigned input_ = input_.path (a bare string)
        # here, so the code below iterated the path character-by-character.
        input_ = [input_.path]

    if not isinstance(input_, list):
        raise NameError(
            'input_ given to raster_calculator_af_flex() not understood. Give a path or list of paths.'
        )

    input_ = [i.path if isinstance(i, hb.ArrayFrame) else i for i in input_]

    # Output datatype: explicit kwarg wins, else first raster input's datatype
    # (raw floats carry no datatype and are skipped).
    datatype = kwargs.get('datatype', None)
    if datatype is None:
        datatypes = [
            hb.get_datatype_from_uri(i) for i in input_ if type(i) is not float
        ]
        if len(set(datatypes)) > 1:
            L.info(
                'Rasters given to raster_calculator_af_flex() were not all of the same type. Defaulting to using first input datatype.'
            )
        datatype = datatypes[0]

    # Output ndv: explicit kwarg wins, else first raster input's ndv.
    # BUGFIX: use `is None` so an explicit ndv of 0 (or 0.0) is honored.
    ndv = kwargs.get('ndv', None)
    if ndv is None:
        ndvs = [
            hb.get_ndv_from_path(i) for i in input_ if type(i) is not float
        ]
        if len(set(ndvs)) > 1:
            L.info(
                'NDVs used in rasters given to raster_calculator_af_flex() were not all the same. Defaulting to using first value.'
            )
        ndv = ndvs[0]

    gtiff_creation_options = kwargs.get('gtiff_creation_options', None)
    if not gtiff_creation_options:
        gtiff_creation_options = ['TILED=YES',
                                  'BIGTIFF=IF_SAFER']  #, 'COMPRESS=lzw']

    if kwargs.get('compress', None):
        gtiff_creation_options.append('COMPRESS=deflate')

    # Build tuples in raster_calculator format: (path, band) for rasters,
    # (value, 'raw') for scalar floats.
    # BUGFIX: the original raised AttributeError when the FIRST entry was a
    # float (it tried input_[0].path before the float-to-'raw' pass); floats
    # are now handled uniformly regardless of position.
    input_tuples_list = [
        (i, 'raw') if type(i) is float else (i, 1) for i in input_
    ]

    hb.raster_calculator_hb(input_tuples_list,
                            op,
                            output_path,
                            datatype,
                            ndv,
                            gtiff_creation_options=gtiff_creation_options)

    if kwargs.get('add_overviews'):
        hb.add_overviews_to_path(output_path)

    output_af = hb.ArrayFrame(output_path)
    return output_af
def resample_to_match(input_path,
                      match_path,
                      output_path,
                      resample_method='bilinear',
                      output_data_type=None,
                      src_ndv=None,
                      ndv=None,
                      compress=True,
                      ensure_fits=False,
                      gtiff_creation_options=hb.DEFAULT_GTIFF_CREATION_OPTIONS,
                      calc_raster_stats=False,
                      add_overviews=False,
                      pixel_size_override=None,
                      verbose=False,
                      ):
    """Warp input_path so its grid (pixel size, bounding box, projection)
    matches match_path, writing the result to output_path.

    Parameters not obvious from their names:
    - src_ndv / ndv: source and destination nodata; default to the values
      stored in input_path / match_path respectively.
    - ensure_fits: pad the target bounding box by one pixel so alignment
      shifts cannot truncate the bottom-right row/col.
    - pixel_size_override: scalar or (x, y) pair overriding match_path's
      cell size.

    NOTE(review): `gtiff_creation_options` is unconditionally replaced below
    based on `compress`, and `verbose` is unused; both parameters are kept
    for interface compatibility.
    """
    if pixel_size_override is None:
        cell_size = hb.get_cell_size_from_uri(match_path)
        target_pixel_size = (cell_size, -cell_size)
    elif not isinstance(pixel_size_override, (tuple, list)):
        target_pixel_size = (pixel_size_override, -pixel_size_override)
    else:
        # BUGFIX: the original left target_pixel_size unassigned (NameError)
        # when a tuple/list override was passed.
        target_pixel_size = tuple(pixel_size_override)

    target_sr_wkt = hb.get_raster_info(match_path)['projection']

    target_bb = hb.get_raster_info_hb(match_path)['bounding_box']

    if output_data_type is None:
        output_data_type = hb.get_datatype_from_uri(match_path)

    if src_ndv is None:
        src_ndv = hb.get_ndv_from_path(input_path)

    if ndv is None:
        dst_ndv = hb.get_ndv_from_path(match_path)
    else:
        # BUGFIX: the original ignored the caller-supplied ndv entirely and
        # substituted 255 / -9999.0 based on the datatype; honor it instead.
        dst_ndv = ndv

    if ensure_fits:
        # This addition to the core geoprocessing code was to fix the case where the alignment moved the target tif
        # up and to the left, but in a way that then trunkated 1 row/col on the bottom right, causing wrong-shape
        # raster_math errors.
        target_bb[2] += target_pixel_size[0]
        target_bb[3] += target_pixel_size[1]

    # `compress` always determines the creation options (see NOTE above).
    if compress is True:
        gtiff_creation_options = (
            'TILED=YES',
            'BIGTIFF=YES',
            'COMPRESS=DEFLATE',
            'BLOCKXSIZE=256',
            'BLOCKYSIZE=256',
        )
    else:
        gtiff_creation_options = (
            'TILED=YES',
            'BIGTIFF=YES',
            'BLOCKXSIZE=256',
            'BLOCKYSIZE=256',
        )

    hb.warp_raster_hb(input_path, target_pixel_size, output_path,
                      resample_method, target_bb=target_bb, base_sr_wkt=None, target_sr_wkt=target_sr_wkt,
                      gtiff_creation_options=gtiff_creation_options,
                      n_threads=None, vector_mask_options=None,
                      output_data_type=output_data_type,
                      src_ndv=src_ndv,
                      dst_ndv=dst_ndv,
                      calc_raster_stats=calc_raster_stats,
                      add_overviews=add_overviews,
    )