# Example #1
def main(image_ws, ini_path, bs=2048, stats_flag=False, overwrite_flag=False):
    """Prep a Landsat scene for METRIC

    Parameters
    ----------
    image_ws : str
        Landsat scene folder that will be prepped.
    ini_path : str
        File path of the input parameters file.
    bs : int, optional
        Processing block size (the default is 2048).
    stats_flag : bool, optional
        If True, compute raster statistics (the default is False).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    bool
        True if successful, False otherwise.

    """

    # Open config file
    config = dripy.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    calc_refl_toa_flag = dripy.read_param(
        'calc_refl_toa_flag', True, config, 'INPUTS')
    calc_refl_toa_qa_flag = dripy.read_param(
        'calc_refl_toa_qa_flag', True, config, 'INPUTS')
    # calc_refl_sur_ledaps_flag = dripy.read_param(
    #     'calc_refl_sur_ledaps_flag', False, config, 'INPUTS')
    # calc_refl_sur_qa_flag = dripy.read_param(
    #     'calc_refl_sur_qa_flag', False, config, 'INPUTS')
    calc_ts_bt_flag = dripy.read_param(
        'calc_ts_bt_flag', True, config, 'INPUTS')

    # Use QA band to set common area
    # Fmask cloud, shadow, & snow pixels will be removed from common area
    calc_fmask_common_flag = dripy.read_param(
        'calc_fmask_common_flag', True, config, 'INPUTS')
    fmask_smooth_flag = dripy.read_param(
        'fmask_smooth_flag', False, config, 'INPUTS')
    fmask_buffer_flag = dripy.read_param(
        'fmask_buffer_flag', False, config, 'INPUTS')
    fmask_erode_flag = dripy.read_param(
        'fmask_erode_flag', False, config, 'INPUTS')
    if fmask_smooth_flag:
        fmask_smooth_cells = int(dripy.read_param(
            'fmask_smooth_cells', 1, config, 'INPUTS'))
        if fmask_smooth_cells == 0 and fmask_smooth_flag:
            fmask_smooth_flag = False
    if fmask_erode_flag:
        fmask_erode_cells = int(dripy.read_param(
            'fmask_erode_cells', 1, config, 'INPUTS'))
        if fmask_erode_cells == 0 and fmask_erode_flag:
            fmask_erode_flag = False
    if fmask_buffer_flag:
        fmask_buffer_cells = int(dripy.read_param(
            'fmask_buffer_cells', 1, config, 'INPUTS'))
        if fmask_buffer_cells == 0 and fmask_buffer_flag:
            fmask_buffer_flag = False

    # Remove edge (fringe) cells
    edge_smooth_flag = dripy.read_param(
        'edge_smooth_flag', True, config, 'INPUTS')

    # Include hand made cloud masks
    cloud_mask_flag = dripy.read_param(
        'cloud_mask_flag', False, config, 'INPUTS')
    cloud_mask_ws = ""
    if cloud_mask_flag:
        cloud_mask_ws = config.get('INPUTS', 'cloud_mask_ws')

    # Extract separate Fmask rasters
    calc_fmask_flag = dripy.read_param(
        'calc_fmask_flag', True, config, 'INPUTS')
    calc_fmask_cloud_flag = dripy.read_param(
        'calc_fmask_cloud_flag', True, config, 'INPUTS')
    calc_fmask_snow_flag = dripy.read_param(
        'calc_fmask_snow_flag', True, config, 'INPUTS')
    calc_fmask_water_flag = dripy.read_param(
        'calc_fmask_water_flag', True, config, 'INPUTS')

    # Keep Landsat DN, LEDAPS, and Fmask rasters
    keep_dn_flag = dripy.read_param(
        'keep_dn_flag', True, config, 'INPUTS')
    # keep_sr_flag = dripy.read_param(
    #     'keep_sr_flag', True, config, 'INPUTS')

    # For this to work I would need to pass in the metric input file
    # calc_elev_flag = dripy.read_param(
    #     'calc_elev_flag', False, config, 'INPUTS')
    # calc_landuse_flag = dripy.read_param(
    #     'calc_landuse_flag', False, config, 'INPUTS')

    # calc_acca_cloud_flag = dripy.read_param(
    #     'calc_acca_cloud_flag', True, config, 'INPUTS')
    # calc_acca_snow_flag = dripy.read_param(
    #     'calc_acca_snow_flag', True, config, 'INPUTS')
    # calc_ledaps_dem_land_flag = dripy.read_param(
    #     'calc_ledaps_dem_land_flag', False, config, 'INPUTS')
    # calc_ledaps_veg_flag = dripy.read_param(
    #     'calc_ledaps_veg_flag', False, config, 'INPUTS')
    # calc_ledaps_snow_flag = dripy.read_param(
    #     'calc_ledaps_snow_flag', False, config, 'INPUTS')
    # calc_ledaps_land_flag = dripy.read_param(
    #     'calc_ledaps_land_flag', False, config, 'INPUTS')
    # calc_ledaps_cloud_flag = dripy.read_param(
    #     'calc_ledaps_cloud_flag', False, config, 'INPUTS')

    # Interpolate/clip/project hourly rasters for each Landsat scene
    # calc_metric_flag = dripy.read_param(
    #     'calc_metric_flag', False, config, 'INPUTS')
    calc_metric_ea_flag = dripy.read_param(
        'calc_metric_ea_flag', False, config, 'INPUTS')
    calc_metric_wind_flag = dripy.read_param(
        'calc_metric_wind_flag', False, config, 'INPUTS')
    calc_metric_etr_flag = dripy.read_param(
        'calc_metric_etr_flag', False, config, 'INPUTS')
    calc_metric_tair_flag = dripy.read_param(
        'calc_metric_tair_flag', False, config, 'INPUTS')

    # Interpolate/clip/project AWC and daily ETr/PPT rasters
    # to compute SWB Ke for each Landsat scene
    calc_swb_ke_flag = dripy.read_param(
        'calc_swb_ke_flag', False, config, 'INPUTS')
    if cloud_mask_flag:
        spinup_days = dripy.read_param(
            'swb_spinup_days', 30, config, 'INPUTS')
        min_spinup_days = dripy.read_param(
            'swb_min_spinup_days', 5, config, 'INPUTS')

    # Round ea raster to N digits to save space
    rounding_digits = dripy.read_param(
        'rounding_digits', 3, config, 'INPUTS')

    env = drigo.env
    image = et_image.Image(image_ws, env)
    np.seterr(invalid='ignore', divide='ignore')
    gdal.UseExceptions()

    # Input file paths
    dn_image_dict = et_common.landsat_band_image_dict(
        image.orig_data_ws, image.image_name_re)

    # # Open METRIC config file
    # if config_file:
    #    logging.info(
    #        log_f.format('METRIC INI File:', os.path.basename(config_file)))
    #    config = configparser.ConfigParser()
    #    try:
    #        config.read(config_file)
    #    except:
    #        logging.error('\nERROR: Config file could not be read, ' +
    #                      'is not an input file, or does not exist\n' +
    #                      'ERROR: config_file = {}\n').format(config_file)
    #        sys.exit()
    #    #  Overwrite
    #    overwrite_flag = dripy.read_param('overwrite_flag', True, config)
    #
    #    #  Elevation and landuse parameters/flags from METRIC input file
    #    calc_elev_flag = dripy.read_param('save_dem_raster_flag', True, config)
    #    calc_landuse_flag = dripy.read_param(
    #        'save_landuse_raster_flag', True, config)
    #    if calc_elev_flag:
    #        elev_pr_path = config.get('INPUTS','dem_raster')
    #    if calc_landuse_flag:
    #        landuse_pr_path = config.get('INPUTS', 'landuse_raster')
    # else:
    #    overwrite_flag = False
    #    calc_elev_flag = False
    #    calc_landuse_flag = False
    #
    # Elev raster must exist
    # if calc_elev_flag and not os.path.isfile(elev_pr_path):
    #    logging.error('\nERROR: Elevation raster {} does not exist\n'.format(
    #        elev_pr_path))
    #    return False
    # Landuse raster must exist
    # if calc_landuse_flag and not os.path.isfile(landuse_pr_path):
    #    logging.error('\nERROR: Landuse raster {} does not exist\n'.format(
    #        landuse_pr_path))
    #    return False

    # Removing ancillary files before checking for inputs
    if os.path.isdir(os.path.join(image.orig_data_ws, 'gap_mask')):
        shutil.rmtree(os.path.join(image.orig_data_ws, 'gap_mask'))
    for item in os.listdir(image.orig_data_ws):
        if (image.type == 'Landsat7' and
            (item.endswith('_B8.TIF') or
             item.endswith('_B6_VCID_2.TIF'))):
            os.remove(os.path.join(image.orig_data_ws, item))
        elif (image.type == 'Landsat8' and
              (item.endswith('_B1.TIF') or
               item.endswith('_B8.TIF') or
               item.endswith('_B9.TIF') or
               item.endswith('_B11.TIF'))):
            os.remove(os.path.join(image.orig_data_ws, item))
        elif (item.endswith('_VER.jpg') or
              item.endswith('_VER.txt') or
              item.endswith('_GCP.txt') or
              item == 'README.GTF'):
            os.remove(os.path.join(image.orig_data_ws, item))

    # Check correction level (image must be L1T to process)
    if image.correction != 'L1TP':
        logging.debug('  Image is not L1TP corrected, skipping')
        return False
        # calc_fmask_common_flag = False
        # calc_refl_toa_flag = False
        # calc_ts_bt_flag = False
        # calc_metric_ea_flag = False
        # calc_metric_wind_flag = False
        # calc_metric_etr_flag = False
        # overwrite_flag = False

    # QA band must exist
    if calc_fmask_common_flag and image.qa_band not in dn_image_dict.keys():
        logging.warning(
             '\nQA band does not exist but calc_fmask_common_flag=True'
             '\n  Setting calc_fmask_common_flag=False\n  {}'.format(
                 image.qa_band))
        calc_fmask_common_flag = False
    if cloud_mask_flag and not os.path.isdir(cloud_mask_ws):
        logging.warning(
             '\ncloud_mask_ws is not a directory but cloud_mask_flag=True.'
             '\n  Setting cloud_mask_flag=False\n   {}'.format(cloud_mask_ws))
        cloud_mask_flag = False

    # Check for Landsat TOA images
    if (calc_refl_toa_flag and
        (set(list(image.band_toa_dict.keys()) +
                 [image.thermal_band, image.qa_band]) !=
            set(dn_image_dict.keys()))):
        logging.warning(
            '\nMissing Landsat images but calc_refl_toa_flag=True'
            '\n  Setting calc_refl_toa_flag=False')
        calc_refl_toa_flag = False

    # Check for Landsat brightness temperature image
    if calc_ts_bt_flag and image.thermal_band not in dn_image_dict.keys():
        logging.warning(
            '\nThermal band image does not exist but calc_ts_bt_flag=True'
            '\n  Setting calc_ts_bt_flag=False')
        calc_ts_bt_flag = False
        # DEADBEEF - Should the function return False if Ts doesn't exist?
        # return False

    # Check for METRIC hourly/daily input folders
    if calc_metric_ea_flag:
        metric_ea_input_ws = config.get('INPUTS', 'metric_ea_input_folder')
        if not os.path.isdir(metric_ea_input_ws):
            logging.warning(
                 '\nHourly Ea folder does not exist but calc_metric_ea_flag=True'
                 '\n  Setting calc_metric_ea_flag=False\n  {}'.format(
                     metric_ea_input_ws))
            calc_metric_ea_flag = False
    if calc_metric_wind_flag:
        metric_wind_input_ws = config.get('INPUTS', 'metric_wind_input_folder')
        if not os.path.isdir(metric_wind_input_ws):
            logging.warning(
                 '\nHourly wind folder does not exist but calc_metric_wind_flag=True'
                 '\n  Setting calc_metric_wind_flag=False\n  {}'.format(
                     metric_wind_input_ws))
            calc_metric_wind_flag = False
    if calc_metric_etr_flag:
        metric_etr_input_ws = config.get('INPUTS', 'metric_etr_input_folder')
        if not os.path.isdir(metric_etr_input_ws):
            logging.warning(
                 '\nHourly ETr folder does not exist but calc_metric_etr_flag=True'
                 '\n  Setting calc_metric_etr_flag=False\n  {}'.format(
                     metric_etr_input_ws))
            calc_metric_etr_flag = False
    if calc_metric_tair_flag:
        metric_tair_input_ws = config.get('INPUTS', 'metric_tair_input_folder')
        if not os.path.isdir(metric_tair_input_ws):
            logging.warning(
                 '\nHourly Tair folder does not exist but calc_metric_tair_flag=True'
                 '\n  Setting calc_metric_tair_flag=False\n  {}'.format(
                     metric_tair_input_ws))
            calc_metric_tair_flag = False
    if (calc_metric_ea_flag or calc_metric_wind_flag or
            calc_metric_etr_flag or calc_metric_tair_flag):
        metric_hourly_re = re.compile(config.get('INPUTS', 'metric_hourly_re'))
        metric_daily_re = re.compile(config.get('INPUTS', 'metric_daily_re'))

    if calc_swb_ke_flag:
        awc_input_path = config.get('INPUTS', 'awc_input_path')
        etr_input_ws = config.get('INPUTS', 'etr_input_folder')
        ppt_input_ws = config.get('INPUTS', 'ppt_input_folder')
        etr_input_re = re.compile(config.get('INPUTS', 'etr_input_re'))
        ppt_input_re = re.compile(config.get('INPUTS', 'ppt_input_re'))
        if not os.path.isfile(awc_input_path):
            logging.warning(
                 '\nAWC raster does not exist but calc_swb_ke_flag=True'
                 '\n  Setting calc_swb_ke_flag=False\n  {}'.format(
                     awc_input_path))
            calc_swb_ke_flag = False
        if not os.path.isdir(etr_input_ws):
            logging.warning(
                 '\nDaily ETr folder does not exist but calc_swb_ke_flag=True'
                 '\n  Setting calc_swb_ke_flag=False\n  {}'.format(
                     etr_input_ws))
            calc_swb_ke_flag = False
        if not os.path.isdir(ppt_input_ws):
            logging.warning(
                 '\nDaily PPT folder does not exist but calc_swb_ke_flag=True'
                 '\n  Setting calc_swb_ke_flag=False\n  {}'.format(
                     ppt_input_ws))
            calc_swb_ke_flag = False

    # Build folders for support rasters
    if ((calc_fmask_common_flag or calc_refl_toa_flag or
         # calc_refl_sur_ledaps_flag or
         calc_ts_bt_flag or
         calc_metric_ea_flag or calc_metric_wind_flag or
         calc_metric_etr_flag or calc_metric_tair_flag or
         calc_swb_ke_flag) and
        not os.path.isdir(image.support_ws)):
        os.makedirs(image.support_ws)
    if calc_refl_toa_flag and not os.path.isdir(image.refl_toa_ws):
        os.makedirs(image.refl_toa_ws)
    # if calc_refl_sur_ledaps_flag and not os.path.isdir(image.refl_sur_ws):
    #     os.makedirs(image.refl_sur_ws)

    # DEADBEEF - This is being further down just for the Fmask images
    # # Apply overwrite flag
    # if overwrite_flag:
    #     overwrite_list = [
    #         image.fmask_cloud_raster, image.fmask_snow_raster,
    #         image.fmask_water_raster
    #         # image.elev_raster, image.landuse_raster
    #         # image.common_area_raster
    #     ]
    #     for overwrite_path in overwrite_list:
    #         try:
    #             dripy.remove_file(image.fmask_cloud_raster)
    #         except:
    #             pass

    # Use QA band to build common area rasters
    logging.info('\nCommon Area Raster')
    qa_ds = gdal.Open(dn_image_dict[image.qa_band], 0)
    common_geo = drigo.raster_ds_geo(qa_ds)
    common_extent = drigo.raster_ds_extent(qa_ds)
    common_proj = drigo.raster_ds_proj(qa_ds)
    common_osr = drigo.raster_ds_osr(qa_ds)
    # Initialize common_area as all non-fill QA values
    qa_array = drigo.raster_ds_to_array(qa_ds, return_nodata=False)
    common_array = qa_array != 1
    common_rows, common_cols = common_array.shape
    del qa_ds

    # Erode and dilate to remove fringe on edge
    # Default is to not smooth, but user can force smoothing
    # This needs to be applied before Fmask
    if edge_smooth_flag and image.prefix in ['LT05', 'LE07']:
        struct = ndimage.generate_binary_structure(2, 2).astype(np.uint8)
        if image.prefix == 'LT05':
            cells = 8
        elif image.prefix == 'LE07':
            cells = 2
        else:
            cells = 0
        common_array = ndimage.binary_dilation(
            ndimage.binary_erosion(common_array, struct, cells),
            struct, cells)

    # Try applying user defined cloud masks to common_area
    cloud_mask_path = os.path.join(
        cloud_mask_ws, image.folder_id + '_mask.shp')
    if cloud_mask_flag and os.path.isfile(cloud_mask_path):
        logging.info('  Applying cloud mask shapefile')
        feature_path = os.path.join(
            cloud_mask_ws, (image.folder_id + '_mask.shp'))
        logging.info('    {}'.format(feature_path))
        cloud_mask_memory_ds = drigo.polygon_to_raster_ds(
            feature_path, nodata_value=0, burn_value=1,
            output_osr=common_osr, output_cs=30,
            output_extent=common_extent)
        cloud_array = drigo.raster_ds_to_array(
            cloud_mask_memory_ds, return_nodata=False)
        # DEADBEEF - If user sets a cloud mask,
        #   it is probably better than Fmask
        # Eventually change "if" calc_fmask_common_flag: to "elif"
        common_array[cloud_array == 1] = 0
        del cloud_mask_memory_ds, cloud_array

    # Remove Fmask cloud, shadow, and snow pixels from common_area
    if calc_fmask_common_flag:
        logging.info('  Applying Fmask to common area')
        fmask_array = et_numpy.bqa_fmask_func(qa_array)
        fmask_mask = (fmask_array >= 2) & (fmask_array <= 4)

        if fmask_smooth_flag:
            logging.debug(
                '  Smoothing (dilate/erode/erode/dilate) Fmask clouds, shadows,'
                ' and snow pixels by {} cells'.format(fmask_smooth_cells))
            # ArcGIS smoothing procedure
            fmask_mask = ndimage.binary_dilation(
                fmask_mask, iterations=fmask_smooth_cells,
                structure=ndimage.generate_binary_structure(2, 2))
            fmask_mask = ndimage.binary_erosion(
                fmask_mask, iterations=fmask_smooth_cells,
                structure=ndimage.generate_binary_structure(2, 2))
            fmask_mask = ndimage.binary_erosion(
                fmask_mask, iterations=fmask_smooth_cells,
                structure=ndimage.generate_binary_structure(2, 2))
            fmask_mask = ndimage.binary_dilation(
                fmask_mask, iterations=fmask_smooth_cells,
                structure=ndimage.generate_binary_structure(2, 2))

        if fmask_erode_flag:
            logging.debug(
                '  Eroding Fmask clouds, shadows, and snow pixels by '
                '{} cells'.format(fmask_erode_cells))
            fmask_mask = ndimage.binary_erosion(
                fmask_mask, iterations=fmask_erode_cells,
                structure=ndimage.generate_binary_structure(2, 2))

        if fmask_buffer_flag:
            logging.debug(
                '  Dilating (buffering) Fmask clouds, shadows, and snow pixels '
                'by {} cells'.format(fmask_buffer_cells))
            fmask_mask = ndimage.binary_dilation(
                fmask_mask, iterations=fmask_buffer_cells,
                structure=ndimage.generate_binary_structure(2, 2))

        # Reset common_array for buffered cells
        common_array[fmask_mask] = 0

        del fmask_array, fmask_mask

    # Check that there are some cloud free pixels
    if not np.any(common_array):
        logging.error(
            '  ERROR: There are no cloud/snow free pixels, returning False')
        return False

    # Always overwrite common area raster
    # if not os.path.isfile(image.common_area_raster):
    drigo.array_to_raster(
        common_array, image.common_area_raster,
        output_geo=common_geo, output_proj=common_proj,
        stats_flag=stats_flag)

    # Print common geo/extent
    logging.debug('  Common geo:      {}'.format(common_geo))
    logging.debug('  Common extent:   {}'.format(common_extent))

    # Extract Fmask components as separate rasters
    if (calc_fmask_flag or calc_fmask_cloud_flag or calc_fmask_snow_flag or
            calc_fmask_water_flag):
        logging.info('\nFmask')
        fmask_array = et_numpy.bqa_fmask_func(qa_array)

        # Remove existing Fmask rasters
        if (calc_fmask_flag and overwrite_flag and
                os.path.isfile(image.fmask_output_raster)):
            logging.debug('  Overwriting: {}'.format(
                image.fmask_output_raster))
            dripy.remove_file(image.fmask_output_raster)
        if (calc_fmask_cloud_flag and overwrite_flag and
                os.path.isfile(image.fmask_cloud_raster)):
            logging.debug('  Overwriting: {}'.format(
                image.fmask_cloud_raster))
            dripy.remove_file(image.fmask_cloud_raster)
        if (calc_fmask_snow_flag and overwrite_flag and
                os.path.isfile(image.fmask_snow_raster)):
            logging.debug('  Overwriting: {}'.format(
                image.fmask_snow_raster))
            dripy.remove_file(image.fmask_snow_raster)
        if (calc_fmask_water_flag and overwrite_flag and
                os.path.isfile(image.fmask_water_raster)):
            logging.debug('  Overwriting: {}'.format(
                image.fmask_water_raster))
            dripy.remove_file(image.fmask_water_raster)

        # Save Fmask data as separate rasters
        if (calc_fmask_flag and not os.path.isfile(image.fmask_output_raster)):
            logging.debug('  Saving Fmask raster')
            drigo.array_to_raster(
                fmask_array.astype(np.uint8), image.fmask_output_raster,
                output_geo=common_geo, output_proj=common_proj,
                mask_array=None, output_nodata=255, stats_flag=stats_flag)
        if (calc_fmask_cloud_flag and
                not os.path.isfile(image.fmask_cloud_raster)):
            logging.debug('  Saving Fmask cloud raster')
            fmask_cloud_array = (fmask_array == 2) | (fmask_array == 4)
            drigo.array_to_raster(
                fmask_cloud_array.astype(np.uint8), image.fmask_cloud_raster,
                output_geo=common_geo, output_proj=common_proj,
                mask_array=None, output_nodata=255, stats_flag=stats_flag)
            del fmask_cloud_array
        if (calc_fmask_snow_flag and
                not os.path.isfile(image.fmask_snow_raster)):
            logging.debug('  Saving Fmask snow raster')
            fmask_snow_array = (fmask_array == 3)
            drigo.array_to_raster(
                fmask_snow_array.astype(np.uint8), image.fmask_snow_raster,
                output_geo=common_geo, output_proj=common_proj,
                mask_array=None, output_nodata=255, stats_flag=stats_flag)
            del fmask_snow_array
        if (calc_fmask_water_flag and
                not os.path.isfile(image.fmask_water_raster)):
            logging.debug('  Saving Fmask water raster')
            fmask_water_array = (fmask_array == 1)
            drigo.array_to_raster(
                fmask_water_array.astype(np.uint8), image.fmask_water_raster,
                output_geo=common_geo, output_proj=common_proj,
                mask_array=None, output_nodata=255, stats_flag=stats_flag)
            del fmask_water_array
        del fmask_array

    # # Calculate elevation
    # if calc_elev_flag and not os.path.isfile(elev_path):
    #     logging.info('Elevation')
    #     elev_array, elev_nodata = drigo.raster_to_array(
    #         elev_pr_path, 1, common_extent)
    #     drigo.array_to_raster(
    #         elev_array, elev_raster,
    #         output_geo=common_geo, output_proj=env.snap_proj,
    #         mask_array=common_array, stats_flag=stats_flag)
    #     del elev_array, elev_nodata, elev_path
    #
    # # Calculate landuse
    # if calc_landuse_flag and not os.path.isfile(landuse_raster):
    #     logging.info('Landuse')
    #     landuse_array, landuse_nodata = drigo.raster_to_array(
    #         landuse_pr_path, 1, common_extent)
    #     drigo.array_to_raster(
    #         landuse_array, landuse_raster,
    #         output_geo=common_geo, output_proj=env.snap_proj,
    #         mask_array=common_array, stats_flag=stats_flag)
    #     del landuse_array, landuse_nodata, landuse_raster

    # Calculate toa reflectance
    # f32_gtype, f32_nodata = numpy_to_gdal_type(np.float32)
    if calc_refl_toa_flag:
        logging.info('Top-of-Atmosphere Reflectance')
        if os.path.isfile(image.refl_toa_raster) and overwrite_flag:
            logging.debug('  Overwriting: {}'.format(
                image.refl_toa_raster))
            dripy.remove_file(image.refl_toa_raster)
        if not os.path.isfile(image.refl_toa_raster):
            # First build empty composite raster
            drigo.build_empty_raster(
                image.refl_toa_raster, image.band_toa_cnt, np.float32, None,
                env.snap_proj, env.cellsize, common_extent)
            # cos_theta_solar_flt = et_common.cos_theta_solar_func(
            #    image.sun_elevation)

            # Process by block
            logging.info('Processing by block')
            logging.debug('  Mask  cols/rows: {}/{}'.format(
                common_cols, common_rows))
            for b_i, b_j in drigo.block_gen(common_rows, common_cols, bs):
                logging.debug('  Block  y: {:5d}  x: {:5d}'.format(b_i, b_j))
                block_data_mask = drigo.array_to_block(
                    common_array, b_i, b_j, bs).astype(np.bool)
                block_rows, block_cols = block_data_mask.shape
                block_geo = drigo.array_offset_geo(common_geo, b_j, b_i)
                block_extent = drigo.geo_extent(
                    block_geo, block_rows, block_cols)
                logging.debug('    Block rows: {}  cols: {}'.format(
                    block_rows, block_cols))
                logging.debug('    Block extent: {}'.format(block_extent))
                logging.debug('    Block geo: {}'.format(block_geo))

                # Process each TOA band
                # for band, band_i in sorted(image.band_toa_dict.items()):
                for band, dn_image in sorted(dn_image_dict.items()):
                    if band not in image.band_toa_dict.keys():
                        continue
                    # thermal_band_flag = (band == image.thermal_band)
                    # Set 0 as nodata value
                    drigo.raster_path_set_nodata(dn_image, 0)
                    # Calculate TOA reflectance
                    dn_array, dn_nodata = drigo.raster_to_array(
                        dn_image, 1, block_extent)
                    dn_array = dn_array.astype(np.float64)
                    # dn_array = dn_array.astype(np.float32)
                    dn_array[dn_array == 0] = np.nan
                    #
                    if image.type in ['Landsat4', 'Landsat5', 'Landsat7']:
                        refl_toa_array = et_numpy.l457_refl_toa_band_func(
                            dn_array, image.cos_theta_solar,
                            image.dr, image.esun_dict[band],
                            image.lmin_dict[band], image.lmax_dict[band],
                            image.qcalmin_dict[band], image.qcalmax_dict[band])
                    elif image.type in ['Landsat8']:
                        refl_toa_array = et_numpy.l8_refl_toa_band_func(
                            dn_array, image.cos_theta_solar,
                            image.refl_mult_dict[band],
                            image.refl_add_dict[band])
                    # if (image.type in ['Landsat4', 'Landsat5', 'Landsat7'] and
                    #     not thermal_band_flag):
                    #     refl_toa_array = et_numpy.l457_refl_toa_band_func(
                    #         dn_array, image.cos_theta_solar,
                    #         image.dr, image.esun_dict[band],
                    #         image.lmin_dict[band], image.lmax_dict[band],
                    #         image.qcalmin_dict[band],
                    #         image.qcalmax_dict[band])
                    #         # image.rad_mult_dict[band],
                    #         # image.rad_add_dict[band])
                    # elif (image.type in ['Landsat8'] and
                    #       not thermal_band_flag):
                    #     refl_toa_array = et_numpy.l8_refl_toa_band_func(
                    #         dn_array, image.cos_theta_solar,
                    #         image.refl_mult_dict[band],
                    #         image.refl_add_dict[band])
                    # elif (image.type in ['Landsat4', 'Landsat5', 'Landsat7'] and
                    #       thermal_band_flag):
                    #     refl_toa_array = et_numpy.l457_ts_bt_band_func(
                    #         dn_array,
                    #         image.lmin_dict[band], image.lmax_dict[band],
                    #         image.qcalmin_dict[band],
                    #         image.qcalmax_dict[band],
                    #         # image.rad_mult_dict[band],
                    #         # image.rad_add_dict[band],
                    #         image.k1_dict[band], image.k2_dict[band])
                    # elif (image.type in ['Landsat8'] and
                    #       thermal_band_flag):
                    #     refl_toa_array = et_numpy.l8_ts_bt_band_func(
                    #         dn_array,
                    #         image.rad_mult_dict[band],
                    #         image.rad_add_dict[band],
                    #         image.k1_dict[band], image.k2_dict[band])

                    # refl_toa_array = et_numpy.refl_toa_band_func(
                    #     dn_array, cos_theta_solar_flt,
                    #     image.dr, image.esun_dict[band],
                    #     image.lmin_dict[band], image.lmax_dict[band],
                    #     image.qcalmin_dict[band], image.qcalmax_dict[band],
                    #     thermal_band_flag)
                    drigo.block_to_raster(
                        refl_toa_array.astype(np.float32),
                        image.refl_toa_raster,
                        b_i, b_j, band=image.band_toa_dict[band])
                    # drigo.array_to_comp_raster(
                    #    refl_toa_array.astype(np.float32),
                    #    image.refl_toa_raster,
                    #    image.band_toa_dict[band], common_array)
                    del refl_toa_array, dn_array
            if stats_flag:
                drigo.raster_statistics(image.refl_toa_raster)

        # # Process each TOA band
        # # for band, band_i in sorted(image.band_toa_dict.items()):
        # for band, dn_image in sorted(dn_image_dict.items()):
        #     thermal_band_flag = (band == image.thermal_band)
        #     #  Set 0 as nodata value
        #     drigo.raster_path_set_nodata(dn_image, 0)
        #     #  Calculate TOA reflectance
        #     dn_array, dn_nodata = drigo.raster_to_array(
        #         dn_image, 1, common_extent)
        #     dn_array = dn_array.astype(np.float64)
        #     # dn_array = dn_array.astype(np.float32)
        #     dn_array[dn_array == 0] = np.nan
        #     #
        #     if (image.type in ['Landsat4', 'Landsat5', 'Landsat7'] and
        #         not thermal_band_flag):
        #         refl_toa_array = et_numpy.l457_refl_toa_band_func(
        #             dn_array, image.cos_theta_solar,
        #             image.dr, image.esun_dict[band],
        #             image.lmin_dict[band], image.lmax_dict[band],
        #             image.qcalmin_dict[band], image.qcalmax_dict[band])
        #             # image.rad_mult_dict[band], image.rad_add_dict[band])
        #     elif (image.type in ['Landsat4', 'Landsat5', 'Landsat7'] and
        #           thermal_band_flag):
        #         refl_toa_array = et_numpy.l457_ts_bt_band_func(
        #             dn_array, image.lmin_dict[band], image.lmax_dict[band],
        #             image.qcalmin_dict[band], image.qcalmax_dict[band],
        #             # image.rad_mult_dict[band], image.rad_add_dict[band],
        #             image.k1_dict[band], image.k2_dict[band])
        #     elif (image.type in ['Landsat8'] and
        #           not thermal_band_flag):
        #         refl_toa_array = et_numpy.l8_refl_toa_band_func(
        #             dn_array, image.cos_theta_solar,
        #             image.refl_mult_dict[band], image.refl_add_dict[band])
        #     elif (image.type in ['Landsat8'] and
        #           thermal_band_flag):
        #         refl_toa_array = et_numpy.l8_ts_bt_band_func(
        #             dn_array,
        #             image.rad_mult_dict[band], image.rad_add_dict[band],
        #             image.k1_dict[band], image.k2_dict[band])
        #     # refl_toa_array = et_numpy.refl_toa_band_func(
        #     #     dn_array, cos_theta_solar_flt,
        #     #     image.dr, image.esun_dict[band],
        #     #     image.lmin_dict[band], image.lmax_dict[band],
        #     #     image.qcalmin_dict[band], image.qcalmax_dict[band],
        #     #     thermal_band_flag)
        #     drigo.array_to_comp_raster(
        #         refl_toa_array.astype(np.float32), image.refl_toa_raster,
        #         image.band_toa_dict[band], common_array)
        #     del refl_toa_array, dn_array


    # Calculate brightness temperature (Ts BT) from the thermal band DN raster
    # NOTE(review): relies on dn_image_dict, common_extent, common_geo, and
    # the drigo/dripy/et_numpy helpers defined earlier in this function,
    # outside this section.
    if calc_ts_bt_flag:
        logging.info('Brightness Temperature')
        # Remove the existing output only when overwriting was requested
        if os.path.isfile(image.ts_bt_raster) and overwrite_flag:
            logging.debug('  Overwriting: {}'.format(image.ts_bt_raster))
            dripy.remove_file(image.ts_bt_raster)
        if not os.path.isfile(image.ts_bt_raster):
            band = image.thermal_band
            thermal_dn_path = dn_image_dict[band]
            # 0 marks nodata in the thermal DN raster
            drigo.raster_path_set_nodata(thermal_dn_path, 0)
            thermal_dn_array, thermal_dn_nodata = drigo.raster_to_array(
                thermal_dn_path, 1, common_extent, return_nodata=True)
            thermal_dn_mask = thermal_dn_array != thermal_dn_nodata
            # Landsat 4/5/7 and Landsat 8 use different calibration coefficients
            if image.type in ['Landsat4', 'Landsat5', 'Landsat7']:
                ts_bt_array = et_numpy.l457_ts_bt_band_func(
                    thermal_dn_array,
                    image.lmin_dict[band], image.lmax_dict[band],
                    image.qcalmin_dict[band], image.qcalmax_dict[band],
                    # image.rad_mult_dict[band], image.rad_add_dict[band],
                    image.k1_dict[band], image.k2_dict[band])
            elif image.type in ['Landsat8']:
                ts_bt_array = et_numpy.l8_ts_bt_band_func(
                    thermal_dn_array,
                    image.rad_mult_dict[band], image.rad_add_dict[band],
                    image.k1_dict[band], image.k2_dict[band])
            # thermal_rad_array = et_numpy.refl_toa_band_func(
            #     thermal_dn_array, image.cos_theta_solar,
            #     image.dr, image.esun_dict[band],
            #     image.lmin_dict[band], image.lmax_dict[band],
            #     image.qcalmin_dict[band], image.qcalmax_dict[band],
            #     thermal_band_flag=True)
            # ts_bt_array = et_numpy.ts_bt_func(
            #     thermal_rad_array, image.k1_dict[image.thermal_band],
            #     image.k2_dict[image.thermal_band])
            # Re-apply the input nodata mask to the computed array
            ts_bt_array[~thermal_dn_mask] = np.nan
            drigo.array_to_raster(
                ts_bt_array, image.ts_bt_raster,
                output_geo=common_geo, output_proj=env.snap_proj,
                # mask_array=common_array,
                stats_flag=stats_flag)
            # del thermal_dn_array, thermal_rad_array
            del thermal_dn_path, thermal_dn_array, ts_bt_array


    # Interpolate/project/clip METRIC hourly/daily rasters
    if (calc_metric_ea_flag or
            calc_metric_wind_flag or
            calc_metric_etr_flag):
        logging.info('METRIC hourly/daily rasters')

        # Get bracketing hours from image acquisition time
        # replace() truncates to the top of the hour (floor), so
        # image_prev_dt <= acq_datetime < image_next_dt
        image_prev_dt = image.acq_datetime.replace(
            minute=0, second=0, microsecond=0)
        image_next_dt = image_prev_dt + timedelta(seconds=3600)

        # Get NLDAS properties from one of the images
        # Input files live in per-year subfolders of the workspace
        input_ws = os.path.join(
            metric_etr_input_ws, str(image_prev_dt.year))
        try:
            # Pick the first file whose YYYYMMDD group matches the image date
            input_path = [
                os.path.join(input_ws, file_name)
                for file_name in os.listdir(input_ws)
                for match in [metric_hourly_re.match(file_name)]
                if (match and
                    (image_prev_dt.strftime('%Y%m%d') ==
                     match.group('YYYYMMDD')))][0]
        except IndexError:
            # Empty list means no matching hourly file was found
            logging.error('  No hourly file for {}'.format(
                image_prev_dt.strftime('%Y-%m-%d %H00')))
            return False
        try:
            input_ds = gdal.Open(input_path)
            input_osr = drigo.raster_ds_osr(input_ds)
            # input_proj = drigo.osr_proj(input_osr)
            input_extent = drigo.raster_ds_extent(input_ds)
            input_cs = drigo.raster_ds_cellsize(input_ds, x_only=True)
            # input_geo = input_extent.geo(input_cs)
            input_x, input_y = input_extent.origin()
            input_ds = None
        # NOTE(review): bare except hides the actual GDAL error; consider
        # narrowing to the specific exception(s) and logging them
        except:
            logging.error('  Could not get default input image properties')
            logging.error('    {}'.format(input_path))
            return False

        # Project Landsat scene extent to NLDAS GCS
        common_gcs_osr = common_osr.CloneGeogCS()
        common_gcs_extent = drigo.project_extent(
            common_extent, common_osr, common_gcs_osr,
            cellsize=env.cellsize)
        # Buffer then snap outward so the clipped NLDAS window fully covers
        # the projected scene extent
        common_gcs_extent.buffer_extent(0.1)
        common_gcs_extent.adjust_to_snap(
            'EXPAND', input_x, input_y, input_cs)
        # common_gcs_geo = common_gcs_extent.geo(input_cs)

        def metric_weather_func(output_raster, input_ws, input_re,
                                prev_dt, next_dt,
                                resample_method=gdal.GRA_NearestNeighbour,
                                rounding_flag=False):
            """Interpolate/project/clip METRIC hourly rasters

            Closure over the enclosing scope: reads overwrite_flag,
            common_gcs_extent, input_osr, input_cs, common_osr, common_extent,
            common_array, common_geo, common_proj, stats_flag, env, image,
            rounding_digits, and hourly_interpolate_func.

            Parameters
            ----------
            output_raster : str
                File path of the raster to write.
            input_ws : str
                Workspace containing per-year folders of hourly rasters.
            input_re :
                Compiled regex with a 'YYYYMMDD' named group used to
                match input file names.
            prev_dt, next_dt : datetime
                Hours bracketing the image acquisition time.
            resample_method : int, optional
                GDAL resampling constant
                (the default is gdal.GRA_NearestNeighbour).
            rounding_flag : bool, optional
                If True, round the output to rounding_digits digits
                (the default is False).

            Returns
            -------
            True on success, False on skip or error, None when the input
            arrays are entirely nodata.

            """
            logging.debug('    Output: {}'.format(output_raster))
            if os.path.isfile(output_raster):
                if overwrite_flag:
                    logging.debug('    Overwriting output')
                    dripy.remove_file(output_raster)
                else:
                    logging.debug('    Skipping, file already exists ' +
                                  'and overwrite is False')
                    return False
            prev_ws = os.path.join(input_ws, str(prev_dt.year))
            next_ws = os.path.join(input_ws, str(next_dt.year))

            # Technically previous and next could come from different days
            # or even years, although this won't happen in the U.S.
            try:
                # First file in the previous-hour year folder whose date matches
                prev_path = [
                    os.path.join(prev_ws, input_name)
                    for input_name in os.listdir(prev_ws)
                    for input_match in [input_re.match(input_name)]
                    if (input_match and
                        (prev_dt.strftime('%Y%m%d') ==
                         input_match.group('YYYYMMDD')))][0]
                logging.debug('    Input prev: {}'.format(prev_path))
            except IndexError:
                logging.error('  No previous hourly file')
                logging.error('    {}'.format(prev_dt))
                return False
            try:
                next_path = [
                    os.path.join(next_ws, input_name)
                    for input_name in os.listdir(next_ws)
                    for input_match in [input_re.match(input_name)]
                    if (input_match and
                        (next_dt.strftime('%Y%m%d') ==
                         input_match.group('YYYYMMDD')))][0]
                logging.debug('    Input next: {}'.format(next_path))
            except IndexError:
                logging.error('  No next hourly file')
                logging.error('    {}'.format(next_dt))
                return False

            # Band numbers are 1's based
            # (hour H of the day maps to raster band H + 1)
            prev_band = int(prev_dt.strftime('%H')) + 1
            next_band = int(next_dt.strftime('%H')) + 1
            logging.debug('    Input prev band: {}'.format(prev_band))
            logging.debug('    Input next band: {}'.format(next_band))

            # Read arrays clipped to the projected (GCS) scene extent
            prev_array = drigo.raster_to_array(
                prev_path, band=prev_band, mask_extent=common_gcs_extent,
                return_nodata=False)
            next_array = drigo.raster_to_array(
                next_path, band=next_band, mask_extent=common_gcs_extent,
                return_nodata=False)
            if not np.any(prev_array) or not np.any(next_array):
                logging.warning('\nWARNING: Input NLDAS array is all nodata\n')
                return None

            # Interpolate in time to the acquisition time, then reproject
            # from the input OSR/extent to the scene OSR/extent
            output_array = hourly_interpolate_func(
                prev_array, next_array,
                prev_dt, next_dt, image.acq_datetime)
            output_array = drigo.project_array(
                output_array, resample_method,
                input_osr, input_cs, common_gcs_extent,
                common_osr, env.cellsize, common_extent, output_nodata=None)

            # Apply common area mask
            output_array[~common_array] = np.nan

            # Reduce the file size by rounding to the nearest n digits
            if rounding_flag:
                output_array = np.around(output_array, rounding_digits)
            # Force output to 32-bit float
            drigo.array_to_raster(
                output_array.astype(np.float32), output_raster,
                output_geo=common_geo, output_proj=common_proj,
                stats_flag=stats_flag)
            del output_array
            return True

        # Ea - Project to Landsat scene after clipping
        # Ea is the only variable resampled bilinearly and rounded
        if calc_metric_ea_flag:
            logging.info('  Hourly vapor pressure (Ea)')
            metric_weather_func(
                image.metric_ea_raster, metric_ea_input_ws,
                metric_hourly_re, image_prev_dt, image_next_dt,
                gdal.GRA_Bilinear, rounding_flag=True)

        # Wind - Project to Landsat scene after clipping
        if calc_metric_wind_flag:
            logging.info('  Hourly windspeed')
            metric_weather_func(
                image.metric_wind_raster, metric_wind_input_ws,
                metric_hourly_re, image_prev_dt, image_next_dt,
                gdal.GRA_NearestNeighbour, rounding_flag=False)

        # ETr - Project to Landsat scene after clipping
        if calc_metric_etr_flag:
            logging.info('  Hourly reference ET (ETr)')
            metric_weather_func(
                image.metric_etr_raster, metric_etr_input_ws,
                metric_hourly_re, image_prev_dt, image_next_dt,
                gdal.GRA_NearestNeighbour, rounding_flag=False)

        # ETr 24hr - Project to Landsat scene after clipping
        # Handled inline (not via metric_weather_func) because the daily
        # raster is single-band and needs no time interpolation; shares
        # calc_metric_etr_flag with the hourly ETr above
        if calc_metric_etr_flag:
            logging.info('  Daily reference ET (ETr)')
            logging.debug('    Output: {}'.format(
                image.metric_etr_24hr_raster))
            if (os.path.isfile(image.metric_etr_24hr_raster) and
                    overwrite_flag):
                logging.debug('    Overwriting output')
                # NOTE(review): uses os.remove here but dripy.remove_file
                # elsewhere in this function -- confirm which is intended
                os.remove(image.metric_etr_24hr_raster)
            if not os.path.isfile(image.metric_etr_24hr_raster):
                etr_prev_ws = os.path.join(
                    metric_etr_input_ws, str(image_prev_dt.year))
                try:
                    # First daily file whose YYYYMMDD group matches the date
                    input_path = [
                        os.path.join(etr_prev_ws, file_name)
                        for file_name in os.listdir(etr_prev_ws)
                        for match in [metric_daily_re.match(file_name)]
                        if (match and
                            (image_prev_dt.strftime('%Y%m%d') ==
                             match.group('YYYYMMDD')))][0]
                    logging.debug('    Input: {}'.format(input_path))
                except IndexError:
                    logging.error('  No daily file for {}'.format(
                        image_prev_dt.strftime('%Y-%m-%d')))
                    return False
                output_array = drigo.raster_to_array(
                    input_path, mask_extent=common_gcs_extent,
                    return_nodata=False)
                output_array = drigo.project_array(
                    output_array, gdal.GRA_NearestNeighbour,
                    input_osr, input_cs, common_gcs_extent,
                    common_osr, env.cellsize, common_extent,
                    output_nodata=None)
                # Apply common area mask
                output_array[~common_array] = np.nan
                # Reduce the file size by rounding to the nearest n digits
                # output_array = np.around(output_array, rounding_digits)
                drigo.array_to_raster(
                    output_array, image.metric_etr_24hr_raster,
                    output_geo=common_geo, output_proj=common_proj,
                    stats_flag=stats_flag)
                del output_array
                del input_path

        # Tair - Project to Landsat scene after clipping
        # NOTE(review): calc_metric_tair_flag is not part of the section's
        # guard condition above, so Tair only runs when one of the Ea/wind/
        # ETr flags is also set -- confirm that is intended
        if calc_metric_tair_flag:
            logging.info('  Hourly air temperature (Tair)')
            metric_weather_func(
                image.metric_tair_raster, metric_tair_input_ws,
                metric_hourly_re, image_prev_dt, image_next_dt,
                gdal.GRA_NearestNeighbour, rounding_flag=False)

        # Cleanup
        del image_prev_dt, image_next_dt

    # Soil Water Balance
    if calc_swb_ke_flag:
        logging.info('Daily soil water balance')

        # Check if output file already exists
        logging.debug('  Ke:  {}'.format(image.ke_raster))
        if os.path.isfile(image.ke_raster):
            if overwrite_flag:
                logging.debug('    Overwriting output')
                dripy.remove_file(image.ke_raster)
            else:
                logging.debug('    Skipping, file already '
                              'exists and overwrite is False')
                # NOTE(review): returning False here skips the DN raster
                # cleanup below as well -- confirm that is intended
                return False
        # Run the daily soil water balance up to the acquisition date
        ke_array = et_common.raster_swb_func(
            image.acq_datetime, common_osr, env.cellsize, common_extent,
            awc_input_path, etr_input_ws, etr_input_re,
            ppt_input_ws, ppt_input_re,
            spinup_days=spinup_days, min_spinup_days=min_spinup_days)
        # Apply common area mask
        ke_array[~common_array] = np.nan
        # Reduce the file size by rounding to the nearest 2 digits
        # (in-place via the out= argument)
        np.around(ke_array, 2, out=ke_array)

        # Force output to 32-bit float
        drigo.array_to_raster(
            ke_array.astype(np.float32), image.ke_raster,
            output_geo=common_geo, output_proj=common_proj,
            stats_flag=stats_flag)

    # Remove Landsat TOA rasters
    # NOTE(review): the guard is keep_dn_flag and image_name_re matches files
    # in the original data workspace -- presumably the raw DN inputs; confirm
    # the "TOA" wording in this comment against image.image_name_re
    if not keep_dn_flag:
        for landsat_item in dripy.build_file_list(
                image.orig_data_ws, image.image_name_re):
            os.remove(os.path.join(image.orig_data_ws, landsat_item))
    return True
Пример #2
0
def main(ini_path,
         tile_list=None,
         stats_flag=True,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         new_window_flag=False):
    """Run METRIC Model 1 for all images

    Args:
        ini_path (str): file path of the input parameters file
        tile_list (list): list of Landsat path/row (i.e. [p45r43, p45r33])
            This will override the tile list in the INI file
        stats_flag (bool): if True, compute raster statistics.
            Default is True.
        overwrite_flag (bool): if True, overwrite existing files
        mp_procs (int): number of cores to use
        delay (float): max random delay starting function in seconds
        debug_flag (bool): if True, enable debug level logging
        new_window_flag (bool): if True, open each process in new terminal.
            Microsoft Windows only.

    Returns:
        None
    """
    logging.info('\nRunning METRIC model 1 for all images')

    # Open config file
    config = open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'metric_model1_func')

    # For now build INI file name from template INI names
    ini_name = os.path.basename(config.get('INPUTS', 'metric_ini'))
    ini_name = os.path.splitext(os.path.basename(ini_name))[0]

    # INI file is built as a function of year and tile_name
    ini_fmt = '{}_{}_{}.ini'

    # Calculate pixel regions/rating immediately after running Model 1
    # pixel_regions_flag = True

    # Only allow new terminal windows on Windows
    # Bug fix: compare strings with !=, not "is not" (identity comparison
    # against a literal is unreliable and a SyntaxWarning on modern Python)
    if os.name != 'nt':
        new_window_flag = False

    # Regular expressions (raw strings avoid invalid escape sequences)
    # For now assume tiles are two digit numbers
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    mp_list = []
    for tile_name in sorted(tile_list):
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        # NOTE(review): this only skips when BOTH the folder is missing and
        # the name doesn't match the tile pattern -- confirm "or" wasn't meant
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            continue

        # Check that there are scene folders
        # NOTE(review): "or" accepts any folder entry OR any image-named
        # entry; the prep script uses "and" here -- verify which is intended
        scene_id_list = [
            scene_id for scene_id in sorted(os.listdir(tile_ws))
            if (os.path.isdir(os.path.join(tile_ws, scene_id))
                or image_re.match(scene_id))
        ]
        if not scene_id_list:
            continue
        logging.debug('  {} {}'.format(year, tile_name))

        # Check that there is an input file for the path/row
        ini_path = os.path.join(tile_ws,
                                ini_fmt.format(ini_name, year, tile_name))
        # Bug fix: os.path.join(ini_path) is always truthy; the intent is
        # clearly to check that the INI file exists on disk
        if not os.path.isfile(ini_path):
            logging.warning(
                '    METRIC input file {} does not exist'.format(ini_path))
            continue

        # Setup command line argument
        call_args = [sys.executable, func_path, '-i', ini_path]
        if stats_flag:
            call_args.append('--stats')
        if overwrite_flag:
            call_args.append('--overwrite')
        if debug_flag:
            call_args.append('--debug')

        # Run METRIC Model 1
        for scene_id in scene_id_list:
            logging.debug('  {}'.format(scene_id))
            scene_ws = os.path.join(tile_ws, scene_id)
            if mp_procs > 1:
                # Queue the call; each entry is executed later by call_mp
                mp_list.append([call_args, scene_ws, delay, new_window_flag])
            else:
                subprocess.call(call_args, cwd=scene_ws)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool

    logging.debug('\nScript complete')
Пример #3
0
def main(ini_path,
         tile_list=None,
         blocksize=2048,
         smooth_flag=True,
         stats_flag=True,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         new_window_flag=False):
    """Prep Landsat scenes

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    blocksize : int, optional
        Processing block size (the default is 2048).
    smooth_flag : bool, optional
        If True, dilate/erode image to remove fringe/edge pixels
        (the default is True).
    stats_flag : bool, optional
        If True, compute raster statistics (the default is True).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cores to use (the default is 1).
    delay : float, optional
        Max random delay starting function in seconds (the default is 0).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    new_window_flag : bool, optional
        If True, open each process in new terminal window (the default is False).
        Microsoft Windows only.

    Returns
    -------
    None

    """
    logging.info('\nPreparing Landsat scenes')

    # Open config file
    config = open_ini(ini_path)

    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'prep_scene_func')
    skip_list_path = read_param('skip_list_path', '', config, 'INPUTS')

    # Only allow new terminal windows on Windows
    # Bug fix: compare strings with !=, not "is not" (identity comparison
    # against a literal is unreliable and a SyntaxWarning on modern Python)
    if os.name != 'nt':
        new_window_flag = False

    # Regular expressions (raw strings avoid invalid escape sequences)
    # For now assume path/row are two digit numbers
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\nFolder {} does not exist'.format(project_ws))
        sys.exit()

    # Setup command line argument
    call_args = [sys.executable, func_path, '-i', ini_path]
    if blocksize:
        call_args.extend(['--blocksize', str(blocksize)])
    if stats_flag:
        call_args.append('--stats')
    if overwrite_flag:
        call_args.append('--overwrite')
    if debug_flag:
        call_args.append('--debug')
    if smooth_flag:
        call_args.append('--smooth')

    # Read skip list of image IDs to exclude from processing
    if skip_list_path:
        logging.debug('\nReading scene skip list')
        with open(skip_list_path) as skip_list_f:
            skip_list = skip_list_f.readlines()
            skip_list = [
                image_id.strip() for image_id in skip_list
                if image_re.match(image_id.strip())
            ]
    else:
        logging.debug('\nSkip list not set in INI')
        skip_list = []

    # Process each image
    mp_list = []
    for tile_name in sorted(tile_list):
        logging.debug('\nTile: {}'.format(tile_name))
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        # NOTE(review): this only skips when BOTH the folder is missing and
        # the name doesn't match the tile pattern -- confirm "or" wasn't meant
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            logging.debug('  No image folder, skipping')
            continue

        # Check that there are scene folders
        image_id_list = [
            image_id for image_id in sorted(os.listdir(tile_ws))
            if (os.path.isdir(os.path.join(tile_ws, image_id))
                and image_re.match(image_id) and image_id not in skip_list)
        ]
        if not image_id_list:
            logging.debug('  No available images, skipping')
            continue
        logging.info('  {} {}'.format(year, tile_name))

        # Prep each Landsat scene
        for image_id in image_id_list:
            image_ws = os.path.join(tile_ws, image_id)
            if mp_procs > 1:
                # Queue the call; each entry is executed later by call_mp
                mp_list.append([call_args, image_ws, delay, new_window_flag])
            else:
                logging.debug('  {}'.format(image_id))
                subprocess.call(call_args, cwd=image_ws)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool

    logging.debug('\nScript complete')
Пример #4
0
def main(ini_path, mc_iter_str='', tile_list=None,
         cold_tgt_pct=None, hot_tgt_pct=None, groupsize=64, blocksize=2048,
         multipoint_flag=True, shapefile_flag=True, stats_flag=True,
         overwrite_flag=False, mp_procs=1, delay=0, debug_flag=False,
         new_window_flag=False, no_file_logging=False,
         no_final_plots=None, no_temp_plots=None):
    """Run METRIC Monte Carlo for all Landsat scenes

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    mc_iter_str : str
        MonteCarlo iteration list and/or range.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    cold_tgt_pct : float, optional
        Target percentage of pixels with ETrF greater than cold Kc.
    hot_tgt_pct : float, optional
        Target percentage of pixels with ETrF less than hot Kc.
    groupsize : int, optional
        Script will try to place calibration point randomly into a labeled
        group of clustered values with at least n pixels (the default is 64).
        -1 = In the largest group
         0 = Anywhere in the image (not currently implemented)
         1 >= In any group with a pixel count greater or equal to n
    blocksize : int, optional
        Processing block size (the default is 2048).
    multipoint_flag : bool, optional
        If True, save cal. points to multipoint shapefile (the default is True).
    shapefile_flag : bool, optional
        If True, save calibration points to shapefile (the default False).
    stats_flag : bool, optional
        If True, compute raster statistics (the default is True).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cores to use (the default is 1).
    delay : float, optional
        Max random delay starting function in seconds (the default is 0).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    new_window_flag : bool, optional
        If True, open each process in new terminal window (the default is False).
        Microsoft Windows only.
    no_file_logging : bool
        If True, don't write logging to file (the default is False).
    no_final_plots : bool
        If True, don't save final ETrF histograms (the default is None).
        This will override the flag in the INI file
    no_temp_plots : bool
        If True, don't save temp ETrF histograms (the default is None).
        This will override the flag in the INI file

    Returns
    -------
    None
    """
    logging.info('\nRunning METRIC Monte Carlo')

    # Open config file
    config = dripy.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = dripy.read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'monte_carlo_func')
    keep_list_path = dripy.read_param('keep_list_path', '', config, 'INPUTS')
    # skip_list_path = dripy.read_param('skip_list_path', '', config, 'INPUTS')

    # For now, get mc_iter list from command line, not from project file
    # mc_iter_list = config.get('INPUTS', 'mc_iter_list')
    mc_iter_list = list(dripy.parse_int_set(mc_iter_str))

    # Need something in mc_iter_list to iterate over
    if not mc_iter_list:
        mc_iter_list = [None]

    # For now build INI file name from template INI names
    metric_ini_name = os.path.basename(config.get('INPUTS', 'metric_ini'))
    metric_ini_name = os.path.splitext(os.path.basename(metric_ini_name))[0]
    mc_ini_name = os.path.basename(config.get('INPUTS', 'monte_carlo_ini'))
    mc_ini_name = os.path.splitext(os.path.basename(mc_ini_name))[0]

    # INI file is built as a function of year and tile_name
    metric_ini_fmt = '{}_{}_{}.ini'
    mc_ini_fmt = '{}_{}_{}.ini'

    # Only allow new terminal windows on Windows
    # Bug fix: compare strings with !=, not "is not" (identity comparison
    # against a literal is unreliable and a SyntaxWarning on modern Python)
    if os.name != 'nt':
        new_window_flag = False

    # if len(tile_list) == 1:
    #     devel_flag = True
    # else:
    #     devel_flag = False
    # # devel_flag = True

    # Regular expressions (raw strings avoid invalid escape sequences)
    # For now assume path/row are two digit numbers
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_id_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(?:\w{4})_(\d{3})(\d{3})_'
        r'(\d{4})(\d{2})(\d{2})_(?:\d{8})_(?:\d{2})_(?:\w{2})$')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    # Read keep/skip lists
    if keep_list_path:
        logging.debug('\nReading scene keep list')
        with open(keep_list_path) as keep_list_f:
            image_keep_list = keep_list_f.readlines()
            image_keep_list = [image_id.strip() for image_id in image_keep_list
                               if image_id_re.match(image_id.strip())]
    else:
        logging.debug('\nScene keep list not set in INI')
        image_keep_list = []
    # if skip_list_path:
    #     logging.debug('\nReading scene skip list')
    #     with open(skip_list_path) as skip_list_f:
    #         image_skip_list = skip_list_f.readlines()
    #         image_skip_list = [image_id.strip() for image_id in image_skip_list
    #                      if image_re.match(image_id.strip())]
    # else:
    #     logging.debug('\nScene skip list not set in INI')
    #     image_skip_list = []


    mp_list = []
    for tile_name in sorted(tile_list):
        logging.debug('\nTile: {}'.format(tile_name))
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        # NOTE(review): this only skips when BOTH the folder is missing and
        # the name doesn't match the tile pattern -- confirm "or" wasn't meant
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            logging.debug('  {} {} - invalid tile, skipping'.format(
                year, tile_name))
            continue

        # Check that there are image folders
        # NOTE(review): an empty keep list means NO images are ever selected;
        # confirm "not image_keep_list or image_id in image_keep_list" wasn't
        # the intent
        image_id_list = [
            image_id for image_id in sorted(os.listdir(tile_ws))
            if (image_id_re.match(image_id) and
                os.path.isdir(os.path.join(tile_ws, image_id)) and
                (image_keep_list and image_id in image_keep_list))]
            #     (image_skip_list and image_id not in image_skip_list))]
        if not image_id_list:
            logging.debug('  {} {} - no available images, skipping'.format(
                year, tile_name))
            continue
        else:
            logging.debug('  {} {}'.format(year, tile_name))

        for image_id in image_id_list:
            image_ws = os.path.join(tile_ws, image_id)
            pixel_ws = os.path.join(image_ws, 'PIXELS')
            if not os.path.isdir(pixel_ws):
                os.mkdir(pixel_ws)
            # Since the multipoint shapefile will be appended, delete it
            #  in the wrapper script
            if multipoint_flag and os.path.isdir(pixel_ws):
                for pixel_file in os.listdir(pixel_ws):
                    # Bug fix: raw string and escaped dot so only ".shp"
                    # files match (an unescaped "." matched any character)
                    if re.match(r'\w+_\w+\.shp$', pixel_file):
                        logging.info('\n Removing {}'.format(pixel_file))
                        os.remove(os.path.join(pixel_ws, pixel_file))
        logging.debug('  {} {}'.format(year, tile_name))

        # Check that there is an input file for the path/row
        metric_ini_path = os.path.join(
            tile_ws, metric_ini_fmt.format(metric_ini_name, year, tile_name))
        mc_ini_path = os.path.join(
            tile_ws, mc_ini_fmt.format(mc_ini_name, year, tile_name))
        # Bug fix: os.path.join(path) is always truthy; the intent is
        # clearly to check that the INI files exist on disk
        if not os.path.isfile(metric_ini_path):
            logging.warning('    METRIC Input file {} does not exist'.format(
                metric_ini_path))
            continue
        elif not os.path.isfile(mc_ini_path):
            logging.warning(
                '    Monte Carlo Input file {} does not exist'.format(
                    mc_ini_path))
            continue

        # Setup command line argument
        # call_args = [sys.executable, mc_func_path, '-i', ini_path]
        call_args = [sys.executable, func_path,
                     '--metric_ini', metric_ini_path,
                     '--mc_ini', mc_ini_path,
                     '--groupsize', str(groupsize)]
        if cold_tgt_pct is not None and hot_tgt_pct is not None:
            call_args.extend(['-t', str(cold_tgt_pct), str(hot_tgt_pct)])
            # NOTE(review): --blocksize is only passed when the target
            # percentages are set; confirm this nesting is intentional
            if blocksize:
                call_args.extend(['--blocksize', str(blocksize)])
        if shapefile_flag:
            call_args.append('--shapefile')
        if multipoint_flag:
            call_args.append('--multipoint')
        if stats_flag:
            call_args.append('--stats')
        if overwrite_flag:
            call_args.append('--overwrite')
        if debug_flag:
            call_args.append('--debug')
        if no_file_logging:
            call_args.append('--no_file_logging')
        if no_final_plots:
            call_args.append('--no_final_plots')
        if no_temp_plots:
            call_args.append('--no_temp_plots')

        # Run all scenes for each Monte Carlo iteration
        for mc_iter in mc_iter_list:
            if mc_iter is not None:
                mc_args = ['-mc', str(mc_iter)]
            else:
                mc_args = []
            for image_id in image_id_list:
                image_folder = os.path.join(tile_ws, image_id)
                logging.debug('  {}'.format(os.path.basename(image_folder)))
                if mp_procs > 1:
                    mp_list.append([
                        call_args + mc_args, image_folder, delay,
                        new_window_flag])
                else:
                    subprocess.call(call_args + mc_args, cwd=image_folder)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(dripy.call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool

    logging.debug('\nScript complete')
Пример #5
0
def pixel_rating(image_ws, ini_path, stats_flag=None, overwrite_flag=None):
    """Calculate suggested hot/cold pixel regions for a METRIC scene

    Ratings are built per-block from NDVI, albedo, NLCD, Ts percentile and
    (optionally) SWB Ke rasters, then thresholded at the configured
    percentiles to produce hot/cold pixel suggestion rasters.

    Args:
        image_ws (str): Image folder path
        ini_path (str): Pixel regions config file path
        stats_flag (bool): if True, compute raster statistics.
            If None (the default), read the flag from the config file.
        overwrite_flag (bool): if True, overwrite existing files.
            If None (the default), read the flag from the config file.

    Returns:
        None
    """
    logging.info('Generating suggested hot/cold pixel regions')
    log_fmt = '  {:<18s} {}'

    env = gdc.env
    image = et_image.Image(image_ws, env)
    # NaN comparisons below (e.g. rating arrays) are intentional
    np.seterr(invalid='ignore')

    # # Check  that image_ws is valid
    # image_re = re.compile(
    #     '^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')
    # if not os.path.isdir(image_ws) or not image_re.match(scene_id):
    #     logging.error('\nERROR: Image folder is invalid or does not exist\n')
    #     return False

    # Folder Paths
    region_ws = os.path.join(image_ws, 'PIXEL_REGIONS')

    # Open config file
    config = open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    # Arrays are processed by block
    bs = read_param('block_size', 1024, config)
    logging.info('  {:<18s} {}'.format('Block Size:', bs))

    # Raster pyramids/statistics
    pyramids_flag = read_param('pyramids_flag', False, config)
    if pyramids_flag:
        gdal.SetConfigOption('HFA_USE_RRD', 'YES')
    # Fall back to the config file when the caller did not set the flag
    if stats_flag is None:
        stats_flag = read_param('statistics_flag', False, config)

    # Overwrite
    if overwrite_flag is None:
        overwrite_flag = read_param('overwrite_flag', True, config)

    # Check that common_area raster exists
    if not os.path.isfile(image.common_area_raster):
        logging.error(
            '\nERROR: A common area raster was not found.' +
            '\nERROR: Please rerun prep tool to build these files.\n' +
            '    {}\n'.format(image.common_area_raster))
        sys.exit()

    # Use common_area to set mask parameters
    common_ds = gdal.Open(image.common_area_raster)
    # env.mask_proj = raster_ds_proj(common_ds)
    env.mask_geo = gdc.raster_ds_geo(common_ds)
    env.mask_rows, env.mask_cols = gdc.raster_ds_shape(common_ds)
    env.mask_extent = gdc.geo_extent(env.mask_geo, env.mask_rows,
                                     env.mask_cols)
    env.mask_array = gdc.raster_ds_to_array(common_ds)[0]
    env.mask_path = image.common_area_raster
    env.snap_osr = gdc.raster_path_osr(image.common_area_raster)
    env.snap_proj = env.snap_osr.ExportToWkt()
    env.cellsize = gdc.raster_path_cellsize(image.common_area_raster)[0]
    common_ds = None
    logging.debug('  {:<18s} {}'.format('Mask Extent:', env.mask_extent))

    # Read Pixel Regions config file
    # Currently there is no code to support applying an NLCD mask
    apply_nlcd_mask = False
    # apply_nlcd_mask = read_param('apply_nlcd_mask', False, config)
    apply_cdl_ag_mask = read_param('apply_cdl_ag_mask', False, config)
    apply_field_mask = read_param('apply_field_mask', False, config)
    apply_ndwi_mask = read_param('apply_ndwi_mask', True, config)
    apply_ndvi_mask = read_param('apply_ndvi_mask', True, config)
    # Currently the code to apply a study area mask is commented out
    # apply_study_area_mask = read_param(
    #     'apply_study_area_mask', False, config)

    albedo_rating_flag = read_param('albedo_rating_flag', True, config)
    nlcd_rating_flag = read_param('nlcd_rating_flag', True, config)
    ndvi_rating_flag = read_param('ndvi_rating_flag', True, config)
    ts_rating_flag = read_param('ts_rating_flag', True, config)
    ke_rating_flag = read_param('ke_rating_flag', False, config)

    # if apply_study_area_mask:
    #     study_area_path = config.get('INPUTS', 'study_area_path')
    if apply_nlcd_mask or nlcd_rating_flag:
        nlcd_raster = config.get('INPUTS', 'landuse_raster')
    if apply_cdl_ag_mask:
        cdl_ag_raster = config.get('INPUTS', 'cdl_ag_raster')
        cdl_buffer_cells = read_param('cdl_buffer_cells', 0, config)
        cdl_ag_eroded_name = read_param('cdl_ag_eroded_name',
                                        'cdl_ag_eroded_{}.img', config)
    if apply_field_mask:
        field_raster = config.get('INPUTS', 'fields_raster')

    cold_rating_pct = read_param('cold_percentile', 99, config)
    hot_rating_pct = read_param('hot_percentile', 99, config)
    # min_cold_rating_score = read_param('min_cold_rating_score', 0.3, config)
    # min_hot_rating_score = read_param('min_hot_rating_score', 0.3, config)

    ts_bin_count = int(read_param('ts_bin_count', 10, config))
    if 100 % ts_bin_count != 0:
        # NOTE: format the full message (the original only formatted the
        # second half of a concatenation, leaving a literal '{}' and a
        # misleading "default = 4" in the log)
        logging.warning(
            'WARNING: ts_bin_count of {} is not a divisor of 100. '
            'Using default ts_bin_count = 10'.format(ts_bin_count))
        ts_bin_count = 10
    # These values are recomputed inside the ts_rating block below;
    # they are kept here to preserve the original behavior
    bin_size = 1. / (ts_bin_count - 1)
    hot_rating_values = np.arange(0., 1. + bin_size, step=bin_size)
    cold_rating_values = hot_rating_values[::-1]

    # Input raster paths
    # NOTE(review): if image.type is not Landsat these names stay unbound
    # and later use raises NameError — presumably only Landsat is supported
    r_fmt = '.img'
    if 'Landsat' in image.type:
        albedo_raster = image.albedo_sur_raster
        ndvi_raster = image.ndvi_toa_raster
        ndwi_raster = image.ndwi_toa_raster
        ts_raster = image.ts_raster
        ke_raster = image.ke_raster

    # Check config file input paths
    # if apply_study_area_mask and not os.path.isfile(study_area_path):
    #     logging.error(
    #         ('\nERROR: The study area shapefile {} does ' +
    #             'not exist\n').format(study_area_path))
    #     sys.exit()
    if ((apply_nlcd_mask or nlcd_rating_flag)
            and not os.path.isfile(nlcd_raster)):
        logging.error(('\nERROR: The NLCD raster {} does ' +
                       'not exist\n').format(nlcd_raster))
        sys.exit()
    if apply_cdl_ag_mask and not os.path.isfile(cdl_ag_raster):
        logging.error(('\nERROR: The CDL Ag raster {} does ' +
                       'not exist\n').format(cdl_ag_raster))
        sys.exit()
    if apply_field_mask and not os.path.isfile(field_raster):
        logging.error(('\nERROR: The field raster {} does ' +
                       'not exist\n').format(field_raster))
        sys.exit()
    if (not (isinstance(cold_rating_pct,
                        (int, float)) and (0 <= cold_rating_pct <= 100))):
        logging.error(
            '\nERROR: cold_percentile must be a value between 0 and 100\n')
        sys.exit()
    if (not (isinstance(hot_rating_pct,
                        (int, float)) and (0 <= hot_rating_pct <= 100))):
        logging.error(
            '\nERROR: hot_percentile must be a value between 0 and 100\n')
        sys.exit()

    # Set raster names
    raster_dict = dict()

    # Output Rasters
    raster_dict['region_mask'] = os.path.join(region_ws, 'region_mask' + r_fmt)
    raster_dict['cold_rating'] = os.path.join(region_ws,
                                              'cold_pixel_rating' + r_fmt)
    raster_dict['hot_rating'] = os.path.join(region_ws,
                                             'hot_pixel_rating' + r_fmt)
    raster_dict['cold_sugg'] = os.path.join(region_ws,
                                            'cold_pixel_suggestion' + r_fmt)
    raster_dict['hot_sugg'] = os.path.join(region_ws,
                                           'hot_pixel_suggestion' + r_fmt)

    # Read pixel region raster flags
    save_dict = dict()
    save_dict['region_mask'] = read_param('save_region_mask_flag', False,
                                          config)
    save_dict['cold_rating'] = read_param('save_rating_rasters_flag', False,
                                          config)
    save_dict['hot_rating'] = read_param('save_rating_rasters_flag', False,
                                         config)
    save_dict['cold_sugg'] = read_param('save_suggestion_rasters_flag', True,
                                        config)
    save_dict['hot_sugg'] = read_param('save_suggestion_rasters_flag', True,
                                       config)

    # Output folder
    if not os.path.isdir(region_ws):
        os.mkdir(region_ws)

    # Remove existing files if necessary
    region_ws_file_list = [
        os.path.join(region_ws, item) for item in os.listdir(region_ws)
    ]
    if overwrite_flag and region_ws_file_list:
        for raster_path in raster_dict.values():
            if raster_path in region_ws_file_list:
                remove_file(raster_path)

    # Check scene specific input paths
    if apply_ndwi_mask and not os.path.isfile(ndwi_raster):
        logging.error(
            'ERROR: NDWI raster does not exist\n {}'.format(ndwi_raster))
        sys.exit()
    elif apply_ndvi_mask and not os.path.isfile(ndvi_raster):
        logging.error(
            'ERROR: NDVI raster does not exist\n {}'.format(ndvi_raster))
        sys.exit()
    elif ke_rating_flag and not os.path.isfile(ke_raster):
        logging.error(
            ('ERROR: The Ke raster does not exist\n {}').format(ke_raster))
        sys.exit()

    # Remove existing and build new empty rasters if necessary
    # If processing by block, rating rasters must be built
    logging.debug('\nBuilding empty rasters')
    for name, save_flag in sorted(save_dict.items()):
        if save_flag and 'rating' in name:
            gdc.build_empty_raster(raster_dict[name], 1, np.float32)
        elif save_flag:
            gdc.build_empty_raster(raster_dict[name],
                                   1,
                                   np.uint8,
                                   output_nodata=0)

    if apply_cdl_ag_mask:
        logging.info('Building CDL ag mask')
        cdl_array = gdc.raster_to_array(cdl_ag_raster,
                                        mask_extent=env.mask_extent,
                                        return_nodata=False)
        if cdl_buffer_cells > 0:
            logging.info('  Eroding CDL by {} cells'.format(cdl_buffer_cells))
            # np.int was removed in NumPy 1.24; use the builtin int
            structure_array = np.ones((cdl_buffer_cells, cdl_buffer_cells),
                                      dtype=int)
            # Deadbeef - This could blow up in memory on bigger rasters
            cdl_array = ndimage.binary_erosion(
                cdl_array, structure_array).astype(structure_array.dtype)
        cdl_ag_eroded_raster = os.path.join(
            image.support_ws, cdl_ag_eroded_name.format(cdl_buffer_cells))
        gdc.array_to_raster(cdl_array,
                            cdl_ag_eroded_raster,
                            output_geo=env.mask_geo,
                            output_proj=env.snap_proj,
                            mask_array=env.mask_array,
                            output_nodata=0,
                            stats_flag=False)
        del cdl_array

    # Build region mask
    # np.bool was removed in NumPy 1.24; use the builtin bool
    logging.debug('Building region mask')
    region_mask = np.copy(env.mask_array).astype(bool)
    if apply_field_mask:
        field_mask, field_nodata = gdc.raster_to_array(
            field_raster, mask_extent=env.mask_extent, return_nodata=True)
        region_mask &= field_mask != field_nodata
        del field_mask, field_nodata
    if apply_ndwi_mask:
        ndwi_array = gdc.raster_to_array(ndwi_raster,
                                         1,
                                         mask_extent=env.mask_extent,
                                         return_nodata=False)
        region_mask &= ndwi_array > 0.0
        del ndwi_array
    if apply_ndvi_mask:
        ndvi_array = gdc.raster_to_array(ndvi_raster,
                                         1,
                                         mask_extent=env.mask_extent,
                                         return_nodata=False)
        region_mask &= ndvi_array > 0.12
        del ndvi_array
    if apply_cdl_ag_mask:
        cdl_array, cdl_nodata = gdc.raster_to_array(
            cdl_ag_eroded_raster,
            mask_extent=env.mask_extent,
            return_nodata=True)
        region_mask &= cdl_array != cdl_nodata
        del cdl_array, cdl_nodata
    if save_dict['region_mask']:
        gdc.array_to_raster(region_mask,
                            raster_dict['region_mask'],
                            stats_flag=False)

    # Initialize rating arrays
    # This needs to be done before the ts_rating if block
    cold_rating_array = np.ones(env.mask_array.shape, dtype=np.float32)
    hot_rating_array = np.ones(env.mask_array.shape, dtype=np.float32)
    cold_rating_array[~region_mask] = np.nan
    hot_rating_array[~region_mask] = np.nan

    # Temperature pixel rating - grab the max and min value for the entire
    #  Ts image in a memory safe way by using gdal_common blocks
    # The following is a percentile based approach
    if ts_rating_flag:
        logging.debug('Computing Ts percentile rating')
        ts_array = gdc.raster_to_array(ts_raster,
                                       mask_extent=env.mask_extent,
                                       return_nodata=False)
        ts_array[~region_mask] = np.nan

        percentiles = range(0, (100 + ts_bin_count), int(100 / ts_bin_count))
        ts_score_value = 1. / (ts_bin_count - 1)
        hot_rating_values = np.arange(0, (1. + ts_score_value),
                                      step=ts_score_value)[:ts_bin_count]
        cold_rating_values = hot_rating_values[::-1]
        ts_percentile_array = stats.scoreatpercentile(
            ts_array[np.isfinite(ts_array)], percentiles)

        for bins_i in range(len(ts_percentile_array))[:-1]:
            bool_array = ((ts_array > ts_percentile_array[bins_i]) &
                          (ts_array <= ts_percentile_array[bins_i + 1]))
            cold_rating_array[bool_array] = cold_rating_values[bins_i]
            hot_rating_array[bool_array] = hot_rating_values[bins_i]
        # gdc.array_to_raster(cold_rating_array, raster_dict['cold_rating'])
        # gdc.array_to_raster(hot_rating_array, raster_dict['hot_rating'])

        # Cleanup
        del ts_array, ts_percentile_array
        del cold_rating_values, hot_rating_values
        del ts_score_value, percentiles

    # Process by block
    logging.info('\nProcessing by block')
    logging.debug('  Mask  cols/rows: {}/{}'.format(env.mask_cols,
                                                    env.mask_rows))
    for b_i, b_j in gdc.block_gen(env.mask_rows, env.mask_cols, bs):
        logging.debug('  Block  y: {:5d}  x: {:5d}'.format(b_i, b_j))
        block_data_mask = gdc.array_to_block(env.mask_array, b_i, b_j,
                                             bs).astype(bool)
        # block_nodata_mask = ~block_data_mask
        block_rows, block_cols = block_data_mask.shape
        block_geo = gdc.array_offset_geo(env.mask_geo, b_j, b_i)
        block_extent = gdc.geo_extent(block_geo, block_rows, block_cols)
        logging.debug('    Block rows: {}  cols: {}'.format(
            block_rows, block_cols))
        # logging.debug('    Block extent: {}'.format(block_extent))
        # logging.debug('    Block geo: {}'.format(block_geo))

        # Don't skip empty blocks since block rating needs to be written
        #  back to the array at the end of the block loop
        block_region_mask = gdc.array_to_block(region_mask, b_i, b_j, bs)
        if not np.any(block_region_mask):
            logging.debug('    Empty block')
            block_empty_flag = True
        else:
            block_empty_flag = False

        # New style continuous pixel weighting
        cold_rating_block = gdc.array_to_block(cold_rating_array, b_i, b_j, bs)
        hot_rating_block = gdc.array_to_block(hot_rating_array, b_i, b_j, bs)

        # Rating arrays already have region_mask set
        # cold_rating_block = np.ones(block_region_mask.shape, dtype=np.float32)
        # hot_rating_block = np.ones(block_region_mask.shape, dtype=np.float32)
        # cold_rating_block[~block_region_mask] = np.nan
        # hot_rating_block[~block_region_mask] = np.nan
        # del block_region_mask

        if ndvi_rating_flag and not block_empty_flag:
            # NDVI based rating
            ndvi_array = gdc.raster_to_array(ndvi_raster,
                                             1,
                                             mask_extent=block_extent,
                                             return_nodata=False)
            # Don't let NDVI be negative
            ndvi_array.clip(0., 0.833, out=ndvi_array)
            # ndvi_array.clip(0.001, 0.833, out=ndvi_array)
            cold_rating_block *= ndvi_array
            cold_rating_block *= 1.20
            # DEADBEEF - Can this calculation be masked to only NDVI > 0?
            ndvi_mask = ndvi_array > 0
            hot_rating_block[ndvi_mask] *= stats.norm.pdf(
                np.log(ndvi_array[ndvi_mask]), math.log(0.15), 0.5)
            hot_rating_block[ndvi_mask] *= 1.25
            del ndvi_mask
            # hot_rating_block *= stats.norm.pdf(
            #     np.log(ndvi_array), math.log(0.15), 0.5)
            # hot_rating_block *= 1.25
            # cold_rating_block.clip(0., 1., out=cold_rating_block)
            # hot_rating_block.clip(0., 1., out=hot_rating_block)
            del ndvi_array

        if albedo_rating_flag and not block_empty_flag:
            # Albedo based rating
            albedo_array = gdc.raster_to_array(albedo_raster,
                                               1,
                                               mask_extent=block_extent,
                                               return_nodata=False)
            albedo_cold_pdf = stats.norm.pdf(albedo_array, 0.21, 0.03)
            albedo_hot_pdf = stats.norm.pdf(albedo_array, 0.21, 0.06)
            del albedo_array
            cold_rating_block *= albedo_cold_pdf
            cold_rating_block *= 0.07
            hot_rating_block *= albedo_hot_pdf
            hot_rating_block *= 0.15
            # cold_rating_block.clip(0., 1., out=cold_rating_block)
            # hot_rating_block.clip(0., 1., out=hot_rating_block)
            del albedo_cold_pdf, albedo_hot_pdf

        if nlcd_rating_flag and not block_empty_flag:
            # NLCD based weighting, this could be CDL instead?
            nlcd_array = nlcd_rating(
                gdc.raster_to_array(nlcd_raster,
                                    1,
                                    mask_extent=block_extent,
                                    return_nodata=False))
            cold_rating_block *= nlcd_array
            hot_rating_block *= nlcd_array
            del nlcd_array

        if ke_rating_flag and not block_empty_flag:
            # SWB Ke based rating
            ke_array = gdc.raster_to_array(ke_raster,
                                           1,
                                           mask_extent=block_extent,
                                           return_nodata=False)
            # Don't let Ke be negative or greater than 1
            ke_array.clip(0., 1., out=ke_array)
            # Assumption, lower Ke is better for selecting the hot pixel
            # As the power (2) decreases and approaches 1,
            #   the relationship gets more linear
            # cold_rating_block *= (1 - ke_array ** 2)
            # hot_rating_block *= (1 - ke_array ** 1.5)
            # Linear inverse
            # cold_rating_block *= (1. - ke_array)
            hot_rating_block *= (1. - ke_array)
            # cold_rating_block.clip(0., 1., out=cold_rating_block)
            # hot_rating_block.clip(0., 1., out=hot_rating_block)
            del ke_array

        # Clearness
        # clearness = 1.0
        # cold_rating *= clearness
        # hot_rating *= clearness

        # Reset nan values
        # cold_rating_block[~region_mask] = np.nan
        # hot_rating_block[~region_mask] = np.nan

        # Save rating values back to the full arrays
        # (the original did this twice; once is sufficient)
        cold_rating_array = gdc.block_to_array(cold_rating_block,
                                               cold_rating_array, b_i, b_j, bs)
        hot_rating_array = gdc.block_to_array(hot_rating_block,
                                              hot_rating_array, b_i, b_j, bs)

        # Save rating rasters
        if save_dict['cold_rating']:
            gdc.block_to_raster(cold_rating_block, raster_dict['cold_rating'],
                                b_i, b_j, bs)
        if save_dict['hot_rating']:
            gdc.block_to_raster(hot_rating_block, raster_dict['hot_rating'],
                                b_i, b_j, bs)

        del cold_rating_block, hot_rating_block

    # Select pixels above target percentile
    # Only build suggestion arrays if saving
    logging.debug('Building suggested pixel rasters')
    if save_dict['cold_sugg']:
        cold_rating_score = float(
            stats.scoreatpercentile(
                cold_rating_array[np.isfinite(cold_rating_array)],
                cold_rating_pct))
        # cold_rating_array, cold_rating_nodata = gdc.raster_to_array(
        #     raster_dict['cold_rating'], 1, mask_extent=env.mask_extent)
        # if cold_rating_score < float(min_cold_rating_score):
        #     logging.error(('ERROR: The cold_rating_score ({}) is less ' +
        #                    'than the min_cold_rating_score ({})').format(
        #                     cold_rating_score, min_cold_rating_score))
        #     sys.exit()
        cold_sugg_mask = cold_rating_array >= cold_rating_score
        gdc.array_to_raster(cold_sugg_mask,
                            raster_dict['cold_sugg'],
                            stats_flag=stats_flag)
        logging.debug('  Cold Percentile: {}'.format(cold_rating_pct))
        logging.debug('  Cold Score:  {:.6f}'.format(cold_rating_score))
        logging.debug('  Cold Pixels: {}'.format(np.sum(cold_sugg_mask)))
        del cold_sugg_mask, cold_rating_array
    if save_dict['hot_sugg']:
        hot_rating_score = float(
            stats.scoreatpercentile(
                hot_rating_array[np.isfinite(hot_rating_array)],
                hot_rating_pct))
        # hot_rating_array, hot_rating_nodata = gdc.raster_to_array(
        #     raster_dict['hot_rating'], 1, mask_extent=env.mask_extent)
        # if hot_rating_score < float(min_hot_rating_score):
        #     logging.error(('ERROR: The hot_rating_array ({}) is less ' +
        #                    'than the min_hot_rating_score ({})').format(
        #                     hot_rating_array, min_hot_rating_score))
        #     sys.exit()
        hot_sugg_mask = hot_rating_array >= hot_rating_score
        gdc.array_to_raster(hot_sugg_mask,
                            raster_dict['hot_sugg'],
                            stats_flag=stats_flag)
        logging.debug('  Hot Percentile: {}'.format(hot_rating_pct))
        logging.debug('  Hot Score:  {:.6f}'.format(hot_rating_score))
        logging.debug('  Hot Pixels: {}'.format(np.sum(hot_sugg_mask)))
        del hot_sugg_mask, hot_rating_array

    # Raster Statistics
    if stats_flag:
        logging.info('Calculating Statistics')
        for name, save_flag in save_dict.items():
            if save_flag:
                gdc.raster_statistics(raster_dict[name])
    # Raster Pyramids
    if pyramids_flag:
        logging.info('Building Pyramids')
        for name, save_flag in save_dict.items():
            if save_flag:
                gdc.raster_pyramids(raster_dict[name])
Пример #6
0
def main(ini_path, tile_list=None, overwrite_flag=False):
    """Prep Landsat path/row specific data

    Args:
        ini_path (str): file path of the input parameters file
        tile_list (list): list of Landsat path/row (i.e. [p45r43, p45r33])
            This will override the tile list in the INI file
        overwrite_flag (bool): boolean, overwrite existing files
        mp_procs (int): number of cores to use

    Returns:
        None
    """
    logging.info('\nPrepare path/row INI files')

    # Open config file
    config = python_common.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = python_common.read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    ini_file_flag = python_common.read_param('ini_file_flag', True, config,
                                             'INPUTS')
    landsat_flag = python_common.read_param('landsat_flag', True, config,
                                            'INPUTS')
    ledaps_flag = python_common.read_param('ledaps_flag', False, config,
                                           'INPUTS')
    dem_flag = python_common.read_param('dem_flag', True, config, 'INPUTS')
    nlcd_flag = python_common.read_param('nlcd_flag', True, config, 'INPUTS')
    cdl_flag = python_common.read_param('cdl_flag', True, config, 'INPUTS')
    landfire_flag = python_common.read_param('landfire_flag', False, config,
                                             'INPUTS')
    field_flag = python_common.read_param('field_flag', False, config,
                                          'INPUTS')
    metric_flag = python_common.read_param('metric_flag', True, config,
                                           'INPUTS')
    monte_carlo_flag = python_common.read_param('monte_carlo_flag', False,
                                                config, 'INPUTS')
    interp_rasters_flag = python_common.read_param('interpolate_rasters_flag',
                                                   False, config, 'INPUTS')
    interp_tables_flag = python_common.read_param('interpolate_tables_flag',
                                                  False, config, 'INPUTS')

    metric_hourly_weather = python_common.read_param('metric_hourly_weather',
                                                     'NLDAS', config, 'INPUTS')

    project_ws = config.get('INPUTS', 'project_folder')
    footprint_path = config.get('INPUTS', 'footprint_path')
    # For now, assume the UTM zone file is colocated with the footprints shapefile
    utm_path = python_common.read_param(
        'utm_path',
        os.path.join(os.path.dirname(footprint_path),
                     'wrs2_tile_utm_zones.json'), config, 'INPUTS')
    skip_list_path = python_common.read_param('skip_list_path', '', config,
                                              'INPUTS')

    # Ts and albedo corrections
    ts_correction_flag = python_common.read_param('Ts_correction_flag', True,
                                                  config, 'INPUTS')
    k_value = python_common.read_param('K_value', 2, config, 'INPUTS')
    albedo_correction_flag = python_common.read_param(
        'albedo_correction_flag ', True, config, 'INPUTS')
    dense_veg_min_albedo = python_common.read_param('dense_veg_min_albedo ',
                                                    0.18, config, 'INPUTS')

    # tile_gcs_buffer = read_param('tile_buffer', 0.1, config)

    # Template input files for scripts
    if metric_flag:
        metric_ini = config.get('INPUTS', 'metric_ini')
        pixel_rating_ini = config.get('INPUTS', 'pixel_rating_ini')
    if monte_carlo_flag:
        monte_carlo_ini = config.get('INPUTS', 'monte_carlo_ini')

    if interp_rasters_flag or interp_tables_flag:
        interpolate_folder = python_common.read_param('interpolate_folder',
                                                      'ET', config)
        interpolate_ini = config.get('INPUTS', 'interpolate_ini')
    if interp_rasters_flag:
        study_area_path = config.get('INPUTS', 'study_area_path')
        study_area_mask_flag = python_common.read_param(
            'study_area_mask_flag', True, config)
        study_area_snap = python_common.read_param('study_area_snap', (0, 0),
                                                   config)
        study_area_cellsize = python_common.read_param('study_area_cellsize',
                                                       30, config)
        study_area_buffer = python_common.read_param('study_area_buffer', 0,
                                                     config)
        study_area_proj = python_common.read_param('study_area_proj', '',
                                                   config)
    if interp_tables_flag:
        zones_path = config.get('INPUTS', 'zones_path')
        zones_name_field = python_common.read_param('zones_name_field', 'FID',
                                                    config)
        # zones_buffer = read_param('zones_buffer', 0, config)
        zones_snap = python_common.read_param('zones_snap', (0, 0), config)
        zones_cellsize = python_common.read_param('zones_cellsize', 30, config)
        # zones_proj = read_param('zones_proj', '', config)
        zones_mask = python_common.read_param('zones_mask', None, config)
        zones_buffer = None
        zones_proj = None

    # Input/output folder and file paths
    if landsat_flag:
        landsat_input_ws = config.get('INPUTS', 'landsat_input_folder')
    else:
        landsat_input_ws = None
    if ledaps_flag:
        ledaps_input_ws = config.get('INPUTS', 'ledaps_input_folder')
    else:
        ledaps_input_ws = None

    if dem_flag:
        dem_input_ws = config.get('INPUTS', 'dem_input_folder')
        dem_tile_fmt = config.get('INPUTS', 'dem_tile_fmt')
        dem_output_ws = config.get('INPUTS', 'dem_output_folder')
        dem_output_name = python_common.read_param('dem_output_name',
                                                   'dem.img', config)
        # dem_output_name = config.get('INPUTS', 'dem_output_name')
    else:
        dem_input_ws, dem_tile_fmt = None, None
        dem_output_ws, dem_output_name = None, None

    if nlcd_flag:
        nlcd_input_path = config.get('INPUTS', 'nlcd_input_path')
        nlcd_output_ws = config.get('INPUTS', 'nlcd_output_folder')
        nlcd_output_fmt = python_common.read_param('nlcd_output_fmt',
                                                   'nlcd_{:04d}.img', config)
    else:
        nlcd_input_path, nlcd_output_ws, nlcd_output_fmt = None, None, None

    if cdl_flag:
        cdl_input_path = config.get('INPUTS', 'cdl_input_path')
        cdl_ag_list = config.get('INPUTS', 'cdl_ag_list')
        cdl_ag_list = list(python_common.parse_int_set(cdl_ag_list))
        # default_cdl_ag_list = range(1,62) + range(66,78) + range(204,255)
        # cdl_ag_list = read_param(
        #    'cdl_ag_list', default_cdl_ag_list, config)
        # cdl_ag_list = list(map(int, cdl_ag_list))
        # cdl_non_ag_list = read_param(
        #    'cdl_non_ag_list', [], config)
        cdl_output_ws = config.get('INPUTS', 'cdl_output_folder')
        cdl_output_fmt = python_common.read_param('cdl_output_fmt',
                                                  'cdl_{:04d}.img', config)
        cdl_ag_output_fmt = python_common.read_param('cdl_ag_output_fmt',
                                                     'cdl_ag_{:04d}.img',
                                                     config)
    else:
        cdl_input_path, cdl_ag_list = None, None
        cdl_output_ws, cdl_output_fmt, cdl_ag_output_fmt = None, None, None

    if landfire_flag:
        landfire_input_path = config.get('INPUTS', 'landfire_input_path')
        landfire_ag_list = config.get('INPUTS', 'landfire_ag_list')
        landfire_ag_list = list(python_common.parse_int_set(landfire_ag_list))
        # default_landfire_ag_list = range(3960,4000)
        # landfire_ag_list = read_param(
        #    'landfire_ag_list', default_landfire_ag_list, config)
        # landfire_ag_list = list(map(int, landfire_ag_list))
        landfire_output_ws = config.get('INPUTS', 'landfire_output_folder')
        landfire_output_fmt = python_common.read_param('landfire_output_fmt',
                                                       'landfire_{:04d}.img',
                                                       config)
        landfire_ag_output_fmt = python_common.read_param(
            'landfire_ag_output_fmt', 'landfire_ag_{:04d}.img', config)
    else:
        landfire_input_path, landfire_ag_list = None, None
        landfire_output_ws = None
        landfire_output_fmt, landfire_ag_output_fmt = None, None

    if field_flag:
        field_input_path = config.get('INPUTS', 'field_input_path')
        field_output_ws = config.get('INPUTS', 'field_output_folder')
        field_output_fmt = python_common.read_param('field_output_fmt',
                                                    'fields_{:04d}.img',
                                                    config)
    else:
        field_input_path = None
        field_output_ws, field_output_fmt = None, None

    if monte_carlo_flag:
        etrf_training_path = config.get('INPUTS', 'etrf_training_path')
        # mc_iter_list = config.get('INPUTS', 'mc_iter_list')
        # mc_iter_list = list(python_common.parse_int_set(mc_iter_list))
    if monte_carlo_flag or interp_rasters_flag or interp_tables_flag:
        etrf_input_ws = python_common.read_param('etrf_input_folder', None,
                                                 config)
        # if etrf_input_ws is None:
        #     etrf_input_ws = os.path.join(project_ws, year)
        etr_input_ws = config.get('INPUTS', 'etr_input_folder')
        ppt_input_ws = config.get('INPUTS', 'ppt_input_folder')
        etr_input_re = config.get('INPUTS', 'etr_input_re')
        ppt_input_re = config.get('INPUTS', 'ppt_input_re')
    if monte_carlo_flag or interp_rasters_flag or interp_tables_flag:
        awc_input_path = config.get('INPUTS', 'awc_input_path')
        spinup_days = python_common.read_param('swb_spinup_days', 30, config,
                                               'INPUTS')
        min_spinup_days = python_common.read_param('swb_min_spinup_days', 5,
                                                   config, 'INPUTS')

    # Weather data parameters
    if metric_flag:
        metric_hourly_weather_list = ['NLDAS', 'REFET']
        metric_hourly_weather = config.get('INPUTS',
                                           'metric_hourly_weather').upper()
        if metric_hourly_weather not in metric_hourly_weather_list:
            logging.error(
                ('\nERROR: The METRIC hourly weather type {} is invalid.' +
                 '\nERROR: Set metric_hourly_weather to {}').format(
                     metric_hourly_weather,
                     ','.join(metric_hourly_weather_list)))
            sys.exit()
        elif metric_hourly_weather == 'REFET':
            refet_params_path = os.path.normpath(
                config.get('INPUTS', 'refet_params_path'))
        elif metric_hourly_weather == 'NLDAS':
            # metric_hourly_re = config.get('INPUTS', 'metric_hourly_re')
            # metric_daily_re = config.get('INPUTS', 'metric_daily_re')
            metric_ea_input_ws = config.get('INPUTS', 'metric_ea_input_folder')
            metric_wind_input_ws = config.get('INPUTS',
                                              'metric_wind_input_folder')
            metric_etr_input_ws = config.get('INPUTS',
                                             'metric_etr_input_folder')
            try:
                calc_metric_tair_flag = config.getboolean(
                    'INPUTS', 'calc_metric_tair_flag')
                metric_tair_input_ws = config.get('INPUTS',
                                                  'metric_tair_input_folder')
            except:
                calc_metric_tair_flag = False
                metric_tair_input_ws = ''

    # Check inputs folders/paths
    logging.info('\nChecking input folders/files')
    file_check(footprint_path)
    if landsat_flag:
        folder_check(landsat_input_ws)
    if ledaps_flag:
        folder_check(ledaps_input_ws)
    if dem_flag:
        folder_check(dem_input_ws)
    if nlcd_flag:
        file_check(nlcd_input_path)
    if cdl_flag:
        file_check(cdl_input_path)
    if landfire_flag:
        # Landfire will likely be an ESRI grid (set as a folder)
        if not (os.path.isdir(landfire_input_path)
                or os.path.isfile(landfire_input_path)):
            logging.error('  {} does not exist.'.format(landfire_input_path))
            sys.exit()
    if field_flag:
        file_check(field_input_path)
    if metric_flag:
        file_check(metric_ini)
        file_check(pixel_rating_ini)
    if interp_rasters_flag or interp_tables_flag or monte_carlo_flag:
        if etrf_input_ws is not None:
            folder_check(etrf_input_ws)
        folder_check(etr_input_ws)
        folder_check(ppt_input_ws)
        file_check(awc_input_path)
    if monte_carlo_flag:
        file_check(monte_carlo_ini)
        file_check(etrf_training_path)
    if metric_flag:
        if metric_hourly_weather == 'REFET':
            file_check(refet_params_path)
        elif metric_hourly_weather == 'NLDAS':
            folder_check(metric_ea_input_ws)
            folder_check(metric_wind_input_ws)
            folder_check(metric_etr_input_ws)
            if calc_metric_tair_flag:
                folder_check(metric_tair_input_ws)
    if skip_list_path:
        file_check(skip_list_path)

    # Build output folders
    if not os.path.isdir(project_ws):
        os.makedirs(project_ws)

    # For now assume path/row are two digit numbers
    tile_fmt = 'p{:03d}r{:03d}'

    # Set snap environment parameters
    snap_cs = 30
    snap_xmin, snap_ymin = (15, 15)
    env = gdc.env
    env.cellsize = snap_cs
    env.snap_xmin, env.snap_ymin = snap_xmin, snap_ymin

    # Use WGS84 (EPSG 4326) for GCS spatial reference
    # Could also use NAD83 (EPSG 4269)
    # gcs_epsg = 4326
    # gcs_osr = epsg_osr(4326)
    # gcs_proj = osr_proj(gcs_osr)

    # Landsat Footprints (WRS2 Descending Polygons)
    logging.debug('\nFootprint (WRS2 descending should be GCS84):')
    tile_gcs_osr = gdc.feature_path_osr(footprint_path)
    logging.debug('  OSR: {}'.format(tile_gcs_osr))

    # Doublecheck that WRS2 descending shapefile is GCS84
    # if tile_gcs_osr != epsg_osr(4326):
    #     logging.error('  WRS2 is not GCS84')
    #     sys.exit()

    # Get geometry for each path/row
    tile_gcs_wkt_dict = path_row_wkt_func(footprint_path,
                                          path_field='PATH',
                                          row_field='ROW')

    # Get UTM zone for each path/row
    # DEADBEEF - Using "eval" is considered unsafe and should be changed
    tile_utm_zone_dict = eval(open(utm_path, 'r').read())

    # Check that each path/row extent and UTM zone exist
    logging.info('\nChecking path/row list against footprint shapefile')
    for tile_name in sorted(tile_list):
        if tile_name not in tile_gcs_wkt_dict.keys():
            logging.error(
                '  {} feature not in footprint shapefile'.format(tile_name))
            continue
        elif tile_name not in tile_utm_zone_dict.keys():
            logging.error(
                '  {} UTM zone not in footprint shapefile'.format(tile_name))
            continue
        elif tile_utm_zone_dict[tile_name] == 0:
            logging.error(('  UTM zone is not set for {} in ' +
                           'footprint shapefile').format(tile_name))
            continue

    # Read RefET parameters
    if metric_hourly_weather == 'REFET':
        refet_ws = os.path.dirname(refet_params_path)
        with open(refet_params_path, 'r') as input_f:
            lines = input_f.readlines()
        lines = [line.strip() for line in lines]
        lines = [line.split(',') for line in lines if line]
        columns = lines.pop(0)
        refet_params_dict = defaultdict(dict)
        for line in lines:
            tile_name = tile_fmt.format(int(line[columns.index('PATH')]),
                                        int(line[columns.index('ROW')]))
            yr_tile_name = '{}_{}'.format(line[columns.index('YEAR')],
                                          tile_name)
            for i, column in enumerate(columns):
                if column not in ['YEAR', 'PATH', 'ROW']:
                    refet_params_dict[yr_tile_name][column.lower()] = line[i]

    # Process input files for each year and path/row
    logging.info('\nBuilding path/row specific input files')
    for tile_name in tile_list:
        tile_output_ws = os.path.join(project_ws, str(year), tile_name)
        logging.info('{} {}'.format(year, tile_name))
        yr_tile_name = '{}_{}'.format(year, tile_name)
        if not os.path.isdir(tile_output_ws):
            os.makedirs(tile_output_ws)

        # File paths
        if metric_flag:
            tile_metric_ini = os.path.join(
                tile_output_ws,
                os.path.basename(metric_ini).replace(
                    '.ini', '_{}_{}.ini'.format(year, tile_name)))
            tile_pixel_rating_ini = os.path.join(
                tile_output_ws,
                os.path.basename(pixel_rating_ini).replace(
                    '.ini', '_{}_{}.ini'.format(year, tile_name)))
            if overwrite_flag and os.path.isfile(tile_metric_ini):
                os.remove(tile_metric_ini)
            if overwrite_flag and os.path.isfile(tile_pixel_rating_ini):
                os.remove(tile_pixel_rating_ini)

        # Monte Carlo is independent of tile and year, but process
        #   with METRIC input file
        if monte_carlo_flag:
            tile_monte_carlo_ini = os.path.join(
                tile_output_ws,
                os.path.basename(monte_carlo_ini).replace(
                    '.ini', '_{}_{}.ini'.format(year, tile_name)))
            if overwrite_flag and os.path.isfile(tile_monte_carlo_ini):
                os.remove(tile_monte_carlo_ini)

        if dem_flag:
            dem_output_path = os.path.join(dem_output_ws, tile_name,
                                           dem_output_name)
        if nlcd_flag:
            nlcd_output_path = os.path.join(nlcd_output_ws, tile_name,
                                            nlcd_output_fmt.format(year))
        if cdl_flag:
            cdl_ag_output_path = os.path.join(cdl_output_ws, tile_name,
                                              cdl_ag_output_fmt.format(year))
        if landfire_flag:
            landfire_ag_output_path = os.path.join(
                landfire_output_ws, tile_name,
                landfire_output_fmt.format(year))
        if field_flag:
            field_output_path = os.path.join(field_output_ws, tile_name,
                                             field_output_fmt.format(year))

        # Check that the path/row was in the RefET parameters file
        if (metric_flag and metric_hourly_weather == 'REFET'
                and yr_tile_name not in refet_params_dict.keys()):
            logging.error(
                ('    The year {} & path/row {} is not in the ' +
                 'RefET parameters csv, skipping').format(year, tile_name))
            continue

        if metric_flag and not os.path.isfile(tile_metric_ini):
            # DEADBEEF - This approach removes all formatting and comments
            config = configparser.RawConfigParser()
            config.read(metric_ini)
            # shutil.copy(metric_ini, tile_metric_ini)
            # config.read(tile_metric_ini)

            if metric_hourly_weather == 'REFET':
                # Add RefET options
                config.set('INPUTS', 'weather_data_source', 'REFET')
                config.set(
                    'INPUTS', 'refet_file',
                    os.path.join(
                        refet_ws,
                        os.path.normpath(
                            refet_params_dict[yr_tile_name]['refet_file'])))
                config.set('INPUTS', 'gmt_offset',
                           refet_params_dict[yr_tile_name]['gmt_offset'])
                config.set('INPUTS', 'datum',
                           refet_params_dict[yr_tile_name]['datum'])
            elif metric_hourly_weather == 'NLDAS':
                # Add NLDAS options
                config.set('INPUTS', 'weather_data_source', 'NLDAS')
                # Remove RefET options
                try:
                    config.remove_option('INPUTS', 'refet_file')
                except:
                    pass
                try:
                    config.remove_option('INPUTS', 'gmt_offset')
                except:
                    pass
                # try:
                #     config.remove_option('INPUTS', 'datum')
                # except:
                #     pass

            if dem_flag:
                config.set('INPUTS', 'dem_raster', dem_output_path)
            else:
                try:
                    config.remove_option('INPUTS', 'dem_raster')
                except:
                    pass
                # config.set('INPUTS', 'dem_raster', 'None')

            if nlcd_flag:
                config.set('INPUTS', 'landuse_raster', nlcd_output_path)
            else:
                try:
                    config.remove_option('INPUTS', 'landuse_raster')
                except:
                    pass
                # config.set('INPUTS', 'landuse_raster', 'None')

            logging.debug('  {}'.format(tile_metric_ini))
            with open(tile_metric_ini, 'w') as config_f:
                config.write(config_f)

        if metric_flag and not os.path.isfile(tile_pixel_rating_ini):
            config = configparser.RawConfigParser()
            config.read(pixel_rating_ini)
            if nlcd_flag:
                config.set('INPUTS', 'landuse_raster', nlcd_output_path)
            else:
                try:
                    config.remove_option('INPUTS', 'landuse_raster')
                except:
                    pass
                # config.set('INPUTS', 'landuse_raster', 'None')
            if cdl_flag:
                config.set('INPUTS', 'apply_cdl_ag_mask', True)
                config.set('INPUTS', 'cdl_ag_raster', cdl_ag_output_path)
            else:
                config.set('INPUTS', 'apply_cdl_ag_mask', False)
                try:
                    config.remove_option('INPUTS', 'cdl_ag_raster')
                except:
                    pass
                # config.set('INPUTS', 'cdl_ag_raster', 'None')
            if field_flag:
                config.set('INPUTS', 'apply_field_mask', True)
                config.set('INPUTS', 'fields_raster', field_output_path)
            else:
                config.set('INPUTS', 'apply_field_mask', False)
                try:
                    config.remove_option('INPUTS', 'fields_raster')
                except:
                    pass
                # config.set('INPUTS', 'fields_raster', 'None')
            # if landfire_flag:
            #     config.set('INPUTS', 'apply_landfire_ag_mask', True)
            #     config.set('INPUTS', 'landfire_ag_raster', cdl_ag_output_path)
            # else:
            #     config.set('INPUTS', 'apply_landfire_ag_mask', False)
            #     try: config.remove_option('INPUTS', 'landfire_ag_raster')
            #     except: pass
            #     # config.set('INPUTS', 'landfire_ag_raster', 'None')

            logging.debug('  {}'.format(tile_pixel_rating_ini))
            with open(tile_pixel_rating_ini, 'w') as config_f:
                config.write(config_f)

        if monte_carlo_flag and not os.path.isfile(tile_monte_carlo_ini):
            config = configparser.RawConfigParser()
            config.read(monte_carlo_ini)
            config.set('INPUTS', 'etrf_training_path', etrf_training_path)
            config.set('INPUTS', 'etr_ws', etr_input_ws)
            config.set('INPUTS', 'ppt_ws', ppt_input_ws)
            config.set('INPUTS', 'etr_re', etr_input_re)
            config.set('INPUTS', 'ppt_re', ppt_input_re)
            config.set('INPUTS', 'awc_path', awc_input_path)
            config.set('INPUTS', 'swb_spinup_days', spinup_days)
            config.set('INPUTS', 'swb_min_spinup_days', min_spinup_days)

            logging.debug('  {}'.format(tile_monte_carlo_ini))
            with open(tile_monte_carlo_ini, 'w') as config_f:
                config.write(config_f)

        # Cleanup
        del tile_output_ws, yr_tile_name

    # Interpolator input file
    if interp_rasters_flag or interp_tables_flag:
        logging.info('\nBuilding interpolator input files')
        year_interpolator_name = os.path.basename(interpolate_ini).replace(
            '.ini', '_{}_{}.ini'.format(year, interpolate_folder.lower()))
        year_interpolator_ini = os.path.join(project_ws, str(year),
                                             year_interpolator_name)
        if overwrite_flag and os.path.isfile(year_interpolator_ini):
            os.remove(year_interpolator_ini)
        if not os.path.isfile(year_interpolator_ini):
            # First copy the template config file to the year folder
            shutil.copy(interpolate_ini, year_interpolator_ini)

            # Open the existing config file and update the values
            # DEADBEEF - This approach removes all formatting and comments
            config = configparser.RawConfigParser()
            config.read(year_interpolator_ini)
            config.set('INPUTS', 'folder_name', interpolate_folder)
            config.set('INPUTS', 'tile_list', ', '.join(tile_list))
            if interp_rasters_flag:
                config.set('INPUTS', 'study_area_path', study_area_path)
                config.set('INPUTS', 'study_area_mask_flag',
                           study_area_mask_flag)
                config.set('INPUTS', 'study_area_snap',
                           ', '.join(map(str, study_area_snap)))
                config.set('INPUTS', 'study_area_cellsize',
                           study_area_cellsize)
                config.set('INPUTS', 'study_area_buffer', study_area_buffer)
                if study_area_proj:
                    config.set('INPUTS', 'study_area_proj', study_area_proj)
                else:
                    try:
                        config.remove_option('INPUTS', 'study_area_proj',
                                             study_area_proj)
                    except:
                        pass
            if interp_tables_flag:
                config.set('INPUTS', 'zones_path', zones_path)
                config.set('INPUTS', 'zones_snap',
                           ', '.join(map(str, zones_snap)))
                config.set('INPUTS', 'zones_cellsize', zones_cellsize)
                config.set('INPUTS', 'zones_name_field', zones_name_field)
                # zones_buffer is not currently implemented
                if zones_buffer:
                    config.set('INPUTS', 'zones_buffer', zones_buffer)
                else:
                    try:
                        config.remove_option('INPUTS', 'zones_buffer',
                                             zones_buffer)
                    except:
                        pass
                # zones proj., cellsize, and snap are not needed or
                #   read in if zones_mask is set
                # zones_proj is not currently implemented
                if zones_mask:
                    config.set('INPUTS', 'zones_mask', zones_mask)
                    try:
                        config.remove_option('INPUTS', 'zones_proj')
                    except:
                        pass
                    try:
                        config.remove_option('INPUTS', 'zones_cellsize')
                    except:
                        pass
                    try:
                        config.remove_option('INPUTS', 'zones_snap')
                    except:
                        pass
                # elif zones_proj:
                #     config.set('INPUTS', 'zones_proj', zones_proj)
                #     try:
                #         config.remove_option('INPUTS', 'zones_mask')
                #     except:
                #         pass
                else:
                    try:
                        config.remove_option('INPUTS', 'zones_proj')
                    except:
                        pass
                    try:
                        config.remove_option('INPUTS', 'zones_mask')
                    except:
                        pass
            config.set('INPUTS', 'year', year)
            config.set('INPUTS', 'footprint_path', footprint_path)
            if etrf_input_ws is not None:
                config.set('INPUTS', 'etrf_input_folder', etrf_input_ws)
            config.set('INPUTS', 'etr_input_folder', etr_input_ws)
            config.set('INPUTS', 'etr_input_re', etr_input_re)
            config.set('INPUTS', 'ppt_input_folder', ppt_input_ws)
            config.set('INPUTS', 'ppt_input_re', ppt_input_re)
            # DEADBEEF - add check for SWB flag
            config.set('INPUTS', 'awc_input_path', awc_input_path)
            config.set('INPUTS', 'swb_spinup_days', spinup_days)
            config.set('INPUTS', 'swb_min_spinup_days', min_spinup_days)

            # Albedo and Ts correction
            config.set('INPUTS', 'Ts_correction_flag', ts_correction_flag)
            config.set('INPUTS', 'K_value ', k_value)
            config.set('INPUTS', 'albedo_correction_flag',
                       albedo_correction_flag)
            config.set('INPUTS', 'dense_veg_min_albedo', dense_veg_min_albedo)

            logging.debug('  {}'.format(year_interpolator_ini))
            with open(year_interpolator_ini, 'w') as config_f:
                config.write(config_f)

    logging.debug('\nScript complete')
# ---- Example #7 ----
def main(ini_path, tile_list=None, overwrite_flag=False, mp_procs=1):
    """Prep Landsat path/row specific data

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cores to use (the default is 1).

    Returns
    -------
    None

    """
    logging.info('\nPrepare path/row data')

    # Open config file
    config = python_common.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = python_common.read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    # study_area_path = config.get('INPUTS', 'study_area_path')
    footprint_path = config.get('INPUTS', 'footprint_path')
    # For now, assume the UTM zone file is colocated with the footprints shapefile
    utm_path = python_common.read_param(
        'utm_path',
        os.path.join(os.path.dirname(footprint_path),
                     'wrs2_tile_utm_zones.json'), config, 'INPUTS')
    skip_list_path = python_common.read_param('skip_list_path', '', config,
                                              'INPUTS')

    landsat_flag = python_common.read_param('landsat_flag', True, config,
                                            'INPUTS')
    ledaps_flag = False
    dem_flag = python_common.read_param('dem_flag', True, config, 'INPUTS')
    nlcd_flag = python_common.read_param('nlcd_flag', True, config, 'INPUTS')
    cdl_flag = python_common.read_param('cdl_flag', False, config, 'INPUTS')
    landfire_flag = python_common.read_param('landfire_flag', False, config,
                                             'INPUTS')
    field_flag = python_common.read_param('field_flag', False, config,
                                          'INPUTS')

    tile_gcs_buffer = python_common.read_param('tile_buffer', 0.25, config)

    # Input/output folder and file paths
    if landsat_flag:
        landsat_input_ws = config.get('INPUTS', 'landsat_input_folder')
    else:
        landsat_input_ws = None
    # if ledaps_flag:
    #     ledaps_input_ws = config.get('INPUTS', 'ledaps_input_folder')
    # else:
    #     ledaps_input_ws = None

    if dem_flag:
        dem_input_ws = config.get('INPUTS', 'dem_input_folder')
        dem_tile_fmt = config.get('INPUTS', 'dem_tile_fmt')
        dem_output_ws = config.get('INPUTS', 'dem_output_folder')
        dem_output_name = python_common.read_param('dem_output_name',
                                                   'dem.img', config)
        # dem_output_name = config.get('INPUTS', 'dem_output_name')
    else:
        dem_input_ws, dem_tile_fmt = None, None
        dem_output_ws, dem_output_name = None, None

    if nlcd_flag:
        nlcd_input_path = config.get('INPUTS', 'nlcd_input_path')
        nlcd_output_ws = config.get('INPUTS', 'nlcd_output_folder')
        nlcd_output_fmt = python_common.read_param('nlcd_output_fmt',
                                                   'nlcd_{:04d}.img', config)
    else:
        nlcd_input_path, nlcd_output_ws, nlcd_output_fmt = None, None, None

    if cdl_flag:
        cdl_input_path = config.get('INPUTS', 'cdl_input_path')
        cdl_ag_list = config.get('INPUTS', 'cdl_ag_list')
        cdl_ag_list = list(python_common.parse_int_set(cdl_ag_list))
        # default_cdl_ag_list = range(1,62) + range(66,78) + range(204,255)
        # cdl_ag_list = python_common.read_param(
        #    'cdl_ag_list', default_cdl_ag_list, config)
        # cdl_ag_list = list(map(int, cdl_ag_list))
        # cdl_non_ag_list = python_common.read_param(
        #    'cdl_non_ag_list', [], config)
        cdl_output_ws = config.get('INPUTS', 'cdl_output_folder')
        cdl_output_fmt = python_common.read_param('cdl_output_fmt',
                                                  'cdl_{:04d}.img', config)
        cdl_ag_output_fmt = python_common.read_param('cdl_ag_output_fmt',
                                                     'cdl_ag_{:04d}.img',
                                                     config)
    else:
        cdl_input_path, cdl_ag_list = None, None
        cdl_output_ws, cdl_output_fmt, cdl_ag_output_fmt = None, None, None

    if landfire_flag:
        landfire_input_path = config.get('INPUTS', 'landfire_input_path')
        landfire_ag_list = config.get('INPUTS', 'landfire_ag_list')
        landfire_ag_list = list(python_common.parse_int_set(landfire_ag_list))
        # default_landfire_ag_list = range(3960,4000)
        # landfire_ag_list = python_common.read_param(
        #    'landfire_ag_list', default_landfire_ag_list, config)
        # landfire_ag_list = list(map(int, landfire_ag_list))
        landfire_output_ws = config.get('INPUTS', 'landfire_output_folder')
        landfire_output_fmt = python_common.read_param('landfire_output_fmt',
                                                       'landfire_{:04d}.img',
                                                       config)
        landfire_ag_output_fmt = python_common.read_param(
            'landfire_ag_output_fmt', 'landfire_ag_{:04d}.img', config)
    else:
        landfire_input_path, landfire_ag_list = None, None
        landfire_output_ws = None
        landfire_output_fmt, landfire_ag_output_fmt = None, None

    if field_flag:
        field_input_path = config.get('INPUTS', 'field_input_path')
        field_output_ws = config.get('INPUTS', 'field_output_folder')
        field_output_fmt = python_common.read_param('field_output_fmt',
                                                    'fields_{:04d}.img',
                                                    config)
    else:
        field_input_path = None
        field_output_ws, field_output_fmt = None, None

    # File/folder names
    orig_data_folder_name = 'ORIGINAL_DATA'

    # Check inputs folders/paths
    logging.info('\nChecking input folders/files')
    file_check(footprint_path)
    file_check(utm_path)
    if landsat_flag:
        folder_check(landsat_input_ws)
    # if ledaps_flag:
    #     folder_check(ledaps_input_ws)
    if dem_flag:
        folder_check(dem_input_ws)
    if nlcd_flag:
        file_check(nlcd_input_path)
    if cdl_flag:
        file_check(cdl_input_path)
    if landfire_flag:
        # Landfire will likely be an ESRI grid (set as a folder)
        if not (os.path.isdir(landfire_input_path)
                or os.path.isfile(landfire_input_path)):
            logging.error('\n  {} does not exist'.format(landfire_input_path))
    if field_flag:
        file_check(field_input_path)
    if skip_list_path:
        file_check(skip_list_path)

    # Build output folders
    if not os.path.isdir(project_ws):
        os.makedirs(project_ws)
    if dem_flag and not os.path.isdir(dem_output_ws):
        os.makedirs(dem_output_ws)
    if nlcd_flag and not os.path.isdir(nlcd_output_ws):
        os.makedirs(nlcd_output_ws)
    if cdl_flag and not os.path.isdir(cdl_output_ws):
        os.makedirs(cdl_output_ws)
    if landfire_flag and not os.path.isdir(landfire_output_ws):
        os.makedirs(landfire_output_ws)
    if field_flag and not os.path.isdir(field_output_ws):
        os.makedirs(field_output_ws)

    # For now assume path/row are two digit numbers
    tile_fmt = 'p{:03d}r{:03d}'
    tile_re = re.compile('p(\d{3})r(\d{3})')
    image_re = re.compile(
        '^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')
    snap_cs = 30
    snap_xmin, snap_ymin = (15, 15)

    # Set snap environment parameters
    env = drigo.env
    env.cellsize = snap_cs
    env.snap_xmin, env.snap_ymin = snap_xmin, snap_ymin

    # Use WGS84 (EPSG 4326) for GCS spatial reference
    # Could also use NAD83 (EPSG 4269)
    # gcs_epsg = 4326
    # gcs_osr = epsg_osr(4326)
    # gcs_proj = osr_proj(gcs_osr)

    # Landsat Footprints (WRS2 Descending Polygons)
    logging.debug('\nFootprint (WRS2 descending should be GCS84):')
    tile_gcs_osr = drigo.feature_path_osr(footprint_path)
    logging.debug('  OSR: {}'.format(tile_gcs_osr))

    # Doublecheck that WRS2 descending shapefile is GCS84
    # if tile_gcs_osr != epsg_osr(4326):
    #     logging.error('  WRS2 is not GCS84')
    #     sys.exit()

    # Get geometry for each path/row
    tile_gcs_wkt_dict = path_row_wkt_func(footprint_path,
                                          path_field='PATH',
                                          row_field='ROW')

    # Get UTM zone for each path/row
    # DEADBEEF - Using "eval" is considered unsafe and should be changed
    tile_utm_zone_dict = eval(open(utm_path, 'r').read())

    # Project study area geometry to GCS coordinates
    # logging.debug('\nStudy area')
    # study_area_geom = feature_path_geom_union(study_area_path)
    # study_area_gcs_geom = study_area_geom.Clone()
    # study_area_gcs_geom.TransformTo(tile_gcs_osr)

    # Get list of all intersecting Landsat path/rows
    # logging.info('\nLandsat path/rows')
    # tile_list = []
    # for tile_name, tile_gcs_wkt in tile_gcs_wkt_dict.items():
    #     tile_gcs_geom = ogr.CreateGeometryFromWkt(tile_gcs_wkt)
    #     if tile_gcs_geom.Intersects(study_area_gcs_geom):
    #         tile_list.append(tile_name)
    # for tile_name in sorted(tile_list):
    #     logging.debug('  {}'.format(tile_name))

    # Check that each path/row extent and UTM zone exist
    logging.info('\nChecking path/row list against footprint shapefile')
    for tile_name in sorted(tile_list):
        if tile_name not in tile_gcs_wkt_dict.keys():
            logging.error(
                '  {} feature not in footprint shapefile'.format(tile_name))
            continue
        elif tile_name not in tile_utm_zone_dict.keys():
            logging.error(
                '  {} UTM zone not in footprint shapefile'.format(tile_name))
            continue
        elif tile_utm_zone_dict[tile_name] == 0:
            logging.error(('  UTM zone is not set for {} in ' +
                           'footprint shapefile').format(tile_name))
            continue

    # Build output folders for each path/row
    logging.info('\nBuilding path/row folders')
    for tile_name in tile_list:
        logging.debug('  {} {}'.format(year, tile_name))
        tile_output_ws = os.path.join(project_ws, str(year), tile_name)
        if ((landsat_flag or ledaps_flag)
                and not os.path.isdir(tile_output_ws)):
            os.makedirs(tile_output_ws)
        if (dem_flag
                and not os.path.isdir(os.path.join(dem_output_ws, tile_name))):
            os.makedirs(os.path.join(dem_output_ws, tile_name))
        if (nlcd_flag and
                not os.path.isdir(os.path.join(nlcd_output_ws, tile_name))):
            os.makedirs(os.path.join(nlcd_output_ws, tile_name))
        if (cdl_flag
                and not os.path.isdir(os.path.join(cdl_output_ws, tile_name))):
            os.makedirs(os.path.join(cdl_output_ws, tile_name))
        if (landfire_flag and not os.path.isdir(
                os.path.join(landfire_output_ws, tile_name))):
            os.makedirs(os.path.join(landfire_output_ws, tile_name))
        if (field_flag and
                not os.path.isdir(os.path.join(field_output_ws, tile_name))):
            os.makedirs(os.path.join(field_output_ws, tile_name))

    # Read skip list
    if (landsat_flag or ledaps_flag) and skip_list_path:
        logging.debug('\nReading scene skiplist')
        with open(skip_list_path) as skip_list_f:
            skip_list = skip_list_f.readlines()
            skip_list = [
                scene.strip() for scene in skip_list
                if image_re.match(scene.strip())
            ]
    else:
        logging.debug('\nSkip list not set in INI')
        skip_list = []

    # Copy and unzip raw Landsat scenes
    # Use these for thermal band, MTL file (scene time), and to run FMask
    if landsat_flag:
        logging.info('\nExtract raw Landsat scenes')
        # Process each path/row
        extract_targz_list = []
        for tile_name in tile_list:
            tile_output_ws = os.path.join(project_ws, str(year), tile_name)

            # path/row as strings with leading zeros
            path, row = map(str, tile_re.match(tile_name).groups())
            tile_input_ws = os.path.join(landsat_input_ws, path, row,
                                         str(year))
            if not os.path.isdir(tile_input_ws):
                continue
            logging.info('  {} {}'.format(year, tile_name))

            # Process each tar.gz file
            for input_name in sorted(os.listdir(tile_input_ws)):
                if (not image_re.match(input_name)
                        and not input_name.endswith('.tar.gz')):
                    continue

                # Get Landsat scene ID from tar.gz file name
                # DEADBEEF - For now this is the EE scene ID, but it could be
                #   changed to the full collection 1 ID
                scene_id = input_name.split('.')[0]

                # Output workspace
                image_output_ws = os.path.join(tile_output_ws, scene_id)
                orig_data_ws = os.path.join(image_output_ws,
                                            orig_data_folder_name)

                if skip_list and scene_id in skip_list:
                    logging.debug('    {} - Skipping scene'.format(scene_id))
                    # DEADBEEF - Should the script always remove the scene
                    #   if it is in the skip list?
                    # Maybe only if overwrite is set?
                    if os.path.isdir(image_output_ws):
                        # input('Press ENTER to delete {}'.format(scene_id))
                        shutil.rmtree(image_output_ws)
                    continue

                # If orig_data_ws doesn't exist, don't check images
                if not os.path.isdir(orig_data_ws):
                    os.makedirs(orig_data_ws)
                elif (not overwrite_flag
                      and landsat_files_check(image_output_ws)):
                    continue

                # Extract Landsat tar.gz file
                input_path = os.path.join(tile_input_ws, input_name)
                print(orig_data_ws)
                # sys.exit()
                if mp_procs > 1:
                    extract_targz_list.append([input_path, orig_data_ws])
                else:
                    python_common.extract_targz_func(input_path, orig_data_ws)

                # # Use a command line call
                # input_path = os.path.join(tile_input_ws, input_name)
                # if job_i % pbs_jobs != 0:
                #     job_list.append('tar -zxvf {} -C {} &\n'.format(
                #         input_path, orig_data_ws))
                # else:
                #     job_list.append('tar -zxvf {} -C {}\n'.format(
                #         input_path, orig_data_ws))
                #     # job_list.append('tar -zxvf {} -C {} &\n'.format(
                #     #     input_path, orig_data_ws))
                #     # job_list.append('wait\n')
                # job_i += 1

        # Extract Landsat tar.gz files using multiprocessing
        if extract_targz_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(python_common.extract_targz_mp,
                               extract_targz_list,
                               chunksize=1)
            pool.close()
            pool.join()
            del results, pool

    # Get projected extent for each path/row
    # This should probably be in a function
    if (dem_flag or nlcd_flag or cdl_flag or landfire_flag or field_flag):
        tile_utm_extent_dict = gcs_to_utm_dict(tile_list, tile_utm_zone_dict,
                                               tile_gcs_osr, tile_gcs_wkt_dict,
                                               tile_gcs_buffer, snap_xmin,
                                               snap_ymin, snap_cs)

    # Mosaic DEM tiles for each path/row
    if dem_flag:
        logging.info('\nBuild DEM for each path/row')
        mosaic_mp_list = []
        for tile_name in tile_list:
            # Output folder and path
            tile_output_path = os.path.join(dem_output_ws, tile_name,
                                            dem_output_name)
            if not overwrite_flag and os.path.isfile(tile_output_path):
                logging.debug('    {} already exists, skipping'.format(
                    os.path.basename(tile_output_path)))
                continue
            logging.info('  {}'.format(tile_name))

            # Get the path/row geometry in GCS for selecting intersecting tiles
            tile_gcs_geom = ogr.CreateGeometryFromWkt(
                tile_gcs_wkt_dict[tile_name])
            # Apply a small buffer (in degrees) to the extent
            # DEADBEEF - Buffer fails if GDAL is not built with GEOS support
            # tile_gcs_geom = tile_gcs_geom.Buffer(tile_gcs_buffer)
            tile_gcs_extent = drigo.Extent(tile_gcs_geom.GetEnvelope())
            tile_gcs_extent = tile_gcs_extent.ogrenv_swap()
            tile_gcs_extent.buffer_extent(tile_gcs_buffer)
            # tile_gcs_extent.ymin, tile_gcs_extent.xmax = tile_gcs_extent.xmax, tile_gcs_extent.ymin

            # Offsets are needed since tile name is upper left corner of tile
            # Tile n36w120 spans -120 <-> -119 and 35 <-> 36
            lon_list = range(
                int(tile_gcs_extent.xmin) - 1, int(tile_gcs_extent.xmax))
            lat_list = range(
                int(tile_gcs_extent.ymin) + 1,
                int(tile_gcs_extent.ymax) + 2)

            # Get list of DEM tile rasters
            dem_tile_list = []
            for lat, lon in itertools.product(lat_list, lon_list):
                # Convert sign of lat/lon to letter
                lat = ('n' + '{:02d}'.format(abs(lat)) if lat >= 0 else 's' +
                       '{:02d}'.format(abs(lat)))
                lon = ('w' + '{:03d}'.format(abs(lon)) if lon < 0 else 'e' +
                       '{:03d}'.format(abs(lon)))
                dem_tile_path = os.path.join(dem_input_ws,
                                             dem_tile_fmt.format(lat, lon))
                if os.path.isfile(dem_tile_path):
                    dem_tile_list.append(dem_tile_path)
            if not dem_tile_list:
                logging.warning('    WARNING: No DEM tiles were selected')
                continue

            # Mosaic tiles using mosaic function
            tile_utm_osr = drigo.epsg_osr(32600 +
                                          int(tile_utm_zone_dict[tile_name]))
            tile_utm_proj = drigo.epsg_proj(32600 +
                                            int(tile_utm_zone_dict[tile_name]))
            tile_utm_extent = tile_utm_extent_dict[tile_name]
            tile_utm_ullr = tile_utm_extent.ul_lr_swap()

            # Mosaic, clip, project using custom function
            if mp_procs > 1:
                mosaic_mp_list.append([
                    dem_tile_list, tile_output_path, tile_utm_proj, snap_cs,
                    tile_utm_extent
                ])
            else:
                drigo.mosaic_tiles(dem_tile_list, tile_output_path,
                                   tile_utm_osr, snap_cs, tile_utm_extent)

            # Cleanup
            del tile_output_path
            del tile_gcs_geom, tile_gcs_extent, tile_utm_extent
            del tile_utm_osr, tile_utm_proj
            del lon_list, lat_list, dem_tile_list
        # Mosaic DEM rasters using multiprocessing
        if mosaic_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(mosaic_tiles_mp, mosaic_mp_list, chunksize=1)
            pool.close()
            pool.join()
            del results, pool

    # Project/clip NLCD for each path/row
    if nlcd_flag:
        logging.info('\nBuild NLCD for each path/row')
        project_mp_list = []
        for tile_name in tile_list:
            nlcd_output_path = os.path.join(nlcd_output_ws, tile_name,
                                            nlcd_output_fmt.format(year))
            if not overwrite_flag and os.path.isfile(nlcd_output_path):
                logging.debug('    {} already exists, skipping'.format(
                    os.path.basename(nlcd_output_path)))
                continue
            logging.info('  {}'.format(tile_name))

            # Set the nodata value on the NLCD raster if it is not set
            nlcd_ds = gdal.Open(nlcd_input_path, 0)
            nlcd_band = nlcd_ds.GetRasterBand(1)
            nlcd_nodata = nlcd_band.GetNoDataValue()
            nlcd_ds = None
            if nlcd_nodata is None:
                nlcd_nodata = 255

            # Clip and project
            tile_utm_osr = drigo.epsg_osr(32600 +
                                          int(tile_utm_zone_dict[tile_name]))
            tile_utm_proj = drigo.epsg_proj(32600 +
                                            int(tile_utm_zone_dict[tile_name]))
            tile_utm_extent = tile_utm_extent_dict[tile_name]
            tile_utm_ullr = tile_utm_extent.ul_lr_swap()

            if mp_procs > 1:
                project_mp_list.append([
                    nlcd_input_path, nlcd_output_path,
                    gdal.GRA_NearestNeighbour, tile_utm_proj, snap_cs,
                    tile_utm_extent, nlcd_nodata
                ])
            else:
                drigo.project_raster(nlcd_input_path, nlcd_output_path,
                                     gdal.GRA_NearestNeighbour, tile_utm_osr,
                                     snap_cs, tile_utm_extent, nlcd_nodata)

            # Cleanup
            del nlcd_output_path
            del nlcd_ds, nlcd_band, nlcd_nodata
            del tile_utm_osr, tile_utm_proj, tile_utm_extent
        # Project NLCD rasters using multiprocessing
        if project_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(drigo.project_raster_mp,
                               project_mp_list,
                               chunksize=1)
            pool.close()
            pool.join()
            del results, pool

    # Project/clip CDL for each path/row
    if cdl_flag:
        logging.info('\nBuild CDL for each path/row')
        project_mp_list, remap_mp_list = [], []
        for tile_name in tile_list:
            cdl_output_path = os.path.join(cdl_output_ws, tile_name,
                                           cdl_output_fmt.format(year))
            cdl_ag_output_path = os.path.join(cdl_output_ws, tile_name,
                                              cdl_ag_output_fmt.format(year))
            if not os.path.isfile(cdl_input_path):
                logging.error('\n\n  {} does not exist'.format(cdl_input_path))
                sys.exit()
            if not overwrite_flag and os.path.isfile(cdl_output_path):
                logging.debug('    {} already exists, skipping'.format(
                    os.path.basename(cdl_output_path)))
                continue
            logging.info('  {}'.format(tile_name))

            # Set the nodata value on the CDL raster if it is not set
            cdl_ds = gdal.Open(cdl_input_path, 0)
            cdl_band = cdl_ds.GetRasterBand(1)
            cdl_nodata = cdl_band.GetNoDataValue()
            cdl_ds = None
            if cdl_nodata is None:
                cdl_nodata = 255

            # Clip and project
            tile_utm_osr = drigo.epsg_osr(32600 +
                                          int(tile_utm_zone_dict[tile_name]))
            tile_utm_proj = drigo.epsg_proj(32600 +
                                            int(tile_utm_zone_dict[tile_name]))
            tile_utm_extent = tile_utm_extent_dict[tile_name]
            if mp_procs > 1:
                project_mp_list.append([
                    cdl_input_path, cdl_output_path, gdal.GRA_NearestNeighbour,
                    tile_utm_proj, snap_cs, tile_utm_extent, cdl_nodata
                ])
                remap_mp_list.append(
                    [cdl_output_path, cdl_ag_output_path, cdl_ag_list])
            else:
                drigo.project_raster(cdl_input_path, cdl_output_path,
                                     gdal.GRA_NearestNeighbour, tile_utm_osr,
                                     snap_cs, tile_utm_extent, cdl_nodata)
                # Build a mask of CDL ag lands
                remap_mask_func(cdl_output_path, cdl_ag_output_path,
                                cdl_ag_list)

            # Cleanup
            del cdl_output_path
            del cdl_ds, cdl_band, cdl_nodata
            del tile_utm_osr, tile_utm_proj, tile_utm_extent
        # Project CDL rasters using multiprocessing
        if project_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(drigo.project_raster_mp,
                               project_mp_list,
                               chunksize=1)
            pool.close()
            pool.join()
            del results, pool
        if remap_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(remap_mask_mp, remap_mp_list, chunksize=1)
            pool.close()
            pool.join()
            del results, pool

    # Project/clip LANDFIRE for each path/row
    if landfire_flag:
        logging.info('\nBuild LANDFIRE for each path/row')
        project_mp_list, remap_mp_list = [], []
        for tile_name in tile_list:
            landfire_output_path = os.path.join(
                landfire_output_ws, tile_name,
                landfire_output_fmt.format(year))
            landfire_ag_output_path = os.path.join(
                landfire_output_ws, tile_name,
                landfire_ag_output_fmt.format(year))
            if not overwrite_flag and os.path.isfile(landfire_output_path):
                logging.debug('    {} already exists, skipping'.format(
                    os.path.basename(landfire_output_path)))
                continue
            logging.info('  {}'.format(tile_name))

            # Set the nodata value on the LANDFIRE raster if it is not set
            # landfire_ds = gdal.Open(landfire_input_path, 0)
            # landfire_band = landfire_ds.GetRasterBand(1)
            # landfire_nodata = landfire_band.GetNoDataValue()
            # landfire_ds = None
            # if landfire_nodata is None:
            #     landfire_nodata = 32767
            # del landfire_ds, landfire_band
            landfire_nodata = 32767

            # Clip and project
            tile_utm_osr = drigo.epsg_osr(32600 +
                                          int(tile_utm_zone_dict[tile_name]))
            tile_utm_proj = drigo.epsg_proj(32600 +
                                            int(tile_utm_zone_dict[tile_name]))
            tile_utm_extent = tile_utm_extent_dict[tile_name]
            if mp_procs > 1:
                project_mp_list.append([
                    landfire_input_path, landfire_output_path,
                    gdal.GRA_NearestNeighbour, tile_utm_proj, snap_cs,
                    tile_utm_extent, landfire_nodata
                ])
                remap_mp_list.append([
                    landfire_output_path, landfire_ag_output_path,
                    landfire_ag_list
                ])
            else:
                drigo.project_raster(landfire_input_path, landfire_output_path,
                                     gdal.GRA_NearestNeighbour, tile_utm_osr,
                                     snap_cs, tile_utm_extent, landfire_nodata)
                # Build a mask of LANDFIRE ag lands
                remap_mask_func(landfire_output_path, landfire_ag_output_path,
                                landfire_ag_list)

            # Cleanup
            del landfire_output_path
            del tile_utm_osr, tile_utm_proj, tile_utm_extent
        # Project LANDFIRE rasters using multiprocessing
        if project_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(drigo.project_raster_mp,
                               project_mp_list,
                               chunksize=1)
            pool.close()
            pool.join()
            del results, pool
        if remap_mp_list:
            pool = mp.Pool(mp_procs)
            results = pool.map(remap_mask_mp, remap_mp_list, chunksize=1)
            pool.close()
            pool.join()
            del results, pool

    # Convert field shapefiles to raster
    if field_flag:
        logging.info('\nBuild field rasters for each path/row')
        for tile_name in tile_list:
            logging.info('  {}'.format(tile_name))
            tile_output_ws = os.path.join(field_output_ws, tile_name)

            # Shapefile paths
            field_proj_name = (
                os.path.splitext(field_output_fmt.format(year))[0] +
                "_wgs84z{}.shp".format(tile_utm_zone_dict[tile_name]))
            field_proj_path = os.path.join(tile_output_ws, field_proj_name)
            field_output_path = os.path.join(tile_output_ws,
                                             field_output_fmt.format(year))
            if not overwrite_flag and os.path.isfile(field_output_path):
                logging.debug('    {} already exists, skipping'.format(
                    os.path.basename(field_output_path)))
                continue

            # The ogr2ogr spatial query is in the input spatial reference
            # Project the path/row extent to the field osr/proj
            field_input_osr = drigo.feature_path_osr(field_input_path)
            tile_utm_osr = drigo.epsg_osr(32600 +
                                          int(tile_utm_zone_dict[tile_name]))
            # field_input_proj = drigo.osr_proj(field_input_osr)
            # tile_utm_proj = drigo.osr_proj(tile_utm_osr)
            field_tile_extent = drigo.project_extent(
                tile_utm_extent_dict[tile_name], tile_utm_osr, field_input_osr,
                30)

            # Project shapefile to the path/row zone
            # Clipping requires GDAL to be built with GEOS support
            subprocess.call(
                [
                    'ogr2ogr', '-t_srs', 'EPSG:326{}'.format(
                        tile_utm_zone_dict[tile_name]), '-f', 'ESRI Shapefile',
                    '-overwrite'
                ] + ['-spat'] + list(map(str, field_tile_extent)) +
                ['-clipdst'] +
                list(map(str, tile_utm_extent_dict[tile_name])) +
                # ['-clipdst'] + list(map(str, tile_utm_extent_dict[tile_name])) +
                # ['-clipsrc'] + list(map(str, field_tile_extent)) +
                # ['-clipsrc'] + list(map(str, field_tile_extent)) +
                [field_proj_path, field_input_path])

            # Convert shapefile to raster
            field_mem_ds = drigo.polygon_to_raster_ds(
                field_proj_path,
                nodata_value=0,
                burn_value=1,
                output_osr=tile_utm_osr,
                output_extent=tile_utm_extent_dict[tile_name])
            field_output_driver = drigo.raster_driver(field_output_path)
            if field_output_path.lower().endswith('.img'):
                field_output_ds = field_output_driver.CreateCopy(
                    field_output_path, field_mem_ds, 0, ['COMPRESS=YES'])
            else:
                field_output_ds = field_output_driver.CreateCopy(
                    field_output_path, field_mem_ds, 0)
            field_output_ds, field_mem_ds = None, None

            # Remove field shapefile
            # try:
            #     remove_file(field_proj_path)
            # except:
            #     pass

            # Cleanup
            del tile_utm_osr, field_tile_extent, field_input_osr
            # del tile_utm_proj, field_input_proj
            del field_proj_name, field_proj_path, field_output_path

    logging.debug('\nScript complete')
# Example #8  (scraped-example separator; "Пример" is Russian for "Example")
# 0
def monte_carlo(image_ws,
                metric_ini_path,
                mc_ini_path,
                mc_iter=None,
                cold_tgt_pct=None,
                hot_tgt_pct=None,
                groupsize=64,
                blocksize=4096,
                multipoint_flag=False,
                shapefile_flag=False,
                stats_flag=False,
                overwrite_flag=False,
                debug_flag=False,
                no_etrf_final_plots=None,
                no_etrf_temp_plots=None):
    """METRIC Monte Carlo

    Parameters
    ----------
    image_ws : str
        The workspace (path) of the landsat scene folder.
    metric_ini_path : str
        The METRIC config file (path).
    mc_ini_path : str
        The Monte Carlo config file (path).
    mc_iter : int, optional
        Iteration number for Monte Carlo processing.
    cold_tgt_pct : float, optional
        Target percentage of pixels with ETrF > than cold Kc.
    hot_tgt_pct : float, optional
        Target percentage of pixels with ETrF < than hot Kc.
    groupsize : int, optional
        Script will try to place calibration point randomly
        into a labeled group of clustered values with at least n pixels.
        -1 = In the largest group
         0 = Anywhere in the image (not currently implemented)
         1 >= In any group with a pixel count greater or equal to n
    blocksize : int, optional
        Processing block size (the default is 4096).
    shapefile_flag : bool, optional
        If True, save calibration points to shapefile (the default is False).
    multipoint_flag : bool, optional
        If True, save cal. points to multipoint shapefile (the default is False).
    stats_flag : bool, optional
        If True, compute raster statistics (the default is False).
    ovewrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    no_final_plots : bool, optional
        If True, don't save final ETrF histograms (the default is None).
        This will override the flag in the INI file.
    no_temp_plots : bool
        If True, don't save temp ETrF histogram (the default is None).
        This will override the flag in the INI file.

    Returns
    -------
    None

    """
    logging.info('METRIC Automated Calibration')

    # Open config file
    config = open_ini(mc_ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    etrf_training_path = config.get('INPUTS', 'etrf_training_path')

    # Adjust Kc cold target value based on day of year
    # etrf_doy_adj_path = read_param(
    #     'etrf_doy_adj_path', None, config, 'INPUTS')

    # Intentionally set default to None, to trigger error in eval call
    kc_cold_doy_dict = read_param('kc_cold_doy_dict', None, config, 'INPUTS')
    kc_hot_doy_dict = read_param('kc_hot_doy_dict', None, config, 'INPUTS')

    # If the "no_" flags were set True, honor them and set the flag False
    # If the "no_" flags were not set by the user, use the INI flag values
    # If not set in the INI, default to False (don't save any plots)
    if no_etrf_temp_plots:
        save_etrf_temp_plots = False
    else:
        save_etrf_temp_plots = read_param('save_etrf_temp_plots', False,
                                          config, 'INPUTS')
    if no_etrf_final_plots:
        save_etrf_final_plots = False
    else:
        save_etrf_final_plots = read_param('save_etrf_final_plots', False,
                                           config, 'INPUTS')
    save_ndvi_plots = read_param('save_ndvi_plots', False, config, 'INPUTS')

    max_cal_iter = read_param('max_cal_iterations', 5, config, 'INPUTS')
    max_point_iter = read_param('max_point_iterations', 10, config, 'INPUTS')
    ts_diff_threshold = read_param('ts_diff_threshold', 4, config, 'INPUTS')
    etr_ws = config.get('INPUTS', 'etr_ws')
    ppt_ws = config.get('INPUTS', 'ppt_ws')
    etr_re = re.compile(config.get('INPUTS', 'etr_re'))
    ppt_re = re.compile(config.get('INPUTS', 'ppt_re'))
    awc_path = config.get('INPUTS', 'awc_path')
    spinup_days = read_param('swb_spinup_days', 5, config, 'INPUTS')
    min_spinup_days = read_param('swb_min_spinup_days', 30, config, 'INPUTS')

    log_fmt = '  {:<18s} {}'
    break_line = '\n{}'.format('#' * 80)

    env = drigo.env
    image = et_image.Image(image_ws, env)
    logging.info(log_fmt.format('Image:', image.folder_id))

    # Check inputs
    for file_path in [awc_path]:
        if not os.path.isfile(file_path):
            logging.error('\nERROR: File {} does not exist'.format(file_path))
            sys.exit()
    for folder in [etr_ws, ppt_ws]:
        if not os.path.isdir(folder):
            logging.error('\nERROR: Folder {} does not exist'.format(folder))
            sys.exit()
    # if (etrf_doy_adj_path and not
    #     os.path.isfile(etrf_doy_adj_path)):
    #     logging.error(
    #         '\nERROR: File {} does not exist.'.format(
    #             etrf_doy_adj_path))
    #     sys.exit()

    # Use iteration number to file iteration string
    if mc_iter is None:
        mc_str = ''
        mc_fmt = '.img'
    elif int(mc_iter) < 0:
        logging.error('\nERROR: Iteration number must be a positive integer')
        return False
    else:
        mc_str = 'MC{:02d}_'.format(int(mc_iter))
        mc_fmt = '_{:02d}.img'.format(int(mc_iter))
        logging.info('  {:<18s} {}'.format('Iteration:', mc_iter))

    # Folder names
    etrf_ws = os.path.join(image_ws, 'ETRF')
    # indices_ws = image.indices_ws
    region_ws = os.path.join(image_ws, 'PIXEL_REGIONS')
    pixels_ws = os.path.join(image_ws, 'PIXELS')
    plots_ws = os.path.join(image_ws, 'PLOTS')
    if shapefile_flag and not os.path.isdir(pixels_ws):
        os.mkdir(pixels_ws)
    if not os.path.isdir(plots_ws):
        os.mkdir(plots_ws)

    # File names
    r_fmt = '.img'
    etrf_path = os.path.join(etrf_ws, 'et_rf' + mc_fmt)
    region_path = os.path.join(region_ws, 'region_mask' + r_fmt)

    # Initialize calibration parameters dictionary
    logging.info(break_line)
    logging.info('Calibration Parameters')
    cal_dict = dict()

    logging.debug('  Reading target cold/hot Kc from INI')
    # Using eval is potentially a really bad way of reading this in
    try:
        kc_cold_doy_dict = eval('{' + kc_cold_doy_dict + '}')
    except:
        kc_cold_doy_dict = {1: 1.05, 366: 1.05}
        logging.info(
            '  ERROR: kc_cold_doy_dict was not parsed, using default values')
    try:
        kc_hot_doy_dict = eval('{' + kc_hot_doy_dict + '}')
    except:
        kc_hot_doy_dict = {1: 0.1, 366: 0.1}
        logging.info(
            '  ERROR: kc_hot_doy_dict was not parsed, using default values')
    logging.debug('  Kc cold dict: {}'.format(kc_cold_doy_dict))
    logging.debug('  Kc hot dict: {}\n'.format(kc_hot_doy_dict))
    # doy_cold, kc_cold = zip(*sorted(kc_cold_doy_dict.items()))
    cal_dict['cold_tgt_kc'] = np.interp(
        image.acq_doy,
        *zip(*sorted(kc_cold_doy_dict.items())),
        left=1.05,
        right=1.05)
    # doy_hot, kc_hot = zip(*sorted(kc_hot_doy_dict.items()))
    cal_dict['hot_tgt_kc'] = np.interp(image.acq_doy,
                                       *zip(*sorted(kc_hot_doy_dict.items())),
                                       left=0.1,
                                       right=0.1)

    # if etrf_doy_adj_path:
    #     doy_adj_df = pd.read_csv(etrf_doy_adj_path)
    #     doy_adj = float(
    #         doy_adj_df[doy_adj_df['DOY'] == image.acq_doy]['ETRF_ADJ'])
    #     cal_dict['cold_tgt_kc'] = cal_dict['cold_tgt_kc'] + doy_adj

    # Get hot/cold etrf fraction sizes
    if cold_tgt_pct is None or hot_tgt_pct is None:
        logging.info('ETrF Tail Size Percentages')
        logging.info('  Reading target tail size from file')
        cold_tgt_pct, hot_tgt_pct = auto_calibration.etrf_fractions(
            etrf_training_path)
        if cold_tgt_pct is None or hot_tgt_pct is None:
            logging.error('\nERROR: Tail sizes were not mannually set or '
                          'read from the the file\n')
            return False
    cal_dict['cold_tgt_pct'] = cold_tgt_pct
    cal_dict['hot_tgt_pct'] = hot_tgt_pct

    logging.info(pixel_str_fmt.format('', 'Cold Pixel', 'Hot Pixel'))
    logging.info(
        pixel_flt_fmt.format('Target kc:', cal_dict['cold_tgt_kc'],
                             cal_dict['hot_tgt_kc']))
    logging.info(
        pixel_pct_fmt.format('Tail Size:', cal_dict['cold_tgt_pct'],
                             cal_dict['hot_tgt_pct']))

    # # Create calibration database
    # # Set overwrite false to use existing database if it exists
    # cal_ws = os.path.join(image_ws, cal_folder)
    # if not os.path.isdir(cal_ws):
    #     os.mkdir(cal_ws)
    # cal_path = os.path.join(cal_ws, cal_name)
    # logging.info('{:<20s} {}\{}'.format(
    #     'Calibration DB:', cal_folder, cal_name))
    # calibration_database.create_calibration_database(
    #     image_ws, cal_path, overwrite_db_flag)
    # del cal_ws

    # Remove previous calibrations from database
    # logging.info(break_line)
    # calibration_database.remove_calibration_points(
    #     image_ws, cal_path, cal_initials, mc_iter)

    # Get ETrF and region mask (from pixel rating)
    # Assume they have identical extents
    try:
        region_mask = drigo.raster_to_array(region_path, return_nodata=False)
        region_mask = region_mask.astype(np.bool)
    except:
        logging.error(
            '\nERROR: Pixel regions mask does not exist or could not be read.\n'
            '  Please try re-running the METRIC Pixel Rating tool.')
        logging.debug('  {} '.format(region_path))
        return False

    # Remove previous plots
    logging.info(break_line)
    auto_calibration.remove_histograms(plots_ws, mc_iter)

    # Generate the NDVI histogram
    if save_ndvi_plots:
        logging.info(break_line)
        logging.info('NDVI Histograms')
        if os.path.isfile(image.ndvi_toa_raster):
            ndvi_array = drigo.raster_to_array(image.ndvi_toa_raster,
                                               return_nodata=False)
        else:
            logging.error(
                '\nERROR: NDVI raster does not exist. METRIC Model 1 may not '
                'have run successfully.')
            logging.debug('  {} '.format(image.ndvi_toa_raster))

        # Only process ag. ETrF pixels
        ndvi_array[~region_mask] = np.nan
        ndvi_sub_array = ndvi_array[region_mask]
        if np.any(ndvi_sub_array):
            auto_calibration.save_ndvi_histograms(ndvi_sub_array, plots_ws,
                                                  mc_iter)
        else:
            logging.error(
                '\nERROR: Empty NDVI array, histogram was not built\n')

    # Place points in suggested region allowing for a number of iterations
    #  dependent on whether or not Ts meets certain criteria
    logging.info(break_line)
    pixel_point_iters = 0
    while pixel_point_iters <= max_point_iter:
        if pixel_point_iters == max_point_iter:
            logging.error('\nERROR: Suitable hot and cold pixels could not be '
                          'determined. The scene will not calibrate.\n')
            return False
        cold_xy, hot_xy = pixel_points.pixel_points(
            image_ws,
            groupsize=groupsize,
            blocksize=blocksize,
            mc_iter=mc_iter,
            shapefile_flag=shapefile_flag,
            multipoint_flag=multipoint_flag,
            overwrite_flag=overwrite_flag,
            pixel_point_iters=pixel_point_iters)
        if any(x is None for x in cold_xy) or any(x is None for x in hot_xy):
            logging.error(('\nPixel points coordinates are invalid.  '
                           'The scene will not calibrate.'
                           '\n  Cold: {}\n  Hot: {}').format(cold_xy, hot_xy))
            return False
        cold_ts = drigo.raster_value_at_xy(image.ts_raster, cold_xy)
        hot_ts = drigo.raster_value_at_xy(image.ts_raster, hot_xy)
        if cold_ts > hot_ts:
            logging.info(
                '\nThe cold pixel is hotter than the hot pixel. Placing '
                'the points again.\n')
            logging.info(break_line)
            pixel_point_iters += 1
        elif abs(hot_ts - cold_ts) < ts_diff_threshold:
            logging.info((
                '\nThere is less than a {} degree difference in Ts hot and cold. '
                'Placing the points again.\n').format(ts_diff_threshold))
            logging.info(break_line)
            pixel_point_iters += 1
            # raise et_common.TemperatureError
        else:
            break

    # Adjust Kc hot for soil water balance
    logging.info(break_line)
    cal_dict = auto_calibration.hot_kc_swb_adjust(cal_dict, hot_xy,
                                                  env.snap_osr, image.acq_date,
                                                  awc_path, etr_ws, etr_re,
                                                  ppt_ws, ppt_re, spinup_days,
                                                  min_spinup_days)
    # Adjust Kc cold based on NDVI
    # cal_dict['tgt_c_kc'] = auto_calibration.kc_ndvi_adjust(
    #     cal_dict['tgt_c_kc'], cold_xy, ndvi_path, 'Cold')

    # Check that Kc hot (Ke) is not too high?
    if cal_dict['hot_tgt_kc'] >= 1.0:
        logging.error('\nERROR: Target Kc hot is too high for automated '
                      'calibration\n  ETrF will not be computed')
        return False
    elif (cal_dict['cold_tgt_kc'] - cal_dict['hot_tgt_kc']) <= 0.05:
        logging.error('\nERROR: Target Kc hot and Kc cold are too close for '
                      'automated calibration\n  ETrF will not be computed')
        return False

    # Initialize Kc values at targets
    cal_dict['kc_cold'] = cal_dict['cold_tgt_kc']
    cal_dict['kc_hot'] = cal_dict['hot_tgt_kc']

    # Iterate until max calibrations is reached or error is small
    cal_flag = False
    cal_iter = 1
    while not cal_flag:
        logging.info(break_line)
        logging.info('Calibration Iteration: {}'.format(cal_iter))

        # Run METRIC Model2 for initial ETrF map
        logging.info(break_line)
        metric_model2.metric_model2(image_ws,
                                    metric_ini_path,
                                    mc_iter=mc_iter,
                                    kc_cold=cal_dict['kc_cold'],
                                    kc_hot=cal_dict['kc_hot'],
                                    cold_xy=cold_xy,
                                    hot_xy=hot_xy,
                                    overwrite_flag=overwrite_flag)

        # Read in ETrF array
        if os.path.isfile(etrf_path):
            etrf_array = drigo.raster_to_array(etrf_path, return_nodata=False)
        else:
            logging.warning(
                ('WARNING: ETrF raster does not exist. METRIC Model 2 '
                 'may not have run successfully.\n {}').format(etrf_path))
            break
        etrf_geo = drigo.raster_path_geo(etrf_path)

        # Only process ag. ETrF pixels
        etrf_array[~region_mask] = np.nan
        etrf_sub_array = etrf_array[np.isfinite(etrf_array)]
        if not np.any(etrf_sub_array):
            logging.error(
                '\nERROR: Empty ETrF array, scene cannot be calibrated\n')
            break

        # Calculate calibration parameters
        logging.debug(break_line)
        cal_dict = auto_calibration.calibration_params(cal_dict,
                                                       etrf_sub_array)

        # Plot intermediates calibration histograms
        if save_etrf_temp_plots:
            logging.debug(break_line)
            auto_calibration.save_etrf_histograms(etrf_sub_array, plots_ws,
                                                  cal_dict, mc_iter, cal_iter)

        # Check calibration
        logging.debug(break_line)
        cal_flag = auto_calibration.check_calibration(cal_dict)

        # Don't re-calibrate if initial calibration was suitable
        if cal_flag:
            break
        # Limit calibration attempts
        # cal_iter index is 1 based for Monte Carlo
        # cal_iter is 0 for stand alone mode
        elif cal_iter >= max_cal_iter:
            logging.info(break_line)
            logging.info(
                ('All {} iteration attempts were made, '
                 'the scene will not calibrate.').format(max_cal_iter))
            if os.path.isfile(etrf_path):
                os.remove(etrf_path)
            return False
            # break

        # Adjust Kc value of calibration points (instead of moving them)
        cal_dict = auto_calibration.kc_calibration_adjust(
            cal_dict, etrf_sub_array)

        # # Select new calibration points based on ETrF distribution
        # logging.info(break_line)
        # cold_xy, hot_xy = auto_calibration.build_pixel_points(
        #     etrf_array, etrf_geo, cal_dict,
        #     shapefile_flag=shapefile_flag, pixels_ws=pixels_ws)

        del etrf_array, etrf_geo

        # Increment calibration iteration counter
        cal_iter += 1

    # Only save 'final' results if the scene was calibrated
    if cal_flag and save_etrf_final_plots:
        # Plot final ETrF distribution
        # logging.info(break_line)
        auto_calibration.save_etrf_histograms(etrf_sub_array, plots_ws,
                                              cal_dict, mc_iter, None)

        # Save final calibration points to database
        # logging.info(break_line)
        # calibration_database.save_calibration_points(
        #     image_ws, cal_path, cal_dict, mc_iter, 0)

    return True
Пример #9
0
def main(ini_path,
         tile_list=None,
         blocksize=None,
         stats_flag=True,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         new_window_flag=False):
    """Run METRIC Model 2 for all images

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    blocksize : int, optional
        Processing block size (the default is None).  If set, this blocksize
        parameter will be used instead of the value in the INI file.
    stats_flag : bool, optional
        If True, compute raster statistics (the default is True).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cores to use (the default is 1).
    delay : float, optional
        max random delay starting function in seconds (the default is 0).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    new_window_flag : bool, optional
        If True, open each process in new terminal window (the default is False).
        Microsoft Windows only.

    Returns
    -------
    None

    """
    logging.info('\nRunning METRIC model 2 for all images')

    # Open config file
    config = dripy.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = dripy.read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'metric_model2_func')
    keep_list_path = dripy.read_param('keep_list_path', '', config, 'INPUTS')
    # skip_list_path = dripy.read_param('skip_list_path', '', config, 'INPUTS')

    # For now build INI file name from template INI names
    ini_name = os.path.basename(config.get('INPUTS', 'metric_ini'))
    ini_name = os.path.splitext(os.path.basename(ini_name))[0]

    # INI file is built as a function of year and tile_name
    ini_fmt = '{}_{}_{}.ini'

    # Calculate pixel points before running Model 2
    # pixel_points_flag = True

    # Only allow new terminal windows on Windows
    # Use equality, not identity ("is not"), when comparing string values
    if os.name != 'nt':
        new_window_flag = False

    # Regular expressions (raw strings so backslash escapes reach the regex)
    # For now assume path/row are two digit numbers
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_id_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(?:\w{4})_(\d{3})(\d{3})_'
        r'(\d{4})(\d{2})(\d{2})_(?:\d{8})_(?:\d{2})_(?:\w{2})$')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    # Read keep/skip lists
    if keep_list_path:
        logging.debug('\nReading scene keep list')
        with open(keep_list_path) as keep_list_f:
            image_keep_list = keep_list_f.readlines()
            image_keep_list = [
                image_id.strip() for image_id in image_keep_list
                if image_id_re.match(image_id.strip())
            ]
    else:
        logging.debug('\nScene keep list not set in INI')
        image_keep_list = []

    # # DEADBEEF
    # if skip_list_path:
    #     logging.debug('\nReading scene skip list')
    #     with open(skip_list_path) as skip_list_f:
    #         image_skip_list = skip_list_f.readlines()
    #         image_skip_list = [image_id.strip() for image_id in image_skip_list
    #                            if image_id_re.match(image_id.strip())]
    # else:
    #     logging.debug('\nScene skip list not set in INI')
    #     image_skip_list = []

    mp_list = []
    for tile_name in sorted(tile_list):
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            logging.debug('  {} {} - invalid tile, skipping'.format(
                year, tile_name))
            continue

        # Check that there are image folders
        # An empty keep list means "keep everything"; previously an empty
        #   list filtered out all images because of the "and" short-circuit
        image_id_list = [
            image_id for image_id in sorted(os.listdir(tile_ws))
            if (image_id_re.match(image_id)
                and os.path.isdir(os.path.join(tile_ws, image_id))
                and (not image_keep_list or image_id in image_keep_list))
        ]
        #     (image_skip_list and image_id not in image_skip_list))]
        if not image_id_list:
            logging.debug('  {} {} - no available images, skipping'.format(
                year, tile_name))
            continue
        else:
            logging.debug('  {} {}'.format(year, tile_name))

        # Check that there is an input file for the path/row
        # Use a separate name to avoid clobbering the ini_path parameter
        tile_ini_path = os.path.join(
            tile_ws, ini_fmt.format(ini_name, year, tile_name))
        # os.path.join() is always truthy; test file existence instead
        if not os.path.isfile(tile_ini_path):
            logging.warning('    METRIC input file {} does not exist, '
                            'skipping'.format(tile_ini_path))
            continue

        # Setup command line argument
        call_args = [sys.executable, func_path, '-i', tile_ini_path]
        if blocksize is not None:
            call_args.extend(['--blocksize', str(blocksize)])
        if stats_flag:
            call_args.append('--stats')
        if overwrite_flag:
            call_args.append('--overwrite')
        if debug_flag:
            call_args.append('--debug')

        # Run METRIC Model 2 for each Monte Carlo iteration
        for image_id in image_id_list:
            logging.debug('  {}'.format(image_id))
            scene_ws = os.path.join(tile_ws, image_id)
            if mp_procs > 1:
                mp_list.append([call_args, scene_ws, delay, new_window_flag])
            else:
                subprocess.call(call_args, cwd=scene_ws)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(dripy.call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool

    logging.debug('\nScript complete')
Пример #10
0
def main(ini_path,
         tile_list=None,
         groupsize=1,
         blocksize=2046,
         shapefile_flag=True,
         geojson_flag=False,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         new_window_flag=False):
    """Run pixel points for all images

    Args:
        ini_path (str): file path of the input parameters file
        tile_list (list): list of Landsat path/row (i.e. [p45r43, p45r33])
            This will override the tile list in the INI file
        groupsize (int):
            Script will try to place calibration point randomly into a
            labeled group of clustered values with at least n pixels.
            -1 = In the largest group
             0 = Anywhere in the image (not currently implemented)
             1 >= In any group with a pixel count greater or equal to n
        blocksize (int): processing block size
        shapefile_flag (bool): if True, save calibration points to shapefile
            (default False)
        geojson_flag (bool): if True, save calibration points to GeoJSON
        overwrite_flag (bool): if True, overwrite existing files
        mp_procs (int): number of cores to use
        delay (float): max random delay starting function in seconds
        debug_flag (bool): if True, enable debug level logging
        new_window_flag (bool): if True, open each process in a new terminal.
            Microsoft Windows only.

    Returns:
        None
    """
    logging.info('\nRunning METRIC Pixel Points for all images')
    log_fmt = '  {:<18s} {}'

    # Open config file
    config = open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'pixel_points_func')

    # DEADBEEF - seems like this is passed in at the command line
    # groupsize = config.getint('INPUTS', 'groupsize')

    # Only allow new terminal windows on Windows
    # Use equality, not identity ("is not"), when comparing string values
    if os.name != 'nt':
        new_window_flag = False

    # Regular expressions (raw strings so backslash escapes reach the regex)
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    # Setup command line argument
    call_args = [sys.executable, func_path]
    call_args.extend(['--groupsize', str(groupsize)])
    if blocksize:
        call_args.extend(['--blocksize', str(blocksize)])
    if shapefile_flag:
        call_args.append('--shapefile')
    if geojson_flag:
        call_args.append('--geojson')
    if overwrite_flag:
        call_args.append('--overwrite')
    if debug_flag:
        call_args.append('--debug')

    mp_list = []
    for tile_name in sorted(tile_list):
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            continue

        # Check that there are scene folders
        # A scene must be a folder AND match the Landsat ID pattern;
        #   the previous "or" let stray files/folders through to subprocess
        scene_id_list = [
            scene_id for scene_id in sorted(os.listdir(tile_ws))
            if (os.path.isdir(os.path.join(tile_ws, scene_id))
                and image_re.match(scene_id))
        ]
        if not scene_id_list:
            continue
        logging.debug('  {} {}'.format(year, tile_name))

        # Run METRIC Pixel Points
        for scene_id in scene_id_list:
            logging.debug('  {}'.format(scene_id))
            scene_ws = os.path.join(tile_ws, scene_id)
            pixel_ws = os.path.join(scene_ws, 'PIXELS')
            # Since the GeoJSON will be appended, delete it in the wrapper
            #  script if the overwrite_flag=True
            if geojson_flag and os.path.isdir(pixel_ws):
                for pixel_file in os.listdir(pixel_ws):
                    # Escape the dot so only ".geojson" extensions match
                    if re.match(r'\w+\.geojson$', pixel_file):
                        os.remove(os.path.join(pixel_ws, pixel_file))
            if mp_procs > 1:
                mp_list.append([call_args, scene_ws, delay, new_window_flag])
            else:
                subprocess.call(call_args, cwd=scene_ws)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool
Пример #11
0
def main(ini_path,
         rasters_flag=None,
         tables_flag=None,
         mc_iter_str='',
         tile_list=None,
         pyramids_flag=True,
         stats_flag=True,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         no_file_logging=False):
    """Run interpolater for all Landsat scenes

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    rasters_flag : bool, optional
        If True, override INI and interpolate rasters.
    tables_flag : bool, optional
        If True, override INI and interpolate zone tables.
    mc_iter_str : str, optional
        MonteCarlo iteration list and/or range.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    pyramids_flag : bool, optional
        If True, compute raster pyramids (the default is True).
    stats_flag : bool, optional
        If True, compute raster statistics (the default is True).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cpu cores to use (the default is 1).
    delay : float, optional
        Max random delay starting function in seconds (the default is 0).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    no_file_logging : bool, optional
        If True, don't write logging to file (the default is False).

    Returns
    -------
    None

    """
    logging.info('\nRunning Interpolator')

    # Open config file
    config = open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    interpolate_folder = config.get('INPUTS', 'interpolate_folder')
    logging.debug('  Folder: {}'.format(interpolate_folder))

    # If both flags were not set, read from INI
    if rasters_flag is None and tables_flag is None:
        logging.info('  Reading interpolator flags from INI file')
        if rasters_flag is None:
            rasters_flag = read_param('interpolate_rasters_flag', True, config,
                                      'INPUTS')
        if tables_flag is None:
            tables_flag = read_param('interpolate_tables_flag', True, config,
                                     'INPUTS')
    # If both flags were set false, for now, exit the script
    # It may make more sense to assume the user wants to interpolate something
    elif rasters_flag is False and tables_flag is False:
        logging.error('Raster and table interpolator flags are both False\n')
        logging.error('  Exiting the script')
        return False
        # sys.exit()
        # logging.info('Raster and table interpolator flags are both False\n')
        # logging.info('    Defaulting to rasters_flag=True')
        # rasters_flag = True

    if rasters_flag:
        rasters_func_path = config.get('INPUTS', 'interpolate_rasters_func')
    if tables_flag:
        tables_func_path = config.get('INPUTS', 'interpolate_tables_func')

    # For now, get mc_iter list from command line, not from project file
    # mc_iter_list = config.get('INPUTS', 'mc_iter_list')
    mc_iter_list = list(parse_int_set(mc_iter_str))

    # Need something in mc_iter_list to iterate over
    if not mc_iter_list:
        mc_iter_list = [None]

    # For now build INI file name from template INI names
    ini_name = os.path.basename(config.get('INPUTS', 'interpolate_ini'))
    ini_name = os.path.splitext(os.path.basename(ini_name))[0]

    # INI file is built as a function of year
    ini_fmt = '{}_{}_{}.ini'

    # Regular expressions
    # For now assume path/row are two digit numbers
    # tile_re = re.compile('p(\d{3})r(\d{3})', re.IGNORECASE)
    # image_re = re.compile(
    #     '^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    # Check that there is an input file for the year and folder
    year_ws = os.path.join(project_ws, str(year))
    ini_path = os.path.join(
        year_ws, ini_fmt.format(ini_name, str(year),
                                interpolate_folder.lower()))
    # os.path.join() is always truthy; test file existence instead
    if not os.path.isfile(ini_path):
        logging.warning('    Input file does not exist\n  {}'.format(ini_path))
        return False

    # Run Interpolater for each Monte Carlo iteration
    # mp_list = []
    for mc_iter in sorted(mc_iter_list):
        logging.debug('  Year: {} Iteration: {}'.format(str(year), mc_iter))
        rasters_args = []
        tables_args = []
        if rasters_flag:
            # Use the running interpreter (consistent with the other runners)
            rasters_args = [
                sys.executable, rasters_func_path, year_ws, '-i', ini_path
            ]
        if tables_flag:
            tables_args = [
                sys.executable, tables_func_path, year_ws, '-i', ini_path
            ]
        if mc_iter is not None:
            rasters_args.extend(['-mc', str(mc_iter)])
            tables_args.extend(['-mc', str(mc_iter)])
        if pyramids_flag:
            rasters_args.append('--pyramids')
        if stats_flag:
            rasters_args.append('--stats')
        if overwrite_flag:
            rasters_args.append('--overwrite')
            tables_args.append('--overwrite')
        if debug_flag:
            rasters_args.append('--debug')
            tables_args.append('--debug')
        if delay > 0:
            rasters_args.extend(['--delay', str(delay)])
            tables_args.extend(['--delay', str(delay)])
        if no_file_logging:
            rasters_args.append('--no_file_logging')
            tables_args.append('--no_file_logging')
        if mp_procs > 1:
            rasters_args.extend(['-mp', str(mp_procs)])
            tables_args.extend(['-mp', str(mp_procs)])
        if rasters_flag:
            subprocess.call(rasters_args, cwd=year_ws)
        if tables_flag:
            subprocess.call(tables_args, cwd=year_ws)

    logging.debug('\nScript complete')
Пример #12
0
def main(ini_path,
         tile_list=None,
         groupsize=1,
         blocksize=2048,
         shapefile_flag=True,
         geojson_flag=False,
         overwrite_flag=False,
         mp_procs=1,
         delay=0,
         debug_flag=False,
         new_window_flag=False):
    """Run pixel points for all images

    Parameters
    ----------
    ini_path : str
        File path of the input parameters file.
    tile_list : list, optional
        Landsat path/rows to process (i.e. [p045r043, p045r033]).
        This will override the tile list in the INI file.
    groupsize : int, optional
        Script will try to place calibration point randomly into a labeled
        group of clustered values with at least n pixels (the default is 64).
        -1 = In the largest group
         0 = Anywhere in the image (not currently implemented)
         1 >= In any group with a pixel count greater or equal to n
    blocksize : int, optional
        Processing block size (the default is 2048).
    shapefile_flag : bool, optional
        If True, save calibration points to shapefile (the default False).
    geojson_flag : bool, optional
        If True, save calibration points to GeoJSON (the default is False).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    mp_procs : int, optional
        Number of cores to use (the default is 1).
    delay : float, optional
        Max random delay starting function in seconds (the default is 0).
    debug_flag : bool, optional
        If True, enable debug level logging (the default is False).
    new_window_flag : bool, optional
        If True, open each process in new terminal window (the default is False).
        Microsoft Windows only.

    Returns
    -------
    None

    """
    logging.info('\nRunning METRIC Pixel Points for all images')
    log_fmt = '  {:<18s} {}'

    # Open config file
    config = dripy.open_ini(ini_path)

    # Get input parameters
    logging.debug('  Reading Input File')
    year = config.getint('INPUTS', 'year')
    if tile_list is None:
        tile_list = dripy.read_param('tile_list', [], config, 'INPUTS')
    project_ws = config.get('INPUTS', 'project_folder')
    logging.debug('  Year: {}'.format(year))
    logging.debug('  Path/rows: {}'.format(', '.join(tile_list)))
    logging.debug('  Project: {}'.format(project_ws))

    func_path = config.get('INPUTS', 'pixel_points_func')
    keep_list_path = dripy.read_param('keep_list_path', '', config, 'INPUTS')
    # skip_list_path = dripy.read_param('skip_list_path', '', config, 'INPUTS')

    # DEADBEEF - seems like this is passed in at the command line
    # groupsize = config.getint('INPUTS', 'groupsize')

    # Only allow new terminal windows on Windows
    # Use equality, not identity ("is not"), when comparing string values
    if os.name != 'nt':
        new_window_flag = False

    # Regular expressions (raw strings so backslash escapes reach the regex)
    tile_re = re.compile(r'p\d{3}r\d{3}', re.IGNORECASE)
    image_id_re = re.compile(
        r'^(LT04|LT05|LE07|LC08)_(?:\w{4})_(\d{3})(\d{3})_'
        r'(\d{4})(\d{2})(\d{2})_(?:\d{8})_(?:\d{2})_(?:\w{2})$')

    # Check inputs folders/paths
    if not os.path.isdir(project_ws):
        logging.error('\n Folder {} does not exist'.format(project_ws))
        sys.exit()

    # Setup command line argument
    call_args = [sys.executable, func_path]
    call_args.extend(['--groupsize', str(groupsize)])
    if blocksize:
        call_args.extend(['--blocksize', str(blocksize)])
    if shapefile_flag:
        call_args.append('--shapefile')
    if geojson_flag:
        call_args.append('--geojson')
    if overwrite_flag:
        call_args.append('--overwrite')
    if debug_flag:
        call_args.append('--debug')

    # Read keep/skip lists
    if keep_list_path:
        logging.debug('\nReading scene keep list')
        with open(keep_list_path) as keep_list_f:
            image_keep_list = keep_list_f.readlines()
            image_keep_list = [
                image_id.strip() for image_id in image_keep_list
                if image_id_re.match(image_id.strip())
            ]
    else:
        logging.debug('\nScene keep list not set in INI')
        image_keep_list = []
    # if skip_list_path:
    #     logging.debug('\nReading scene skip list')
    #     with open(skip_list_path) as skip_list_f:
    #         image_skip_list = skip_list_f.readlines()
    #         image_skip_list = [image_id.strip() for image_id in image_skip_list
    #                            if image_id_re.match(image_id.strip())]
    # else:
    #     logging.debug('\nScene skip list not set in INI')
    #     image_skip_list = []

    mp_list = []
    for tile_name in sorted(tile_list):
        tile_ws = os.path.join(project_ws, str(year), tile_name)
        if not os.path.isdir(tile_ws) and not tile_re.match(tile_name):
            logging.debug('  {} {} - invalid tile, skipping'.format(
                year, tile_name))
            continue

        # Check that there are image folders
        # An empty keep list means "keep everything"; previously an empty
        #   list filtered out all images because of the "and" short-circuit
        image_id_list = [
            image_id for image_id in sorted(os.listdir(tile_ws))
            if (image_id_re.match(image_id)
                and os.path.isdir(os.path.join(tile_ws, image_id))
                and (not image_keep_list or image_id in image_keep_list))
        ]
        #     (image_skip_list and image_id not in image_skip_list))]
        if not image_id_list:
            logging.debug('  {} {} - no available images, skipping'.format(
                year, tile_name))
            continue
        else:
            logging.debug('  {} {}'.format(year, tile_name))

        # Run METRIC Pixel Points
        for image_id in image_id_list:
            logging.debug('  {}'.format(image_id))
            image_ws = os.path.join(tile_ws, image_id)
            pixel_ws = os.path.join(image_ws, 'PIXELS')
            # Since the GeoJSON will be appended, delete it in the wrapper
            #  script if the overwrite_flag=True
            if geojson_flag and os.path.isdir(pixel_ws):
                for pixel_file in os.listdir(pixel_ws):
                    # Escape the dot so only ".geojson" extensions match
                    if re.match(r'\w+\.geojson$', pixel_file):
                        os.remove(os.path.join(pixel_ws, pixel_file))
            if mp_procs > 1:
                mp_list.append([call_args, image_ws, delay, new_window_flag])
            else:
                subprocess.call(call_args, cwd=image_ws)

    if mp_list:
        pool = mp.Pool(mp_procs)
        results = pool.map(dripy.call_mp, mp_list, chunksize=1)
        pool.close()
        pool.join()
        del results, pool