Example #1
def test_compute_dem_intersection_with_poly():

    # test 100% coverage
    inter_poly, inter_epsg = utils.read_vector(
        absolute_data_path("input/utils_input/envelopes_intersection.gpkg"))

    dem_inter_poly, cover = projection.compute_dem_intersection_with_poly(
        absolute_data_path("input/phr_ventoux/srtm"), inter_poly, inter_epsg)
    assert dem_inter_poly == inter_poly
    assert cover == 100.0

    # test partial coverage with several srtm tiles that contain no-data holes
    inter_poly = Polygon([(4.8, 44.2), (4.8, 44.3), (6.2, 44.3), (6.2, 44.2),
                          (4.8, 44.2)])
    dem_inter_poly, cover = projection.compute_dem_intersection_with_poly(
        absolute_data_path("input/utils_input/srtm_with_hole"), inter_poly,
        inter_epsg)

    ref_dem_inter_poly = Polygon([(4.999583333333334, 44.2),
                                  (4.999583333333334, 44.3), (6.2, 44.3),
                                  (6.2, 44.2), (4.999583333333334, 44.2)])

    assert dem_inter_poly.exterior == ref_dem_inter_poly.exterior
    assert len(list(dem_inter_poly.interiors)) == 6
    assert cover == 85.72172619047616

    # test no coverage
    inter_poly = Polygon([(1.5, 2.0), (1.5, 2.1), (1.8, 2.1), (1.8, 2.0),
                          (1.5, 2.0)])

    with pytest.raises(Exception) as e:
        dem_inter_poly, cover = projection.compute_dem_intersection_with_poly(
            absolute_data_path("input/phr_ventoux/srtm"), inter_poly,
            inter_epsg)
    assert str(e.value) == 'The input DEM does not intersect the useful zone'
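
The coverage percentage asserted above boils down to a Shapely area ratio. A minimal sketch of that part of the computation (illustrative only; the real projection.compute_dem_intersection_with_poly also reads the DEM tiles and accounts for no-data holes):

from shapely.geometry import Polygon

def coverage_percent(dem_footprint: Polygon, useful_zone: Polygon) -> float:
    # Share of the useful zone covered by the DEM footprint, in percent
    inter = dem_footprint.intersection(useful_zone)
    return 100.0 * inter.area / useful_zone.area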
Example #2
File: test_utils.py Project: cuulee/cars
def test_fix_shapely():
    """
    Test read_vector fix shapely with poly.gpkg example
    """
    poly, _ = utils.read_vector(
        absolute_data_path("input/utils_input/poly.gpkg"))
    assert poly.is_valid is False
    poly = poly.buffer(0)
    assert poly.is_valid is True
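
The buffer(0) call is a standard Shapely idiom: buffering by a zero distance rebuilds the geometry and resolves self-intersections. A self-contained illustration with a made-up bowtie polygon (not the contents of poly.gpkg):

from shapely.geometry import Polygon

# A "bowtie" whose two diagonal edges cross each other, hence invalid
bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2), (0, 0)])
assert bowtie.is_valid is False

fixed = bowtie.buffer(0)  # rebuilt as a valid (multi)polygon
assert fixed.is_valid is True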
Example #3
def test_read_vector():

    path_to_shapefile = absolute_data_path(
        "input/utils_input/left_envelope.shp")

    poly, epsg = utils.read_vector(path_to_shapefile)

    assert epsg == 4326
    assert isinstance(poly, Polygon)
    assert list(poly.exterior.coords) == [
        (5.193406138843349, 44.20805805252155),
        (5.1965650939582435, 44.20809526197842),
        (5.196654349708835, 44.205901416036546),
        (5.193485218293437, 44.205842790578764),
        (5.193406138843349, 44.20805805252155)
    ]

    # test exception
    with pytest.raises(Exception) as e:
        utils.read_vector('test.shp')
    assert str(e.value) == 'Impossible to read test.shp shapefile'
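
For context, utils.read_vector returns the first polygon of the file together with its EPSG code and raises when the file cannot be read. A rough functional equivalent using geopandas (a sketch under that assumption, not the CARS implementation):

import geopandas as gpd

def read_vector_sketch(path):
    # Return (first polygon, EPSG code) of a vector file, or raise
    try:
        gdf = gpd.read_file(path)
    except Exception:
        raise Exception('Impossible to read {} shapefile'.format(path))
    return gdf.geometry.iloc[0], gdf.crs.to_epsg()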
Example #4
File: cars_cli.py Project: cuulee/cars
def parse_roi_file(arg_roi_file: str, stop_now: bool) -> Tuple[List[float], int]:
    """
    Parse ROI file argument and generate bounding box

    :param arg_roi_file: ROI file argument
    :param stop_now: Argument check
    :return: ROI bounding box + EPSG code ((xmin, ymin, xmax, ymax), epsg_code), and the updated stop_now flag
    :rtype: Tuple
    """
    # TODO : refactor in order to avoid a slow argparse
    # Don't move the local function imports for now

    import logging
    import rasterio
    from cars import utils


    # Declare output
    roi = None

    _, extension = os.path.splitext(arg_roi_file)

    # test file existence
    if not os.path.exists(arg_roi_file):
        logging.warning('{} does not exist'.format(arg_roi_file))
        stop_now = True
    else:
        # if it is a vector file
        if extension in ['.gpkg', '.shp', '.kml']:
            try:
                roi_poly, roi_epsg = utils.read_vector(arg_roi_file)
                roi = (roi_poly.bounds, roi_epsg)
            except BaseException:
                logging.critical(
                    'Impossible to read {} file'.format(arg_roi_file))
                stop_now = True

        # if not, it is an image
        elif utils.rasterio_can_open(arg_roi_file):
            data = rasterio.open(arg_roi_file)
            xmin = min(data.bounds.left, data.bounds.right)
            ymin = min(data.bounds.bottom, data.bounds.top)
            xmax = max(data.bounds.left, data.bounds.right)
            ymax = max(data.bounds.bottom, data.bounds.top)

            try:
                roi_epsg = data.crs.to_epsg()
                roi = ([xmin, ymin, xmax, ymax], roi_epsg)
            except AttributeError as error:
                logging.critical(
                    'Impossible to read the ROI '
                    'image epsg code: {}'.format(error))
                stop_now = True

        else:
            logging.critical(
                '{} has an unsupported file format'.format(arg_roi_file))
            stop_now = True

    return roi, stop_now
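
A possible call site (hypothetical file name), showing how the stop_now flag is threaded through the argument checks:

roi, stop_now = parse_roi_file('my_roi.gpkg', stop_now=False)
if not stop_now:
    bounds, epsg_code = roi  # e.g. ((xmin, ymin, xmax, ymax), 4326)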
Example #5
def run(in_json: params.input_configuration_type,
        out_dir: str,
        epi_step: int = 30,
        region_size: int = 500,
        disparity_margin: float = 0.02,
        epipolar_error_upper_bound: float = 10.,
        epipolar_error_maximum_bias: float = 0.,
        elevation_delta_lower_bound: float = -1000.,
        elevation_delta_upper_bound: float = 1000.,
        mode: str = "local_dask",
        nb_workers: int = 4,
        walltime: str = "00:59:00",
        check_inputs: bool = False):
    """
    Main function of the prepare subcommand

    This function will perform the following steps:

    1. Compute stereo-rectification grids for the input pair
    2. Compute all possible sift matches in epipolar geometry
    3. Derive an optimal disparity range to explore from the matches
    4. Derive a bilinear correction model of the stereo-rectification grid for right image in order to minimize epipolar error
    5. Apply correction to right grid
    6. Export left and corrected right grid

    :param in_json:  dictionary describing input data (see README.md for format)
    :param out_dir: Directory where all outputs will be written, including a content.json file describing its content
    :param epi_step: Step of the epipolar grid to compute (in pixels in epipolar geometry)
    :param region_size: Size of regions used for sift matching
    :param disparity_margin: Percent of the disparity range width to add at each end as security margin
    :param epipolar_error_upper_bound: Upper bound of expected epipolar error (in pixels)
    :param epipolar_error_maximum_bias: Maximum bias for epipolar error (in pixels)
    :param elevation_delta_lower_bound: Lower bound for elevation delta with respect to initial MNT (in meters)
    :param elevation_delta_upper_bound: Upper bound for elevation delta with respect to initial MNT (in meters)
    :param mode: Parallelization mode
    :param nb_workers: Number of dask workers to use for the sift matching step
    :param walltime: Walltime of the dask workers
    :param check_inputs: activation of the inputs consistency checking
    """
    out_dir = os.path.abspath(out_dir)
    # Ensure that outdir exists
    try:
        os.makedirs(out_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(out_dir):
            pass
        else:
            raise

    utils.add_log_file(out_dir, 'prepare')

    if not check_inputs:
        logging.warning(
            'The inputs consistency will not be checked. To enable the inputs checking, add \'--check_'
            'inputs\' to your command line')

    # Check configuration dict
    config = utils.check_json(in_json, params.input_configuration_schema)

    # Retrieve static parameters (sift and low res dsm)
    static_params = static_cfg.get_cfg()

    # Initialize output json dict
    out_json = {
        params.input_section_tag: config,
        params.preprocessing_section_tag: {
            params.preprocessing_version_tag: utils.get_version(),
            params.preprocessing_parameters_section_tag: {
                params.epi_step_tag: epi_step,
                params.disparity_margin_tag: disparity_margin,
                params.epipolar_error_upper_bound_tag:
                epipolar_error_upper_bound,
                params.epipolar_error_maximum_bias_tag:
                epipolar_error_maximum_bias,
                params.elevation_delta_lower_bound_tag:
                elevation_delta_lower_bound,
                params.elevation_delta_upper_bound_tag:
                elevation_delta_upper_bound
            },
            params.static_params_tag: static_params[static_cfg.prepare_tag],
            params.preprocessing_output_section_tag: {}
        }
    }

    # Read input parameters
    img1 = config[params.img1_tag]
    img2 = config[params.img2_tag]
    srtm_dir = config[params.srtm_dir_tag]

    nodata1 = config.get(params.nodata1_tag, None)
    nodata2 = config.get(params.nodata2_tag, None)
    mask1 = config.get(params.mask1_tag, None)
    mask2 = config.get(params.mask2_tag, None)
    color1 = config.get(params.color1_tag, None)

    if check_inputs:
        logging.info('Checking inputs consistency')
        if utils.rasterio_get_nb_bands(
                img1) != 1 or utils.rasterio_get_nb_bands(img2) != 1:
            raise Exception('{} and {} are not mono-band images'.format(
                img1, img2))
        if mask1 is not None:
            if utils.rasterio_get_size(img1) != utils.rasterio_get_size(mask1):
                raise Exception(
                    'The image {} and the mask {} do not have the same size'.
                    format(img1, mask1))
        if mask2 is not None:
            if utils.rasterio_get_size(img2) != utils.rasterio_get_size(mask2):
                raise Exception(
                    'The image {} and the mask {} do not have the same size'.
                    format(img2, mask2))

        if not utils.otb_can_open(img1):
            raise Exception(
                'Problem while opening image {} with the otb'.format(img1))
        if not utils.otb_can_open(img2):
            raise Exception(
                'Problem while opening image {} with the otb'.format(img2))

        with rio.open(img1) as im:
            trans = im.transform
            if trans.e < 0:
                logging.warning(
                    '{} seems to have an incoherent pixel size. '
                    'Input images have to be in sensor geometry.'.format(img1))

        with rio.open(img2) as im:
            trans = im.transform
            if trans.e < 0:
                logging.warning(
                    '{} seems to have an incoherent pixel size. '
                    'Input images have to be in sensor geometry.'.format(img2))

    # Check that the envelopes intersect one another
    logging.info("Computing images envelopes and their intersection")
    shp1 = os.path.join(out_dir, "left_envelope.shp")
    shp2 = os.path.join(out_dir, "right_envelope.shp")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.left_envelope_tag] = shp1
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.right_envelope_tag] = shp2
    preprocessing.image_envelope(img1, shp1, srtm_dir)
    preprocessing.image_envelope(img2, shp2, srtm_dir)

    poly1, epsg1 = utils.read_vector(shp1)
    poly2, epsg2 = utils.read_vector(shp2)

    inter_poly, (inter_xmin, inter_ymin, inter_xmax, inter_ymax) = \
        tiling.ground_polygon_from_envelopes(poly1, poly2, epsg1, epsg2, epsg1)

    out_envelopes_intersection = os.path.join(out_dir,
                                              'envelopes_intersection.gpkg')
    utils.write_vector([inter_poly], out_envelopes_intersection, epsg1)

    conf_out_dict = out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag]
    conf_out_dict[
        params.envelopes_intersection_tag] = out_envelopes_intersection
    conf_out_dict[params.envelopes_intersection_bb_tag] = [
        inter_xmin, inter_ymin, inter_xmax, inter_ymax
    ]

    if check_inputs:
        logging.info('Checking DEM coverage')
        dem_useful_polygon, dem_coverage = projection.compute_dem_intersection_with_poly(
            srtm_dir, inter_poly, epsg1)

        if dem_coverage < 100.0:
            logging.warning(
                'The input DEM covers {}% of the useful zone'.format(
                    int(dem_coverage)))

    # Generate rectification grids
    logging.info("Generating epipolar rectification grid ...")
    grid1, grid2, epipolar_size_x, epipolar_size_y, alt_to_disp_ratio, stereogrid_pipeline = pipelines.build_stereorectification_grid_pipeline(
        img1, img2, srtm_dir, epi_step)
    # we want disp_to_alt_ratio = resolution/(B/H), in m.pixel^-1
    disp_to_alt_ratio = 1 / alt_to_disp_ratio

    # Export grids to numpy
    left_grid_as_array = np.copy(
        stereogrid_pipeline["stereo_app"].GetVectorImageAsNumpyArray(
            "io.outleft"))
    right_grid_as_array = np.copy(
        stereogrid_pipeline["stereo_app"].GetVectorImageAsNumpyArray(
            "io.outright"))
    grid_origin = stereogrid_pipeline["stereo_app"].GetImageOrigin(
        "io.outleft")
    grid_spacing = stereogrid_pipeline["stereo_app"].GetImageSpacing(
        "io.outleft")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_size_x_tag] = epipolar_size_x
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_size_y_tag] = epipolar_size_y
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_origin_x_tag] = grid_origin[0]
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_origin_y_tag] = grid_origin[1]
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_spacing_x_tag] = grid_spacing[0]
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.epipolar_spacing_y_tag] = grid_spacing[1]
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.disp_to_alt_ratio_tag] = disp_to_alt_ratio

    logging.info("Size of epipolar images: {}x{} pixels".format(
        epipolar_size_x, epipolar_size_y))
    logging.info(
        "Disparity to altitude factor: {} m/pixel".format(disp_to_alt_ratio))

    logging.info("Sparse matching ...")
    nb_threads = int(os.environ.get('OMP_NUM_THREADS', '1'))

    # Compute the full range needed for sparse matching
    disp_lower_bound = elevation_delta_lower_bound / disp_to_alt_ratio
    disp_upper_bound = elevation_delta_upper_bound / disp_to_alt_ratio

    disparity_range_width = disp_upper_bound - disp_lower_bound
    logging.info(
        "Full disparity range width for sparse matching: {} pixels".format(
            disparity_range_width))
    disparity_range_center = (elevation_delta_upper_bound +
                              elevation_delta_lower_bound) / (
                                  2 * disp_to_alt_ratio)

    # Compute the number of offsets to consider in order to explore the full range
    nb_splits = 1 + int(math.floor(float(disparity_range_width) / region_size))
    actual_region_size = int(
        math.ceil((region_size + disparity_range_width) / nb_splits))
    actual_range = nb_splits * actual_region_size
    actual_range_start = disparity_range_center - actual_range / 2 + region_size / 2
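    # Illustrative numbers (assumed, not computed above): with elevation
    # bounds of +/-1000 m and disp_to_alt_ratio = 2 m/pixel, the disparity
    # range is [-500, 500] pix (width 1000, center 0). With region_size = 500:
    # nb_splits = 1 + floor(1000/500) = 3,
    # actual_region_size = ceil((500 + 1000)/3) = 500,
    # actual_range = 3 * 500 = 1500, and
    # actual_range_start = 0 - 1500/2 + 500/2 = -500.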
    logging.info(
        "Disparity range will be explored in {} regions of size {}, starting at {} pixels"
        .format(nb_splits, actual_region_size, actual_range_start))

    regions = tiling.split(0, 0, epipolar_size_x, epipolar_size_y, region_size,
                           region_size)

    logging.info("Number of splits to process for sparse matching: {}".format(
        len(regions)))

    cluster = None
    client = None

    # TODO: prepare mp mode
    # Use dask
    use_dask = {"local_dask": True, "pbs_dask": True}
    if mode not in use_dask.keys():
        raise NotImplementedError('{} mode is not implemented'.format(mode))

    if mode == "local_dask":
        cluster, client = start_local_cluster(nb_workers)
    else:
        cluster, client = start_cluster(nb_workers, walltime, out_dir)

    # Write temporary grid
    tmp1 = os.path.join(out_dir, "tmp1.tif")
    preprocessing.write_grid(left_grid_as_array, tmp1, grid_origin,
                             grid_spacing)
    tmp2 = os.path.join(out_dir, "tmp2.tif")
    preprocessing.write_grid(right_grid_as_array, tmp2, grid_origin,
                             grid_spacing)

    # Compute margins for right region
    margins = [
        int(
            math.floor(epipolar_error_upper_bound +
                       epipolar_error_maximum_bias)),
        int(
            math.floor(epipolar_error_upper_bound +
                       epipolar_error_maximum_bias)),
        int(
            math.floor(epipolar_error_upper_bound +
                       epipolar_error_maximum_bias)),
        int(math.ceil(epipolar_error_upper_bound +
                      epipolar_error_maximum_bias))
    ]

    logging.info(
        "Margins added to right region for matching: {}".format(margins))

    # Matching tasks as delayed objects
    delayed_matches = []
    for left_region in regions:
        for offset in range(nb_splits):
            offset_ = actual_range_start + offset * actual_region_size
            # Pad region to include margins for right image
            right_region = [
                left_region[0] + offset_, left_region[1],
                left_region[0] + offset_ + actual_region_size, left_region[3]
            ]

            # Pad with margin and crop to largest region
            right_region = tiling.crop(
                tiling.pad(right_region,
                           margins), [0, 0, epipolar_size_x, epipolar_size_y])

            # Avoid empty regions
            if not tiling.empty(right_region):

                delayed_matches.append(
                    dask.delayed(matching_wrapper)(left_region, right_region,
                                                   img1, img2, tmp1, tmp2,
                                                   mask1, mask2, nodata1,
                                                   nodata2, epipolar_size_x,
                                                   epipolar_size_y))

    # Transform delayed tasks to future
    logging.info("Submitting {} tasks to dask".format(len(delayed_matches)))
    future_matches = client.compute(delayed_matches)

    # Initialize output matches array
    matches = np.empty((0, 4))

    # Wait for all matching tasks to be completed
    for future, result in tqdm(as_completed(future_matches, with_results=True),
                               total=len(future_matches),
                               desc="Performing matching ..."):
        matches = np.concatenate((matches, result))

    raw_nb_matches = matches.shape[0]

    logging.info(
        "Raw number of matches found: {} matches".format(raw_nb_matches))

    # Export matches
    logging.info("Writing raw matches file")
    raw_matches_array_path = os.path.join(out_dir, "raw_matches.npy")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.raw_matches_tag] = raw_matches_array_path
    np.save(raw_matches_array_path, matches)

    # Filter matches that are out of margin
    if epipolar_error_maximum_bias == 0:
        epipolar_median_shift = 0
    else:
        epipolar_median_shift = np.median(matches[:, 3] - matches[:, 1])

    matches = matches[((matches[:, 3] - matches[:, 1]) -
                       epipolar_median_shift) >= -epipolar_error_upper_bound]
    matches = matches[((matches[:, 3] - matches[:, 1]) -
                       epipolar_median_shift) <= epipolar_error_upper_bound]

    matches_discarded_message = "{} matches discarded because their epipolar error is greater \
than --epipolar_error_upper_bound = {} pix".format(
        raw_nb_matches - matches.shape[0], epipolar_error_upper_bound)

    if epipolar_error_maximum_bias != 0:
        matches_discarded_message += " considering a shift of {} pix".format(
            epipolar_median_shift)

    logging.info(matches_discarded_message)

    filtered_nb_matches = matches.shape[0]

    matches = matches[matches[:, 2] - matches[:, 0] >= disp_lower_bound]
    matches = matches[matches[:, 2] - matches[:, 0] <= disp_upper_bound]

    logging.info(
        "{} matches discarded because they fall outside of disparity range defined by --elevation_delta_lower_bound = {} m and --elevation_delta_upper_bound = {} m : [{} pix., {} pix.]"
        .format(filtered_nb_matches - matches.shape[0],
                elevation_delta_lower_bound, elevation_delta_upper_bound,
                disp_lower_bound, disp_upper_bound))

    # Retrieve number of matches
    nb_matches = matches.shape[0]

    # Check if we have enough matches
    # TODO: we could also make it a warning and continue with uncorrected grid
    # and default disparity range
    if nb_matches < 100:
        logging.critical(
            "Insufficient amount of matches found (< 100), can not safely estimate epipolar error correction and disparity range"
        )
        # stop cluster
        stop_cluster(cluster, client)
        # Exit immediately
        return

    logging.info(
        "Number of matches kept for epipolar error correction: {} matches".
        format(nb_matches))

    # Remove temporary files
    os.remove(tmp1)
    os.remove(tmp2)

    # Compute epipolar error
    epipolar_error = matches[:, 1] - matches[:, 3]
    logging.info(
        "Epipolar error before correction: mean = {:.3f} pix., standard deviation = {:.3f} pix., max = {:.3f} pix."
        .format(np.mean(epipolar_error), np.std(epipolar_error),
                np.max(np.fabs(epipolar_error))))

    # Compute correction for right grid
    logging.info("Generating correction for right epipolar grid ...")
    corrected_right_grid, corrected_matches, in_stats, out_stats = preprocessing.correct_right_grid(
        matches, right_grid_as_array, grid_origin, grid_spacing)

    corrected_epipolar_error = (corrected_matches[:, 1] -
                                corrected_matches[:, 3])

    logging.info(
        "Epipolar error after correction: mean = {:.3f} pix., standard deviation = {:.3f} pix., max = {:.3f} pix."
        .format(np.mean(corrected_epipolar_error),
                np.std(corrected_epipolar_error),
                np.max(np.fabs(corrected_epipolar_error))))

    # TODO: add stats in content.json

    out_left_grid = os.path.join(out_dir, "left_epipolar_grid.tif")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.left_epipolar_grid_tag] = out_left_grid
    preprocessing.write_grid(left_grid_as_array, out_left_grid, grid_origin,
                             grid_spacing)

    # Export corrected right grid
    out_right_grid = os.path.join(out_dir, "right_epipolar_grid.tif")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.right_epipolar_grid_tag] = out_right_grid
    preprocessing.write_grid(corrected_right_grid, out_right_grid, grid_origin,
                             grid_spacing)

    # Export uncorrected right grid
    logging.info("Writing uncorrected right grid")
    out_right_grid_uncorrected = os.path.join(
        out_dir, "right_epipolar_grid_uncorrected.tif")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.
            right_epipolar_uncorrected_grid_tag] = out_right_grid_uncorrected
    preprocessing.write_grid(right_grid_as_array, out_right_grid_uncorrected,
                             grid_origin, grid_spacing)

    # Compute the disparity range (we filter matches that are too far off
    # epipolar lines after correction)
    corrected_std = np.std(corrected_epipolar_error)

    corrected_matches = corrected_matches[
        np.fabs(corrected_epipolar_error) < 3 * corrected_std]
    logging.info(
        "{} matches discarded because their epipolar error is greater than 3*stdev of epipolar error after correction (3*stddev = {:.3f} pix.)"
        .format(nb_matches - corrected_matches.shape[0], 3 * corrected_std))

    logging.info(
        "Number of matches kept for disparity range estimation: {} matches".
        format(corrected_matches.shape[0]))

    dmin, dmax = preprocessing.compute_disparity_range(
        corrected_matches,
        static_cfg.get_disparity_outliers_rejection_percent())
    margin = abs(dmax - dmin) * disparity_margin
    dmin -= margin
    dmax += margin
    logging.info(
        "Disparity range with margin: [{:.3f} pix., {:.3f} pix.] (margin = {:.3f} pix.)"
        .format(dmin, dmax, margin))
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.minimum_disparity_tag] = dmin
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.maximum_disparity_tag] = dmax

    logging.info(
        "Equivalent range in meters: [{:.3f} m, {:.3f} m] (margin = {:.3f} m)".
        format(dmin * disp_to_alt_ratio, dmax * disp_to_alt_ratio,
               margin * disp_to_alt_ratio))

    # Export matches
    logging.info("Writing matches file")
    matches_array_path = os.path.join(out_dir, "matches.npy")
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.matches_tag] = matches_array_path
    np.save(matches_array_path, corrected_matches)

    # Now compute low resolution DSM and its initial DEM counterpart

    # First, triangulate matches
    logging.info("Generating low resolution DSM from matches")
    points_cloud_from_matches = stereo.triangulate_matches(
        out_json, corrected_matches)

    # Then define the size of the lower res DSM to rasterize
    low_res_dsm_params = static_cfg.get_low_res_dsm_params()
    lowres_dsm_resolution = getattr(
        low_res_dsm_params,
        static_cfg.low_res_dsm_resolution_in_degree_tag)  # Value in degree
    lowres_dsm_sizex = int(
        math.ceil((inter_xmax - inter_xmin) / lowres_dsm_resolution))
    lowres_dsm_sizey = int(
        math.ceil((inter_ymax - inter_ymin) / lowres_dsm_resolution))
    lowres_dsm = rasterization.simple_rasterization_dataset(
        [points_cloud_from_matches],
        lowres_dsm_resolution,
        4326,
        color_list=None,
        xstart=inter_xmin,
        ystart=inter_ymax,
        xsize=lowres_dsm_sizex,
        ysize=lowres_dsm_sizey)

    lowres_dsm_file = os.path.join(
        out_dir, "lowres_dsm_from_matches.nc")  # TODO add proper crs info
    lowres_dsm.to_netcdf(lowres_dsm_file)
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.lowres_dsm_tag] = lowres_dsm_file

    # Now read the exact same grid on initial DEM
    lowres_initial_dem = preprocessing.read_lowres_dem(
        srtm_dir,
        startx=inter_xmin,
        starty=inter_ymax,
        sizex=lowres_dsm_sizex,
        sizey=lowres_dsm_sizey,
        resolution=lowres_dsm_resolution)
    lowres_initial_dem_file = os.path.join(out_dir, "lowres_initial_dem.nc")
    lowres_initial_dem.to_netcdf(lowres_initial_dem_file)
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.lowres_initial_dem_tag] = lowres_initial_dem_file

    # also write the difference
    lowres_elevation_difference_file = os.path.join(
        out_dir, "lowres_elevation_diff.nc")
    lowres_dsm_diff = lowres_initial_dem - lowres_dsm
    lowres_dsm_diff.to_netcdf(lowres_elevation_difference_file)
    out_json[params.preprocessing_section_tag][
        params.preprocessing_output_section_tag][
            params.
            lowres_elevation_difference_tag] = lowres_elevation_difference_file

    # Now, estimate a correction to align DSM on the lowres initial DEM
    splines = None
    if (lowres_dsm_sizex > getattr(
            low_res_dsm_params,
            static_cfg.low_res_dsm_min_sizex_for_align_tag)
            and lowres_dsm_sizey > getattr(
                low_res_dsm_params,
                static_cfg.low_res_dsm_min_sizey_for_align_tag)):

        logging.info(
            "Estimating correction between low resolution DSM and initial DEM")

        # First, we estimate direction of acquisition time for both images
        vec1 = preprocessing.get_time_ground_direction(img1, dem=srtm_dir)
        vec2 = preprocessing.get_time_ground_direction(img2, dem=srtm_dir)
        time_direction_vector = (vec1 + vec2) / 2

        display_angle = lambda x: 180 * math.atan2(x[1], x[0]) / math.pi

        logging.info(
            "Time direction average azimuth: {}° (img1: {}°, img2: {}°)".
            format(display_angle(time_direction_vector), display_angle(vec1),
                   display_angle(vec2)))

        origin = [
            float(lowres_dsm_diff.x[0].values),
            float(lowres_dsm_diff.y[0].values)
        ]
        out_json[params.preprocessing_section_tag][
            params.preprocessing_output_section_tag][
                params.time_direction_line_origin_x_tag] = origin[0]
        out_json[params.preprocessing_section_tag][
            params.preprocessing_output_section_tag][
                params.time_direction_line_origin_y_tag] = origin[1]
        out_json[params.preprocessing_section_tag][
            params.preprocessing_output_section_tag][
                params.
                time_direction_line_vector_x_tag] = time_direction_vector[0]
        out_json[params.preprocessing_section_tag][
            params.preprocessing_output_section_tag][
                params.
                time_direction_line_vector_y_tag] = time_direction_vector[1]

        # Then we estimate the correction splines
        splines = preprocessing.lowres_initial_dem_splines_fit(
            lowres_dsm,
            lowres_initial_dem,
            origin,
            time_direction_vector,
            ext=getattr(low_res_dsm_params, static_cfg.low_res_dsm_ext_tag),
            # assumption: the original reused the ext tag here; the spline
            # order presumably has its own tag
            order=getattr(low_res_dsm_params,
                          static_cfg.low_res_dsm_order_tag))

    else:
        logging.warning(
            "Low resolution DSM is not large enough (minimum size is 100x100) to estimate correction to fit initial DEM, skipping ..."
        )

    if splines is not None:
        # Save model to file
        lowres_dem_splines_fit_file = os.path.join(
            out_dir, "lowres_dem_splines_fit.pck")
        with open(lowres_dem_splines_fit_file, 'wb') as f:
            pickle.dump(splines, f)
            out_json[params.preprocessing_section_tag][
                params.preprocessing_output_section_tag][
                    params.
                    lowres_dem_splines_fit_tag] = lowres_dem_splines_fit_file

            logging.info(
                "Generating corrected low resolution DSM from matches")

            # Estimate correction on point cloud from matches
            points_cloud_from_matches_z_correction = splines(
                preprocessing.project_coordinates_on_line(
                    points_cloud_from_matches.x, points_cloud_from_matches.y,
                    origin, time_direction_vector))

            # Estimate disparity correction
            points_cloud_disp_correction = points_cloud_from_matches_z_correction / disp_to_alt_ratio

            # Correct matches disparity
            z_corrected_matches = corrected_matches
            z_corrected_matches[:, 2] = (z_corrected_matches[:, 2] -
                                         points_cloud_disp_correction[:, 0])

            # Triangulate and rasterize again
            corrected_points_cloud_from_matches = stereo.triangulate_matches(
                out_json, z_corrected_matches)

            corrected_lowres_dsm = rasterization.simple_rasterization_dataset(
                [corrected_points_cloud_from_matches],
                lowres_dsm_resolution,
                corrected_points_cloud_from_matches.attrs['epsg'],
                xstart=inter_xmin,
                ystart=inter_ymax,
                xsize=lowres_dsm_sizex,
                ysize=lowres_dsm_sizey)

            # Write corrected lowres dsm
            corrected_lowres_dsm_file = os.path.join(
                out_dir, "corrected_lowres_dsm_from_matches.nc"
            )  # TODO add proper crs info
            corrected_lowres_dsm.to_netcdf(corrected_lowres_dsm_file)
            out_json[params.preprocessing_section_tag][
                params.preprocessing_output_section_tag][
                    params.
                    corrected_lowres_dsm_tag] = corrected_lowres_dsm_file

            # also write the difference
            corrected_lowres_elevation_difference_file = os.path.join(
                out_dir, "corrected_lowres_elevation_diff.nc")
            corrected_lowres_dsm_diff = lowres_initial_dem - corrected_lowres_dsm
            corrected_lowres_dsm_diff.to_netcdf(
                corrected_lowres_elevation_difference_file)
            out_json[params.preprocessing_section_tag][
                params.preprocessing_output_section_tag][
                    params.
                    corrected_lowres_elevation_difference_tag] = corrected_lowres_elevation_difference_file

    # Write the output json
    try:
        utils.check_json(out_json, params.preprocessing_content_schema)
    except CheckerError as e:
        logging.warning(
            "content.json does not comply with schema: {}".format(e))

    out_json_path = os.path.join(out_dir, "content.json")
    params.write_preprocessing_content_file(out_json, out_json_path)

    # stop cluster
    stop_cluster(cluster, client)
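
The epipolar-error filter in the middle of this function (the block computing epipolar_median_shift) reduces to a single numpy mask. A standalone sketch of the same logic, assuming the (x1, y1, x2, y2) match column layout implied by the indexing above:

import numpy as np

def filter_epipolar_error(matches, upper_bound, maximum_bias=0.0):
    # Keep matches whose epipolar error (y2 - y1) lies within +/- upper_bound,
    # centered on the median shift when a non-zero bias is tolerated
    error = matches[:, 3] - matches[:, 1]
    shift = np.median(error) if maximum_bias != 0 else 0.0
    keep = np.fabs(error - shift) <= upper_bound
    return matches[keep]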
Example #6
File: cars_cli.py Project: zhhongsh/cars
def parse_roi_argument(roi_args, stop_now):
    """
    Parse ROI argument

    :param roi_args: ROI argument
    :type roi_args: str or array of four numbers
    :param stop_now: Argument check
    :type stop_now: Boolean
    :return: ROI (Bounds, EPSG code)
    :rtype: Tuple with array of 4 floats and int
    """
    import logging
    import rasterio
    from cars import utils

    roi = None
    if roi_args is not None:
        if len(roi_args) == 1:
            # in case the input is a string
            if isinstance(roi_args[0], str):
                roi_file = roi_args[0]
                _, extension = os.path.splitext(roi_file)

                # test file existence
                if not os.path.exists(roi_file):
                    logging.warning('{} does not exist'.format(roi_file))
                    stop_now = True

                # if it is a vector file
                if extension in ['.gpkg', '.shp', '.kml']:
                    try:
                        roi_poly, roi_epsg = utils.read_vector(roi_file)
                        roi = (roi_poly.bounds, roi_epsg)
                    except BaseException:
                        logging.critical(
                            'Impossible to read {} file'.format(roi_args))
                        stop_now = True

                # if not, it is an image
                elif utils.rasterio_can_open(roi_file):
                    data = rasterio.open(roi_file)
                    xmin = min(data.bounds.left, data.bounds.right)
                    ymin = min(data.bounds.bottom, data.bounds.top)
                    xmax = max(data.bounds.left, data.bounds.right)
                    ymax = max(data.bounds.bottom, data.bounds.top)

                    try:
                        roi_epsg = data.crs.to_epsg()
                        roi = ([xmin, ymin, xmax, ymax], roi_epsg)
                    except AttributeError as e:
                        logging.critical(
                            'Impossible to read the ROI image epsg code: {}'.
                            format(e))
                        stop_now = True

                else:
                    logging.critical(
                        '{} has an unsupported file format'.format(roi_args))
                    stop_now = True

        elif len(roi_args) == 4:
            # in case the input has a [xmin, ymin, xmax, ymax] ROI
            try:
                roi = ([float(elt) for elt in roi_args], None)
            except BaseException:
                logging.critical('Cannot parse {} argument'.format(roi_args))
                stop_now = True
            logging.warning('Input ROI shall be in final projection')
        else:
            logging.critical('--roi is not set properly')
            stop_now = True
    return roi, stop_now
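
Both accepted forms of the --roi argument, for reference (values and path are illustrative):

# Bounding box passed directly on the command line
roi, stop_now = parse_roi_argument(['4.8', '44.2', '6.2', '44.3'], False)
# -> roi == ([4.8, 44.2, 6.2, 44.3], None); the ROI is assumed to be in the final projection

# Single vector or raster file
roi, stop_now = parse_roi_argument(['my_roi.gpkg'], False)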
Example #7
def run(in_jsons: List[params.preprocessing_content_type],
        out_dir: str,
        resolution: float = 0.5,
        min_elevation_offset: float = None,
        max_elevation_offset: float = None,
        epsg: int = None,
        sigma: float = None,
        dsm_radius: int = 1,
        dsm_no_data: int = -32768,
        color_no_data: int = 0,
        corr_config: Dict = None,
        output_stats: bool = False,
        mode: str = "local_dask",
        nb_workers: int = 4,
        walltime: str = "00:59:00",
        roi: Tuple[List[int], int] = None,
        use_geoid_alt: bool = False,
        use_sec_disp: bool = False,
        snap_to_img1: bool = False,
        align: bool = False,
        cloud_small_components_filter: bool = True,
        cloud_statistical_outliers_filter: bool = True,
        epi_tile_size: int = None):
    """
    Main function for the compute_dsm subcommand

    This function will compute independent tiles of the final DSM, with the following steps:

    1. Epipolar resampling (including mask)
    2. Disparity map estimation
    3. Triangulation of disparity map
    4. Rasterization to DSM

    :param in_jsons: dictionaries describing the input pair (as produced by cars_preproc tool)
    :param out_dir: directory where output raster and color images will be written
    :param resolution: resolution of DSM to produce
    :param min_elevation_offset: Override minimum disparity from prepare step with this offset in meters
    :param max_elevation_offset: Override maximum disparity from prepare step with this offset in meters
    :param epsg: epsg code for the CRS of the output DSM
    :param sigma: width of gaussian weight for rasterization
    :param dsm_radius: Radius around a cell for gathering points for rasterization
    :param dsm_no_data: No data value to use in the final DSM file
    :param color_no_data: No data value to use in the final colored image
    :param corr_config: Correlator configuration
    :param output_stats: flag, if true, outputs dsm as a geotiff file with quality statistics.
    :param mode: Parallelization mode
    :param nb_workers: Number of dask workers to use for the sift matching step
    :param walltime: Walltime of the dask workers
    :param roi: DSM ROI in final projection with the corresponding epsg code ([xmin, ymin, xmax, ymax], roi_epsg)
    (roi_epsg can be set to None if the ROI is in final projection)
    :param use_geoid_alt: Whether altitude should be computed wrt geoid height or not.
    :param use_sec_disp: Boolean activating the use of the secondary disparity map
    :param snap_to_img1: If this is True, Lines of Sight of img2 are moved so as to cross those of img1
    :param align: If this is True, use the correction estimated during prepare to align to lowres DEM (if available)
    :param cloud_small_components_filter: Boolean activating the points cloud small components filtering. The filter's
    parameters are set in the static configuration json.
    :param cloud_statistical_outliers_filter: Boolean activating the points cloud statistical outliers filtering.
    The filter's parameters are set in the static configuration json.
    :param epi_tile_size: Force the size of epipolar tiles (None by default)
    """
    out_dir = os.path.abspath(out_dir)
    # Ensure that outdir exists
    try:
        os.makedirs(out_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(out_dir):
            pass
        else:
            raise
    tmp_dir = os.path.join(out_dir, 'tmp')

    utils.add_log_file(out_dir, 'compute_dsm')
    logging.info("Received {} stereo pairs configurations".format(
        len(in_jsons)))

    # Retrieve static parameters (rasterization and cloud filtering)
    static_params = static_cfg.get_cfg()

    # Initialize output json dictionary
    out_json = {
        params.stereo_inputs_section_tag: [],
        params.stereo_section_tag: {
            params.stereo_version_tag: utils.get_version(),
            params.stereo_parameters_section_tag: {
                params.resolution_tag: resolution,
                params.sigma_tag: sigma,
                params.dsm_radius_tag: dsm_radius
            },
            params.static_params_tag:
            static_params[static_cfg.compute_dsm_tag],
            params.stereo_output_section_tag: {}
        }
    }

    if use_geoid_alt:
        geoid_data = utils.read_geoid_file()
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.alt_reference_tag] = 'geoid'
    else:
        geoid_data = None
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.alt_reference_tag] = 'ellipsoid'

    if epsg is not None:
        out_json[params.stereo_section_tag][
            params.stereo_parameters_section_tag][params.epsg_tag] = epsg

    roi_epsg = None
    if roi is not None:
        (roi_xmin, roi_ymin, roi_xmax, roi_ymax), roi_epsg = roi
        roi_poly = Polygon([(roi_xmin, roi_ymin), (roi_xmax, roi_ymin),
                            (roi_xmax, roi_ymax), (roi_xmin, roi_ymax),
                            (roi_xmin, roi_ymin)])

    # set the timeout for each job in multiprocessing mode (in seconds)
    perJobTimeout = 600

    configurations_data = {}

    config_idx = 1

    ref_left_image = None

    for in_json in in_jsons:
        # Build config id
        config_id = "config_{}".format(config_idx)

        # Check configuration with respect to schema
        configuration = utils.check_json(in_json,
                                         params.preprocessing_content_schema)

        preprocessing_output_config = configuration[
            params.preprocessing_section_tag][
                params.preprocessing_output_section_tag]

        # Append input configuration to output json
        out_json[params.stereo_inputs_section_tag].append(configuration)

        configurations_data[config_id] = {}

        configurations_data[config_id]['configuration'] = configuration

        # Check left image and raise a warning if different left images are used along with snap_to_img1 mode
        if ref_left_image is None:
            ref_left_image = configuration[params.input_section_tag][
                params.img1_tag]
        else:
            if snap_to_img1 and ref_left_image != configuration[
                    params.input_section_tag][params.img1_tag]:
                logging.warning(
                    "--snap_to_left_image mode is used but input configurations have different images as their left image in pair. This may result in increasing registration discrepancies between pairs"
                )

        # Get largest epipolar regions from configuration file
        largest_epipolar_region = [
            0, 0, preprocessing_output_config[params.epipolar_size_x_tag],
            preprocessing_output_config[params.epipolar_size_y_tag]
        ]

        configurations_data[config_id][
            'largest_epipolar_region'] = largest_epipolar_region

        disp_min = preprocessing_output_config[params.minimum_disparity_tag]
        disp_max = preprocessing_output_config[params.maximum_disparity_tag]
        disp_to_alt_ratio = preprocessing_output_config[
            params.disp_to_alt_ratio_tag]

        # Check if we need to override disp_min
        if min_elevation_offset is not None:
            user_disp_min = min_elevation_offset / disp_to_alt_ratio
            if user_disp_min > disp_min:
                logging.warning((
                    'Overridden disparity minimum = {:.3f} pix. (or {:.3f} m.) is greater '
                    'than disparity minimum estimated in prepare step = {:.3f} pix. (or '
                    '{:.3f} m.) for configuration {}').format(
                        user_disp_min, min_elevation_offset, disp_min,
                        disp_min * disp_to_alt_ratio, config_id))
                disp_min = user_disp_min

        # Check if we need to override disp_max
        if max_elevation_offset is not None:
            user_disp_max = max_elevation_offset / disp_to_alt_ratio
            if user_disp_max < disp_max:
                logging.warning((
                    'Overridden disparity maximum = {:.3f} pix. (or {:.3f} m.) is lower '
                    'than disparity maximum estimated in prepare step = {:.3f} pix. (or '
                    '{:.3f} m.) for configuration {}').format(
                        user_disp_max, max_elevation_offset, disp_max,
                        disp_max * disp_to_alt_ratio, config_id))
                disp_max = user_disp_max

        logging.info(
            'Disparity range for config {}: [{:.3f} pix., {:.3f} pix.] (or [{:.3f} m., {:.3f} m.])'
            .format(config_id, disp_min, disp_max,
                    disp_min * disp_to_alt_ratio,
                    disp_max * disp_to_alt_ratio))

        configurations_data[config_id]['disp_min'] = disp_min
        configurations_data[config_id]['disp_max'] = disp_max

        origin = [
            preprocessing_output_config[params.epipolar_origin_x_tag],
            preprocessing_output_config[params.epipolar_origin_y_tag]
        ]
        spacing = [
            preprocessing_output_config[params.epipolar_spacing_x_tag],
            preprocessing_output_config[params.epipolar_spacing_y_tag]
        ]

        configurations_data[config_id]['origin'] = origin
        configurations_data[config_id]['spacing'] = spacing

        logging.info(
            "Size of epipolar image: {}".format(largest_epipolar_region))
        logging.debug("Origin of epipolar grid: {}".format(origin))
        logging.debug("Spacing of epipolar grid: {}".format(spacing))

        # Warning if align is set but correction is missing
        if align and params.lowres_dem_splines_fit_tag not in preprocessing_output_config:
            logging.warning((
                'Align with low resolution DSM option is set but splines correction file '
                'is not available for configuration {}. Correction '
                'will not be applied for this configuration'
            ).format(config_id))

        # Numpy array with corners of largest epipolar region. Order
        # does not matter here, since it will be passed to stereo.compute_epipolar_grid_min_max
        corners = np.array(
            [[[largest_epipolar_region[0], largest_epipolar_region[1]],
              [largest_epipolar_region[0], largest_epipolar_region[3]]],
             [[largest_epipolar_region[2], largest_epipolar_region[3]],
              [largest_epipolar_region[2], largest_epipolar_region[1]]]],
            dtype=np.float64)

        # get utm zone with the middle point of terrain_min if epsg is
        # None
        if epsg is None:
            # Compute terrain position of epipolar image corners for min and max disparity
            terrain_dispmin, terrain_dispmax = stereo.compute_epipolar_grid_min_max(
                corners, 4326, configuration, disp_min, disp_max)
            epsg = rasterization.get_utm_zone_as_epsg_code(
                *np.mean(terrain_dispmin, axis=0))
            logging.info("EPSG code: {}".format(epsg))

        # Compute terrain min and max again, this time using estimated epsg code
        terrain_dispmin, terrain_dispmax = stereo.compute_epipolar_grid_min_max(
            corners, epsg, configuration, disp_min, disp_max)

        if roi_epsg is not None:
            if roi_epsg != epsg:
                roi_poly = projection.polygon_projection(
                    roi_poly, roi_epsg, epsg)

        # Compute bounds from epipolar image corners and dispmin/dispmax
        terrain_bounds = np.stack((terrain_dispmin, terrain_dispmax), axis=0)
        terrain_min = np.amin(terrain_bounds, axis=(0, 1))
        terrain_max = np.amax(terrain_bounds, axis=(0, 1))

        terrain_area = (terrain_max[0] - terrain_min[0]) * (terrain_max[1] -
                                                            terrain_min[1])

        configurations_data[config_id]['terrain_area'] = terrain_area

        logging.info(
            "Terrain area covered: {} square meters (or square degrees)".
            format(terrain_area))

        # Retrieve bounding box of the ground intersection of the envelopes
        inter_poly, inter_epsg = utils.read_vector(
            preprocessing_output_config[params.envelopes_intersection_tag])

        if epsg != inter_epsg:
            inter_poly = projection.polygon_projection(inter_poly, inter_epsg,
                                                       epsg)

        (inter_xmin, inter_ymin, inter_xmax, inter_ymax) = inter_poly.bounds

        # Align bounding box to integer resolution steps
        xmin, ymin, xmax, ymax = tiling.snap_to_grid(inter_xmin, inter_ymin,
                                                     inter_xmax, inter_ymax,
                                                     resolution)

        logging.info("Terrain bounding box : [{}, {}] x [{}, {}]".format(
            xmin, xmax, ymin, ymax))

        configurations_data[config_id]['terrain_bounding_box'] = [
            xmin, ymin, xmax, ymax
        ]

        if roi is not None:
            if not roi_poly.intersects(inter_poly):
                logging.warning(
                    "The pair composed of {} and {} does not intersect the requested ROI"
                    .format(
                        configuration[params.input_section_tag][
                            params.img1_tag], configuration[
                                params.input_section_tag][params.img2_tag]))

        # Get optimal tile size
        if epi_tile_size is not None:
            opt_epipolar_tile_size = epi_tile_size
        else:
            opt_epipolar_tile_size = stereo.optimal_tile_size(
                disp_min, disp_max)
        logging.info(
            "Optimal tile size for epipolar regions: {}x{} pixels".format(
                opt_epipolar_tile_size, opt_epipolar_tile_size))

        configurations_data[config_id][
            'opt_epipolar_tile_size'] = opt_epipolar_tile_size

        # Split epipolar image in pieces
        epipolar_regions = tiling.split(
            0, 0, preprocessing_output_config[params.epipolar_size_x_tag],
            preprocessing_output_config[params.epipolar_size_y_tag],
            opt_epipolar_tile_size, opt_epipolar_tile_size)
        epipolar_regions_grid = tiling.grid(
            0, 0, preprocessing_output_config[params.epipolar_size_x_tag],
            preprocessing_output_config[params.epipolar_size_y_tag],
            opt_epipolar_tile_size, opt_epipolar_tile_size)

        configurations_data[config_id]['epipolar_regions'] = epipolar_regions
        configurations_data[config_id][
            'epipolar_regions_grid'] = epipolar_regions_grid

        logging.info("Epipolar image will be processed in {} splits".format(
            len(epipolar_regions)))

        # Increment config index
        config_idx += 1

    xmin, ymin, xmax, ymax = tiling.union([
        conf['terrain_bounding_box']
        for config_id, conf in configurations_data.items()
    ])

    if roi is not None:
        # terrain bounding box polygon
        terrain_poly = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax),
                                (xmin, ymax), (xmin, ymin)])

        if not roi_poly.intersects(terrain_poly):
            raise Exception(
                'None of the input pairs intersect the requested ROI')
        else:
            logging.info('Setting terrain bounding box to the requested ROI')
            xmin, ymin, xmax, ymax = roi_poly.bounds
            xmin, ymin, xmax, ymax = tiling.snap_to_grid(
                xmin, ymin, xmax, ymax, resolution)

    logging.info("Total terrain bounding box : [{}, {}] x [{}, {}]".format(
        xmin, xmax, ymin, ymax))

    # Compute optimal terrain tile size
    optimal_terrain_tile_widths = []

    for config_id, conf in configurations_data.items():
        # Compute terrain area covered by a single epipolar tile
        terrain_area_covered_by_epipolar_tile = conf["terrain_area"] / len(
            conf['epipolar_regions'])

        # Compute tile width in pixels
        optimal_terrain_tile_widths.append(
            math.sqrt(terrain_area_covered_by_epipolar_tile))

    # In case of multiple json configuration, take the average optimal size,
    # and align to multiple of resolution
    optimal_terrain_tile_width = int(
        math.ceil(
            np.mean(optimal_terrain_tile_widths) / resolution)) * resolution

    logging.info("Optimal terrain tile size: {}x{} pixels".format(
        int(optimal_terrain_tile_width / resolution),
        int(optimal_terrain_tile_width / resolution)))

    # Split terrain bounding box in pieces
    terrain_grid = tiling.grid(xmin, ymin, xmax, ymax,
                               optimal_terrain_tile_width,
                               optimal_terrain_tile_width)
    number_of_terrain_splits = (terrain_grid.shape[0] -
                                1) * (terrain_grid.shape[1] - 1)
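    # tiling.grid returns grid nodes: n x m nodes delimit
    # (n - 1) * (m - 1) terrain tiles, hence the -1 terms above.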

    logging.info("Terrain bounding box will be processed in {} splits".format(
        number_of_terrain_splits))

    # Start dask cluster
    cluster = None
    client = None

    # Use dask
    use_dask = {"local_dask": True, "pbs_dask": True, "mp": False}
    if mode not in use_dask.keys():
        raise NotImplementedError('{} mode is not implemented'.format(mode))

    if use_dask[mode]:
        if mode == "local_dask":
            cluster, client = start_local_cluster(nb_workers)
        else:
            cluster, client = start_cluster(nb_workers, walltime, out_dir)

        # Add plugin to monitor memory of workers
        plugin = ComputeDSMMemoryLogger(out_dir)
        client.register_worker_plugin(plugin)

        geoid_data_futures = None
        if geoid_data is not None:
            # Broadcast geoid data to all dask workers
            geoid_data_futures = client.scatter(geoid_data, broadcast=True)

    # Retrieve the epsg code which will be used for the triangulation's output points clouds
    # (ecef if filters are activated)
    if cloud_small_components_filter or cloud_statistical_outliers_filter:
        stereo_out_epsg = 4978
    else:
        stereo_out_epsg = epsg

    # Submit all epipolar regions to be processed as delayed tasks, and
    # project terrain grid to epipolar
    for config_id, conf in configurations_data.items():
        # This list will hold the different epipolar tiles to be
        # processed as points cloud
        delayed_point_clouds = []

        if use_dask[mode]:
            # Use Dask delayed
            for region in conf['epipolar_regions']:
                delayed_point_clouds.append(
                    dask.delayed(stereo.images_pair_to_3d_points)(
                        conf['configuration'],
                        region,
                        corr_config,
                        disp_min=conf['disp_min'],
                        disp_max=conf['disp_max'],
                        geoid_data=geoid_data_futures,
                        out_epsg=stereo_out_epsg,
                        use_sec_disp=use_sec_disp,
                        snap_to_img1=snap_to_img1,
                        align=align))
            logging.info(
                "Submitted {} epipolar delayed tasks to dask for stereo configuration {}"
                .format(len(delayed_point_clouds), config_id))
        else:
            # Use multiprocessing module

            # create progress bar with an update callback
            pbar = tqdm(total=len(conf['epipolar_regions']))

            def update(args):
                pbar.update()

            # create a multiprocessing worker pool
            pool = multiprocessing.Pool(nb_workers)

            # launch several 'write_3d_points()' to process each epipolar region
            for region in conf['epipolar_regions']:
                delayed_point_clouds.append(
                    pool.apply_async(write_3d_points,
                                     args=(conf['configuration'], region,
                                           corr_config, tmp_dir, config_id),
                                     kwds={
                                         'disp_min': conf['disp_min'],
                                         'disp_max': conf['disp_max'],
                                         'geoid_data': geoid_data,
                                         'out_epsg': stereo_out_epsg,
                                         'use_sec_disp': use_sec_disp
                                     },
                                     callback=update))

            # Wait for computation results (timeout in seconds) and replace
            # the async objects with the actual outputs of write_3d_points(),
            # i.e. the paths to the cloud files
            delayed_point_clouds = [
                delayed_pc.get(timeout=perJobTimeout)
                for delayed_pc in delayed_point_clouds
            ]

            # close the process pool once computation is done
            pool.close()
            pool.join()

        configurations_data[config_id][
            'delayed_point_clouds'] = delayed_point_clouds

        # build list of epipolar region hashes
        configurations_data[config_id]['epipolar_regions_hash'] = [
            region_hash_string(k) for k in conf['epipolar_regions']
        ]

        # Compute disp_min and disp_max location for epipolar grid
        epipolar_grid_min, epipolar_grid_max = stereo.compute_epipolar_grid_min_max(
            conf['epipolar_regions_grid'], epsg, conf["configuration"],
            conf['disp_min'], conf['disp_max'])

        epipolar_regions_grid_flat = conf['epipolar_regions_grid'].reshape(
            -1, conf['epipolar_regions_grid'].shape[-1])

        # In the searches below, a scaling factor is applied to the grid
        # coordinates to improve numerical precision (useful when the
        # coordinates are geographic and expressed in degrees)
        spatial_ref = osr.SpatialReference()
        spatial_ref.ImportFromEPSG(epsg)
        if spatial_ref.IsGeographic():
            precision_factor = 1000.0
        else:
            precision_factor = 1.0

        # Build delaunay_triangulation
        delaunay_min = Delaunay(epipolar_grid_min * precision_factor)
        delaunay_max = Delaunay(epipolar_grid_max * precision_factor)

        # Build kdtrees
        tree_min = cKDTree(epipolar_grid_min * precision_factor)
        tree_max = cKDTree(epipolar_grid_max * precision_factor)

        # Look-up terrain_grid with Delaunay
        s_min = tsearch(delaunay_min, terrain_grid * precision_factor)
        s_max = tsearch(delaunay_max, terrain_grid * precision_factor)
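        # (tsearch returns, for each point, the index of the simplex that
        # contains it, or -1 if the point lies outside the triangulation)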

        points_disp_min = epipolar_regions_grid_flat[
            delaunay_min.simplices[s_min]]
        points_disp_max = epipolar_regions_grid_flat[
            delaunay_max.simplices[s_max]]
        nn_disp_min = epipolar_regions_grid_flat[tree_min.query(
            terrain_grid * precision_factor)[1]]
        nn_disp_max = epipolar_regions_grid_flat[tree_max.query(
            terrain_grid * precision_factor)[1]]

        points_disp_min_min = np.min(points_disp_min, axis=2)
        points_disp_min_max = np.max(points_disp_min, axis=2)
        points_disp_max_min = np.min(points_disp_max, axis=2)
        points_disp_max_max = np.max(points_disp_max, axis=2)

        # Use the Delaunay search result, falling back to the NN search
        # wherever the Delaunay search failed (simplex index of -1)
        points_disp_min_min = np.where(
            np.stack((s_min, s_min), axis=-1) != -1, points_disp_min_min,
            nn_disp_min)
        points_disp_min_max = np.where(
            np.stack((s_min, s_min), axis=-1) != -1, points_disp_min_max,
            nn_disp_min)
        points_disp_max_min = np.where(
            np.stack((s_max, s_max), axis=-1) != -1, points_disp_max_min,
            nn_disp_max)
        points_disp_max_max = np.where(
            np.stack((s_max, s_max), axis=-1) != -1, points_disp_max_max,
            nn_disp_max)

        points = np.stack((points_disp_min_min, points_disp_min_max,
                           points_disp_max_min, points_disp_max_max),
                          axis=0)

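        # Elementwise min/max over the four candidate positions yields a
        # conservative epipolar envelope for each terrain grid node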
        points_min = np.min(points, axis=0)
        points_max = np.max(points, axis=0)

        configurations_data[config_id]['epipolar_points_min'] = points_min
        configurations_data[config_id]['epipolar_points_max'] = points_max

    # Retrieve number of bands
    if params.color1_tag in configuration[params.input_section_tag]:
        nb_bands = utils.rasterio_get_nb_bands(
            configuration[params.input_section_tag][params.color1_tag])
    else:
        logging.info(
            'No color image was given as input; {} will be used as the color image'
            .format(configuration[params.input_section_tag][params.img1_tag]))
        nb_bands = utils.rasterio_get_nb_bands(
            configuration[params.input_section_tag][params.img1_tag])
    logging.info("Number of bands in color image: {}".format(nb_bands))

    rank = []

    # This list will contain the different raster tiles to be written by cars
    delayed_dsm_tiles = []
    number_of_epipolar_tiles_per_terrain_tiles = []

    if not use_dask[mode]:
        # create progress bar with update callback
        pbar = tqdm(
            total=number_of_terrain_splits,
            desc="Finding correspondences between terrain and epipolar tiles")

        def update(args):
            pbar.update()

        # initialize a process pool for multiprocessing mode
        pool = multiprocessing.Pool(nb_workers)

    # Loop over terrain regions and derive their dependencies on epipolar
    # regions
    for terrain_region_idx in tqdm(range(number_of_terrain_splits),
                                   total=number_of_terrain_splits,
                                   desc="Delaunay look-up"):

        j = terrain_region_idx // (terrain_grid.shape[1] - 1)
        i = terrain_region_idx % (terrain_grid.shape[1] - 1)

        logging.debug("Processing tile located at {},{} in tile grid".format(
            i, j))

        terrain_region = [
            terrain_grid[j, i, 0], terrain_grid[j, i, 1],
            terrain_grid[j + 1, i + 1, 0], terrain_grid[j + 1, i + 1, 1]
        ]

        logging.debug(
            "Corresponding terrain region: {}".format(terrain_region))

        # This list will hold the required point clouds for this terrain tile
        required_point_clouds = []

        # For each stereo configuration
        for config_id, conf in configurations_data.items():

            epipolar_points_min = conf['epipolar_points_min']
            epipolar_points_max = conf['epipolar_points_max']
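            # Bound the epipolar footprint of the current terrain cell by
            # taking the extrema over its four corners in both the disp_min
            # and disp_max projected grids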

            tile_min = np.minimum(
                np.minimum(
                    np.minimum(epipolar_points_min[j, i],
                               epipolar_points_min[j + 1, i]),
                    np.minimum(epipolar_points_min[j + 1, i + 1],
                               epipolar_points_min[j, i + 1])),
                np.minimum(
                    np.minimum(epipolar_points_max[j, i],
                               epipolar_points_max[j + 1, i]),
                    np.minimum(epipolar_points_max[j + 1, i + 1],
                               epipolar_points_max[j, i + 1])))

            tile_max = np.maximum(
                np.maximum(
                    np.maximum(epipolar_points_min[j, i],
                               epipolar_points_min[j + 1, i]),
                    np.maximum(epipolar_points_min[j + 1, i + 1],
                               epipolar_points_min[j, i + 1])),
                np.maximum(
                    np.maximum(epipolar_points_max[j, i],
                               epipolar_points_max[j + 1, i]),
                    np.maximum(epipolar_points_max[j + 1, i + 1],
                               epipolar_points_max[j, i + 1])))

            # Bounding region of the corresponding cell
            epipolar_region_minx = tile_min[0]
            epipolar_region_miny = tile_min[1]
            epipolar_region_maxx = tile_max[0]
            epipolar_region_maxy = tile_max[1]

            # This mimics the previous code that was using
            # transform_terrain_region_to_epipolar
            epipolar_region = [
                epipolar_region_minx, epipolar_region_miny,
                epipolar_region_maxx, epipolar_region_maxy
            ]

            # Crop epipolar region to largest region
            epipolar_region = tiling.crop(epipolar_region,
                                          conf['largest_epipolar_region'])

            logging.debug(
                "Corresponding epipolar region: {}".format(epipolar_region))

            # Check if the epipolar region contains any pixels to process
            if tiling.empty(epipolar_region):
                logging.debug(
                    "Skipping terrain region because corresponding epipolar region is empty"
                )
            else:

                # Loop on all epipolar tiles covered by epipolar region
                for epipolar_tile in tiling.list_tiles(
                        epipolar_region, conf['largest_epipolar_region'],
                        conf['opt_epipolar_tile_size']):

                    cur_hash = region_hash_string(epipolar_tile)

                    # Look for the corresponding hash in the list of
                    # epipolar region hashes
                    if cur_hash in conf['epipolar_regions_hash']:

                        # If the hash is found, append the matching delayed
                        # point cloud to those required for this terrain tile
                        pos = conf['epipolar_regions_hash'].index(cur_hash)
                        required_point_clouds.append(
                            conf['delayed_point_clouds'][pos])

        # start and size parameters for the rasterization function
        xstart, ystart, xsize, ysize = tiling.roi_to_start_and_size(
            terrain_region, resolution)

        # cloud filtering params
        if cloud_small_components_filter:
            small_cpn_filter_params = \
                static_cfg.get_small_components_filter_params()
        else:
            small_cpn_filter_params = None

        if cloud_statistical_outliers_filter:
            statistical_filter_params = \
                static_cfg.get_statistical_outliers_filter_params()
        else:
            statistical_filter_params = None

        # rasterization grid division factor
        rasterization_params = static_cfg.get_rasterization_params()
        grid_points_division_factor = getattr(
            rasterization_params, static_cfg.grid_points_division_factor_tag)

        if len(required_point_clouds) > 0:
            logging.debug(
                "Number of clouds to process for this terrain tile: {}".format(
                    len(required_point_clouds)))

            if use_dask[mode]:
                # Delayed call to rasterization operations using all required
                # point clouds
                rasterized = dask.delayed(rasterization_wrapper)(
                    required_point_clouds,
                    resolution,
                    epsg,
                    xstart=xstart,
                    ystart=ystart,
                    xsize=xsize,
                    ysize=ysize,
                    radius=dsm_radius,
                    sigma=sigma,
                    dsm_no_data=dsm_no_data,
                    color_no_data=color_no_data,
                    small_cpn_filter_params=small_cpn_filter_params,
                    statistical_filter_params=statistical_filter_params,
                    grid_points_division_factor=grid_points_division_factor)

                # Keep track of delayed raster tiles
                delayed_dsm_tiles.append(rasterized)
                rank.append(i * i + j * j)
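                # (rank is the squared distance of the tile to the grid
                # origin; it is used below to order task submission)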

            else:
                # Launch an asynchronous job for write_dsm_by_tile()
                delayed_dsm_tiles.append(
                    pool.apply_async(
                        write_dsm_by_tile,
                        args=(required_point_clouds, resolution, epsg, tmp_dir,
                              nb_bands, static_cfg.get_color_image_encoding(),
                              output_stats),
                        kwds={
                            'xstart': xstart,
                            'ystart': ystart,
                            'xsize': xsize,
                            'ysize': ysize,
                            'radius': dsm_radius,
                            'sigma': sigma,
                            'dsm_no_data': dsm_no_data,
                            'color_no_data': color_no_data,
                            'small_cpn_filter_params':
                                small_cpn_filter_params,
                            'statistical_filter_params':
                                statistical_filter_params,
                            'grid_points_division_factor':
                                grid_points_division_factor
                        },
                        callback=update))

            number_of_epipolar_tiles_per_terrain_tiles.append(
                len(required_point_clouds))

    logging.info(
        "Average number of epipolar tiles for each terrain tile: {}".format(
            int(np.round(
                np.mean(number_of_epipolar_tiles_per_terrain_tiles)))))
    logging.info(
        "Max number of epipolar tiles for each terrain tile: {}".format(
            np.max(number_of_epipolar_tiles_per_terrain_tiles)))

    bounds = (xmin, ymin, xmax, ymax)
    # Derive output image file parameters to pass to rasterio
    xsize, ysize = tiling.roi_to_start_and_size([xmin, ymin, xmax, ymax],
                                                resolution)[2:]

    out_dsm = os.path.join(out_dir, "dsm.tif")
    out_clr = os.path.join(out_dir, "clr.tif")
    out_dsm_mean = os.path.join(out_dir, "dsm_mean.tif")
    out_dsm_std = os.path.join(out_dir, "dsm_std.tif")
    out_dsm_n_pts = os.path.join(out_dir, "dsm_n_pts.tif")
    out_dsm_points_in_cell = os.path.join(out_dir, "dsm_pts_in_cell.tif")

    if use_dask[mode]:
        # Sort tiles according to rank
        delayed_dsm_tiles = [
            delayed for _, delayed in sorted(zip(rank, delayed_dsm_tiles),
                                             key=lambda pair: pair[0])
        ]

        logging.info("Submitting {} tasks to dask".format(
            len(delayed_dsm_tiles)))
        # Transform all delayed raster tiles into futures (computation starts
        # immediately on the workers, asynchronously)
        future_dsm_tiles = client.compute(delayed_dsm_tiles)

        logging.info("DSM output image size: {}x{} pixels".format(
            xsize, ysize))

        readwrite.write_geotiff_dsm(
            future_dsm_tiles,
            out_dir,
            xsize,
            ysize,
            bounds,
            resolution,
            epsg,
            nb_bands,
            dsm_no_data,
            color_no_data,
            color_dtype=static_cfg.get_color_image_encoding(),
            write_color=True,
            write_stats=output_stats)

        # stop cluster
        stop_cluster(cluster, client)

    else:
        logging.info("Computing DSM tiles ...")
        # Wait for asynchronous jobs (timeout in seconds) and replace them
        # with the write_dsm_by_tile() outputs
        delayed_dsm_tiles = [
            delayed_tile.get(timeout=perJobTimeout)
            for delayed_tile in delayed_dsm_tiles
        ]

        # close the process pool after computation
        pool.close()
        pool.join()

        # vrt to tif
        logging.info("Building VRT")
        vrt_options = gdal.BuildVRTOptions(resampleAlg='nearest')

        def vrt_mosaic(tiles_glob, vrt_name, vrt_options, output):
            """Mosaic the tiles matching tiles_glob into a VRT, then
            translate the VRT to a single GeoTIFF file."""
            vrt_file = os.path.join(out_dir, vrt_name)
            tiles_list = glob(os.path.join(out_dir, 'tmp', tiles_glob))
            vrt = gdal.BuildVRT(vrt_file, tiles_list, options=vrt_options)
            # dereference the GDAL datasets to flush and close them
            vrt = None
            ds = gdal.Open(vrt_file)
            ds = gdal.Translate(output, ds)
            ds = None

        vrt_mosaic('*_dsm.tif', 'dsm.vrt', vrt_options, out_dsm)
        vrt_mosaic('*_clr.tif', 'clr.vrt', vrt_options, out_clr)

        if output_stats:
            vrt_mosaic('*_dsm_mean.tif', 'dsm_mean.vrt', vrt_options,
                       out_dsm_mean)
            vrt_mosaic('*_dsm_std.tif', 'dsm_std.vrt', vrt_options,
                       out_dsm_std)
            vrt_mosaic('*_dsm_n_pts.tif', 'dsm_n_pts.vrt', vrt_options,
                       out_dsm_n_pts)
            vrt_mosaic('*_pts_in_cell.tif', 'dsm_pts_in_cell.vrt', vrt_options,
                       out_dsm_points_in_cell)

    # Fill output json file
    out_json[params.stereo_section_tag][params.stereo_output_section_tag][
        params.epsg_tag] = epsg
    out_json[params.stereo_section_tag][params.stereo_output_section_tag][
        params.dsm_tag] = out_dsm
    out_json[params.stereo_section_tag][params.stereo_output_section_tag][
        params.dsm_no_data_tag] = float(dsm_no_data)
    out_json[params.stereo_section_tag][params.stereo_output_section_tag][
        params.color_no_data_tag] = float(color_no_data)
    out_json[params.stereo_section_tag][params.stereo_output_section_tag][
        params.color_tag] = out_clr

    if output_stats:
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.dsm_mean_tag] = out_dsm_mean
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.dsm_std_tag] = out_dsm_std
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.dsm_n_pts_tag] = out_dsm_n_pts
        out_json[params.stereo_section_tag][params.stereo_output_section_tag][
            params.dsm_points_in_cell_tag] = out_dsm_points_in_cell

    # Write the output json
    out_json_path = os.path.join(out_dir, "content.json")

    try:
        utils.check_json(out_json, params.stereo_content_schema)
    except CheckerError as e:
        logging.warning(
            "content.json does not comply with schema: {}".format(e))

    params.write_stereo_content_file(out_json, out_json_path)