Example #1
def cloud_water_image_domain(x, y, w, h, rpc, roi_gml=None, cld_gml=None,
                             wat_msk=None):
    """
    Compute a mask for pixels masked by clouds, water, or out of image domain.

    Args:
        x, y, w, h: coordinates of the ROI
        rpc: not used by this function
        roi_gml (optional): path to a gml file containing a mask
            defining the area contained in the full image
        cld_gml (optional): path to a gml file containing a mask
            defining the areas covered by clouds
        wat_msk (optional): path to a raster water mask

    Returns:
        2D array containing the output binary mask. 0 indicates masked pixels,
        1 visible pixels.
    """
    # coefficients of the transformation associated to the crop
    H = common.matrix_translation(-x, -y)
    hij = ' '.join([str(el) for el in H.flatten()])

    mask = np.ones((h, w), dtype=bool)

    if roi_gml is not None:  # image domain mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               roi_gml, tmp),
                              shell=True)
        with rasterio.open(tmp, 'r') as f:
            mask = np.logical_and(mask, f.read().squeeze())

    if not mask.any():
        return mask

    if cld_gml is not None:  # cloud mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               cld_gml, tmp),
                              shell=True)
        with rasterio.open(tmp, 'r') as f:
            mask = np.logical_and(mask, ~f.read().squeeze().astype(bool))

    if not mask.any():
        return mask

    if wat_msk is not None:  # water mask (raster)
        x, y, w, h = map(int, (x, y, w, h))
        with rasterio.open(wat_msk, 'r') as f:
            mask = np.logical_and(mask, f.read(window=((y, y+h), (x, x+w))).squeeze())

    return mask
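
A minimal usage sketch (the paths and coordinates below are placeholders; it assumes the s2p `common` module, rasterio and the external `cldmask` binary are available):

mask = cloud_water_image_domain(x=1000, y=2000, w=512, h=512, rpc='rpc.xml',
                                roi_gml='roi.gml', cld_gml='clouds.gml',
                                wat_msk='water.tif')
print('visible fraction of the tile:', mask.mean())
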
Example #2
def create_rejection_mask(disp, im1, im2, mask):
    """
    Create rejection mask (0 means rejected, 1 means accepted)
    Keep only the points that are matched and present in both input images

    Args:
        disp: path to the input disparity map
        im1, im2: rectified stereo pair
        mask: path to the output rejection mask
    """
    tmp1 = common.tmpfile('.tif')
    tmp2 = common.tmpfile('.tif')
    common.run(["plambda", disp, "x 0 join", "-o", tmp1])
    common.run(["backflow", tmp1, im2, tmp2])
    common.run(["plambda", disp, im1, tmp2, "x isfinite y isfinite z isfinite and and", "-o", mask])
Example #3
def height_map_rectified(rpc1,
                         rpc2,
                         H1,
                         H2,
                         disp,
                         mask,
                         height,
                         rpc_err,
                         A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    if A is not None:
        HH2 = common.tmpfile('.txt')
        np.savetxt(HH2, np.dot(np.loadtxt(H2), np.linalg.inv(np.loadtxt(A))))
    else:
        HH2 = H2

    common.run("disp_to_h %s %s %s %s %s %s %s %s" %
               (rpc1, rpc2, H1, HH2, disp, mask, height, rpc_err))
Example #4
def height_map_to_point_cloud(cloud,
                              heights,
                              rpc,
                              H=None,
                              crop_colorized='',
                              off_x=None,
                              off_y=None,
                              ascii_ply=False,
                              with_normals=False,
                              utm_zone=None,
                              llbbx=None):
    """
    Computes a color point cloud from a height map.

    Args:
        cloud: path to the output point cloud (ply format)
        heights: height map, sampled on the same grid as the crop_colorized
            image. In particular, its size is the same as crop_colorized.
        rpc: instances of the rpcm.RPCModel class
        H (optional, default None): numpy array of size 3x3 defining the
            homography transforming the coordinates system of the original full
            size image into the coordinates system of the crop we are dealing
            with.
        crop_colorized (optional, default ''): path to a colorized crop of a
            Pleiades image
        off_{x,y} (optional, default None): coordinates of the point we want to
            use as origin in the local coordinate system of the computed cloud
        ascii_ply (optional, default False): boolean flag telling whether the
            output ply file should be encoded in plain text (ascii)
        with_normals (optional, default False): boolean flag telling whether
            to write the normals in the output ply file
        utm_zone (optional, default None): UTM zone in which to project the
            point cloud
        llbbx (optional, default None): lon/lat bounding box (lonm, lonM,
            latm, latM) used to restrict the output point cloud
    """
    # write rpc coefficients to txt file
    rpcfile = common.tmpfile('.txt')
    rpc.write_to_file(rpcfile)

    if not os.path.exists(crop_colorized):
        crop_colorized = ''
    hij = " ".join(str(x) for x in H.flatten()) if H is not None else ""
    command = ["colormesh", cloud, heights, rpcfile, crop_colorized, "-h", hij]
    if ascii_ply:
        command.append("--ascii")
    if with_normals:
        command.append("--with-normals")
    if utm_zone:
        command.extend(["--utm-zone", utm_zone])
    if llbbx:
        lonm, lonM, latm, latM = llbbx
        command.extend([
            "--lon-m", lonm, "--lon-M", lonM, "--lat-m", latm, "--lat-M", latM
        ])
    if off_x:
        command.extend(["--offset_x", "%d" % off_x])
    if off_y:
        command.extend(["--offset_y", "%d" % off_y])
    common.run(command)
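
A hypothetical call with placeholder paths, assuming numpy is imported as np, `rpc` is an already loaded rpcm.RPCModel instance, and the identity is used as crop homography:

height_map_to_point_cloud('cloud.ply', 'height_map.tif', rpc,
                          H=np.eye(3), crop_colorized='ref_crop.png',
                          utm_zone='31N', llbbx=(2.0, 2.1, 48.8, 48.9))
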
Example #5
def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
                    model=None, epipolar_threshold=10):
    """
    Find matches among two lists of sift keypoints.

    Args:
        k1, k2: paths to text files containing the lists of sift descriptors
        method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
            distance
        sift_thresh (optional, default is 0.6): threshold for distance between SIFT
            descriptors. These descriptors are 128-vectors, whose coefficients
            range from 0 to 255, thus with absolute distance a reasonable value
            for this threshold is between 200 and 300. With relative distance
            (ie ratio between distance to nearest and distance to second
            nearest), the commonly used value for the threshold is 0.6.
        F (optional): affine fundamental matrix
        model (optional, default is None): model imposed by RANSAC when
            searching the set of inliers. If None all matches are considered as
            inliers.
        epipolar_threshold (optional, default is 10): maximum distance allowed for
            a point to the epipolar line of its match.

    Returns:
        if any, a numpy 2D array containing the list of inlier matches.
    """
    # compute matches
    mfile = common.tmpfile('.txt')
    cmd = "matching %s %s -o %s --sift-threshold %f" % (k1, k2, mfile,
                                                        sift_thresh)
    if method == 'absolute':
        cmd += " --absolute"
    if F is not None:
        fij = ' '.join(str(x) for x in [F[0, 2], F[1, 2], F[2, 0],
                                        F[2, 1], F[2, 2]])
        cmd = "%s -f \"%s\"" % (cmd, fij)
        cmd += " --epipolar-threshold {}".format(epipolar_threshold)
    common.run(cmd)

    matches = np.loadtxt(mfile)
    if matches.ndim == 2:  # filter outliers with ransac
        if model == 'fundamental' and len(matches) >= 7:
            common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
        elif model == 'homography' and len(matches) >= 4:
            common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (mfile,
                                                                            mfile))
        elif model == 'hom_fund' and len(matches) >= 7:
            common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (mfile,
                                                                            mfile))
            common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))

    if os.stat(mfile).st_size > 0:  # return numpy array of matches
        return np.loadtxt(mfile)
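
The 'relative' method is Lowe's ratio test; below is an illustrative NumPy version of that criterion (a sketch of what the external `matching` binary computes, not the binary itself):

import numpy as np

def ratio_test_sketch(desc1, desc2, thresh=0.6):
    # desc1, desc2: arrays of shape (n, 128) and (m, 128) with SIFT descriptors
    matches = []
    for i, d in enumerate(desc1):
        dists = np.linalg.norm(desc2 - d, axis=1)
        j, k = np.argsort(dists)[:2]
        if dists[j] < thresh * dists[k]:  # nearest vs second nearest distance
            matches.append((i, j))
    return np.array(matches)
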
Example #6
def image_keypoints(im, x, y, w, h, max_nb=None, thresh_dog=0.0133, nb_octaves=8, nb_scales=3):
    """
    Runs SIFT (the keypoints detection and description only, no matching).

    It uses Ives Rey Otero's implementation published in IPOL:
    http://www.ipol.im/pub/pre/82/

    Args:
        im: path to the input image
        x, y, w, h: coordinates of the rectangular ROI in which to detect
            keypoints (the ROI is clipped to the image domain if needed)
        max_nb (optional): maximal number of keypoints. If more keypoints are
            detected, those at smallest scales are discarded
        thresh_dog, nb_octaves, nb_scales (optional): parameters of the SIFT
            detector (threshold on the difference of Gaussians, number of
            octaves and number of scales per octave)

    Returns:
        path to the file containing the list of descriptors
    """
    # Read file with rasterio
    with rio.open(im) as ds:
        # clip roi to stay inside the image boundaries
        if x < 0:  # if x is negative then replace it with 0 and reduce w
            w += x
            x = 0
        if y < 0:
            h += y
            y = 0
        # if extract not completely inside the full image then resize (w, h)
        w = min(w, ds.width - x)
        h = min(h, ds.height - y)
        in_buffer = ds.read(window=rio.windows.Window(x, y, w, h))

    # Detect keypoints on first band
    keypoints = keypoints_from_nparray(in_buffer[0], thresh_dog=thresh_dog,
                                       nb_octaves=nb_octaves,
                                       nb_scales=nb_scales, offset=(x, y))

    # Limit number of keypoints if needed
    if max_nb is not None:
        keypoints = keypoints[:max_nb]

    keyfile = common.tmpfile('.txt')
    np.savetxt(keyfile, keypoints, delimiter=' ', fmt='%.3f')

    return keyfile
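
Hypothetical usage with a placeholder image path; the returned text file can be loaded back with numpy:

kfile = image_keypoints('pleiades_crop.tif', x=0, y=0, w=1024, h=1024,
                        max_nb=2000)
keypoints = np.loadtxt(kfile)  # one row per detected keypoint
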
Example #7
def height_map(out,
               x,
               y,
               w,
               h,
               rpc1,
               rpc2,
               H1,
               H2,
               disp,
               mask,
               rpc_err,
               out_filt,
               A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    tmp = common.tmpfile('.tif')
    height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, out)

    # apply output filter
    common.run('plambda {0} {1} "x 0 > y nan if" -o {1}'.format(out_filt, out))
Example #8
def plot_matches_low_level(im1, im2, matches):
    """
    Displays two images side by side with matches highlighted

    Args:
        im1, im2: paths to the two input images
        matches: 2D numpy array of size Nx4 containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
            y2)

    Returns:
        path to the resulting image, to be displayed
    """
    # load images
    with rasterio.open(im1, 'r') as f:
        img1 = f.read().squeeze()
    with rasterio.open(im2, 'r') as f:
        img2 = f.read().squeeze()

    # transform single channel to 3-channels
    if img1.ndim < 3:
        img1 = np.dstack([img1] * 3)
    if img2.ndim < 3:
        img2 = np.dstack([img2] * 3)

    # if images have more than 3 channels, keep only the first 3
    if img1.shape[2] > 3:
        img1 = img1[:, :, 0:3]
    if img2.shape[2] > 3:
        img2 = img2[:, :, 0:3]

    # build the output image
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    w = w1 + w2
    h = max(h1, h2)
    out = np.zeros((h, w, 3), np.uint8)
    out[:h1, :w1] = img1
    out[:h2, w1:w] = img2

    # define colors, according to min/max intensity values
    out_min = min(np.nanmin(img1), np.nanmin(img2))
    out_max = max(np.nanmax(img1), np.nanmax(img2))
    green = [out_min, out_max, out_min]
    blue = [out_min, out_min, out_max]

    # plot the matches
    for i in range(len(matches)):
        x1 = matches[i, 0]
        y1 = matches[i, 1]
        x2 = matches[i, 2] + w1
        y2 = matches[i, 3]
        # convert endpoints to int (nn interpolation)
        x1, y1, x2, y2 = list(map(int, np.round([x1, y1, x2, y2])))
        plot_line(out, x1, y1, x2, y2, blue)
        try:
            out[y1, x1] = green
            out[y2, x2] = green
        except IndexError:
            pass
    # save the output image, and return its path
    outfile = common.tmpfile('.png')
    common.rasterio_write(outfile, out)
    return outfile
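
`plot_line` is assumed to be defined elsewhere in the module; a minimal illustrative stand-in (dense sampling plus rounding, not the s2p implementation) could look like this:

import numpy as np

def plot_line(img, x1, y1, x2, y2, color):
    # draw the segment by sampling it densely and rounding each sample to the
    # nearest pixel; samples falling outside the image are ignored
    n = int(max(abs(x2 - x1), abs(y2 - y1))) + 1
    for t in np.linspace(0.0, 1.0, n):
        xx = int(round(x1 + t * (x2 - x1)))
        yy = int(round(y1 + t * (y2 - y1)))
        if 0 <= yy < img.shape[0] and 0 <= xx < img.shape[1]:
            img[yy, xx] = color
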
Example #9

def plot_matches(im1, im2, rpc1, rpc2, matches, x=None, y=None, w=None, h=None,
                 outfile=None):
    """
    Plot matches on Pleiades images

    Args:
        im1, im2: paths to full Pleiades images
        rpc1, rpc2: two  instances of the RPCModel class, or paths to xml files
            containing the rpc coefficients
        matches: 2D numpy array of size 4xN containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
Example #10
def image_tile_mask(x, y, w, h, roi_gml=None, cld_gml=None, raster_mask=None,
                    img_shape=None, border_margin=10):
    """
    Compute a validity mask for an image tile from vector/raster image masks.

    Args:
        x, y, w, h (ints): top-left pixel coordinates and size of the tile
        roi_gml (str): path to a gml file containing a mask defining the valid
            area in the input reference image
        cld_gml (str): path to a gml file containing a mask defining the cloudy
            areas in the input reference image
        raster_mask (str): path to a raster mask file
        img_shape (tuple): height and width of the reference input (full) image
        border_margin (int): width, in pixels, of a stripe of pixels to discard
            along the reference input image borders

    Returns:
        2D array containing the output binary mask. 0 indicates masked pixels,
        1 visible pixels.
    """
    x, y, w, h = map(int, (x, y, w, h))

    # coefficients of the transformation associated to the crop
    H = common.matrix_translation(-x, -y)
    hij = ' '.join([str(el) for el in H.flatten()])

    mask = np.ones((h, w), dtype=bool)

    if roi_gml is not None:  # image domain mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               roi_gml, tmp),
                              shell=True)
        with rasterio.open(tmp, 'r') as f:
            mask = np.logical_and(mask, f.read().squeeze().astype(bool))

        if not mask.any():
            return mask

    if cld_gml is not None:  # cloud mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               cld_gml, tmp),
                              shell=True)
        with rasterio.open(tmp, 'r') as f:
            mask = np.logical_and(mask, ~f.read().squeeze().astype(bool))

        if not mask.any():
            return mask

    if raster_mask is not None:
        with rasterio.open(raster_mask, 'r') as f:
            mask = np.logical_and(mask, f.read(window=((y, y+h), (x, x+w)),
                                               boundless=True).squeeze())

    # image borders mask
    if img_shape is not None:
        m = np.ones(img_shape, dtype=bool)
        m[:border_margin] = 0  # first rows
        m[-border_margin:] = 0  # last rows
        m[:, :border_margin] = 0  # first columns
        m[:, -border_margin:] = 0  # last columns
        mask = np.logical_and(mask, common.crop_array(m, x, y, w, h))

    return mask
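
Hypothetical usage with placeholder paths and sizes:

tile_mask = image_tile_mask(0, 0, 600, 600, raster_mask='water_mask.tif',
                            img_shape=(20000, 20000), border_margin=10)
if not tile_mask.any():
    print('tile fully masked, nothing to do')
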
Example #11
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output disparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    if rectify_secondary_tile_only(algo) is False:
        disp_min = [disp_min]
        disp_max = [disp_max]

    # limit disparity bounds
    assert len(disp_min) == len(disp_max)
    for dim in range(len(disp_min)):
        if disp_min[dim] is not None and disp_max[dim] is not None:
            image_size = common.image_size_gdal(im1)
            if disp_max[dim] - disp_min[dim] > image_size[dim]:
                center = 0.5 * (disp_min[dim] + disp_max[dim])
                disp_min[dim] = int(center - 0.5 * image_size[dim])
                disp_max[dim] = int(center + 0.5 * image_size[dim])

        # round disparity bounds
        if disp_min[dim] is not None:
            disp_min[dim] = int(np.floor(disp_min[dim]))
        if disp_max[dim] is not None:
            disp_max[dim] = int(np.ceil(disp_max[dim]))

    if rectify_secondary_tile_only(algo) is False:
        disp_min = disp_min[0]
        disp_max = disp_max[0]

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2, lr))

        # create rejection mask (0 means rejected, 1 means accepted)
        # keep only the points that are matched and present in both input images
        common.run('plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'.format(disp, im1, im2, mask))

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'tvl1_2d':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp, mask,
                                                    1), env)


    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask), env)

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
        common.run('{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5} -confidence_consensusL {6}'.format('mgm',
                                                                                 disp_min,
                                                                                 disp_max,
                                                                                 im1, im2,
                                                                                 disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))


    if algo == 'mgm_multi_lsd':
        ref = im1
        sec = im2
        wref = common.tmpfile('.tif')
        wsec = common.tmpfile('.tif')
        # TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(ref)
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref))
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(sec)
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec))


        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['SUBPIX'] = '2'
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        # increasing these numbers compensates the loss of regularity after incorporating LSD weights
        P1 = 12*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 48*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = disp+'.confidence.tif'
        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census -O 8 -P1 {7} -P2 {8} -wl {3} -wr {4} -confidence_consensusL {10} {5} {6} {9}'.format('mgm_multi',
                                                                                 disp_min,
                                                                                 disp_max,
                                                                                 wref,wsec,
                                                                                 im1, im2,
                                                                                 P1, P2,
                                                                                 disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']
        P1 = 8*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 32*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census {3} {4} {5} -confidence_consensusL {6}'.format('mgm_multi',
                                                                                 disp_min,
                                                                                 disp_max,
                                                                                 im1, im2,
                                                                                 disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if (algo == 'micmac'):
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(micmac_cost, mask))
Example #12
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.

    Note: there is no guarantee that this function works with z != 1.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i + 1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i + 1]['rpc']

        if os.path.exists(disp):
            # homography for warp
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                pointing = os.path.join(cfg['out_dir'],
                                        'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec, np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run(
                'plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s'
                % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' %
                       (tmp_abs, h2, tmp_1d_to_2d))

            # warp
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' %
                       (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' %
                       (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file,
                                               disp_list,
                                               rpc_ref,
                                               rpc_list,
                                               colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x + w, y, y + h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
Example #13
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    extra = os.path.join(out_dir, 'pair_1', 'rectified_disp_confidence.tif')
    if not os.path.exists(extra):
        extra = ''
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(
            hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2 * cfg['horizontal_margin'],
                                      hh + 2 * cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file,
                                          disp,
                                          mask_rect,
                                          rpc1,
                                          rpc2,
                                          H_ref,
                                          H_sec,
                                          pointing,
                                          colors,
                                          extra,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x + w, y, y + h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
Example #14
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, timeout=600, max_disp_range=None,
                          extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output disparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min: smallest disparity to consider
        disp_max: biggest disparity to consider
        timeout: time in seconds after which the disparity command will
            raise an error if it hasn't returned.
            Only applies to `mgm*` algorithms.
        max_disp_range: maximum allowed width of the [disp_min, disp_max]
            interval (see Raises below)
        extra_params: optional string with algorithm-dependent parameters

    Raises:
        MaxDisparityRangeError: if max_disp_range is defined,
            and if the [disp_min, disp_max] range is greater
            than max_disp_range, to avoid endless computation.
    """
    # limit disparity bounds
    if disp_min is not None and disp_max is not None:
        image_size = common.image_size_gdal(im1)
        if disp_max - disp_min > image_size[0]:
            center = 0.5 * (disp_min + disp_max)
            disp_min = int(center - 0.5 * image_size[0])
            disp_max = int(center + 0.5 * image_size[0])

    # round disparity bounds
    if disp_min is not None:
        disp_min = int(np.floor(disp_min))
    if disp_max is not None:
        disp_max = int(np.ceil(disp_max))

    if (
        max_disp_range is not None
        and disp_max - disp_min > max_disp_range
    ):
        raise MaxDisparityRangeError(
            'Disparity range [{}, {}] greater than {}'.format(
                disp_min, disp_max, max_disp_range
            )
        )

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2, lr))

        create_rejection_mask(disp, im1, im2, mask)

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask), env)

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'

        nb_dir = cfg['mgm_nb_directions']

        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )

        create_rejection_mask(disp, im1, im2, mask)


    if algo == 'mgm_multi_lsd':
        ref = im1
        sec = im2
        wref = common.tmpfile('.tif')
        wsec = common.tmpfile('.tif')
        # TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(ref)
        #TODO refactor this command to not use shell=True
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref),
                   shell=True)
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(sec)
        #TODO refactor this command to not use shell=True
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec),
                   shell=True)


        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['SUBPIX'] = '2'
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        nb_dir = cfg['mgm_nb_directions']

        # increasing these numbers compensates the loss of regularity after incorporating LSD weights
        P1 = 12*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 48*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = disp+'.confidence.tif'

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-S 6 '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-wl {wref} -wr {wsec} '
            '-P1 {P1} -P2 {P2} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm_multi',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                wref=wref,
                wsec=wsec,
                P1=P1,
                P2=P2,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )

        create_rejection_mask(disp, im1, im2, mask)


    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        nb_dir = cfg['mgm_nb_directions']

        P1 = 8*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 32*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-S 6 '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-P1 {P1} -P2 {P2} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm_multi',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                P1=P1,
                P2=P2,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )

        create_rejection_mask(disp, im1, im2, mask)

    if (algo == 'micmac'):
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run(["plambda", micmac_cost, "x x%q10 < 0 255 if", "-o", mask])