Example #1
def multidisp_map_to_point_cloud(out, disp_list, rpc_ref, rpc_list, colors,
                                 utm_zone=None, llbbx=None, xybbx=None):
    """
    Computes a 3D point cloud from N disparity maps.

    Args:
        out: path to the output ply file
        disp_list: paths to the disparity maps
        rpc_ref, rpc_list: paths to the xml files
        colors: path to the png image containing the colors
    """

    disp_command = ['--disp%d %s' % (i+1, disp) for i, disp in enumerate(disp_list)]
    rpc_command = ['--rpc_sec%d %s' % (i+1, rpc) for i, rpc in enumerate(rpc_list)]

    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""

    command = 'multidisp2ply {} {} {} {} {}'.format(out, len(disp_list),
                                                    " ".join(disp_command),
                                                    "--rpc_ref %s" % rpc_ref,
                                                    " ".join(rpc_command))
    command += ' --color {}'.format(colors)
    command += ' {} {} {}'.format(utm, lbb, xbb)
    common.run(command)
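A minimal usage sketch (the paths below are hypothetical and only illustrate the calling convention; the function just assembles and runs a multidisp2ply command line):

# hypothetical paths, for illustration only
multidisp_map_to_point_cloud('cloud.ply',
                             ['disp2D_1.tif', 'disp2D_2.tif'],
                             'rpc_ref.xml',
                             ['rpc_sec_1.xml', 'rpc_sec_2.xml'],
                             'ref.png',
                             utm_zone='31N',
                             xybbx=(0, 500, 0, 500))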
Example #2
def disp_map_to_point_cloud(out, disp, mask, rpc1, rpc2, H1, H2, A, colors,
                            utm_zone=None, llbbx=None, xybbx=None, xymsk=None):
    """
    Computes a 3D point cloud from a disparity map.

    Args:
        out: path to the output ply file
        disp, mask: paths to the disparity and mask maps
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        A: path to txt file containing the pointing correction matrix
            for im2
        colors: path to the png image containing the colors
    """
    href = " ".join(str(x) for x in np.loadtxt(H1).flatten())
    hsec = " ".join(str(x) for x in np.dot(np.loadtxt(H2),
                                           np.linalg.inv(np.loadtxt(A))).flatten())
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""
    msk = "--mask-orig %s" % xymsk if xymsk else ""

    command = 'disp2ply {} {} {} {} {}'.format(out, disp, mask, rpc1, rpc2)
    command += ' {} -href "{}" -hsec "{}"'.format(colors, href, hsec)
    command += ' {} {} {} {}'.format(utm, lbb, xbb, msk)
    common.run(command)
Example #3
def height_map(out, x, y, w, h, z, rpc1, rpc2, H1, H2, disp, mask, rpc_err,
               out_filt, A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        z: zoom factor (usually 1, 2 or 4) used to produce the input disparity
            map
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    tmp = common.tmpfile('.tif')
    height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, z, out)

    # apply output filter
    common.run('plambda {0} {1} "x 0 > y nan if" -o {1}'.format(out_filt, out))
Example #4
def transfer_map(in_map, H, x, y, w, h, zoom, out_map):
    """
    Transfer the heights computed on the rectified grid to the original
    Pleiades image grid.

    Args:
        in_map: path to the input map, usually a height map or a mask, sampled
            on the rectified grid
        H: path to txt file containing a numpy 3x3 array representing the
            rectifying homography
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        zoom: zoom factor (usually 1, 2 or 4) used to produce the input height
            map
        out_map: path to the output map
    """
    # write the inverse of the resampling transform matrix. In brief it is:
    # homography * translation * zoom
    # This matrix transports the coordinates of the original cropped and
    # zoomed grid (the one desired for out_height) to the rectified cropped and
    # zoomed grid (the one we have for height)
    Z = np.diag([zoom, zoom, 1])
    A = common.matrix_translation(x, y)
    HH = np.dot(np.loadtxt(H), np.dot(A, Z))

    # apply the homography
    # write the 9 coefficients of the homography to a string, then call synflow
    # to produce the flow, then backflow to apply it
    # zero:256x256 is the iio way to create a 256x256 image filled with zeros
    hij = ' '.join(['%r' % num for num in HH.flatten()])
    common.run('synflow hom "%s" zero:%dx%d /dev/null - | BILINEAR=1 backflow - %s %s' % (
        hij, w/zoom, h/zoom, in_map, out_map))
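To make the composition performed above explicit, here is a small standalone numpy sketch of the same product (homography * translation * zoom), with an identity homography and arbitrary ROI values standing in for the real inputs:

import numpy as np

zoom, x, y = 2, 100, 50
Z = np.diag([zoom, zoom, 1])                     # zoom of the output grid
T = np.array([[1, 0, x], [0, 1, y], [0, 0, 1]])  # translation to the ROI corner
H = np.eye(3)                                    # stand-in for the rectifying homography
HH = H.dot(T).dot(Z)  # transports coordinates of the cropped/zoomed grid to the rectified grid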
Example #5
def loop_zhang(F, w, h):
    """
    Computes rectifying homographies from a fundamental matrix, with Loop-Zhang.

    Args:
        F: 3x3 numpy array containing the fundamental matrix
        w, h: image size. The two images are supposed to have the same size

    Returns:
        The two rectifying homographies.

    The rectifying homographies are computed using the Pascal Monasse binary
    named rectify_mindistortion. It uses the Loop-Zhang algorithm.
    """
    Ffile = common.tmpfile('.txt')
    Haf = common.tmpfile('.txt')
    Hbf = common.tmpfile('.txt')
    common.matrix_write(Ffile, F)
    common.run('rectify_mindistortion %s %d %d %s %s > /dev/null' % (Ffile, w,
                                                                     h, Haf,
                                                                     Hbf))
    Ha = common.matrix_read(Haf, size=(3, 3))
    Hb = common.matrix_read(Hbf, size=(3, 3))

    # check if both the images are rotated
    a = does_this_homography_change_the_vertical_direction(Ha)
    b = does_this_homography_change_the_vertical_direction(Hb)
    if a and b:
        R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
        Ha = np.dot(R, Ha)
        Hb = np.dot(R, Hb)
    return Ha, Hb
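A hedged usage sketch, assuming the fundamental matrix has already been estimated and saved (the file name is hypothetical):

# rectify a 1000x1000 pair from a precomputed fundamental matrix
F = np.loadtxt('fundamental.txt')   # 3x3 fundamental matrix
Ha, Hb = loop_zhang(F, 1000, 1000)
np.savetxt('H_ref.txt', Ha)
np.savetxt('H_sec.txt', Hb)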
Example #6
def plot_vectors(p, v, x, y, w, h, f=1, out_file=None):
    """
    Plots vectors on an image, using gnuplot

    Args:
        p: points (origins of vectors), represented as a numpy Nx2 array
        v: vectors, represented as a numpy Nx2 array
        x, y, w, h: rectangular ROI
        f: (optional, default is 1) exaggeration factor
        out_file: (optional, default is None) path to the output file

    Returns:
        nothing, but opens a display or writes a png file
    """
    tmp = common.tmpfile('.txt')
    data = np.hstack((p, v))
    np.savetxt(tmp, data, fmt='%6f')
    gp_string = 'set term png size %d,%d;unset key;unset tics;plot [%d:%d] [%d:%d] "%s" u($1):($2):(%d*$3):(%d*$4) w vectors head filled' % (w, h, x, x+w, y, y+h, tmp, f, f)

    # remember whether the caller asked for an output file; if not, write to a
    # temporary png and open it in a viewer afterwards
    display = out_file is None
    if display:
        out_file = common.tmpfile('.png')

    common.run("gnuplot -p -e '%s' > %s" % (gp_string, out_file))
    print(out_file)

    if display:
        os.system("v %s &" % out_file)
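An illustrative call with synthetic data (values chosen arbitrarily):

# plot 100 random displacement vectors over a 500x500 ROI, exaggerated 10 times
p = np.random.uniform(0, 500, (100, 2))
v = np.random.normal(0, 1, (100, 2))
plot_vectors(p, v, 0, 0, 500, 500, f=10, out_file='vectors.png')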
Example #7
File: s2p.py Project: mnhrdt/s2p
def global_dsm(tiles):
    """
    """
    out_dsm_vrt = os.path.join(cfg['out_dir'], 'dsm.vrt')
    out_dsm_tif = os.path.join(cfg['out_dir'], 'dsm.tif')

    dsms_list = [os.path.join(t['dir'], 'dsm.tif') for t in tiles]
    dsms = '\n'.join(d for d in dsms_list if os.path.exists(d))

    input_file_list = os.path.join(cfg['out_dir'], 'gdalbuildvrt_input_file_list.txt')

    with open(input_file_list, 'w') as f:
        f.write(dsms)

    common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" % (input_file_list,
                                                                       out_dsm_vrt))
    global_srcwin = np.loadtxt(os.path.join(cfg['out_dir'],
                                            "global_srcwin.txt"))
    res = cfg['dsm_resolution']
    xoff, yoff, xsize, ysize = global_srcwin

    common.run(" ".join(["gdal_translate",
                         "-co TILED=YES -co BIGTIFF=IF_SAFER",
                         "-projwin %s %s %s %s %s %s" % (xoff,
                                                         yoff,
                                                         xoff + xsize * res,
                                                         yoff - ysize * res,
                                                         out_dsm_vrt, out_dsm_tif)]))
Example #8
def erosion(out, msk, radius):
    """
    Erodes the accepted regions (ie eliminates more pixels)

    Args:
        out: path to the output mask image file
        msk: path to the input mask image file
        radius (in pixels): size of the disk used for the erosion
    """
    if radius >= 2:
        common.run('morsi disk%d erosion %s %s' % (int(radius), msk, out))
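An illustrative call (paths are hypothetical):

# erode the accepted regions of a binary mask with a disk of radius 3 pixels
erosion('mask_eroded.png', 'mask.png', 3)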
Example #9
def keypoints_match(k1,
                    k2,
                    method='relative',
                    sift_thresh=0.6,
                    F=None,
                    model=None,
                    epipolar_threshold=10):
    """
    Find matches between two lists of sift keypoints.

    Args:
        k1, k2: paths to text files containing the lists of sift descriptors
        method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
            distance
        sift_thresh (optional, default is 0.6): threshold for distance between SIFT
            descriptors. These descriptors are 128-vectors, whose coefficients
            range from 0 to 255, thus with absolute distance a reasonable value
            for this threshold is between 200 and 300. With relative distance
            (ie ratio between distance to nearest and distance to second
            nearest), the commonly used value for the threshold is 0.6.
        F (optional): affine fundamental matrix
        model (optional, default is None): model imposed by RANSAC when
            searching the set of inliers. If None all matches are considered as
            inliers.

    Returns:
        if any, a numpy 2D array containing the list of inliers matches.
    """
    # compute matches
    mfile = common.tmpfile('.txt')
    cmd = "matching %s %s -%s %f -o %s" % (k1, k2, method, sift_thresh, mfile)
    if F is not None:
        fij = ' '.join(
            str(x) for x in [F[0, 2], F[1, 2], F[2, 0], F[2, 1], F[2, 2]])
        cmd = "%s -f \"%s\"" % (cmd, fij)
        cmd += " --epipolar-threshold {}".format(epipolar_threshold)
    common.run(cmd)

    matches = np.loadtxt(mfile)
    if matches.ndim == 2:  # filter outliers with ransac
        if model == 'fundamental' and len(matches) >= 7:
            common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
        elif model == 'homography' and len(matches) >= 4:
            common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" %
                       (mfile, mfile))
        elif model == 'hom_fund' and len(matches) >= 7:
            common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" %
                       (mfile, mfile))
            common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))

    if os.stat(mfile).st_size > 0:  # return numpy array of matches
        return np.loadtxt(mfile)
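A hedged usage sketch (the descriptor file names are hypothetical):

# match two sift descriptor files and keep only the RANSAC inliers of a
# fundamental matrix model
m = keypoints_match('k1.txt', 'k2.txt', method='relative', sift_thresh=0.6,
                    model='fundamental')
if m is not None:
    print('%d inlier matches' % len(m))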
Example #10
def plys_to_dsm(tiles):
    """
    """
    out_dsm = os.path.join(cfg['out_dir'], 'dsm.tif')
    clouds = ' '.join(os.path.join(t['dir'], 'cloud.ply') for t in tiles)
    if 'utm_bbx' in cfg:
        bbx = cfg['utm_bbx']
        common.run("ls %s | plyflatten -bb \"%f %f %f %f \" %f %s" %
                   (clouds, bbx[0], bbx[1], bbx[2], bbx[3],
                    cfg['dsm_resolution'], out_dsm))
    else:
        common.run("ls %s | plyflatten %f %s" %
                   (clouds, cfg['dsm_resolution'], out_dsm))
Example #11
def global_dsm(tiles):
    """
    """
    out_dsm_vrt = os.path.join(cfg['out_dir'], 'dsm.vrt')
    out_dsm_tif = os.path.join(cfg['out_dir'], 'dsm.tif')

    dsms_list = [os.path.join(t['dir'], 'dsm.tif') for t in tiles]
    dsms = '\n'.join(d for d in dsms_list if os.path.exists(d))

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list.txt')

    with open(input_file_list, 'w') as f:
        f.write(dsms)

    common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" %
               (input_file_list, out_dsm_vrt))

    res = cfg['dsm_resolution']

    if 'utm_bbx' in cfg:
        bbx = cfg['utm_bbx']
        xoff = bbx[0]
        yoff = bbx[3]
        xsize = int(np.ceil((bbx[1] - bbx[0]) / res))
        ysize = int(np.ceil((bbx[3] - bbx[2]) / res))
        projwin = "-projwin %s %s %s %s" % (xoff, yoff, xoff + xsize * res,
                                            yoff - ysize * res)
    else:
        projwin = ""

    common.run(" ".join([
        "gdal_translate", "-co TILED=YES -co BIGTIFF=IF_SAFER",
        "%s %s %s" % (projwin, out_dsm_vrt, out_dsm_tif)
    ]))

    # EXPORT CONFIDENCE
    out_conf_vrt = os.path.join(cfg['out_dir'], 'confidence.vrt')
    out_conf_tif = os.path.join(cfg['out_dir'], 'confidence.tif')

    dsms_list = [os.path.join(t['dir'], 'confidence.tif') for t in tiles]
    dems_list_ok = [d for d in dsms_list if os.path.exists(d)]
    dsms = '\n'.join(dems_list_ok)

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list2.txt')

    if len(dems_list_ok) > 0:

        with open(input_file_list, 'w') as f:
            f.write(dsms)

        common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" %
                   (input_file_list, out_conf_vrt))

        common.run(" ".join([
            "gdal_translate", "-co TILED=YES -co BIGTIFF=IF_SAFER",
            "%s %s %s" % (projwin, out_conf_vrt, out_conf_tif)
        ]))
Example #12
    def crop_image(self, image_in, write_folder, crop):
        # crop should be x_min, y_min, width, height

        x_min, y_min, width, height = crop

        image_out = self._get_output_name(write_folder, image_in)
        # generate image
        print("Croping to image {} ...".format(image_out))

        common.run('gdal_translate -srcwin {} {} {} {} {} {}'.format(
            x_min, y_min, width, height, image_in, image_out))

        print("Done")

        return (0)
Example #13
def register_heights(im1, im2):
    """
    Affine registration of heights.

    Args:
        im1: first height map
        im2: second height map, to be registered on the first one

    Returns:
        path to the registered second height map
    """
    # remove high frequencies with a morphological zoom out
    im1_low_freq = common.image_zoom_out_morpho(im1, 4)
    im2_low_freq = common.image_zoom_out_morpho(im2, 4)

    # first read the images and store them as numpy 1D arrays, removing all the
    # nans and inf
    i1 = piio.read(im1_low_freq).ravel() #np.ravel() gives a 1D view
    i2 = piio.read(im2_low_freq).ravel()
    ind = np.logical_and(np.isfinite(i1), np.isfinite(i2))
    h1 = i1[ind]
    h2 = i2[ind]

    # for debug
    print(np.shape(i1))
    print(np.shape(h1))

#    # 1st option: affine
#    # we search the (u, v) vector that minimizes the following sum (over
#    # all the pixels):
#    #\sum (im1[i] - (u*im2[i]+v))^2
#    # it is a least squares minimization problem
#    A = np.vstack((h2, h2*0+1)).T
#    b = h1
#    z = np.linalg.lstsq(A, b)[0]
#    u = z[0]
#    v = z[1]
#
#    # apply the affine transform and return the modified im2
#    out = common.tmpfile('.tif')
#    common.run('plambda %s "x %f * %f +" > %s' % (im2, u, v, out))

    # 2nd option: translation only
    v = np.mean(h1 - h2)
    out = common.tmpfile('.tif')
    common.run('plambda %s "x %f +" -o %s' % (im2, v, out))

    return out
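The commented-out affine option is a plain least-squares fit; a standalone numpy sketch with synthetic heights shows the idea:

import numpy as np

h1 = np.array([10.0, 12.0, 15.0, 20.0])   # reference heights
h2 = np.array([ 9.0, 11.2, 14.1, 19.0])   # heights to register on h1
A = np.vstack((h2, np.ones_like(h2))).T   # model: h1 ~= u*h2 + v
u, v = np.linalg.lstsq(A, h1, rcond=None)[0]
registered = u * h2 + v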
Example #14
File: s2p.py Project: zousiyuan/s2p
def heights_to_ply(tile):
    """
    Generate a ply cloud.

    Args:
        tile: a dictionary that provides all you need to process a tile
    """
    # merge the n-1 height maps of the tile (n = nb of images)
    heights_fusion(tile)

    # compute a ply from the merged height map
    out_dir = tile['dir']
    x, y, w, h = tile['coordinates']
    z = cfg['subsampling_factor']
    plyfile = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    height_map = os.path.join(out_dir, 'height_map.tif')
    if cfg['skip_existing'] and os.path.isfile(plyfile):
        print('ply file already exists for tile {} {}'.format(x, y))
        return

    # H is the homography transforming the coordinates system of the original
    # full size image into the coordinates system of the crop
    H = np.dot(np.diag([1 / z, 1 / z, 1]), common.matrix_translation(-x, -y))
    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)
    common.image_safe_zoom_fft(colors, z, colors)
    triangulation.height_map_to_point_cloud(plyfile,
                                            height_map,
                                            cfg['images'][0]['rpc'],
                                            H,
                                            colors,
                                            utm_zone=cfg['utm_zone'],
                                            llbbx=tuple(cfg['ll_bbx']))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (plyfile, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(height_map)
        common.remove(colors)
        common.remove(
            os.path.join(out_dir, 'cloud_water_image_domain_mask.png'))
Example #15
def merge(im1, im2, thresh, out, conservative=False):
    """
    Args:
        im1, im2: paths to the two input images
        thresh: distance threshold on the intensity values
        out: path to the output image
        conservative (optional, default is False): if True, keep only the
            pixels where the two height maps agree

    This function merges two images. They are supposed to be two height maps,
    sampled on the same grid. If a pixel has a valid height (ie not inf) in
    only one of the two maps, then we keep this height (if the 'conservative'
    option is set to False). When two heights are available, if they differ
    less than the threshold we take the mean, if not we discard the pixel (ie
    assign NAN to the output pixel).
    """
    # first register the second image on the first
    im2 = register_heights(im1, im2)

    if conservative:
        # then merge
        # the following plambda expression implements:
        # if isfinite x
        #   if isfinite y
        #     if fabs(x - y) < t
        #       return (x+y)/2
        #     return nan
        #   return nan
        # return nan
        common.run("""
            plambda %s %s "x isfinite y isfinite x y - fabs %f < x y + 2 / nan if nan
            if nan if" -o %s
            """ % ( im1, im2, thresh, out))
    else:
        # then merge
        # the following plambda expression implements:
        # if isfinite x
        #   if isfinite y
        #     if fabs(x - y) < t
        #       return (x+y)/2
        #     return nan
        #   return x
        # return y
        common.run("""
            plambda %s %s "x isfinite y isfinite x y - fabs %f < x y + 2 / nan if x
            if y if" -o %s
            """ % ( im1, im2, thresh, out))
Example #16
def height_map_to_point_cloud(cloud,
                              heights,
                              rpc,
                              H=None,
                              crop_colorized='',
                              off_x=None,
                              off_y=None,
                              ascii_ply=False,
                              with_normals=False,
                              utm_zone=None,
                              llbbx=None):
    """
    Computes a color point cloud from a height map.

    Args:
        cloud: path to the output point cloud (ply format)
        heights: height map, sampled on the same grid as the crop_colorized
            image. In particular, its size is the same as crop_colorized.
        rpc: path to xml file containing RPC data for the current Pleiades image
        H (optional, default None): numpy array of size 3x3 defining the
            homography transforming the coordinates system of the original full
            size image into the coordinates system of the crop we are dealing
            with.
        crop_colorized (optional, default ''): path to a colorized crop of a
            Pleiades image
        off_{x,y} (optional, default None): coordinates of the point we want to
            use as origin in the local coordinate system of the computed cloud
        ascii_ply (optional, default false): boolean flag to tell if the output
            ply file should be encoded in plain text (ascii).
        utm_zone (optional, default None):
    """
    if not os.path.exists(crop_colorized):
        crop_colorized = ''
    hij = " ".join(str(x) for x in H.flatten()) if H is not None else ""
    asc = "--ascii" if ascii_ply else ""
    nrm = "--with-normals" if with_normals else ""
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    command = "colormesh %s %s %s %s -h \"%s\" %s %s %s %s" % (
        cloud, heights, rpc, crop_colorized, hij, asc, nrm, utm, lbb)
    if off_x:
        command += " --offset_x %d" % off_x
    if off_y:
        command += " --offset_y %d" % off_y
    common.run(command)
Example #17
File: s2p.py Project: mnhrdt/s2p
def heights_to_ply(tile):
    """
    Generate a ply cloud.

    Args:
        tile: a dictionary that provides all you need to process a tile
    """
    # merge the n-1 height maps of the tile (n = nb of images)
    heights_fusion(tile)

    # compute a ply from the merged height map
    out_dir = tile['dir']
    x, y, w, h = tile['coordinates']
    z = cfg['subsampling_factor']
    plyfile = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    height_map = os.path.join(out_dir, 'height_map.tif')
    if cfg['skip_existing'] and os.path.isfile(plyfile):
        print('ply file already exists for tile {} {}'.format(x, y))
        return

    # H is the homography transforming the coordinates system of the original
    # full size image into the coordinates system of the crop
    H = np.dot(np.diag([1 / z, 1 / z, 1]), common.matrix_translation(-x, -y))
    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(common.image_crop_gdal(cfg['images'][0]['img'], x, y,
                                                 w, h), colors)
    common.image_safe_zoom_fft(colors, z, colors)
    triangulation.height_map_to_point_cloud(plyfile, height_map,
                                            cfg['images'][0]['rpc'], H, colors,
                                            utm_zone=cfg['utm_zone'],
                                            llbbx=tuple(cfg['ll_bbx']))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (plyfile, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(height_map)
        common.remove(colors)
        common.remove(os.path.join(out_dir,
                                   'cloud_water_image_domain_mask.png'))
Example #18
def fundamental_matrix_ransac(matches, precision=1.0, return_inliers=False):
    """
    Estimates the fundamental matrix given a set of point correspondences
    between two images, using ransac.

    Arguments:
        matches: numpy 2D array of size Nx4 containing a list of pairs of
            matching points. Each line is of the form x1, y1, x2, y2, where (x1,
            y1) is the point in the first view while (x2, y2) is the matching
            point in the second view.
            It can be the path to a txt file containing such an array.
        precision: optional parameter indicating the maximum error
            allowed for counting the inliers
        return_inliers: optional boolean flag to activate/deactivate inliers
            output

    Returns:
        the estimated fundamental matrix, and optionally the 2D array containing
        the inliers

    The algorithm uses ransac as a search engine.
    """
    if type(matches) is np.ndarray:
        # write a file containing the list of correspondences. The
        # expected format is a text file with one match per line: x1 y1 x2 y2
        matchfile = common.tmpfile('.txt')
        np.savetxt(matchfile, matches)
    else:
        # assume it is a path to a txt file containing the matches
        matchfile = matches

    # call ransac binary, from Enric's imscript
    inliers = common.tmpfile('.txt')
    Ffile = common.tmpfile('.txt')
    awk_command = "awk {\'printf(\"%e %e %e\\n%e %e %e\\n%e %e %e\", $3, $4, $5, $6, $7, $8, $9, $10, $11)\'}"
    common.run("ransac fmn 1000 %f 7 %s < %s | grep param | %s > %s" %
               (precision, inliers, matchfile, awk_command, Ffile))
    if return_inliers:
        return np.loadtxt(Ffile).transpose(), np.loadtxt(inliers)
    else:
        return np.loadtxt(Ffile).transpose()
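A hedged usage sketch (the matches file is hypothetical):

# estimate F from an Nx4 array of matches (one "x1 y1 x2 y2" correspondence per row)
matches = np.loadtxt('matches.txt')
F, inliers = fundamental_matrix_ransac(matches, precision=1.0, return_inliers=True)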
Example #19
def fundamental_matrix_ransac(matches, precision=1.0, return_inliers=False):
    """
    Estimates the fundamental matrix given a set of point correspondences
    between two images, using ransac.

    Arguments:
        matches: numpy 2D array of size Nx4 containing a list of pairs of
            matching points. Each line is of the form x1, y1, x2, y2, where (x1,
            y1) is the point in the first view while (x2, y2) is the matching
            point in the second view.
            It can be the path to a txt file containing such an array.
        precision: optional parameter indicating the maximum error
            allowed for counting the inliers
        return_inliers: optional boolean flag to activate/deactivate inliers
            output

    Returns:
        the estimated fundamental matrix, and optionally the 2D array containing
        the inliers

    The algorithm uses ransac as a search engine.
    """
    if type(matches) is np.ndarray:
        # write a file containing the list of correspondences. The
        # expected format is a text file with one match per line: x1 y1 x2 y2
        matchfile = common.tmpfile('.txt')
        np.savetxt(matchfile, matches)
    else:
        # assume it is a path to a txt file containing the matches
        matchfile = matches

    # call ransac binary, from Enric's imscript
    inliers = common.tmpfile('.txt')
    Ffile = common.tmpfile('.txt')
    awk_command = "awk {\'printf(\"%e %e %e\\n%e %e %e\\n%e %e %e\", $3, $4, $5, $6, $7, $8, $9, $10, $11)\'}"
    common.run("ransac fmn 1000 %f 7 %s < %s | grep param | %s > %s" % (precision, inliers, matchfile, awk_command, Ffile))
    if return_inliers:
        return np.loadtxt(Ffile).transpose(), np.loadtxt(inliers)
    else:
        return np.loadtxt(Ffile).transpose()
Example #20
def height_map_to_point_cloud(cloud, heights, rpc, H=None, crop_colorized='',
                              off_x=None, off_y=None, ascii_ply=False,
                              with_normals=False, utm_zone=None, llbbx=None):
    """
    Computes a color point cloud from a height map.

    Args:
        cloud: path to the output point cloud (ply format)
        heights: height map, sampled on the same grid as the crop_colorized
            image. In particular, its size is the same as crop_colorized.
        rpc: path to xml file containing RPC data for the current Pleiades image
        H (optional, default None): numpy array of size 3x3 defining the
            homography transforming the coordinates system of the original full
            size image into the coordinates system of the crop we are dealing
            with.
        crop_colorized (optional, default ''): path to a colorized crop of a
            Pleiades image
        off_{x,y} (optional, default None): coordinates of the point we want to
            use as origin in the local coordinate system of the computed cloud
        ascii_ply (optional, default false): boolean flag to tell if the output
            ply file should be encoded in plain text (ascii).
        utm_zone (optional, default None):
    """
    if not os.path.exists(crop_colorized):
        crop_colorized = ''
    hij = " ".join(str(x) for x in H.flatten()) if H is not None else ""
    asc = "--ascii" if ascii_ply else ""
    nrm = "--with-normals" if with_normals else ""
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    command = "colormesh %s %s %s %s -h \"%s\" %s %s %s %s" % (cloud, heights,
                                                               rpc,
                                                               crop_colorized,
                                                               hij, asc, nrm,
                                                               utm, lbb)
    if off_x:
        command += " --offset_x %d" % off_x
    if off_y:
        command += " --offset_y %d" % off_y
    common.run(command)
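A hedged usage sketch, assuming a 3x3 homography that maps full-image coordinates to the crop (paths are hypothetical):

# point cloud for a crop whose top-left corner is at (2000, 3000) in the full image
H = common.matrix_translation(-2000, -3000)
height_map_to_point_cloud('cloud.ply', 'height_map.tif', 'rpc.xml', H=H,
                          crop_colorized='ref.png', utm_zone='31N')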
Example #21
def disp_map_to_point_cloud(out,
                            disp,
                            mask,
                            rpc1,
                            rpc2,
                            H1,
                            H2,
                            A,
                            colors,
                            utm_zone=None,
                            llbbx=None,
                            xybbx=None,
                            xymsk=None):
    """
    Computes a 3D point cloud from a disparity map.

    Args:
        out: path to the output ply file
        disp, mask: paths to the disparity and mask maps
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        A: path to txt file containing the pointing correction matrix
            for im2
        colors: path to the png image containing the colors
    """
    href = " ".join(str(x) for x in np.loadtxt(H1).flatten())
    hsec = " ".join(
        str(x) for x in np.dot(np.loadtxt(H2), np.linalg.inv(np.loadtxt(
            A))).flatten())
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""
    msk = "--mask-orig %s" % xymsk if xymsk else ""

    command = 'disp2ply {} {} {} {} {}'.format(out, disp, mask, rpc1, rpc2)
    command += ' {} -href "{}" -hsec "{}"'.format(colors, href, hsec)
    command += ' {} {} {} {}'.format(utm, lbb, xbb, msk)
    common.run(command)
Example #22
def height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, height, rpc_err, A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    if A is not None:
        HH2 = common.tmpfile('.txt')
        np.savetxt(HH2, np.dot(np.loadtxt(H2), np.linalg.inv(np.loadtxt(A))))
    else:
        HH2 = H2

    common.run("disp_to_h %s %s %s %s %s %s %s %s" % (rpc1, rpc2, H1, HH2, disp,
                                                      mask, height, rpc_err))
Example #23
def height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, height, rpc_err, A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    if A is not None:
        HH2 = common.tmpfile('.txt')
        np.savetxt(HH2, np.dot(np.loadtxt(H2), np.linalg.inv(np.loadtxt(A))))
    else:
        HH2 = H2

    common.run("disp_to_h %s %s %s %s %s %s %s %s" % (rpc1, rpc2, H1, HH2, disp,
                                                      mask, height, rpc_err))
Example #24
def height_map(out,
               x,
               y,
               w,
               h,
               rpc1,
               rpc2,
               H1,
               H2,
               disp,
               mask,
               rpc_err,
               out_filt,
               A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction matrix
            for im2
    """
    tmp = common.tmpfile('.tif')
    height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, out)

    # apply output filter
    common.run('plambda {0} {1} "x 0 > y nan if" -o {1}'.format(out_filt, out))
Example #25
    def scale_image(self, image_in, write_folder, scaling_filter, scale_x,
                    scale_y):

        image_out = self._get_output_name(write_folder, image_in, False)

        # generate image
        print("Generating {} ...".format(image_out))

        # First get input image size
        w, h, _ = common.image_size_gdal(image_in)

        # Generate a temporary vrt file to have the proper geotransform
        fd, tmp_vrt = tempfile.mkstemp(suffix='.vrt', dir=write_folder)

        os.close(fd)

        common.run('gdal_translate -of VRT -a_ullr 0 0 {} {} {} {}'.format(
            w, h, image_in, tmp_vrt))

        common.run(
            'gdalwarp -co RPB=NO -co PROFILE=GeoTIFF -r {} -co "BIGTIFF=IF_NEEDED" -co "TILED=YES" -ovr NONE \
                     -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -tr \
                    {} {} {} {}'.format(scaling_filter, scale_x, scale_y,
                                        tmp_vrt, image_out))

        try:
            # Remove aux files if any
            os.remove(image_out + ".aux.xml")
        except OSError:
            pass

        # Clean tmp vrt file
        os.remove(tmp_vrt)

        print("Done")

        return (0)
Example #26
def multidisp_map_to_point_cloud(out,
                                 disp_list,
                                 rpc_ref,
                                 rpc_list,
                                 colors,
                                 utm_zone=None,
                                 llbbx=None,
                                 xybbx=None):
    """
    Computes a 3D point cloud from N disparity maps.

    Args:
        out: path to the output ply file
        disp_list: paths to the disparity maps
        rpc_ref, rpc_list: paths to the xml files
        colors: path to the png image containing the colors
    """

    disp_command = [
        '--disp%d %s' % (i + 1, disp) for i, disp in enumerate(disp_list)
    ]
    rpc_command = [
        '--rpc_sec%d %s' % (i + 1, rpc) for i, rpc in enumerate(rpc_list)
    ]

    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""

    command = 'multidisp2ply {} {} {} {} {}'.format(out, len(disp_list),
                                                    " ".join(disp_command),
                                                    "--rpc_ref %s" % rpc_ref,
                                                    " ".join(rpc_command))
    command += ' --color {}'.format(colors)
    command += ' {} {} {}'.format(utm, lbb, xbb)
    common.run(command)
Example #27
def compute_disparity_map(im1,
                          im2,
                          disp,
                          mask,
                          algo,
                          disp_min=None,
                          disp_max=None,
                          extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output disparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    if rectify_secondary_tile_only(algo) is False:
        disp_min = [disp_min]
        disp_max = [disp_max]

    # limit disparity bounds
    np.alltrue(len(disp_min) == len(disp_max))
    for dim in range(len(disp_min)):
        if disp_min[dim] is not None and disp_max[dim] is not None:
            image_size = common.image_size_gdal(im1)
            if disp_max[dim] - disp_min[dim] > image_size[dim]:
                center = 0.5 * (disp_min[dim] + disp_max[dim])
                disp_min[dim] = int(center - 0.5 * image_size[dim])
                disp_max[dim] = int(center + 0.5 * image_size[dim])

        # round disparity bounds
        if disp_min[dim] is not None:
            disp_min[dim] = int(np.floor(disp_min[dim]))
        if disp_max[dim] is not None:
            disp_max[dim] = int(np.ceil(disp_max[dim]))

    if rectify_secondary_tile_only(algo) is False:
        disp_min = disp_min[0]
        disp_max = disp_max[0]

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(
            bm_binary, im1, im2, disp, mask, disp_min, disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(
            bm_binary, im1, im2, disp, mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(
            bm_binary, im1, im2, disp, mask, disp_min, disp_max, extra_params))
    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(
            bm_binary, im1, im2, disp, mask, disp_min, disp_max, extra_params))
    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(
            im1, im2, disp, cost, disp_min, disp_max, win, p1, p2, lr))

        # create rejection mask (0 means rejected, 1 means accepted)
        # keep only the points that are matched and present in both input images
        common.run(
            'plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'
            .format(disp, im1, im2, mask))

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'tvl1_2d':
        tvl1 = 'callTVL1.sh'
        common.run(
            '{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp, mask, 1),
            env)

    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run(
            '{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'
            .format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run(
            '{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'
            .format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
            bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'
        common.run(
            '{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5}'.format(
                'mgm', disp_min, disp_max, im1, im2, disp), env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = '25'
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        common.run(
            '{0} -r {1} -R {2} -S 3 -s vfit -t census {3} {4} {5}'.format(
                'mgm_multi', disp_min, disp_max, im1, im2, disp), env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if (algo == 'micmac'):
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(
            os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(
            os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(
            micmac_cost, mask))
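A hedged usage sketch on an already rectified pair (the paths and disparity range are hypothetical):

# run the mgm matcher with a [-60, 60] disparity range
compute_disparity_map('rectified_ref.tif', 'rectified_sec.tif',
                      'rectified_disp.tif', 'rectified_mask.png',
                      algo='mgm', disp_min=-60, disp_max=60)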
Example #28
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.

    # There is no guarantee that this function works with z!=1
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i + 1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i + 1]['rpc']

        if os.path.exists(disp):
            # homography for warp
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                pointing = os.path.join(cfg['out_dir'],
                                        'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec, np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run(
                'plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s'
                % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' %
                       (tmp_abs, h2, tmp_1d_to_2d))

            # warp
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' %
                       (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' %
                       (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file,
                                               disp_list,
                                               rpc_ref,
                                               rpc_list,
                                               colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x + w, y, y + h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
Example #29
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(
            hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2 * cfg['horizontal_margin'],
                                      hh + 2 * cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file,
                                          disp,
                                          mask_rect,
                                          rpc1,
                                          rpc2,
                                          H_ref,
                                          H_sec,
                                          pointing,
                                          colors,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x + w, y, y + h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
Example #30
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output disparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    if rectify_secondary_tile_only(algo) is False:
        disp_min = [disp_min]
        disp_max = [disp_max]

    # limit disparity bounds
    np.alltrue(len(disp_min) == len(disp_max))
    for dim in range(len(disp_min)):
        if disp_min[dim] is not None and disp_max[dim] is not None:
            image_size = common.image_size_gdal(im1)
            if disp_max[dim] - disp_min[dim] > image_size[dim]:
                center = 0.5 * (disp_min[dim] + disp_max[dim])
                disp_min[dim] = int(center - 0.5 * image_size[dim])
                disp_max[dim] = int(center + 0.5 * image_size[dim])

        # round disparity bounds
        if disp_min[dim] is not None:
            disp_min[dim] = int(np.floor(disp_min[dim]))
        if disp_max[dim] is not None:
            disp_max[dim] = int(np.ceil(disp_max[dim]))

    if rectify_secondary_tile_only(algo) is False:
        disp_min = disp_min[0]
        disp_max = disp_max[0]

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
                                                            disp_max, extra_params))
    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2, lr))

        # create rejection mask (0 means rejected, 1 means accepted)
        # keep only the points that are matched and present in both input images
        common.run('plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'.format(disp, im1, im2, mask))

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'tvl1_2d':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp, mask,
                                                    1), env)


    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'
        common.run('{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5}'.format('mgm',
                                                                                 disp_min,
                                                                                 disp_max,
                                                                                 im1, im2,
                                                                                 disp),
                   env)

        # produce the mask: rejected pixels are marked with nan or inf in the
        # disparity map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'micmac':
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(micmac_cost, mask))
Ejemplo n.º 31
0
def plot_matches(im1,
                 im2,
                 rpc1,
                 rpc2,
                 matches,
                 x=None,
                 y=None,
                 w=None,
                 h=None,
                 outfile=None):
    """
    Plot matches on Pleiades images

    Args:
        im1, im2: paths to full Pleiades images
        rpc1, rpc2: two instances of the RPCModel class, or paths to xml files
            containing the rpc coefficients
        matches: 2D numpy array of shape Nx4 containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
            y2). The coordinates are given in the frame of the full images.
        x, y, w, h (optional, default is None): ROI in the reference image
        outfile (optional, default is None): path to the output file. If None,
            the image is displayed using the pvflip viewer

    Returns:
        path to the displayed output
    """
    # if no matches, no plot
    if not matches.size:
        print("visualisation.plot_matches: nothing to plot")
        return

    # read rpcs (instantiate RPCModel objects when paths are given; rebinding a
    # loop variable would not update the arguments)
    if not isinstance(rpc1, rpc_model.RPCModel):
        rpc1 = rpc_model.RPCModel(rpc1)
    if not isinstance(rpc2, rpc_model.RPCModel):
        rpc2 = rpc_model.RPCModel(rpc2)

    # determine regions to crop in im1 and im2
    if x is not None:
        x1 = x
    else:
        x1 = np.min(matches[:, 0])

    if y is not None:
        y1 = y
    else:
        y1 = np.min(matches[:, 1])

    if w is not None:
        w1 = w
    else:
        w1 = np.max(matches[:, 0]) - x1

    if h is not None:
        h1 = h
    else:
        h1 = np.max(matches[:, 1]) - y1

    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x1, y1, w1, h1)
    # x2 = np.min(matches[:, 2])
    # w2 = np.max(matches[:, 2]) - x2
    # y2 = np.min(matches[:, 3])
    # h2 = np.max(matches[:, 3]) - y2

    # # add 20 pixels offset and round. The image_crop_gdal function will round
    # # off the coordinates before it does the crops.
    # x1 -= 20; x1 = np.round(x1)
    # y1 -= 20; y1 = np.round(y1)
    # x2 -= 20; x2 = np.round(x2)
    # y2 -= 20; y2 = np.round(y2)
    # w1 += 40; w1 = np.round(w1)
    # h1 += 40; h1 = np.round(h1)
    # w2 += 40; w2 = np.round(w2)
    # h2 += 40; h2 = np.round(h2)

    # do the crops
    crop1 = common.image_qauto(common.image_crop_gdal(im1, x1, y1, w1, h1))
    crop2 = common.image_qauto(common.image_crop_gdal(im2, x2, y2, w2, h2))

    # compute matches coordinates in the cropped images
    pts1 = matches[:, 0:2] - [x1, y1]
    pts2 = matches[:, 2:4] - [x2, y2]

    # plot the matches on the two crops
    to_display = plot_matches_low_level(crop1, crop2, np.hstack((pts1, pts2)))
    if outfile is None:
        os.system('v %s &' % (to_display))
    else:
        common.run('cp %s %s' % (to_display, outfile))

    return to_display
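
A hedged usage sketch; the paths and the matches array are hypothetical, and matches is expected as an N x 4 array whose rows are (x1, y1, x2, y2) in full-image coordinates:

import numpy as np

# hypothetical matches: three correspondences given in the full-image frames
matches = np.array([[1200.0, 3400.0, 1185.0, 3390.0],
                    [1500.0, 3600.0, 1488.0, 3592.0],
                    [1800.0, 3900.0, 1790.0, 3885.0]])
plot_matches('im_01.tif', 'im_02.tif', 'rpc_01.xml', 'rpc_02.xml', matches,
             x=1100, y=3300, w=800, h=700, outfile='matches.png')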
Ejemplo n.º 33
0
Archivo: s2p.py Proyecto: mnhrdt/s2p
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.

    Note: there is no guarantee that this function works with z != 1.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i+1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i+1]['rpc']

        if os.path.exists(disp):
            # homography for warp
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                pointing = os.path.join(cfg['out_dir'], 'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec,np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run('plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s' % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' % (tmp_abs, h2, tmp_1d_to_2d))

            # warp
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' % (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' % (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(common.image_crop_gdal(cfg['images'][0]['img'], x, y,
                                                 w, h), colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file, disp_list, rpc_ref, rpc_list,
                                               colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x+w, y, y+h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
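
For reference, a minimal sketch of the tile descriptor this function reads; the directory and coordinates are hypothetical, only the 'dir' and 'coordinates' keys are used here, and the remaining inputs come from the global cfg dictionary:

# hypothetical tile: 'coordinates' is the (x, y, w, h) ROI in the reference image
tile = {
    'dir': 'output/tiles/row_0000500_height_350/col_0001000_width_350',
    'coordinates': (1000, 500, 350, 350),
}
multidisparities_to_ply(tile)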
Ejemplo n.º 34
0
Archivo: s2p.py Proyecto: mnhrdt/s2p
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2*cfg['horizontal_margin'],
                                      hh + 2*cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        common.image_qauto(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file, disp, mask_rect, rpc1, rpc2,
                                          H_ref, H_sec, pointing, colors,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x+w, y, y+h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
Ejemplo n.º 35
0
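    # NOTE: this snippet is shown without its enclosing function definition; it
    # assumes that in_img_file, out_img_file, scale_x, scale_y and filt (a
    # gdalwarp resampling method such as 'bilinear') are defined by the caller.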
    # generate image
    print("Generating {} ...".format(out_img_file))

    # First get input image size
    sz = common.image_size_gdal(in_img_file)
    w = sz[0]
    h = sz[1]

    # Generate a temporary vrt file to have the proper geotransform
    fd, tmp_vrt = tempfile.mkstemp(suffix='.vrt',
                                   dir=os.path.dirname(out_img_file))

    os.close(fd)

    common.run('gdal_translate -of VRT -a_ullr 0 0 %d %d %s %s' %
               (w, h, in_img_file, tmp_vrt))

    common.run((
        'gdalwarp -co RPB=NO -co PROFILE=GeoTIFF -r %s -co "BIGTIFF=IF_NEEDED" -co "TILED=YES" -ovr NONE -overwrite -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -tr'
        ' %d %d %s %s') % (filt, scale_x, scale_y, tmp_vrt, out_img_file))

    try:
        # Remove aux files if any
        os.remove(out_img_file + ".aux.xml")
    except OSError:
        pass

    # Clean tmp vrt file
    os.remove(tmp_vrt)

    print("Done")