Example #1
def plot_vectors(p, v, x, y, w, h, f=1, out_file=None):
    """
    Plots vectors on an image, using gnuplot

    Args:
        p: points (origins of vectors), represented as a numpy Nx2 array
        v: vectors, represented as a numpy Nx2 array
        x, y, w, h: rectangular ROI
        f: (optional, default is 1) exaggeration factor
        out_file: (optional, default is None) path to the output file

    Returns:
        nothing, but opens a display or writes a png file
    """
    tmp = common.tmpfile('.txt')
    data = np.hstack((p, v))
    np.savetxt(tmp, data, fmt='%6f')
    gp_string = 'set term png size %d,%d;unset key;unset tics;plot [%d:%d] [%d:%d] "%s" u($1):($2):(%d*$3):(%d*$4) w vectors head filled' % (w, h, x, x+w, y, y+h, tmp, f, f)

    display = out_file is None
    if display:
        out_file = common.tmpfile('.png')

    common.run("gnuplot -p -e '%s' > %s" % (gp_string, out_file))
    print(out_file)

    if display:
        os.system("v %s &" % out_file)
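A minimal numpy-only sketch (file name hypothetical) of the data layout that plot_vectors hands to gnuplot: one "x y dx dy" row per vector, which the "u($1):($2):(f*$3):(f*$4) w vectors" clause then draws as arrows.

import numpy as np

p = np.array([[0.0, 0.0], [10.0, 5.0]])   # vector origins, Nx2
v = np.array([[1.5, 2.0], [-3.0, 0.5]])   # vector components, Nx2
data = np.hstack((p, v))                  # Nx4: x y dx dy, one row per vector
np.savetxt('vectors.txt', data, fmt='%6f')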
Example #2
def loop_zhang(F, w, h):
    """
    Computes rectifying homographies from a fundamental matrix, with Loop-Zhang.

    Args:
        F: 3x3 numpy array containing the fundamental matrix
        w, h: image size. The two images are supposed to have the same size

    Returns:
        The two rectifying homographies.

    The rectifying homographies are computed using the Pascal Monasse binary
    named rectify_mindistortion. It uses the Loop-Zhang algorithm.
    """
    Ffile = common.tmpfile('.txt')
    Haf = common.tmpfile('.txt')
    Hbf = common.tmpfile('.txt')
    common.matrix_write(Ffile, F)
    common.run('rectify_mindistortion %s %d %d %s %s > /dev/null' %
               (Ffile, w, h, Haf, Hbf))
    Ha = common.matrix_read(Haf, size=(3, 3))
    Hb = common.matrix_read(Hbf, size=(3, 3))

    # check if both the images are rotated
    a = does_this_homography_change_the_vertical_direction(Ha)
    b = does_this_homography_change_the_vertical_direction(Hb)
    if a and b:
        R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
        Ha = np.dot(R, Ha)
        Hb = np.dot(R, Hb)
    return Ha, Hb
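The helper does_this_homography_change_the_vertical_direction is not included in this snippet; the sketch below is a hypothetical stand-in that maps two vertically separated points and checks whether their order flips, which is the property the 180-degree rotation R above is meant to undo.

import numpy as np

def flips_vertical_direction(H, w, h):
    # hypothetical stand-in: compare the images of the top and bottom
    # midpoints of the frame after applying H (homogeneous coordinates)
    top = np.dot(H, [w / 2.0, 0.0, 1.0])
    bot = np.dot(H, [w / 2.0, float(h), 1.0])
    return bot[1] / bot[2] < top[1] / top[2]

R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])  # 180-degree rotation about the origin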
Example #3
def loop_zhang(F, w, h):
    """
    Computes rectifying homographies from a fundamental matrix, with Loop-Zhang.

    Args:
        F: 3x3 numpy array containing the fundamental matrix
        w, h: image size. The two images are supposed to have the same size

    Returns:
        The two rectifying homographies.

    The rectifying homographies are computed using the Pascal Monasse binary
    named rectify_mindistortion. It uses the Loop-Zhang algorithm.
    """
    Ffile = common.tmpfile('.txt')
    Haf = common.tmpfile('.txt')
    Hbf = common.tmpfile('.txt')
    common.matrix_write(Ffile, F)
    common.run('rectify_mindistortion %s %d %d %s %s > /dev/null' % (Ffile, w,
                                                                     h, Haf,
                                                                     Hbf))
    Ha = common.matrix_read(Haf, size=(3, 3))
    Hb = common.matrix_read(Hbf, size=(3, 3))

    # check if both the images are rotated
    a = does_this_homography_change_the_vertical_direction(Ha)
    b = does_this_homography_change_the_vertical_direction(Hb)
    if a and b:
        R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
        Ha = np.dot(R, Ha)
        Hb = np.dot(R, Hb)
    return Ha, Hb
Example #4
def cloud_water_image_domain(out, w, h, H, rpc, roi_gml=None, cld_gml=None):
    """
    Computes a mask for pixels masked by clouds, water, or out of image domain.

    Args:
        out: path to the output image file.
        w, h: (w, h) are the dimensions of the output image mask.
        H: 3x3 numpy array representing the homography that transforms the
            original full image into the rectified tile.
        rpc: path to the xml file containing the rpc coefficients of the image.
            RPC model is used with SRTM data to derive the water mask.
        roi_gml (optional, default None): path to a gml file containing a mask
            defining the area contained in the full image.
        cld_gml (optional, default None): path to a gml file containing a mask
            defining the areas covered by clouds.

    Returns:
        True if the tile is completely masked, False otherwise.
    """
    # put the coefficients of the homography in a string
    hij = ' '.join(['%f' % x for x in H.flatten()])

    # image domain mask
    if roi_gml is None:  # initialize to 255
        common.run('plambda zero:%dx%d "x 255 +" -o %s' % (w, h, out))
    else:
        common.run('cldmask %d %d -h "%s" %s %s' % (w, h, hij, roi_gml, out))
        if common.is_image_black(out):  # if we are already out, return
            return True

    # cloud mask
    if cld_gml is not None:
        cld_msk = common.tmpfile('.png')
        common.run('cldmask %d %d -h "%s" %s %s' % (w, h, hij, cld_gml,
                                                    cld_msk))
        # cld msk has to be inverted.
        # TODO: add flag to the cldmask binary, to avoid using read/write the
        # msk one more time for this
        common.run('plambda %s "255 x -" -o %s' % (cld_msk, cld_msk))

        intersection(out, out, cld_msk)

    # water mask
    water_msk = common.tmpfile('.png')
    env = os.environ.copy()
    env['SRTM4_CACHE'] = cfg['srtm_dir']
    common.run('watermask %d %d -h "%s" %s %s' % (w, h, hij, rpc, water_msk),
               env)
    intersection(out, out, water_msk)

    return common.is_image_black(out)
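The intersection() helper used above is not part of this snippet; a plausible numpy equivalent of its masking logic (keep a pixel only where both masks are non-zero) could look like this:

import numpy as np

def intersection_arrays(a, b):
    # hypothetical array version of intersection(out, a, b): 255 where both
    # masks are valid, 0 elsewhere
    return np.where((a > 0) & (b > 0), 255, 0).astype(np.uint8)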
Example #5
def filtered_sift_matches_roi(im1, im2, rpc1, rpc2, x, y, w, h,
        model='fundamental'):
    """
    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions of
            the rectangle.
        model (optional, default is 'fundamental'): model imposed by RANSAC
            when searching the set of inliers

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
            If no sift matches are found, then an exception is raised.

    The returned matches are the inliers of an epipolar model found with ransac.
    """
    # get sift matches
    matches = rectification.matches_from_sift_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)

    # filter outliers with ransac
    # the binary is from Enric's imscript
    # update: changed the ransac error tolerance used to determine whether or
    # not a point is compatible with a model, from 1 pix to .3 pix
    if len(matches) < 7:
        raise Exception("less than 7 matches")
    matches_file = common.tmpfile('.txt')
    np.savetxt(matches_file, matches)

    inliers_file = common.tmpfile('.txt')
    if model == 'fundamental':
        common.run("ransac fmn 1000 .3 7 %s < %s" % (inliers_file, matches_file))
    elif model == 'homography':
        common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (inliers_file,
            matches_file))
    elif model == 'hom_fund':
        common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (inliers_file,
            matches_file))
        common.run("ransac fmn 1000 .2 7 %s < %s" % (inliers_file, inliers_file))
    else:
        print("filtered_sift_matches_roi: bad value for argument 'model'")
    inliers = np.loadtxt(inliers_file)
    if not inliers.size:
        raise Exception("no inliers")

    return inliers
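For reference, a numpy-only sketch of the matches layout expected by the RANSAC calls above: one row per correspondence, ordered x1 y1 x2 y2, in the coordinate frame of the full images. At least 7 rows are needed because the fundamental-matrix model ('fmn') is fitted from 7-point samples.

import numpy as np

matches = np.array([[120.5,  40.2, 118.9,  41.0],
                    [300.0, 210.7, 297.3, 211.5]])   # x1 y1 x2 y2 per row
np.savetxt('matches.txt', matches)
print(np.loadtxt('matches.txt').shape)               # (2, 4)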
Example #6
def compute_dem(out, x, y, w, h, z, rpc1, rpc2, H1, H2, disp, mask, rpc_err,
                A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        z: zoom factor (usually 1, 2 or 4) used to produce the input disparity
            map
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): pointing correction matrix for im2

    Returns:
        nothing
    """
    out_dir = os.path.dirname(out)

    tmp = common.tmpfile('.tif')
    compute_height_map(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, z, out)
Example #7
def build_main_file(
    stack,
    program,
    backend,
    annotation=default_annotation,
    delegate_function=default_delegate_function,
    rebuild=True,
):
    if rebuild:
        common.buildNinjaTarget("skip_collect_annotations")

    mainFilePath = stack.enter_context(common.tmpfile(suffix=".sk")).name
    with open(mainFilePath, "w") as mainFile:
        cmd = (
            os.path.join(build_dir, "bin/skip_collect_annotations"),
            "--binding",
            "backend=" + ("native" if backend == "native" else "nonnative"),
            "--annotation",
            annotation,
            "--delegate",
            delegate_function,
            program,
        )
        returncode = subprocess.call(cmd,
                                     env=os.environ,
                                     stdout=mainFile,
                                     stderr=subprocess.PIPE)
        if returncode != 0:
            print(
                "Failed to load annotations - check the project file for `%s`."
                % (program))
            exit(1)
        mainFile.flush()

    return mainFilePath
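Here common.tmpfile(suffix=...) appears to return a context manager exposing a .name attribute, much like tempfile.NamedTemporaryFile; a standard-library-only sketch of the same ExitStack pattern:

import contextlib
import tempfile

with contextlib.ExitStack() as stack:
    # the file is kept alive (and cleaned up) by the stack, as in build_main_file
    main_file = stack.enter_context(tempfile.NamedTemporaryFile(mode='w', suffix='.sk'))
    main_file.write('// generated main file\n')
    main_file.flush()
    print(main_file.name)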
Example #8
def image_keypoints(im, x, y, w, h, max_nb=None, extra_params=''):
    """
    Runs SIFT (the keypoints detection and description only, no matching).

    It uses Ives Rey Otero's implementation published in IPOL:
    http://www.ipol.im/pub/pre/82/

    Args:
        im: path to the input image
        x, y, w, h: rectangular ROI in the input image
        max_nb (optional): maximal number of keypoints. If more keypoints are
            detected, those at smallest scales are discarded
        extra_params (optional): extra parameters to be passed to the sift
            binary

    Returns:
        path to the file containing the list of descriptors
    """
    keyfile = common.tmpfile('.txt')
    if max_nb:
        cmd = "sift_roi %s %d %d %d %d --max-nb-pts %d %s -o %s" % (im, x, y, w,
                                                                    h, max_nb,
                                                                    extra_params,
                                                                    keyfile)
    else:
        cmd = "sift_roi %s %d %d %d %d %s -o %s" % (im, x, y, w, h,
                                                    extra_params, keyfile)
    common.run(cmd)
    return keyfile
Example #9
def compute_height_map(rpc1,
                       rpc2,
                       H1,
                       H2,
                       disp,
                       mask,
                       height,
                       rpc_err,
                       A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): pointing correction matrix for im2
    """
    if A is not None:
        HH2 = common.tmpfile('.txt')
        np.savetxt(HH2, np.dot(np.loadtxt(H2), np.linalg.inv(A)))
    else:
        HH2 = H2

    common.run("disp_to_h %s %s %s %s %s %s %s %s" %
               (rpc1, rpc2, H1, HH2, disp, mask, height, rpc_err))
    return
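A numpy illustration of the pointing-correction step above: the rectifying homography of the secondary image is composed with the inverse of the correction matrix A before triangulation (values below are made up).

import numpy as np

H2 = np.array([[1.0, 0.0, -250.0],
               [0.0, 1.0,  -80.0],
               [0.0, 0.0,    1.0]])
A = np.array([[1.0, 0.0, 2.5],    # small translation modelling the pointing error
              [0.0, 1.0, 0.7],
              [0.0, 0.0, 1.0]])
HH2 = np.dot(H2, np.linalg.inv(A))   # what gets written to the temporary file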
Example #10
def image_keypoints(im, x, y, w, h, max_nb=None, extra_params=''):
    """
    Runs SIFT (the keypoints detection and description only, no matching).

    It uses Ives Rey Otero's implementation published in IPOL:
    http://www.ipol.im/pub/pre/82/

    Args:
        im: path to the input image
        x, y, w, h: rectangular ROI in the input image
        max_nb (optional): maximal number of keypoints. If more keypoints are
            detected, those at smallest scales are discarded
        extra_params (optional): extra parameters to be passed to the sift
            binary

    Returns:
        path to the file containing the list of descriptors
    """
    keyfile = common.tmpfile('.txt')
    if max_nb:
        cmd = "sift_roi %s %d %d %d %d --max-nb-pts %d %s -o %s" % (
            im, x, y, w, h, max_nb, extra_params, keyfile)
    else:
        cmd = "sift_roi %s %d %d %d %d %s -o %s" % (im, x, y, w, h,
                                                    extra_params, keyfile)
    common.run(cmd)
    return keyfile
Example #11
def colorize(crop_panchro, im_color, x, y, zoom, out_colorized, rmin, rmax):
    """
    Colorizes a Pleiades gray crop using low-resolution color information.

    Args:
        crop_panchro: path to the panchro (ie gray) crop
        im_color: path to the full color image (tiff or jp2)
        x, y: coordinates of the top-left corner of crop_panchro, in the full
            Pleiades image frame.
        zoom: subsampling zoom-factor that was used to generate crop_panchro
        out_colorized: path to the output file
        rmin, rmax: intensity bounds used to rescale the colorized output
    """
    # get a translated and zoomed crop from the color image. It has to be
    # sampled on exactly the same grid as the panchro crop.
    # To do that we compose the translation + zoom transformation with a 4x
    # zoom (because color pleiades images have 4x lower resolution).  There is
    # also a small horizontal translation (4 pixels at the panchro resolution)
    # The resulting transformation is the composition of:
    #   translation (-1 - x/4, -y/4)
    #   zoom 4/z
    w, h = common.image_size_tiffinfo(crop_panchro)
    xx = np.floor(x / 4.0) 
    yy = np.floor(y / 4.0)
    ww = np.ceil((x + w * zoom) / 4.0) - xx 
    hh = np.ceil((y + h * zoom) / 4.0) - yy
    crop_ms = common.image_crop_TIFF(im_color, xx, yy, ww, hh)
    crop_ms = common.image_zoom_gdal(crop_ms, zoom/4.0)
    # crop_ms = common.image_safe_zoom_fft(crop_ms, zoom/4.0)

    # crop the crop_ms image to remove the extra-pixels due to the integer crop
    # followed by zoom
    x0 = max(0,x - 4*xx)
    y0 = max(0,y - 4*yy)
    crop_ms = common.image_crop_TIFF(crop_ms, x0, y0, w, h)
    assert(common.image_size_tiffinfo(crop_panchro) ==
           common.image_size_tiffinfo(crop_ms))

    # convert rgbi to rgb
    rgb = common.rgbi_to_rgb(crop_ms, out=None, tilewise=True)

    # blend intensity and color to obtain the result
    # each channel value r, g or b is multiplied by 3*y / (r+g+b), where y
    # denotes the panchro intensity
    tmp = common.tmpfile('.tif')
    pcmd = "dup split + + / * 3 *"
    os.environ['TMPDIR'] = os.path.join(cfg['temporary_dir'], 'meta/')
    cmd = 'tiffu meta \"plambda ^ ^1 \\\"%s\\\" -o @\" %s %s -- %s' % (pcmd,
                                                                      crop_panchro,
                                                                      rgb, tmp)
    common.run(cmd)
    if w * h > 25e6:  # image larger than 5000 x 5000 pixels
        common.image_qauto_otb(out_colorized, tmp)
    else:
        #common.image_qauto(tmp, out_colorized)
        common.image_rescaleintensities(tmp, out_colorized, rmin, rmax)
    return
Example #12
def colorize(crop_panchro, im_color, x, y, zoom, out_colorized, rmin, rmax):
    """
    Colorizes a Pleiades gray crop using low-resolution color information.

    Args:
        crop_panchro: path to the panchro (ie gray) crop
        im_color: path to the full color image (tiff or jp2)
        x, y: coordinates of the top-left corner of crop_panchro, in the full
            Pleiades image frame.
        zoom: subsampling zoom-factor that was used to generate crop_panchro
        out_colorized: path to the output file
        rmin, rmax: intensity bounds used to rescale the colorized output
    """
    # get a translated and zoomed crop from the color image. It has to be
    # sampled on exactly the same grid as the panchro crop.
    # To do that we compose the translation + zoom transformation with a 4x
    # zoom (because color pleiades images have 4x lower resolution).  There is
    # also a small horizontal translation (4 pixels at the panchro resolution)
    # The resulting transformation is the composition of:
    #   translation (-1 - x/4, -y/4)
    #   zoom 4/z
    w, h = common.image_size_tiffinfo(crop_panchro)
    xx = np.floor(x / 4.0)
    yy = np.floor(y / 4.0)
    ww = np.ceil((x + w * zoom) / 4.0) - xx
    hh = np.ceil((y + h * zoom) / 4.0) - yy
    crop_ms = common.image_crop_tif(im_color, xx, yy, ww, hh)
    crop_ms = common.image_zoom_gdal(crop_ms, zoom / 4.0)
    # crop_ms = common.image_safe_zoom_fft(crop_ms, zoom/4.0)

    # crop the crop_ms image to remove the extra-pixels due to the integer crop
    # followed by zoom
    x0 = max(0, x - 4 * xx)
    y0 = max(0, y - 4 * yy)
    crop_ms = common.image_crop_tif(crop_ms, x0, y0, w, h)
    assert (common.image_size_tiffinfo(crop_panchro) ==
            common.image_size_tiffinfo(crop_ms))

    # convert rgbi to rgb
    rgb = common.rgbi_to_rgb(crop_ms, out=None, tilewise=True)

    # blend intensity and color to obtain the result
    # each channel value r, g or b is multiplied by 3*y / (r+g+b), where y
    # denotes the panchro intensity
    tmp = common.tmpfile('.tif')
    pcmd = "dup split + + / * 3 *"
    os.environ['TMPDIR'] = os.path.join(cfg['temporary_dir'], 'meta/')
    cmd = 'tiffu meta \"plambda ^ ^1 \\\"%s\\\" -o @\" %s %s -- %s' % (
        pcmd, crop_panchro, rgb, tmp)
    common.run(cmd)
    if w * h > 25e6:  # image larger than 5000 x 5000 pixels
        common.image_qauto_otb(out_colorized, tmp)
    else:
        #common.image_qauto(tmp, out_colorized)
        common.image_rescaleintensities(tmp, out_colorized, rmin, rmax)
    return
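An arithmetic sketch of the crop computed above (sample numbers only): the color image has 4x lower resolution, so the panchro ROI is mapped to the color grid with integer floor/ceil bounds, and the fractional remainder (x0, y0) is trimmed after zooming back.

import numpy as np

x, y, w, h, zoom = 1003, 517, 600, 400, 1
xx, yy = np.floor(x / 4.0), np.floor(y / 4.0)
ww = np.ceil((x + w * zoom) / 4.0) - xx
hh = np.ceil((y + h * zoom) / 4.0) - yy
x0, y0 = max(0, x - 4 * xx), max(0, y - 4 * yy)
print(xx, yy, ww, hh, x0, y0)   # 250.0 129.0 151.0 101.0 3.0 1.0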
Example #13
def plot_matches(im1, im2, matches):
    """
    Displays two images side by side with matches highlighted

    Args:
        im1, im2: paths to the two input images
        matches: 2D numpy array of size Nx4 containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
            y2)

    Returns:
        path to the resulting image, to be displayed
    """
    # load images
    img1 = piio.read(im1).astype(np.uint8)
    img2 = piio.read(im2).astype(np.uint8)

    # if images have more than 3 channels, keep only the first 3
    if img1.shape[2] > 3:
        img1 = img1[:, :, 0:3]
    if img2.shape[2] > 3:
        img2 = img2[:, :, 0:3]

    # build the output image
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    w = w1 + w2
    h = max(h1, h2)
    out = np.zeros((h, w, 3), np.uint8)
    out[:h1, :w1] = img1
    out[:h2, w1:w] = img2

    # define colors, according to min/max intensity values
    out_min = min(np.nanmin(img1), np.nanmin(img2))
    out_max = max(np.nanmax(img1), np.nanmax(img2))
    green = [out_min, out_max, out_min]
    blue = [out_min, out_min, out_max]

    # plot the matches
    for i in range(len(matches)):
        x1 = matches[i, 0]
        y1 = matches[i, 1]
        x2 = matches[i, 2] + w1
        y2 = matches[i, 3]
        # convert endpoints to int (nn interpolation)
        x1, y1, x2, y2 = np.round([x1, y1, x2, y2]).astype(int)
        plot_line(out, x1, y1, x2, y2, blue)
        out[y1, x1] = green
        out[y2, x2] = green

    # save the output image, and return its path
    outfile = common.tmpfile('.png')
    piio.write(outfile, out)
    return outfile
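The plot_line helper is not included in this snippet; a hypothetical stand-in that draws a straight segment by sampling enough points between the two endpoints could be:

import numpy as np

def plot_line(img, x1, y1, x2, y2, color):
    # naive line rasterisation: one sample per unit of the longest axis
    n = int(max(abs(x2 - x1), abs(y2 - y1))) + 1
    for t in np.linspace(0.0, 1.0, n + 1):
        col = int(round(x1 + t * (x2 - x1)))
        row = int(round(y1 + t * (y2 - y1)))
        if 0 <= row < img.shape[0] and 0 <= col < img.shape[1]:
            img[row, col] = color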
Example #14
def fundamental_matrix_ransac(matches, precision=1.0, return_inliers=False):
    """
    Estimates the fundamental matrix given a set of point correspondences
    between two images, using ransac.

    Arguments:
        matches: numpy 2D array of size Nx4 containing a list of pairs of
            matching points. Each line is of the form x1, y1, x2, y2, where (x1,
            y1) is the point in the first view while (x2, y2) is the matching
            point in the second view.
            It can be the path to a txt file containing such an array.
        precision: optional parameter indicating the maximum error
            allowed for counting the inliers
        return_inliers: optional boolean flag to activate/deactivate inliers
            output

    Returns:
        the estimated fundamental matrix, and optionally the 2D array containing
        the inliers

    The algorithm uses ransac as a search engine.
    """
    if type(matches) is np.ndarray:
        # write a file containing the list of correspondences. The
        # expected format is a text file with one match per line: x1 y1 x2 y2
        matchfile = common.tmpfile('.txt')
        np.savetxt(matchfile, matches)
    else:
        # assume it is a path to a txt file containing the matches
        matchfile = matches

    # call ransac binary, from Enric's imscript
    inliers = common.tmpfile('.txt')
    Ffile = common.tmpfile('.txt')
    awk_command = "awk {\'printf(\"%e %e %e\\n%e %e %e\\n%e %e %e\", $3, $4, $5, $6, $7, $8, $9, $10, $11)\'}"
    common.run("ransac fmn 1000 %f 7 %s < %s | grep param | %s > %s" %
               (precision, inliers, matchfile, awk_command, Ffile))
    if return_inliers:
        return np.loadtxt(Ffile).transpose(), np.loadtxt(inliers)
    else:
        return np.loadtxt(Ffile).transpose()
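A quick numpy sanity check once F has been estimated as above: for an inlier correspondence the epipolar residual should be close to zero (the exact left/right convention depends on the ransac binary, so the roles of the two points may need to be swapped).

import numpy as np

def epipolar_residual(F, x1, y1, x2, y2):
    # residual of the epipolar constraint [x2 y2 1] . F . [x1 y1 1]^T
    return float(np.dot([x2, y2, 1.0], np.dot(F, [x1, y1, 1.0])))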
Example #15
def fundamental_matrix_ransac(matches, precision=1.0, return_inliers=False):
    """
    Estimates the fundamental matrix given a set of point correspondences
    between two images, using ransac.

    Arguments:
        matches: numpy 2D array of size Nx4 containing a list of pairs of
            matching points. Each line is of the form x1, y1, x2, y2, where (x1,
            y1) is the point in the first view while (x2, y2) is the matching
            point in the second view.
            It can be the path to a txt file containing such an array.
        precision: optional parameter indicating the maximum error
            allowed for counting the inliers
        return_inliers: optional boolean flag to activate/deactivate inliers
            output

    Returns:
        the estimated fundamental matrix, and optionally the 2D array containing
        the inliers

    The algorithm uses ransac as a search engine.
    """
    if type(matches) is np.ndarray:
        # write a file containing the list of correspondences. The
        # expected format is a text file with one match per line: x1 y1 x2 y2
        matchfile = common.tmpfile('.txt')
        np.savetxt(matchfile, matches)
    else:
        # assume it is a path to a txt file containing the matches
        matchfile = matches

    # call ransac binary, from Enric's imscript
    inliers = common.tmpfile('.txt')
    Ffile = common.tmpfile('.txt')
    awk_command = "awk {\'printf(\"%e %e %e\\n%e %e %e\\n%e %e %e\", $3, $4, $5, $6, $7, $8, $9, $10, $11)\'}"
    common.run("ransac fmn 1000 %f 7 %s < %s | grep param | %s > %s" % (precision, inliers, matchfile, awk_command, Ffile))
    if return_inliers:
        return np.loadtxt(Ffile).transpose(), np.loadtxt(inliers)
    else:
        return np.loadtxt(Ffile).transpose()
Example #16
def keypoints_match(k1,
                    k2,
                    method='relative',
                    sift_thresh=0.6,
                    F=None,
                    model=None):
    """
    Find matches among two lists of sift keypoints.

    Args:
        k1, k2: paths to text files containing the lists of sift descriptors
        method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
            distance
        sift_thresh (optional, default is 0.6): threshold for distance between SIFT
            descriptors. These descriptors are 128-vectors, whose coefficients
            range from 0 to 255, thus with absolute distance a reasonable value
            for this threshold is between 200 and 300. With relative distance
            (ie ratio between distance to nearest and distance to second
            nearest), the commonly used value for the threshold is 0.6.
        F (optional): affine fundamental matrix
        model (optional, default is None): model imposed by RANSAC when
            searching the set of inliers. If None all matches are considered as
            inliers.

    Returns:
        a numpy 2D array containing the list of inliers matches.
    """
    # compute matches
    mfile = common.tmpfile('.txt')
    cmd = "matching %s %s -%s %f -o %s" % (k1, k2, method, sift_thresh, mfile)
    if F is not None:
        fij = ' '.join(
            str(x) for x in [F[0, 2], F[1, 2], F[2, 0], F[2, 1], F[2, 2]])
        cmd = "%s -f \"%s\"" % (cmd, fij)
    common.run(cmd)

    # filter outliers with ransac
    if model == 'fundamental':
        common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
    if model == 'homography':
        common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" %
                   (mfile, mfile))
    if model == 'hom_fund':
        common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" %
                   (mfile, mfile))
        common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))

    # return numpy array of matches
    return np.loadtxt(mfile)
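A usage sketch chaining this function with image_keypoints() shown earlier (paths and ROI values are placeholders; both helpers are assumed to come from the same original package):

k1 = image_keypoints('im1.tif', 0, 0, 1000, 1000, max_nb=5000)
k2 = image_keypoints('im2.tif', 0, 0, 1000, 1000, max_nb=5000)
matches = keypoints_match(k1, k2, method='relative', sift_thresh=0.6,
                          model='fundamental')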
Example #17
def crop_rpc_and_image(out_dir, img, rpc, rpc_ref, x, y, w, h):
    """
    Crops an image and its rpc. The ROI may be defined on another image.

    Args:
        out_dir: path to the output directory. The cropped image and rpc files
            will be written there.
        img: path to the input image
        rpc: path to the input rpc
        rpc_ref: path to the rpc file of the reference image
        x, y, w, h: 4 integers defining a rectangular ROI in the reference
            image
    """
    r = rpc_model.RPCModel(rpc)

    # recompute the roi if the input image is not the reference image
    if rpc_ref != rpc:
        r_ref = rpc_model.RPCModel(rpc_ref)
        x, y, w, h = rpc_utils.corresponding_roi(r_ref, r, x, y, w, h)

    # output filenames
    crop_rpc_and_image.counter += 1
    s = "_%02d" % crop_rpc_and_image.counter
    out_img_file = os.path.join(out_dir, "img%s.tif" % s)
    out_rpc_file = os.path.join(out_dir, "rpc%s.xml" % s)
    out_prv_file = os.path.join(out_dir, "prv%s.png" % s)

    # do the crop
    out_r = rpc_apply_crop_to_rpc_model(r, x, y, w, h)
    out_r.write(out_rpc_file)
    common.run('gdal_translate -srcwin %d %d %d %d "%s" "%s"' %
               (x, y, w, h, img, out_img_file))

    # do the preview: it has to fit a 1366x768 rectangle
    w = float(w)
    h = float(h)
    if w > 1366 or h > 768:
        if w / h > float(1366) / 768:
            f = w / 1366
        else:
            f = h / 768
        tmp = common.tmpfile('.tif')
        common.image_zoom_gdal(out_img_file, f, tmp, w, h)
        common.run('gdal_translate -of png -ot Byte -scale %s %s' %
                   (tmp, out_prv_file))
    else:
        common.run('gdal_translate -of png -ot Byte -scale %s %s' %
                   (out_img_file, out_prv_file))
    common.run('rm %s.aux.xml' % out_prv_file)
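The preview-size rule above can be summarised as: downscale by the smallest factor f that makes the crop fit inside a 1366 x 768 rectangle. A self-contained sketch of that rule:

def preview_factor(w, h, max_w=1366, max_h=768):
    w, h = float(w), float(h)
    if w <= max_w and h <= max_h:
        return 1.0
    # wider-than-target aspect ratios are limited by width, otherwise by height
    return w / max_w if w / h > max_w / float(max_h) else h / max_h

print(preview_factor(4000, 1500))   # ~2.93, limited by width
print(preview_factor(1500, 4000))   # ~5.21, limited by height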
Example #18
def register_heights(im1, im2):
    """
    Affine registration of heights.

    Args:
        im1: first height map
        im2: second height map, to be registered on the first one

    Returns:
        path to the registered second height map
    """
    # remove high frequencies with a morphological zoom out
    im1_low_freq = common.image_zoom_out_morpho(im1, 4)
    im2_low_freq = common.image_zoom_out_morpho(im2, 4)

    # first read the images and store them as numpy 1D arrays, removing all the
    # nans and inf
    i1 = piio.read(im1_low_freq).ravel() #np.ravel() gives a 1D view
    i2 = piio.read(im2_low_freq).ravel()
    ind = np.logical_and(np.isfinite(i1), np.isfinite(i2))
    h1 = i1[ind]
    h2 = i2[ind]

    # for debug
    print(np.shape(i1))
    print(np.shape(h1))

#    # 1st option: affine
#    # we search the (u, v) vector that minimizes the following sum (over
#    # all the pixels):
#    #\sum (im1[i] - (u*im2[i]+v))^2
#    # it is a least squares minimization problem
#    A = np.vstack((h2, h2*0+1)).T
#    b = h1
#    z = np.linalg.lstsq(A, b)[0]
#    u = z[0]
#    v = z[1]
#
#    # apply the affine transform and return the modified im2
#    out = common.tmpfile('.tif')
#    common.run('plambda %s "x %f * %f +" > %s' % (im2, u, v, out))

    # 2nd option: translation only
    v = np.mean(h1 - h2)
    out = common.tmpfile('.tif')
    common.run('plambda %s "x %f +" -o %s' % (im2, v, out))

    return out
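A self-contained numpy sketch of the commented-out affine option above: find (u, v) minimising the sum over pixels of (im1[i] - (u*im2[i] + v))^2 by linear least squares (sample values made up).

import numpy as np

h1 = np.array([10.0, 12.0, 15.0, 20.0])   # reference heights
h2 = np.array([ 9.2, 11.1, 14.3, 19.0])   # heights to register
A = np.vstack((h2, np.ones_like(h2))).T
u, v = np.linalg.lstsq(A, h1, rcond=None)[0]
print(u, v)   # registered heights are then u * h2 + v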
Example #19
def register_heights(im1, im2):
    """
    Affine registration of heights.

    Args:
        im1: first height map
        im2: second height map, to be registered on the first one

    Returns:
        path to the registered second height map
    """
    # remove high frequencies with a morphological zoom out
    im1_low_freq = common.image_zoom_out_morpho(im1, 4)
    im2_low_freq = common.image_zoom_out_morpho(im2, 4)

    # first read the images and store them as numpy 1D arrays, removing all the
    # nans and inf
    i1 = piio.read(im1_low_freq).ravel() #np.ravel() gives a 1D view
    i2 = piio.read(im2_low_freq).ravel()
    ind = np.logical_and(np.isfinite(i1), np.isfinite(i2))
    h1 = i1[ind]
    h2 = i2[ind]

    # for debug
    print(np.shape(i1))
    print(np.shape(h1))

#    # 1st option: affine
#    # we search the (u, v) vector that minimizes the following sum (over
#    # all the pixels):
#    #\sum (im1[i] - (u*im2[i]+v))^2
#    # it is a least squares minimization problem
#    A = np.vstack((h2, h2*0+1)).T
#    b = h1
#    z = np.linalg.lstsq(A, b)[0]
#    u = z[0]
#    v = z[1]
#
#    # apply the affine transform and return the modified im2
#    out = common.tmpfile('.tif')
#    common.run('plambda %s "x %f * %f +" > %s' % (im2, u, v, out))

    # 2nd option: translation only
    v = np.mean(h1 - h2)
    out = common.tmpfile('.tif')
    common.run('plambda %s "x %f +" -o %s' % (im2, v, out))

    return out
Example #20
def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
                    model=None):
    """
    Find matches among two lists of sift keypoints.

    Args:
        k1, k2: paths to text files containing the lists of sift descriptors
        method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
            distance
        sift_thresh (optional, default is 0.6): threshold for distance between SIFT
            descriptors. These descriptors are 128-vectors, whose coefficients
            range from 0 to 255, thus with absolute distance a reasonable value
            for this threshold is between 200 and 300. With relative distance
            (ie ratio between distance to nearest and distance to second
            nearest), the commonly used value for the threshold is 0.6.
        F (optional): affine fundamental matrix
        model (optional, default is None): model imposed by RANSAC when
            searching the set of inliers. If None all matches are considered as
            inliers.

    Returns:
        a numpy 2D array containing the list of inliers matches.
    """
    # compute matches
    mfile = common.tmpfile('.txt')
    cmd = "matching %s %s -%s %f -o %s" % (k1, k2, method, sift_thresh, mfile)
    if F is not None:
        fij = ' '.join(str(x) for x in [F[0, 2], F[1, 2], F[2, 0],
                                        F[2, 1], F[2, 2]])
        cmd = "%s -f \"%s\"" % (cmd, fij)
    common.run(cmd)

    # filter outliers with ransac
    if model == 'fundamental':
        common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
    if model == 'homography':
        common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (mfile,
                                                                        mfile))
    if model == 'hom_fund':
        common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (mfile,
                                                                        mfile))
        common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))

    # return numpy array of matches
    return np.loadtxt(mfile)
Example #21
def compute_dem(out, x, y, w, h, z, rpc1, rpc2, H1, H2, disp, mask, rpc_err,
                A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        z: zoom factor (usually 1, 2 or 4) used to produce the input disparity
            map
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): pointing correction matrix for im2

    Returns:
        nothing
    """
    out_dir = os.path.dirname(out)

    # redirect stdout and stderr to log file, in append mode
    if not cfg['debug']:
        fout = open('%s/stdout.log' % out_dir, 'a', 1)  # line-buffered (unbuffered text files are not allowed in Python 3)
        sys.stdout = fout
        sys.stderr = fout

    tmp = common.tmpfile('.tif')
    compute_height_map(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, z, out)

    # close logs
    common.garbage_cleanup()
    if not cfg['debug']:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        fout.close()
Example #22
def compute_dem(out,
                x,
                y,
                w,
                h,
                z,
                rpc1,
                rpc2,
                H1,
                H2,
                disp,
                mask,
                rpc_err,
                A=None):
    """
    Computes an altitude map, on the grid of the original reference image, from
    a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        z: zoom factor (usually 1, 2 or 4) used to produce the input disparity
            map
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        A (optional): pointing correction matrix for im2

    Returns:
        nothing
    """
    out_dir = os.path.dirname(out)

    tmp = common.tmpfile('.tif')
    compute_height_map(rpc1, rpc2, H1, H2, disp, mask, tmp, rpc_err, A)
    transfer_map(tmp, H1, x, y, w, h, z, out)
Example #23
def compute_height_map(rpc1, rpc2, H1, H2, disp, mask, height, rpc_err, A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the disparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): pointing correction matrix for im2
    """
    if A is not None:
        HH2 = common.tmpfile('.txt')
        np.savetxt(HH2, np.dot(np.loadtxt(H2), np.linalg.inv(A)))
    else:
        HH2 = H2

    common.run("disp_to_h %s %s %s %s %s %s %s %s" % (rpc1, rpc2, H1, HH2, disp,
                                                      mask, height, rpc_err))
    return
Example #24
def mosaic(fout, w, h, list_tiles, tw, th, ov):
    """
    Compose several tiles of the same size into a bigger image.

    Args:
        fout: path to the output image
        w, h: output image dimensions
        list_tiles: list containing paths to the input tiles
        tw, th: dimensions of a tile (they must all have the same dimensions)
        ov: overlap between tiles (in pixels)

    Returns:
        nothing
    """
    N = len(list_tiles)
    ntx = np.ceil(float(w - ov) / (tw - ov)).astype(int)
    nty = np.ceil(float(h - ov) / (th - ov)).astype(int)
    assert(ntx * nty == N)

    # default numpy datatype is float64, useless as the output file will be
    # stored with float32
    out = np.zeros([h, w], dtype=np.float32)
    count = np.zeros([h, w], dtype=np.uint8)

    # loop over all the tiles
    for j in range(nty):
        for i in range(ntx):
            sys.stdout.write("\tPasting tile %02d %02d\r" % (j, i))
            sys.stdout.flush()
            # top-left and bottom-right corners of the tile in the output full
            # image
            x0 = i * (tw - ov)
            y0 = j * (th - ov)
            x1 = min(x0 + tw, w)
            y1 = min(y0 + th, h)

            # read the tile with piio. If the tile has not been produced,
            # nothing needs to be done. The corresponding pixels will get the
            # value 'nan' in the output full image.
            tile_fname = list_tiles[j * ntx + i]
            if os.path.isfile(tile_fname):
                tile = piio.read(tile_fname).astype(np.float32)[:, :, 0]
                assert(np.shape(tile) == (th, tw))

                # count the pixels different from nan and inf
                ind = np.isfinite(tile)
                count[y0:y1, x0:x1] += ind[:y1 - y0, :x1 - x0]

                # replace nan and inf with zeros, then add the tile to the
                # output. ~ind is the negation of ind
                tile[~ind] = 0
                out[y0:y1, x0:x1] += tile[:y1 - y0, :x1 - x0]

    # free mem
    if 'tile' in locals():
        del tile
    if 'ind' in locals():
        del ind
    gc.collect()

    sys.stdout.write('\n')
    # put nan where count is zero, and take the average where count is nonzero.
    sys.stdout.write('\tCounting...\n')
    sys.stdout.flush()
    ind = (count > 0)

    sys.stdout.write('\tAveraging...\n')
    sys.stdout.flush()
    out[ind] /= count[ind]

    sys.stdout.write('\tPutting nans on empty pixels...\n')
    sys.stdout.flush()
    out[~ind] = np.nan

    del count

    # saving the 'out' numpy array in TIFF with piio requires too much memory
    # (something like twice the file size) because tiled tiff image writing is
    # not implemented yet in iio.
    # As an alternative, the numpy array is stored in raw and the libtiff util
    # 'raw2tiff' is used to produce a tiff file from it.
    sys.stdout.write('\twriting raw data to disk...\n')
    raw_file = common.tmpfile('')
    out.tofile(raw_file)
    common.run('raw2tiff -w %d -l %d -d float -c zip %s %s' % (w, h, raw_file,
                                                               fout))
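The tiling arithmetic used above, as a stand-alone sketch: with overlap ov, tile (i, j) starts at (i*(tw-ov), j*(th-ov)) and ntx*nty tiles cover the w x h output (sample numbers only).

import numpy as np

w, h, tw, th, ov = 2500, 1800, 1000, 1000, 100
ntx = int(np.ceil(float(w - ov) / (tw - ov)))   # 3
nty = int(np.ceil(float(h - ov) / (th - ov)))   # 2
corners = [(i * (tw - ov), j * (th - ov)) for j in range(nty) for i in range(ntx)]
print(ntx, nty, corners)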
Example #25
def compute_disparity_map(im1, im2, out_disp, out_mask, algo, disp_min, disp_max, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        out_disp: path to the output disparity map
        out_mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'tvl1', 'msmw', 'msmw2', 'mgm' and 'micmac'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    # call the block_matching binary
    if (algo == 'hirschmuller02'):
        bm_binary = hirschmuller02
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if (algo == 'hirschmuller08'):
        bm_binary = hirschmuller08
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'hirschmuller08_laplacian'):
        bm_binary = hirschmuller08_laplacian
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'hirschmuller08_cauchy'):
        bm_binary = hirschmuller08_cauchy
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'sgbm'):
        bm_binary = sgbm
        out_cost = common.tmpfile('.tif')
        common.run("%s %s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_cost, out_mask, disp_min, disp_max, extra_params))

    if (algo == 'tvl1'):
        bm_binary = tvl1
        common.run("%s %s %s %s %s" %(bm_binary, im1, im2, out_disp,
            out_mask))

    if (algo == 'msmw'):
        bm_binary = msmw
        common.run("%s -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m %d -M %d %s %s %s %s" %(bm_binary,
            disp_min, disp_max, im1, im2, out_disp, out_mask))

    if (algo == 'msmw2'):
        bm_binary = msmw2
        common.run("%s -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m %d -M %d %s %s %s %s" % (bm_binary,
            disp_min, disp_max, im1, im2, out_disp, out_mask))

    if (algo == 'mgm'):
        env = os.environ.copy()
        env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = '5'
        env['TSGM'] = '3'
        common.run("%s -r %d -R %d -s vfit -t census -O 8 %s %s %s" % (mgm,
                                                                       disp_min,
                                                                       disp_max,
                                                                       im1, im2,
                                                                       out_disp),
                  env)

        # produce the mask: rejected pixels are marked with nan or inf in disp
        # map
        common.run('plambda %s "isfinite" -o %s' % (out_disp, out_mask))

    if (algo == 'micmac'):
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, '3rdparty', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, 'data', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp %s %s' % (micmac_params, work_dir))

        # run MICMAC
        common.run("MICMAC %s" % os.path.join(work_dir, 'micmac_params.xml'))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                 'Px1_Num6_DeZoom1_LeChantier.tif')
        out_disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp %s %s' % (micmac_disp, out_disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                 'Correl_LeChantier_Num_5.tif')
        out_mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda %s "x x%%q10 < 0 255 if" -o %s' % (micmac_cost,
                                                               out_mask))
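For the 'mgm' case above, the rejection mask is derived from the disparity map itself; a numpy version of that plambda "isfinite" step:

import numpy as np

disp = np.array([[1.5, np.nan], [np.inf, -2.0]])
mask = np.where(np.isfinite(disp), 255, 0).astype(np.uint8)   # 255 = accepted pixel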
Example #26
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.

        This function uses the parameter subsampling_factor from the config module.
        If the factor z > 1 then the output images will be subsampled by a factor z.
        The output matrices H1, H2, and the ranges are also updated accordingly:
        Hi = Z*Hi   with Z = diag(1/z,1/z,1)   and
        disp_min = disp_min/z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """

    # compute rectifying homographies
    H1, H2, disp_min, disp_max = compute_rectification_homographies(im1, im2,
        rpc1, rpc2, x, y, w, h, A)

    ## compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    #x0,y0,w0,h0 = x,y,w,h

    # check that the first homography maps the ROI in the positive quadrant
    assert (round(x0) == 0)
    assert (round(y0) == 0)

    z = cfg['subsampling_factor']

    # apply homographies and do the crops
    # THIS STEP IS HERE TO PRODUCE THE MASKS WHERE THE IMAGE IS KNOWN
    # SURE THIS IS A CRAPPY WAY TO DO THIS, WE SHOULD DEFINITIVELY DO IT
    # SIMULTANEOUSLY WITH THE HOMOGRAPHIC TRANSFORMATION
    msk1 = common.tmpfile('.png')
    msk2 = common.tmpfile('.png')
    common.run('plambda %s "x 255" -o %s' % (im1, msk1))
    common.run('plambda %s "x 255" -o %s' % (im2, msk2))
    homography_cropper.crop_and_apply_homography(msk1, msk1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(msk2, msk2, H2, w0, h0, z)
    # FINALLY : apply homographies and do the crops of the images
    homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, z)
    # COMBINE THE MASK TO REMOVE THE POINTS THAT FALL OUTSIDE THE IMAGE
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk1, out1, out1))
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk2, out2, out2))

#    This also does the job but when z != 1 it fails (segfault: homography)
#    TODO: FIX homography, maybe code a new one
#    common.image_apply_homography(out1, im1, H1, w0, h0)
#    common.image_apply_homography(out2, im2, H2, w0, h0)

    #  If subsampling_factor the homographies are altered to reflect the zoom
    if z != 1:
        from math import floor, ceil
        # update the H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / z

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / z)
        disp_max = ceil(disp_max / z)
        w0 = w0 / z
        h0 = h0 / z

    return H1, H2, disp_min, disp_max
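The subsampling update at the end of rectify_pair, in isolation: with zoom factor z, each homography is premultiplied by Z = diag(1/z, 1/z, 1) and the disparity range is divided by z (example range made up).

import numpy as np
from math import floor, ceil

z = 2
H1 = np.eye(3)
Z = np.diag([1.0 / z, 1.0 / z, 1.0])
H1_zoomed = np.dot(Z, H1)
disp_min, disp_max = floor(-32 / z), ceil(48 / z)   # example range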
Example #27
def compile(stack, args):
    if not args.preamble:
        args.preamble = os.path.join(binary_dir,
                                     'runtime/native/lib/preamble.ll')

    CC = common.cmakeCacheGet('CMAKE_CXX_COMPILER')
    CFLAGS = tuple(
        subprocess.check_output(
            ('pkg-config', '--cflags',
             os.path.join(binary_dir,
                          'runtime/native/native_cc.pc'))).strip().split(' '))
    LIBS = tuple(
        subprocess.check_output(
            ('pkg-config', '--libs',
             os.path.join(binary_dir,
                          'runtime/native/native_cc.pc'))).strip().split(' '))

    # Run skip_to_native to generate our .o file
    objFile = stack.enter_context(common.tmpfile('tmp.gen_object.', '.o'))

    SKFLAGS = tuple(
        filter(
            # Because for some reason gcc decided that linker flags should start
            # with the same prefix as warnings.
            lambda x: not x.startswith('-Wl,'),
            filter(lambda x: x.startswith(('-m', '-f', '-W', '-g', '-O')),
                   CFLAGS)))

    PROFILE_FLAGS = ('--profile', args.profile) if args.profile else ()

    PARALLEL_FLAGS = (('--parallel',
                       str(args.parallel)) if args.parallel is not None else
                      ())

    PRINT_SKIP_TO_LLVM = (
        '--print-skip-to-llvm', ) if args.print_skip_to_llvm else ()

    cmd = (
        os.path.join(source_dir, 'runtime/tools/skip_to_native'),
        '--preamble',
        args.preamble,
        '--output',
        objFile.name,
        '--via-backend',
        args.via_backend,
    ) + PROFILE_FLAGS + PARALLEL_FLAGS + tuple(
        args.srcs) + SKFLAGS + PRINT_SKIP_TO_LLVM

    logger.debug('Running: ' + ' '.join(map(pipes.quote, cmd)))
    common.callHelper(cmd)

    # do not continue compilation if we are just printing skip_to_llvm
    if args.print_skip_to_llvm:
        exit(0)

    # Compile the .o into the final binary
    binFile = stack.enter_context(common.tmpfile('tmp.gen_binary.', ''))
    binFile.close()

    # For each file that we're compiling look to see if it has an associated
    # .cpp file.  For a directory the associated .cpp is named testhelper.cpp.
    # For a file.sk the associated .cpp is named file_testhelper.cpp.
    def testCpp(x):
        if os.path.isdir(x):
            return os.path.join(x, 'testhelper.cpp')
        else:
            return os.path.splitext(x)[0] + '_testhelper.cpp'

    cppSrcs = tuple(
        filter(lambda x: os.path.isfile(x), map(testCpp, args.srcs)))
    sk_standalone = (args.sk_standalone or os.path.join(
        source_dir, 'runtime/native/src/sk_standalone.cpp'))
    cmd = (CC, '-o', binFile.name, '-g', sk_standalone,
           objFile.name) + cppSrcs + CFLAGS + LIBS
    logger.debug('Running: ' + ' '.join(map(pipes.quote, cmd)))

    with common.PerfTimer('clang.runtime'):
        common.callHelper(cmd)

    common.logPerfData('binary_size', ['skip_compiler'],
                       os.path.getsize(binFile.name))

    # if we want to create a named executable at the location of args.output
    if args.output:
        shutil.copy(os.path.join(source_dir, binFile.name), args.output)

    return binFile
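The SKFLAGS filtering above keeps only compiler-style flags from the pkg-config output and drops linker flags; the same logic in isolation (sample flags made up):

CFLAGS = ('-O2', '-g', '-Wall', '-Wl,-rpath,/usr/lib', '-fPIC', '-I/usr/include')
SKFLAGS = tuple(
    f for f in CFLAGS
    if f.startswith(('-m', '-f', '-W', '-g', '-O')) and not f.startswith('-Wl,'))
print(SKFLAGS)   # ('-O2', '-g', '-Wall', '-fPIC')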
Example #28
def mosaic(fout, w, h, list_tiles, tw, th, ov):
    """
    Compose several tiles of the same size into a bigger image.

    Args:
        fout: path to the output image
        w, h: output image dimensions
        list_tiles: list containing paths to the input tiles
        tw, th: dimensions of a tile (they must all have the same dimensions)
        ov: overlap between tiles (in pixels)

    Returns:
        nothing
    """
    N = len(list_tiles)
    ntx = np.ceil(float(w - ov) / (tw - ov)).astype(int)
    nty = np.ceil(float(h - ov) / (th - ov)).astype(int)
    assert (ntx * nty == N)

    # default numpy datatype is float64, useless as the output file will be
    # stored with float32
    out = np.zeros([h, w], dtype=np.float32)
    count = np.zeros([h, w], dtype=np.uint8)

    # loop over all the tiles
    for j in range(nty):
        for i in range(ntx):
            sys.stdout.write("\tPasting tile %02d %02d\r" % (j, i))
            sys.stdout.flush()
            # top-left and bottom-right corners of the tile in the output full
            # image
            x0 = i * (tw - ov)
            y0 = j * (th - ov)
            x1 = min(x0 + tw, w)
            y1 = min(y0 + th, h)

            # read the tile with piio. If the tile has not been produced,
            # nothing needs to be done. The corresponding pixels will get the
            # value 'nan' in the output full image.
            tile_fname = list_tiles[j * ntx + i]
            if os.path.isfile(tile_fname):
                tile = piio.read(tile_fname).astype(np.float32)[:, :, 0]
                assert (np.shape(tile) == (th, tw))

                # count the pixels different from nan and inf
                ind = np.isfinite(tile)
                count[y0:y1, x0:x1] += ind[:y1 - y0, :x1 - x0]

                # replace nan and inf with zeros, then add the tile to the
                # output. ~ind is the negation of ind
                tile[~ind] = 0
                out[y0:y1, x0:x1] += tile[:y1 - y0, :x1 - x0]

    # free mem
    if 'tile' in locals():
        del tile
    if 'ind' in locals():
        del ind
    gc.collect()

    sys.stdout.write('\n')
    # put nan where count is zero, and take the average where count is nonzero.
    sys.stdout.write('\tCounting...\n')
    sys.stdout.flush()
    ind = (count > 0)

    sys.stdout.write('\tAveraging...\n')
    sys.stdout.flush()
    out[ind] /= count[ind]

    sys.stdout.write('\tPutting nans on empty pixels...\n')
    sys.stdout.flush()
    out[~ind] = np.nan

    del count

    # saving the 'out' numpy array in TIFF with piio requires too much memory
    # (something like twice the file size) because tiled tiff image writing is
    # not implemented yet in iio.
    # As an alternative, the numpy array is stored in raw and the libtiff util
    # 'raw2tiff' is used to produce a tiff file from it.
    sys.stdout.write('\twriting raw data to disk...\n')
    raw_file = common.tmpfile('')
    out.tofile(raw_file)
    common.run('raw2tiff -w %d -l %d -d float -c zip %s %s' %
               (w, h, raw_file, fout))
Example #29
def compute_disparity_map(im1, im2, out_disp, out_mask, algo, disp_min, disp_max, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        out_disp: path to the output disparity map
        out_mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'tvl1', 'msmw' and 'msmw2'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    # call the block_matching binary
    if (algo == 'hirschmuller02'):
        bm_binary = hirschmuller02
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        #    LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        #    regionRadius: radius of the window

    if (algo == 'hirschmuller08'):
        bm_binary = hirschmuller08
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'hirschmuller08_laplacian'):
        bm_binary = hirschmuller08_laplacian
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'hirschmuller08_cauchy'):
        bm_binary = hirschmuller08_cauchy
        common.run("%s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_mask, disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        #    regionRadius: radius of the window
        #    P1, P2 : regularization parameters
        #    LRdiff: maximum difference between left and right disparity maps

    if (algo == 'sgbm'):
        bm_binary = sgbm
        out_cost = common.tmpfile('.tif')
        common.run("%s %s %s %s %s %s %d %d %s" %(bm_binary, im1, im2, out_disp,
            out_cost, out_mask, disp_min, disp_max, extra_params))

    if (algo == 'tvl1'):
        bm_binary = tvl1
        common.run("%s %s %s %s %s" %(bm_binary, im1, im2, out_disp,
            out_mask))

    if (algo == 'msmw'):
        bm_binary = msmw
        common.run("%s -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m %d -M %d %s %s %s %s" %(bm_binary,
            disp_min, disp_max, im1, im2, out_disp, out_mask))

    if (algo == 'msmw2'):
        bm_binary = msmw2
        common.run("%s -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m %d -M %d %s %s %s %s" % (bm_binary,
            disp_min, disp_max, im1, im2, out_disp, out_mask))