Example #1
def compute_rectification_homographies_sift(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be
            applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use ransac to estimate F from a set of sift matches, then use
    # loop-zhang to estimate rectifying homographies.

    matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]

    # the matching points are translated to be centered at 0, so that the
    # coordinates range roughly from -1000 to 1000. This reduces the numerical
    # imprecision of the loop-zhang rectification, which can become very large
    # (~ 10 pixels of error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    F = estimation.fundamental_matrix_ransac(np.hstack([pp1, pp2]))
    H1, H2 = estimation.loop_zhang(F, w, h)

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on sift matches ----------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    # pull the top-left corner of the ROI back to the origin
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range
    # around zero
    H2 = register_horizontally(matches, H1, H2)
    disp_m, disp_M = update_disp_range(matches, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
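
A minimal call sketch for the function above. The image paths and the ROI values are invented for illustration; the RPC models are loaded with rpc_model.RPCModel from the RPC xml files, as in the compute_correction example further down.

r1 = rpc_model.RPCModel('im1_rpc.xml')  # hypothetical file names
r2 = rpc_model.RPCModel('im2_rpc.xml')
H1, H2, disp_min, disp_max = compute_rectification_homographies_sift(
    'im1.tif', 'im2.tif', r1, r2, x=0, y=0, w=1000, h=1000)
# H1 and H2 map the two images to a common epipolar frame in which matched
# points lie on the same row; horizontal disparities are expected to fall in
# [disp_min, disp_max].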
Example #2
def evaluation_from_estimated_F(im1,
                                im2,
                                rpc1,
                                rpc2,
                                x,
                                y,
                                w,
                                h,
                                A=None,
                                matches=None):
    """
    Measures the pointing error on a pair of Pleiades images, affine approx.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels, and computed from an
        approximated fundamental matrix.
    """
    if matches is None:
        matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # estimate the fundamental matrix between the two views
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # compute the mean displacement from epipolar lines
    d_sum = 0
    for i in range(len(p1)):
        x = np.array([p1[i, 0], p1[i, 1], 1])
        xx = np.array([p2[i, 0], p2[i, 1], 1])
        ll = F.dot(x)
        #d = np.sign(xx.dot(ll)) * evaluation.distance_point_to_line(xx, ll)
        d = evaluation.distance_point_to_line(xx, ll)
        d_sum += d
    return d_sum / len(p1)
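
The loop above averages the distances from the points of the second image to the epipolar lines F.dot(x). A self-contained sketch of that distance, under the assumption that evaluation.distance_point_to_line implements the standard homogeneous point-to-line distance:

import numpy as np

def distance_point_to_line(x, l):
    # x = (u, v, 1) is a point in homogeneous coordinates, l = (a, b, c) a
    # line; the distance is |a*u + b*v + c| / sqrt(a**2 + b**2)
    return np.abs(np.dot(x, l)) / np.hypot(l[0], l[1])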
Example #3
def evaluation_iterative(im1,
                         im2,
                         rpc1,
                         rpc2,
                         x,
                         y,
                         w,
                         h,
                         A=None,
                         matches=None):
    """
    Measures the mean pointing error on a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels.
    """
    if matches is None:
        matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # compute the pointing error for each match
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    x2 = p2[:, 0]
    y2 = p2[:, 1]
    e = rpc_utils.compute_height(rpc1, rpc2, x1, y1, x2, y2)[1]
    # matches = matches[e < 0.1, :]
    # visualisation.plot_matches_pleiades(im1, im2, matches)
    print "max, mean, min pointing error, from %d points:" % (len(matches))
    print np.max(e), np.mean(e), np.min(e)

    # return the mean error
    return np.mean(np.abs(e))
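
A hedged usage sketch with a precomputed set of matches (the numbers, paths and ROI are invented; each row of the matches array is x1 y1 x2 y2 in the coordinate frame of the full images, and r1, r2 are RPCModel instances as in the first example):

import numpy as np

matches = np.array([[12450.3, 18020.1, 12433.7, 18011.9],
                    [12710.8, 18344.2, 12694.0, 18336.5]])
# since matches is given, the sift step is skipped and the image paths are
# never read; only the rpc models are used to compute the heights
mean_err = evaluation_iterative('im1.tif', 'im2.tif', r1, r2,
                                12000, 18000, 1000, 1000, matches=matches)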
Example #4
def compute_correction(img1, rpc1, img2, rpc2, x, y, w, h,
                       filter_matches='fundamental'):
    """
    Computes the pointing correction matrix for a specific ROI.

    Args:
        img1: path to the reference image.
        rpc1: path to the xml file containing the rpc coefficients of the
            reference image
        img2: path to the secondary image.
        rpc2: path to the xml file containing the rpc coefficients of the
            secondary image
        x, y, w, h: four integers defining the rectangular ROI in the reference
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle. The ROI may be as big as you want. If bigger than
            2 Mpix, only five crops will be used to compute sift matches.
        filter_matches (optional, default is 'fundamental'): model imposed by
            RANSAC when searching the set of inliers

    Returns:
        a 3x3 matrix representing the planar transformation to apply to img2 in
        order to correct the pointing error, and the list of sift matches used
        to compute this correction.
    """
    # read rpcs
    r1 = rpc_model.RPCModel(rpc1)
    r2 = rpc_model.RPCModel(rpc2)

    try:
        if w*h < 2e6:
            m = sift.matches_on_rpc_roi(img1, img2, r1, r2, x, y, w, h)
        else:
            m = filtered_sift_matches_full_img(img1, img2, r1, r2,
                                               cfg['pointing_correction_rois_mode'],
                                               None, 1000, x, y, w, h,
                                               model=filter_matches)
    except Exception as e:
        print e
        print "WARNING: pointing_accuracy.compute_correction: no sift matches."
        m = None

    # A = optimize_pair(img1, img2, r1, r2, None, m)
    if m is not None:
        A = local_translation(r1, r2, x, y, w, h, m)
    else:
        A = None

    return A, m
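
A sketch of how the returned correction could be checked, reusing the evaluation_from_estimated_F function shown above (paths and ROI are invented; the correction matrix A is passed back through its A argument):

x, y, w, h = 0, 0, 1000, 1000
A, m = compute_correction('im1.tif', 'im1_rpc.xml', 'im2.tif', 'im2_rpc.xml',
                          x, y, w, h)
if A is not None:
    r1 = rpc_model.RPCModel('im1_rpc.xml')
    r2 = rpc_model.RPCModel('im2_rpc.xml')
    err_before = evaluation_from_estimated_F('im1.tif', 'im2.tif', r1, r2,
                                             x, y, w, h, A=None, matches=m)
    err_after = evaluation_from_estimated_F('im1.tif', 'im2.tif', r1, r2,
                                            x, y, w, h, A=A, matches=m)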
Example #5
def filtered_sift_matches_full_img(im1,
                                   im2,
                                   rpc1,
                                   rpc2,
                                   flag='automatic',
                                   prev1=None,
                                   a=1000,
                                   x=None,
                                   y=None,
                                   w=None,
                                   h=None,
                                   outfile=None,
                                   model='fundamental'):
    """
    Computes a list of sift matches between two full Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        flag: 'automatic', 'interactive' or 'load', to decide if the five zones
            used to search keypoints are queried interactively, chosen
            automatically, or loaded from the file 'pointing_correction_rois.txt'.
        prev1 (optional): path to the jpg preview image of im1 (used in case of
            interactive mode)
        a: side length of the square tiles used to extract sift points, in
            automatic mode
        x, y, w, h (optional): use a big ROI and extract the five tiles from
            there instead of from the full image.
        outfile (optional): path to a txt file where the matches are saved.
        model (optional, default is 'fundamental'): model imposed by RANSAC
            when searching the set of inliers

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
            If no sift matches are found, then an exception is raised.

    The keypoints are extracted from five zones in the first image. One in the
    center, and four in the corners. The matches are consistent with an
    epipolar model in each of the zones.
    """
    # if no big ROI is defined, use the full image
    if x is None:
        x = 0
        y = 0
        w = rpc1.lastCol
        h = rpc1.lastRow

    # initialize output array
    out = np.zeros(shape=(1, 4))

    if flag == 'automatic':
        # if the tile size is too big wrt the roi size, use a smaller one
        if (min(h, w) < 4 * a):
            a = round(min(h, w) / 4)

        # central zone
        x0 = round((w - a) / 2) + x
        y0 = round((h - a) / 2) + y
        try:
            matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a,
                                              a)
            out = np.vstack((out, matches))
        except Exception as e:
            print "no matches in the central zone"
            print e

        # corner zones
        x0 = round((1 * w - 2 * a) / 4) + x
        y0 = round((1 * h - 2 * a) / 4) + y
        try:
            matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a,
                                              a)
            out = np.vstack((out, matches))
        except Exception as e:
            print "no matches in the corner 1"
            print e

        x0 = round((3 * w - 2 * a) / 4) + x
        y0 = round((1 * h - 2 * a) / 4) + y
        try:
            matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a,
                                              a)
            out = np.vstack((out, matches))
        except Exception as e:
            print "no matches in the corner 2"
            print e

        x0 = round((1 * w - 2 * a) / 4) + x
        y0 = round((3 * h - 2 * a) / 4) + y
        try:
            matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a,
                                              a)
            out = np.vstack((out, matches))
        except Exception as e:
            print "no matches in the corner 3"
            print e

        x0 = round((3 * w - 2 * a) / 4) + x
        y0 = round((3 * h - 2 * a) / 4) + y
        try:
            matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a,
                                              a)
            out = np.vstack((out, matches))
        except Exception as e:
            print "no matches in the corner 4"
            print e

    if flag == 'interactive':
        for i in range(5):
            x, y, w, h = common.get_roi_coordinates(rpc1, prev1)
            try:
                matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y,
                                                  w, h)
                out = np.vstack((out, matches))
            except Exception as e:
                print "no matches in the selected roi"
                print e

    if flag == 'load':
        im = os.path.dirname(im1)
        fname = os.path.join(im, 'pointing_correction_rois.txt')
        rois = np.loadtxt(fname)
        for i in xrange(len(rois)):
            x, y, w, h = rois[i, :]
            try:
                matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y,
                                                  w, h)
                out = np.vstack((out, matches))
            except Exception as e:
                print "no matches in the selected roi"
                print e

    # save and return the full list of matches, only if there are enough
    if len(out) < 7:
        raise Exception("not enough matches")
    else:
        if outfile is not None:
            np.savetxt(outfile, out[1:, :])
        return out[1:, :]
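
In the automatic mode above, the five try/except blocks differ only in the tile origin. A behavior-equivalent restructuring sketch of that part as a helper (for illustration only; sift refers to the same module used throughout these examples):

import numpy as np

def sift_matches_on_tiles(im1, im2, rpc1, rpc2, tile_origins, a):
    # collects sift matches over a list of square tiles of side a, silently
    # skipping the tiles in which the matching fails, as the code above does
    out = np.zeros((1, 4))
    for x0, y0 in tile_origins:
        try:
            m = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x0, y0, a, a)
            out = np.vstack((out, m))
        except Exception as e:
            print "no matches in the tile at (%d, %d)" % (x0, y0)
            print e
    return out[1:, :]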