def loop(self):
        while not rospy.is_shutdown():
            ranges = np.array(self.scan.ranges)
            angles = np.array([self.scan.angle_min + self.scan.angle_increment * i for i in range(len(ranges))])

            # Filter out ranges (and corresponding angles) not in the interval [range_min, range_max].
            good_indices = np.where(np.logical_and(ranges > self.scan.range_min, ranges < self.scan.range_max))
            ranges = ranges[good_indices]
            angles = angles[good_indices]

            # Skip iteration if too few good values.
            if np.size(good_indices) < 2:
                self.rate.sleep()  # keep honoring the loop rate even when skipping
                continue

            # Points in Cartesian coordinates.
            points = np.array([pol2cart(dist, angle) for dist, angle in zip(ranges, angles)])

            # Split points in the middle.
            left_points = points[:len(points) // 2]
            right_points = points[len(points) // 2:]

            # Fit line models with RANSAC algorithm.
            left_model, left_inliers = ransac(left_points, LineModel, min_samples=5, residual_threshold=0.1, max_trials=100)
            right_model, right_inliers = ransac(right_points, LineModel, min_samples=5, residual_threshold=0.1, max_trials=100)

            # Determine validity of the lines
            left_valid = True
            right_valid = True

            if np.size(left_inliers) < 15:
                left_valid = False

            if np.size(right_inliers) < 15:
                right_valid = False

            # Publish row message.
            self.publish_wall(left_model, left_valid, right_model, right_valid)

            # RViz visualization of lines and which points are considered in/outliers.
            # Predict y's using the two outermost x's. This gives us two points on each line.
            left_wall_x = np.array([left_points[0][0], left_points[-1][0]])
            right_wall_x = np.array([right_points[0][0], right_points[-1][0]])
            left_wall_y = left_model.predict_y(left_wall_x)
            right_wall_y = right_model.predict_y(right_wall_x)

            # Publish markers.
            self.publish_visualization_marker(left_wall_x, left_wall_y, Marker.LINE_STRIP, "line_left", (0.2, 1.0, 0.2))
            self.publish_visualization_marker(right_wall_x, right_wall_y, Marker.LINE_STRIP, "line_right", (0.2, 0.2, 1.0))
            self.publish_visualization_marker(left_points[left_inliers, 0], left_points[left_inliers, 1], Marker.POINTS, "left_inliers", (0.5, 1.0, 0.5))
            self.publish_visualization_marker(right_points[right_inliers, 0], right_points[right_inliers, 1], Marker.POINTS, "right_inliers", (0.5, 0.5, 1.0))
            left_outliers = left_inliers == False
            right_outliers = right_inliers == False
            self.publish_visualization_marker(left_points[left_outliers, 0], left_points[left_outliers, 1], Marker.POINTS, "left_outliers", (0.0, 0.5, 0.0))
            self.publish_visualization_marker(right_points[right_outliers, 0], right_points[right_outliers, 1], Marker.POINTS, "right_outliers", (0.0, 0.0, 0.5))

            self.rate.sleep()
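
The loop above depends on a `pol2cart` helper that the snippet does not show. A minimal sketch, assuming it converts a polar (range, bearing) pair into Cartesian coordinates:

import numpy as np

def pol2cart(dist, angle):
    # Hypothetical helper: polar (range, bearing) -> Cartesian (x, y).
    return np.array([dist * np.cos(angle), dist * np.sin(angle)])
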
Example #2
    def calc_transformations(self):
        print('Calculating each pair translation matrix')
        self.images[0].M = numpy.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)

        for i in range(1, len(self.images)):
            image_1 = self.images[i]
            image_2 = self.images[i - 1]

            matches = bf.knnMatch(image_1.des, image_2.des, k=2)
            good = [m for m, n in matches if m.distance <
                    self.knnRatio * n.distance]

            src_pts = numpy.float32(
                [image_1.kp[m.queryIdx].pt for m in good]).reshape(-1, 2)
            dst_pts = numpy.float32(
                [image_2.kp[m.trainIdx].pt for m in good]).reshape(-1, 2)

            model_robust, _ = ransac((src_pts, dst_pts), TranslationTransform,
                                     min_samples=6,
                                     residual_threshold=self.ransacThreshold,
                                     max_trials=1000,
                                     stop_sample_num=0.9 * src_pts.shape[0])

            tx, ty = model_robust.params
            M = numpy.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
            image_1.M = M.dot(image_2.M)

            tx, ty = image_1.M[0, 2], image_1.M[1, 2]
            if ty > 0 and ty > self.drift_y_down:
                self.drift_y_down = ty
            elif ty < 0 and ty < self.drift_y_up:
                self.drift_y_up = ty

            self.drift_x_max = tx
Example #3
def test_ransac_is_data_valid():
    np.random.seed(1)

    is_data_valid = lambda data: data.shape[0] > 2
    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf, is_data_valid=is_data_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
Example #4
def test_ransac_is_data_valid():

    is_data_valid = lambda data: data.shape[0] > 2
    model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                            is_data_valid=is_data_valid, random_state=1)
    assert_equal(model, None)
    assert_equal(inliers, None)
Example #5
def robustEstimate(ptsA, ptsB):
	"""
	Perform robust estimation on the given
	correspondences using RANSAC.

	Args:
	----
		ptsA: A 2 x N matrix of points.
		ptsB: A 2 x N matrix of points.

	Returns:
	-------
		The boolean inlier mask over the correspondences.
	"""
	src, dst, N = [], [], ptsA.shape[1]
	for i in range(N):
		src.append((ptsA[0, i], ptsA[1, i]))
		dst.append((ptsB[0, i], ptsB[1, i]))

	src, dst = np.asarray(src), np.asarray(dst)

	# Non-robust least-squares estimate for comparison (its result is not used below).
	model = ProjectiveTransform()
	model.estimate(src, dst)
	model_robust, inliers = ransac((src, dst), ProjectiveTransform, min_samples=3, residual_threshold=2, max_trials=100)

	return inliers
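
A hypothetical usage sketch for robustEstimate; the synthetic correspondences below are made up purely for illustration:

import numpy as np

# Sketch only: 2 x N point matrices, as the docstring above expects.
ptsA = np.random.rand(2, 50) * 100
ptsB = ptsA + 0.5 * np.random.randn(2, 50)  # noisy copy of ptsA
inliers = robustEstimate(ptsA, ptsB)
print('Number of inliers:', int(np.sum(inliers)))
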
Example #6
def test_ransac_is_model_valid():
    def is_model_valid(model, data):
        return False
    model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                            is_model_valid=is_model_valid, random_state=1)
    assert_equal(model, None)
    assert_equal(inliers, None)
Example #7
    def run3(self):
        """ This function tries alternatives to SIFT and ORB. Does not work."""
        for x in range(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.BRISK()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(d1,d2)
            
            g1,g2 = [],[]
            for i in matches:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
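
run2, run3 and run4 all call a `normaliser` helper that is not shown. A minimal sketch, assuming it rescales intensities to the full uint8 range expected by `enhance_contrast` and the OpenCV detectors:

import numpy as np

def normaliser(im):
    # Hypothetical helper: rescale an image to the full 8-bit range.
    im = np.asarray(im, dtype=np.float64)
    span = max(im.max() - im.min(), 1e-12)
    return (255 * (im - im.min()) / span).astype(np.uint8)
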
Example #8
    def run4(self):
        """ This function registers the images using SURF and RANSAC; works well."""
        for x in range(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(5)), enhance_contrast(normaliser(im2), square(5))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.SURF()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(d1,d2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)
            
            g1,g2 = [],[]
            for i in good:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #9
def auto_find_center_rings(avg_img, sigma=1, no_rings=4, min_samples=3,
                           residual_threshold=1, max_trials=1000):
    """This will find the center of the speckle pattern and the radii of the
    most intense rings.

    Parameters
    ----------
    avg_img : 2D array
        the averaged speckle image
    sigma : float, optional
        Standard deviation of the Gaussian filter.
    no_rings : int, optional
        number of rings to locate
    min_samples : int, optional
        The minimum number of data points to fit a model to.
    residual_threshold : float, optional
        Maximum distance for a data point to be classified as an inlier.
    max_trials : int, optional
        Maximum number of iterations for random sample selection.

    Returns
    -------
    center : tuple
        center coordinates of the speckle pattern
    image : 2D array
        Indices of pixels that belong to the rings,
        directly index into an array
    radii : list
        values of the radii of the rings

    Note
    ----
    scikit-image ransac
    method(http://www.imagexd.org/tutorial/lessons/1_ransac.html) is used to
    automatically find the center and the most intense rings.
    """

    image = img_as_float(color.rgb2gray(avg_img))
    edges = feature.canny(image, sigma)
    coords = np.column_stack(np.nonzero(edges))
    edge_pts_xy = coords[:, ::-1]
    radii = []

    for i in range(no_rings):
        model_robust, inliers = ransac(edge_pts_xy, CircleModel, min_samples,
                                       residual_threshold,
                                       max_trials=max_trials)
        if i == 0:
            center = int(model_robust.params[0]), int(model_robust.params[1])
        radii.append(model_robust.params[2])

        rr, cc = draw.circle_perimeter(center[1], center[0],
                                       int(model_robust.params[2]),
                                       shape=image.shape)
        image[rr, cc] = i + 1
        edge_pts_xy = edge_pts_xy[~inliers]

    return center, image, radii
Example #10
def get_best_matches(k1, k2, matches):
    src = k1[matches[:,0]][:,::-1]
    dst = k2[matches[:,1]][:,::-1]
    # if there are not enough matches, this fails
    model_robust, inliers = ransac((src, dst), AffineTransform,
                                   min_samples=20, residual_threshold=1,
                                   max_trials=40)

    return model_robust, inliers
Example #11
def test_ransac_is_model_valid():
    np.random.seed(1)

    def is_model_valid(model, data):
        return False
    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf,
                            is_model_valid=is_model_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
Example #12
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)
Example #13
def landmark_registration(points_file1, points_file2, out_file, residual_threshold=2, max_trials=100, delimiter="\t"):
    points1 = pd.read_csv(points_file1, delimiter=delimiter)
    points2 = pd.read_csv(points_file2, delimiter=delimiter)

    src = np.concatenate([np.array(points1['x']).reshape([-1,1]), np.array(points1['y']).reshape([-1,1])], axis=-1)
    dst = np.concatenate([np.array(points2['x']).reshape([-1,1]), np.array(points2['y']).reshape([-1,1])], axis=-1)

    model = AffineTransform()
    model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                                   residual_threshold=residual_threshold, max_trials=max_trials)
    pd.DataFrame(model_robust.params).to_csv(out_file, header=None, index=False, sep="\t")
Example #14
def punch(img):
    # Identifying the tissue punches in order to crop the image correctly.
    # Canny edges and RANSAC are used to fit a circle to the punch.
    # A mask is created.

    distance = 0
    r = 0

    float_im, orig, ihc = create_bin(img)
    gray = rgb2grey(orig)
    smooth = gaussian(gray, sigma=3)

    shape = np.shape(gray)
    l = shape[0]
    w = shape[1]

    x = l - 20
    y = w - 20

    rows = np.array([[x, x, x], [x + 1, x + 1, x + 1]])
    columns = np.array([[y, y, y], [y + 1, y + 1, y + 1]])

    corner = gray[rows, columns]

    thresh = np.mean(corner)
    print(thresh)
    binar = (smooth < thresh - 0.01)

    bin = remove_small_holes(binar, min_size=100000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=5000, connectivity=2)
    bin2 = gaussian(bin1, sigma=3)
    bin3 = (bin2 > 0)

    # eosin = IHC[:, :, 2]
    edges = canny(bin3)
    coords = np.column_stack(np.nonzero(edges))

    model, inliers = ransac(coords, CircleModel, min_samples=4, residual_threshold=1, max_trials=1000)

    # rr, cc = circle_perimeter(int(model.params[0]),
    #                          int(model.params[1]),
    #                          int(model.params[2]),
    #                          shape=im.shape)

    a, b = model.params[0], model.params[1]
    r = model.params[2]
    ny, nx = bin3.shape
    ix, iy = np.meshgrid(np.arange(nx), np.arange(ny))
    distance = np.sqrt((ix - b)**2 + (iy - a)**2)

    mask = np.ma.masked_where(distance > r, bin3)  # note: computed but not returned

    return distance, r, float_im, orig, ihc, bin3
Example #15
def test_ransac_invalid_input():
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, max_trials=-1)
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, stop_probability=-1)
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, stop_probability=1.01)
Example #16
    def fit(self, line_img):
        """Estimate the transform matrix self.M based on a binary image
        with lines detected.
        - `line_img`: image with two lines detected, representing the
        left and right boundaries of lanes. In the transformed
        bird-eye view, the two boundaries should be roughly parallel.
        """
        # image shape
        H, W = line_img.shape[:2]
        # find line coordinates
        ys, xs = np.where(line_img > 0)
        # clustering of two lines
        cluster2 = KMeans(2)
        cluster2.fit(np.c_[xs, ys])
        # build robust linear model for each line
        linear_models = []
        for c in [0, 1]:
            i = (cluster2.labels_ == c)

            robust_model, inliers = ransac(np.c_[xs[i], ys[i]], LineModel, 
                                        min_samples=2, residual_threshold=1., max_trials=500)
            linear_models.append(robust_model)
        # get the vertices of a trapezoid as source points
        if self.forward_distance is None:
            middle_h = H / 2 + 100
        else:
            middle_h = H - self.forward_distance
        line0 = [(linear_models[0].predict_x(H), H), (linear_models[0].predict_x(middle_h), middle_h)]
        line1 = [(linear_models[1].predict_x(H), H), (linear_models[1].predict_x(middle_h), middle_h)]
        src_pts = np.array(line0 + line1, dtype=np.float32)
        # get the vertices of destination points
        # here simply map it to a rect with same width/length from bottom
        bottom_x1, bottom_x2 = line0[0][0], line1[0][0]
        
        v = np.array(line0[1]) - np.array(line0[0])
        # it must be the same as the source trapezoid length, otherwise y_mpp will change
        L = H  # alternatively: int(np.sqrt(np.sum(v * v)))
        dst_pts = np.array([(bottom_x1, H), (bottom_x1, H-L),
                           (bottom_x2, H), (bottom_x2, H-L)],
                          dtype=np.float32)
        # estimate the transform matrix
        self.M = cv2.getPerspectiveTransform(src_pts, dst_pts)
        self.invM = cv2.getPerspectiveTransform(dst_pts, src_pts)
        # estimate meter-per-pixel in the transformed image
        self.x_mpp = 3 / np.abs(bottom_x1-bottom_x2)
        self.y_mpp = self.estimate_ympp(line_img)
        return self
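
A hypothetical follow-up showing how the fitted matrices could be applied; `frame` is an assumed camera image and `transformer` an assumed fitted instance of the class above:

# Sketch only: warp a frame of size (W, H) into the bird-eye view and back.
bird_eye = cv2.warpPerspective(frame, transformer.M, (W, H))
restored = cv2.warpPerspective(bird_eye, transformer.invM, (W, H))
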
Example #17
def refineBoundaries(img_orig, plate):
    np.random.seed(7)

    minr, minc, maxr, maxc = plate.bbox
    img_window = img_orig[minr: maxr, minc: maxc]

    plate_points = [(minc, minr), (maxc, minr), (maxc, maxr), (minc, maxr)]
    plate_x = minc
    plate_y = minr

    img_window = np.absolute(filters.prewitt_v(img_window))
    thresh = filters.threshold_otsu(img_window)
    img_window = img_window <= thresh
    labels = measure.label(img_window)

    points = []

    for region in measure.regionprops(labels):
        minr, minc, maxr, maxc = region.bbox
        ratio = float(maxr - minr) / float(maxc - minc)
        height = maxr - minr
        area = region.area

        if (ratio > 1 and area > 10 and height > 10):
            points.append((minc, minr, maxc, maxr))

    if len(points) > 1:
        points = np.array(points)
        x1 = np.min(points[:, 0])
        x2 = np.max(points[:, 2])

        ransac_model, inliers = measure.ransac(points[:, 0:2], measure.LineModel, 5, 3, max_trials=30)
        points = points[inliers]

        if ransac_model.params[1] != 0:
            average_height = int(np.mean(points[:, 3]) - np.mean(points[:, 1]))
            pad_t = average_height // 2
            pad_b = average_height + (average_height // 3)

            y1 = ransac_model.predict_y(x1)
            y2 = ransac_model.predict_y(x2)

            refined_points = [(x1, y1 - pad_t), (x2, y2 - pad_t), (x2, y2 + pad_b), (x1, y1 + pad_b)]
            refined_points = [(x + plate_x, y + plate_y) for (x, y) in refined_points]
            return refined_points

    return plate_points
Example #18
    def NextGID(self,image):
        """ Calculates the next Group ID for the input image """
        NewImg = self.LoadImage(image,Greyscale=True,scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)

        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1], NewImgKeyDescr[1], cross_check=True)

            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]

            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4, residual_threshold=1, max_trials=300)                
                
            NumberOfTrueMatches = np.sum(inliers)  #len(inliers[inliers])

            if NumberOfTrueMatches > 100 :
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'.format(image,NumberOfTrueMatches))
                break
            else :
                logger.debug('Image {0} not matching..(No: of Matches={1})'.format(image,NumberOfTrueMatches))
                continue

        else:
            # None of the images in the for loop has any overlap...So this is a new Group
            self.ImagesKeypointsDescriptors = [] # Erase all previous group items
            # self.ImagesWithOverlap = [] 

            # Increment Group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'.format(self.CurrentGroupID))

        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr) 
        # self.ImagesWithOverlap.append(NewImg)

        # Return the current  group ID
        return self.CurrentGroupID
Example #19
    def _orb_ransac_shift(self, im1, im2, template):
        descriptor_extractor = ORB() #n_keypoints=self.parameters['n_keypoints'])
        key1, des1 = self._find_key_points(descriptor_extractor, im1)
        key2, des2 = self._find_key_points(descriptor_extractor, im2)
        matches = match_descriptors(des1, des2, cross_check=True)

        # estimate affine transform model using all coordinates
        src = key1[matches[:, 0]]
        dst = key2[matches[:, 1]]

        # robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((src, dst), AffineTransform,
                                       min_samples=3, residual_threshold=1,
                                       max_trials=100)
#        diff = []
#        for p1, p2 in zip(src[inliers], dst[inliers]):
#            diff.append(p2-p1)
#        return np.mean(diff, axis=0)

        return model_robust.translation
Example #20
def test_ransac_geometric():
    np.random.seed(1)

    # generate original data without noise
    src = 100 * np.random.random((50, 2))
    model0 = AffineTransform(scale=(0.5, 0.3), rotation=1, translation=(10, 20))
    dst = model0(src)

    # add some faulty data
    outliers = (0, 5, 20)
    dst[outliers[0]] = (10000, 10000)
    dst[outliers[1]] = (-100, 100)
    dst[outliers[2]] = (50, 50)

    # estimate parameters of corrupted data
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20)

    # test whether estimated parameters equal original parameters
    assert_almost_equal(model0._matrix, model_est._matrix)
    assert np.all(np.nonzero(inliers == False)[0] == outliers)
Example #21
def match_from_toa(fk, fd, tk, td, min_matches=10):
    # get matching keypoints between images (from to) or (previous, base) or (next, base)
    try:
        matches = matcher.knnMatch(fd, td, k=2)
        matches_subset = filter_matches(matches)

        src = [fk[match.queryIdx] for match in matches_subset]
        # target image is base image
        dst = [tk[match.trainIdx] for match in matches_subset]

        src = np.asarray(src)
        dst = np.asarray(dst)

        if src.shape[0] > min_matches:
            # TODO - select which transform to use based on sensor data?
            model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=8, residual_threshold=1)
            # inliers is a boolean mask, so count inliers with sum() rather than shape[0]
            accuracy = float(inliers.sum()) / float(src.shape[0])
            ransac_matches = matches_subset[inliers]
            return model_robust, ransac_matches, accuracy
    except Exception as e:
        logging.error(e)
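
match_from_toa (and the match_from_to variants further down) rely on a `filter_matches` helper that is not shown. A minimal sketch, assuming it applies Lowe's ratio test to the knnMatch output:

def filter_matches(matches, ratio=0.75):
    # Hypothetical helper: keep the best match of each pair passing the ratio test.
    good = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good
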
Example #22
def test_ransac_shape():
    # generate original data without noise
    model0 = CircleModel()
    model0.params = (10, 12, 3)
    t = np.linspace(0, 2 * np.pi, 1000)
    data0 = model0.predict_xy(t)

    # add some faulty data
    outliers = (10, 30, 200)
    data0[outliers[0], :] = (1000, 1000)
    data0[outliers[1], :] = (-50, 50)
    data0[outliers[2], :] = (-100, -10)

    # estimate parameters of corrupted data
    model_est, inliers = ransac(data0, CircleModel, 3, 5,
                                random_state=1)

    # test whether estimated parameters equal original parameters
    assert_equal(model0.params, model_est.params)
    for outlier in outliers:
        assert outlier not in inliers
Example #23
def RANSAC(image):
    np.random.seed(seed=1)
    # generate coordinates of line
    x = np.arange(-200, 200)
    y = 0.2 * x + 20
    data = np.column_stack([x, y])

    # add faulty data
    faulty = np.array(30 * [(180., -100)])
    faulty += 5 * np.random.normal(size=faulty.shape)
    data[:faulty.shape[0]] = faulty

    # add gaussian noise to coordinates
    noise = np.random.normal(size=data.shape)
    data += 0.5 * noise
    data[::2] += 5 * noise[::2]
    data[::4] += 20 * noise[::4]

    # fit line using all data
    model = LineModel()
    model.estimate(data)

    # robustly fit line only using inlier data with RANSAC algorithm
    model_robust, inliers = ransac(data, LineModel, min_samples=2,
                                   residual_threshold=1, max_trials=1000)
    outliers = inliers == False

    # generate coordinates of estimated models
    line_x = np.arange(-250, 250)
    line_y = model.predict_y(line_x)
    line_y_robust = model_robust.predict_y(line_x)

    plt.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
             label='Inlier data')
    plt.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
             label='Outlier data')
    plt.plot(line_x, line_y, '-k', label='Line model from all data')
    plt.plot(line_x, line_y_robust, '-b', label='Robust line model')
    plt.legend(loc='lower left')
    plt.show()
Example #24
def match_from_to(fk, fd, tk, td, min_matches=10):
    # get matching keypoints between images (from to) or (previous, base) or (next, base)
    ransac_matches = np.zeros((0, 2))
    matches = np.zeros((0, 2))
    inliers = np.zeros((0, 2))
    # try:
    if 1:
        # opencv way
        matches = matcher.knnMatch(fd, td, k=2)
        matches_subset = filter_matches(matches)
        matches_subset = np.array([[match.trainIdx, match.queryIdx] for match in matches_subset])

        src = np.asarray(fk[matches_subset[:, 1]])
        dst = np.asarray(tk[matches_subset[:, 0]])
        logging.info("STARTING MATCH src: %d dst %d" % (src.shape[0], dst.shape[0]))
        if src.shape[0] > min_matches:
            # TODO - select which transform to use based on sensor data?
            try:
                model_robust, inliers = ransac(
                    (src, dst),
                    AffineTransform,
                    min_samples=min_matches,
                    stop_sample_num=100,
                    max_trials=2000,
                    stop_probability=0.995,
                    residual_threshold=2,
                )
            except Exception as e:
                logging.error(e)

            # inliers is a boolean mask over the matches, so count with sum()
            num_correct = int(inliers.sum())
            logging.info("FOUND inliers %d" % num_correct)
            if num_correct:
                num_matches = src.shape[0]
                num_false = num_matches - num_correct
                ransac_matches = matches_subset[inliers]
                perc_correct = 1 - float(num_false) / float(num_matches)
                return model_robust, ransac_matches, matches, inliers, perc_correct
        else:
            logging.info("Not enough matches: %d < min_matches: %d" % (src.shape[0], min_matches))
Example #25
def find_two_matches(base_img, img, base_k, img_k, base_d, img_d, min_matches=10):

    matches = match_descriptors(base_d, img_d, cross_check=True)
   
    #   * src (image to be registered):
    #   * dst (reference image):
   
    src = img_k[matches[:,1]][:,::-1]
    dst = base_k[matches[:,0]][:,::-1]
    
    if matches.shape[0] > min_matches:
#        model_robust, inliers = ransac((src, dst), ProjectiveTransform,
#                                   min_samples=10, residual_threshold=10,
#                                   stop_sample_num=100, max_trials=300)
#
        model_robust, inliers = ransac((src, dst), AffineTransform,
                                       min_samples=6, residual_threshold=3,
                                       max_trials=100)
        ransac_matches = matches[inliers]
        inlierRatio = ransac_matches.shape[0]/float(matches.shape[0])
        return model_robust, ransac_matches, inlierRatio
    else:
        return np.zeros((0, 2)), np.zeros((0, 2)), 0.0
Example #26
def match_from_to_compare(fk, fd, tk, td, min_matches=10):
    # get matching keypoints between images (from to) or (previous, base) or (next, base)
    ransac_matches = np.zeros((0, 2))
    matches = np.zeros((0, 2))
    inliers = np.zeros((0, 2))
    try:
        # skimage way
        # may need to reverse
        matches = match_descriptors(fd, td, cross_check=True)
        src = tk[matches[:, 1]][::-1]
        dst = fk[matches[:, 0]][::-1]
        logging.info("STARTING MATCH src: %d dst %d" % (src.shape[0], dst.shape[0]))
        if src.shape[0] > min_matches:
            # TODO - select which transform to use based on sensor data?
            try:
                model_robust, inliers = ransac(
                    (src, dst),
                    AffineTransform,
                    min_samples=min_matches,
                    stop_sample_num=100,
                    max_trials=2000,
                    stop_probability=0.995,
                    residual_threshold=2,
                )
            except Exception as e:
                logging.error(e)

            # inliers is a boolean mask over the matches, so count with sum()
            num_correct = int(inliers.sum())
            logging.info("FOUND inliers %d" % num_correct)
            if num_correct:
                num_matches = src.shape[0]
                num_false = num_matches - num_correct
                ransac_matches = matches[inliers]
                perc_correct = 1 - float(num_false) / float(num_matches)
                return model_robust, ransac_matches, matches, inliers, perc_correct
        else:
Example #27
    def run2(self):
        """ This function registers the images using SIFT and RANSAC; works well."""
        for x in range(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)

            sift = cv2.SIFT()

            # find the keypoints and descriptors with SIFT
            kp1, des1 = sift.detectAndCompute(im1,None)
            kp2, des2 = sift.detectAndCompute(im2,None)

            # BFMatcher with default params
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(des1,des2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)


            # good is a list of DMatch objects
            g1,g2 = [],[]
            for i in good:
                g1.append(kp1[i.queryIdx].pt)
                g2.append(kp2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #28
    def get_translation_tool(self, n_keypoints=1000):

        # Convert images to grayscale
        src_image = rgb2gray(self.src_image)
        dst_image = rgb2gray(self.dst_image)

        # Initiate an ORB class object which can extract features & descriptors from images.
        # Set the amount of features that should be found (more = more accurate)
        descriptor_extractor = ORB(n_keypoints=n_keypoints)

        # Extract features and descriptors from source image
        descriptor_extractor.detect_and_extract(src_image)
        self.keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        # Extract features and descriptors from destination image
        descriptor_extractor.detect_and_extract(dst_image)
        self.keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        # Matches the descriptors and gives them rating as to how similar they are
        self.matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        # Selects the coordinates from source image and destination image based on the
        # indices given from the match_descriptors function.
        src = self.keypoints1[self.matches12[:, 0]][:, ::-1]
        dst = self.keypoints2[self.matches12[:, 1]][:, ::-1]

        # Filters out the outliers and generates the transformation matrix based on only the inliers
        model_robust, inliers = \
            ransac((src, dst), ProjectiveTransform,
                min_samples=4, residual_threshold=2)

        # This returns the object "model_robust" which contains the transformation matrix and
        # uses that to translate any coordinate point from source to destination image.
        return model_robust, inliers
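
A hypothetical usage of the returned model; `tool` is an assumed instance of the class above, and passing `model_robust.inverse` follows scikit-image's convention that warp() takes the inverse map:

from skimage.transform import warp

model_robust, inliers = tool.get_translation_tool(n_keypoints=1000)
# Warp the source image into the destination frame using the robust model.
registered = warp(tool.src_image, model_robust.inverse)
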
Example #29
def count_i(land_name):
    data = land_name.split('/')[-1].split('.')[0]
    #file1 = open('txt_file_m18_m/'+data+'.txt','w')
    maxNum = 0
    maxName = ''
    for name in glob(feature_db + '*'):
        #print (name)
        name2 = name.split('/')
        number = name2[-1].split('.')[0]
        # Read features.
        locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
            land_name)
        #cmd_args.features_1_path)
        num_features_1 = locations_1.shape[0]
        #tf.logging.info("Loaded image 1's %d features" % num_features_1)
        locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(name)
        #cmd_args.features_2_path)
        num_features_2 = locations_2.shape[0]
        #tf.logging.info("Loaded image 2's %d features" % num_features_2)

        # Find nearest-neighbor matches using a KD tree.
        d1_tree = cKDTree(descriptors_1)
        _, indices = d1_tree.query(descriptors_2,
                                   distance_upper_bound=_DISTANCE_THRESHOLD)

        # Select feature locations for putative matches.
        locations_2_to_use = np.array([
            locations_2[i, ] for i in range(num_features_2)
            if indices[i] != num_features_1
        ])
        locations_1_to_use = np.array([
            locations_1[indices[i], ] for i in range(num_features_2)
            if indices[i] != num_features_1
        ])

        # Perform geometric verification using RANSAC.
        try:
            _, inliers = ransac((locations_1_to_use, locations_2_to_use),
                                AffineTransform,
                                min_samples=3,
                                residual_threshold=20,
                                max_trials=1000)
            #print(sum(inliers))
            #print(maxNum)
            #"""
            #tf.logging.info('Found %d inliers' % sum(inliers))
            #print(sum(inliers))
            #print(maxNum)
            score = int(sum(inliers))
            #file1.write(name+'\t'+str(sum(inliers))+'\n')
            #maxName = num2name[number]
            num = name.split('/')[-1].split('.')[0]
            if score > 35 and score > maxNum:
                maxNum = score
                maxName = num2name[number]
                print(maxName)
                print(maxNum)
                #break
            #"""

        except Exception:
            # RANSAC failed for this candidate image; skip it.
            pass
        #break
    #print (maxName)
    #result_dic[data] = maxName
    return maxName
Example #30

from csv import reader

import numpy
from skimage.measure import LineModelND, ransac


def read_csv(name, nl="\n", dl=","):
    cloud = []
    with open(name, newline=nl) as csvfile:
        csvreader = reader(csvfile, delimiter=dl)
        for xx, yy, zz in csvreader:
            cloud.append([float(xx), float(yy), float(zz)])
    return cloud


cloud = numpy.array(read_csv("cloud_r.xyz"))

model_robust, inliers = ransac(cloud,
                                LineModelND,
                                min_samples=2,
                                residual_threshold=1,
                                max_trials=1000)
outliers = inliers == False

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(cloud[inliers][:, 0],
           cloud[inliers][:, 1],
           cloud[inliers][:, 2],
           c='g',
           marker='o',
           label='inliers')
# ax.scatter(cloud[outliers][:,0],cloud[outliers][:,1],cloud[outliers][:,2],c='r',marker='o',label='outliers')
Example #31
img0 = img_list[0]
coords0 = corner_list[0]
matching_corners = [
    match_locations(img0, img1, coords0, coords1, min_dist)
    for img1, coords1 in zip(img_list, corner_list)
]

############################################################################
# Once all the points are registered to the reference points, robust
# relative affine transformations can be estimated using the RANSAC method.
src = np.array(coords0)
trfm_list = [
    measure.ransac((dst, src),
                   transform.EuclideanTransform,
                   min_samples=3,
                   residual_threshold=2,
                   max_trials=100)[0].params for dst in matching_corners
]

fig, ax_list = plt.subplots(6, 2, figsize=(6, 9), sharex=True, sharey=True)
for idx, (im, trfm, (ax0, ax1)) in enumerate(zip(img_list, trfm_list,
                                                 ax_list)):
    ax0.imshow(im, cmap="gray", vmin=0, vmax=1)
    ax1.imshow(transform.warp(im, trfm), cmap="gray", vmin=0, vmax=1)

    if idx == 0:
        ax0.set_title("Tilted images")
        ax0.set_ylabel(f"Reference Image\n(PSNR={psnr_ref:.2f})")
        ax1.set_title("Registered images")
Example #32
def MatchFeatures(query_locations,
                  query_descriptors,
                  index_image_locations,
                  index_image_descriptors,
                  ransac_seed=None,
                  feature_distance_threshold=0.9,
                  ransac_residual_threshold=10.0):
  """Matches local features using geometric verification.

  First, finds putative local feature matches by matching `query_descriptors`
  against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an
  affine transformation between the putative feature correspondences using their
  locations.

  Args:
    query_locations: Locations of local features for query image. NumPy array of
      shape [#query_features, 2].
    query_descriptors: Descriptors of local features for query image. NumPy
      array of shape [#query_features, depth].
    index_image_locations: Locations of local features for index image. NumPy
      array of shape [#index_image_features, 2].
    index_image_descriptors: Descriptors of local features for index image.
      NumPy array of shape [#index_image_features, depth].
    ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
    feature_distance_threshold: Distance threshold below which a pair of
      features is considered a potential match, and will be fed into RANSAC.
    ransac_residual_threshold: Residual error threshold for considering matches
      as inliers, used in RANSAC algorithm.

  Returns:
    score: Number of inliers of match. If no match is found, returns 0.

  Raises:
    ValueError: If local descriptors from query and index images have different
      dimensionalities.
  """
  num_features_query = query_locations.shape[0]
  num_features_index_image = index_image_locations.shape[0]
  if not num_features_query or not num_features_index_image:
    return 0

  local_feature_dim = query_descriptors.shape[1]
  if index_image_descriptors.shape[1] != local_feature_dim:
    raise ValueError(
        'Local feature dimensionality is not consistent for query and index '
        'images.')

  # Find nearest-neighbor matches using a KD tree.
  index_image_tree = spatial.cKDTree(index_image_descriptors)
  _, indices = index_image_tree.query(
      query_descriptors, distance_upper_bound=feature_distance_threshold)

  # Select feature locations for putative matches.
  query_locations_to_use = np.array([
      query_locations[i,]
      for i in range(num_features_query)
      if indices[i] != num_features_index_image
  ])
  index_image_locations_to_use = np.array([
      index_image_locations[indices[i],]
      for i in range(num_features_query)
      if indices[i] != num_features_index_image
  ])

  # If there are not enough putative matches, early return 0.
  if query_locations_to_use.shape[0] <= _MIN_RANSAC_SAMPLES:
    return 0

  # Perform geometric verification using RANSAC.
  _, inliers = measure.ransac(
      (index_image_locations_to_use, query_locations_to_use),
      transform.AffineTransform,
      min_samples=_MIN_RANSAC_SAMPLES,
      residual_threshold=ransac_residual_threshold,
      max_trials=_NUM_RANSAC_TRIALS,
      random_state=ransac_seed)
  if inliers is None:
    inliers = []

  return sum(inliers)
Example #33
# find correspondences using simple weighted sum of squared differences
src = []
dst = []
for coord in coords_orig_subpix:
    src.append(coord)
    dst.append(match_corner(coord))
src = np.array(src)
dst = np.array(dst)


# estimate affine transform model using all coordinates
model = AffineTransform()
model.estimate(src, dst)

# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                               residual_threshold=2, max_trials=100)
outliers = inliers == False


# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)


# visualize correspondences
img_combined = np.concatenate((img_orig_gray, img_warped_gray), axis=1)

fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
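
The correspondence search in this snippet uses a `match_corner` helper that is not shown. A minimal sketch, assuming it picks, for each corner in the original image, the warped-image corner whose surrounding window minimizes the sum of squared differences (the `coords_warped_subpix` name is an assumption):

def match_corner(coord, window_ext=5):
    # Hypothetical helper: match a corner by sum of squared differences.
    r, c = np.round(coord).astype(np.intp)
    window_orig = img_orig_gray[r - window_ext:r + window_ext + 1,
                                c - window_ext:c + window_ext + 1]
    ssds = []
    for cr, cc in np.round(coords_warped_subpix).astype(np.intp):
        window_warped = img_warped_gray[cr - window_ext:cr + window_ext + 1,
                                        cc - window_ext:cc + window_ext + 1]
        ssds.append(np.sum((window_orig - window_warped) ** 2))
    # The candidate with the smallest SSD is taken as the correspondence.
    return coords_warped_subpix[np.argmin(ssds)]
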
Example #34
def calculate_point_MCMC(kps_left, sco_left, des_left, kps_right, sco_right,
                         des_right):
    # FLANN feature matching
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=40)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_left, des_right, k=2)

    goodMatch = []
    locations_1_to_use = []
    locations_2_to_use = []
    dis = []
    # filter the matched pairs
    # min_dist = 1000
    # max_dist = 0
    disdif_avg = 0
    # compute the average distance difference
    for m, n in matches:
        disdif_avg += n.distance - m.distance
    disdif_avg = disdif_avg / len(matches)

    for m, n in matches:
        # adaptive threshold
        if n.distance > m.distance + 1 * disdif_avg:
            goodMatch.append(m)
            p2 = cv2.KeyPoint(kps_right[m.trainIdx][0],
                              kps_right[m.trainIdx][1], 1)
            p1 = cv2.KeyPoint(kps_left[m.queryIdx][0], kps_left[m.queryIdx][1],
                              1)
            locations_1_to_use.append([p1.pt[0], p1.pt[1]])
            locations_2_to_use.append([p2.pt[0], p2.pt[1]])
            dis.append([n.distance, m.distance])
    #goodMatch = sorted(goodMatch, key=lambda x: x.distance)
    # print('match num is %d' % len(goodMatch))
    locations_1_to_use = np.array(locations_1_to_use)
    locations_2_to_use = np.array(locations_2_to_use)
    dis = np.array(dis)

    # Perform geometric verification using RANSAC.
    _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                                transform.AffineTransform,
                                min_samples=3,
                                residual_threshold=_RESIDUAL_THRESHOLD,
                                max_trials=1000)

    # print('Found %d inliers' % sum(inliers))

    inlier_idxs = np.nonzero(inliers)[0]

    # keep the closest 60% of the matches by distance
    dis_R = dis[inliers]
    dis_idx = np.argsort(dis_R[:, 0] - dis_R[:, 1])
    dis_sorted = dis_R[dis_idx]
    l = dis_idx.shape[0]
    end = int(l * 0.6)

    # final match selection
    inlier_idxs = inlier_idxs[dis_idx[:end]]

    # print('sorted inliers:', end)

    # final match result
    matches = np.column_stack((inlier_idxs, inlier_idxs))
    # print('whole time is %6.3f' % (time.perf_counter() - start0))

    return locations_1_to_use[inlier_idxs], locations_2_to_use[inlier_idxs]
Example #35
# print("data shape", data.shape)

# add faulty data
faulty = np.array(10 * [(180., -100)])
faulty += 10 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# print("data shape", data.shape)

# fit line using all data
model = LineModelND()
model.estimate(data)

# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data,
                               LineModelND,
                               min_samples=2,
                               residual_threshold=threshold,
                               max_trials=20)
outliers = inliers == False
# print("inliers", inliers)
# print("outliers", outliers, np.count_nonzero(outliers))

# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)

fig, ax = plt.subplots()
ax.plot(data[inliers, 0],
        data[inliers, 1],
        '.b',
Example #36
 #Load up the first frame:
 currframe = frame(cap,frameorder[0])
 weightimage = np.ones(currframe.grayimage.shape)
 imagearr = np.zeros((currframe.rawimage.shape[0],currframe.rawimage.shape[1],currframe.rawimage.shape[2],len(frameorder)))/0.  # division by zero deliberately fills the array with NaNs
 imagearr[:,:,:,0] = ski_exp.rescale_intensity(ski_util.img_as_float(currframe.rawimage))
 #Go through the frames:
 for i in range(1,len(frameorder)):
     print(i, len(frameorder)-1, frameorder[i], imagearr.shape)
     newframe = frame(cap,frameorder[i])
     #Compute matches:
     matches = match_frames_twosided(currframe,newframe,dist_ratio=0.6)
     #matches = match_frames_flann(currframe,newframe)
     matching_coords_curr = currframe.coords[matches > 0,:]
     matching_coords_new = newframe.coords[matches[matches>0],:]
     #Compute homology:
     model,inliers = ski_measure.ransac((matching_coords_curr,matching_coords_new),ski_trans.ProjectiveTransform,min_samples=25,residual_threshold=1.0,max_trials=2000)
     #print currframe.rawimage.max(),currframe.rawimage.min()
     test = homology_chisquared(newframe.rawimage,ski_exp.rescale_intensity(ski_util.img_as_float(currframe.rawimage)),model._matrix.reshape(-1))
     #print np.sum(test),test.min(),test.max()
     #print len(inliers),np.sum(inliers)
     homologies[i,:,:] = model._matrix
     #Determine where the new frame extrema would be after being transformed:
     xmin,xmax,ymin,ymax = compute_extrema(newframe,model._matrix)
     jointimage = ski_exp.rescale_intensity(ski_util.img_as_float(currframe.rawimage.copy()))
     #ax = plot_matches(newframe.rawimage,currframe.rawimage,matching_coords_new[inliers,:],matching_coords_new[inliers,:],matches[inliers],show_below=False,ax=None)
     #ax.figure.savefig('test_matches_{0:d}.jpg'.format(newframe.framenumber))
     #print xmin,ymin,xmax,ymax,imagearr.shape,np.sum(inliers)
     if xmax > currframe.rawimage.shape[1]*1.5 or ymax > currframe.rawimage.shape[0]*1.5 or xmin < currframe.rawimage.shape[1]*-0.5 or ymin < currframe.rawimage.shape[0]*-0.5:
         ski_io.imsave('test_avg_aborted.jpg',currframe.rawimage)
         print "    ",xmin,xmax,ymin,ymax,currframe.rawimage.shape
         sys.exit("Image matching has likely failed, aborting to avoid running out of memory")
Example #37
        matchesMask[i] = [1, 0]
        # print(i)
        # p2 = cv2.KeyPoint(kp1[m.trainIdx][0],  kp1[m.trainIdx][1],  1)
        # p1 = cv2.KeyPoint(kp2[m.queryIdx][0], kp2[m.queryIdx][1], 1)
        locations_1_to_use.append(
            [kp1[m.queryIdx].pt[0], kp1[m.queryIdx].pt[1]])
        locations_2_to_use.append(
            [kp2[m.trainIdx].pt[0], kp2[m.trainIdx].pt[1]])
locations_1_to_use = np.array(locations_1_to_use)
locations_2_to_use = np.array(locations_2_to_use)
# Perform geometric verification using RANSAC.
_RESIDUAL_THRESHOLD = 30

_, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                            transform.AffineTransform,
                            min_samples=3,
                            residual_threshold=_RESIDUAL_THRESHOLD,
                            max_trials=1000)
inlier_idxs = np.nonzero(inliers)[0]
# p1 = locations_2_to_use[inlier_idxs]
# p2 = locations_1_to_use[inlier_idxs]

# Visualize correspondences, and save to file.
# 1. Draw the match connections
plt.rcParams['savefig.dpi'] = 100  # saved-image resolution
plt.rcParams['figure.dpi'] = 100  # display resolution
plt.rcParams['figure.figsize'] = (4.0, 3.0)  # figure size
_, ax = plt.subplots()
plotmatch.plot_matches(ax,
                       img_sar_canny,
                       img_optical_canny,
Example #38
descriptor_extractor.detect_and_extract(img_right)
keypoints_right = descriptor_extractor.keypoints
descriptors_right = descriptor_extractor.descriptors

matches = match_descriptors(descriptors_left, descriptors_right,
                            cross_check=True)

print(f'Number of matches: {matches.shape[0]}')

# Estimate the epipolar geometry between the left and right image.
random_seed = 9
rng = np.random.default_rng(random_seed)

model, inliers = ransac((keypoints_left[matches[:, 0]],
                         keypoints_right[matches[:, 1]]),
                        FundamentalMatrixTransform, min_samples=8,
                        residual_threshold=1, max_trials=5000,
                        random_state=rng)

inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

print(f'Number of inliers: {inliers.sum()}')

# Compare estimated sparse disparities to the dense ground-truth disparities.

disp = inlier_keypoints_left[:, 1] - inlier_keypoints_right[:, 1]
disp_coords = np.round(inlier_keypoints_left).astype(np.int64)
disp_idxs = np.ravel_multi_index(disp_coords.T, groundtruth_disp.shape)
disp_error = np.abs(groundtruth_disp.ravel()[disp_idxs] - disp)
disp_error = disp_error[np.isfinite(disp_error)]
Example #39
    # Array to keep track of all candidates in database.
    inliers_counts = []
    # Read the resized query image for plotting.
    img_1 = mpimg.imread(resized_image)
    for index in unique_image_indexes:
        locations_2_use_query, locations_2_use_db = get_locations_2_use(
            index, indices, accumulated_indexes_boundaries)
        if len(locations_2_use_db) <= 3:
            continue

        # Perform geometric verification using RANSAC.
        _, inliers = ransac(
            (locations_2_use_db,
             locations_2_use_query),  # source and destination coordinates
            AffineTransform,
            min_samples=3,
            residual_threshold=20,
            max_trials=1000)
        # If no inlier is found for a database candidate image, we continue on to the next one.
        if inliers is None or len(inliers) == 0:
            continue
        # the number of inliers as the score for retrieved images.
        inliers_counts.append({"index": index, "inliers": sum(inliers)})
        print('Found inliers for image {} -> {}'.format(index, sum(inliers)))
        # Visualize correspondences.
    #    _, ax = plt.subplots()
    #    img_2 = mpimg.imread(db_images[index])
    #    inlier_idxs = np.nonzero(inliers)[0]
    #    plot_matches(
    #       ax,
Example #40
def MatchFeatures(query_locations,
                  query_descriptors,
                  index_image_locations,
                  index_image_descriptors,
                  ransac_seed=None,
                  descriptor_matching_threshold=0.9,
                  ransac_residual_threshold=10.0,
                  query_im_array=None,
                  index_im_array=None,
                  query_im_scale_factors=None,
                  index_im_scale_factors=None,
                  use_ratio_test=False):
    """Matches local features using geometric verification.

  First, finds putative local feature matches by matching `query_descriptors`
  against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an
  affine transformation between the putative feature correspondences using their
  locations.

  Args:
    query_locations: Locations of local features for query image. NumPy array of
      shape [#query_features, 2].
    query_descriptors: Descriptors of local features for query image. NumPy
      array of shape [#query_features, depth].
    index_image_locations: Locations of local features for index image. NumPy
      array of shape [#index_image_features, 2].
    index_image_descriptors: Descriptors of local features for index image.
      NumPy array of shape [#index_image_features, depth].
    ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
    descriptor_matching_threshold: Threshold below which a pair of local
      descriptors is considered a potential match, and will be fed into RANSAC.
      If use_ratio_test==False, this is a simple distance threshold. If
      use_ratio_test==True, this is Lowe's ratio test threshold.
    ransac_residual_threshold: Residual error threshold for considering matches
      as inliers, used in RANSAC algorithm.
    query_im_array: Optional. If not None, contains a NumPy array with the query
      image, used to produce match visualization, if there is a match.
    index_im_array: Optional. Same as `query_im_array`, but for index image.
    query_im_scale_factors: Optional. If not None, contains a NumPy array with
      the query image scales, used to produce match visualization, if there is a
      match. If None and a visualization will be produced, [1.0, 1.0] is used
      (ie, feature locations are not scaled).
    index_im_scale_factors: Optional. Same as `query_im_scale_factors`, but for
      index image.
    use_ratio_test: If True, descriptor matching is performed via ratio test,
      instead of distance-based threshold.

  Returns:
    score: Number of inliers of match. If no match is found, returns 0.
    match_viz_bytes: Encoded image bytes with visualization of the match, if
      there is one, and if `query_im_array` and `index_im_array` are properly
      set. Otherwise, it's an empty bytes string.

  Raises:
    ValueError: If local descriptors from query and index images have different
      dimensionalities.
  """
    num_features_query = query_locations.shape[0]
    num_features_index_image = index_image_locations.shape[0]
    if not num_features_query or not num_features_index_image:
        return 0, b''

    local_feature_dim = query_descriptors.shape[1]
    if index_image_descriptors.shape[1] != local_feature_dim:
        raise ValueError(
            'Local feature dimensionality is not consistent for query and index '
            'images.')

    # Construct KD-tree used to find nearest neighbors.
    index_image_tree = spatial.cKDTree(index_image_descriptors)
    if use_ratio_test:
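        # Lowe's ratio test: keep a query feature only if its nearest index
        # descriptor is markedly closer than the second-nearest one.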
        distances, indices = index_image_tree.query(query_descriptors,
                                                    k=2,
                                                    n_jobs=-1)
        query_locations_to_use = np.array([
            query_locations[i, ] for i in range(num_features_query)
            if distances[i][0] < descriptor_matching_threshold *
            distances[i][1]
        ])
        index_image_locations_to_use = np.array([
            index_image_locations[indices[i][0], ]
            for i in range(num_features_query)
            if distances[i][0] < descriptor_matching_threshold *
            distances[i][1]
        ])
    else:
        _, indices = index_image_tree.query(
            query_descriptors,
            distance_upper_bound=descriptor_matching_threshold,
            n_jobs=-1)

        # Select feature locations for putative matches.
        query_locations_to_use = np.array([
            query_locations[i, ] for i in range(num_features_query)
            if indices[i] != num_features_index_image
        ])
        index_image_locations_to_use = np.array([
            index_image_locations[indices[i], ]
            for i in range(num_features_query)
            if indices[i] != num_features_index_image
        ])

    # If there are not enough putative matches, early return 0.
    if query_locations_to_use.shape[0] <= _MIN_RANSAC_SAMPLES:
        return 0, b''

    # Perform geometric verification using RANSAC.
    _, inliers = measure.ransac(
        (index_image_locations_to_use, query_locations_to_use),
        transform.AffineTransform,
        min_samples=_MIN_RANSAC_SAMPLES,
        residual_threshold=ransac_residual_threshold,
        max_trials=_NUM_RANSAC_TRIALS,
        random_state=ransac_seed)
    match_viz_bytes = b''

    if inliers is None:
        inliers = []
    elif query_im_array is not None and index_im_array is not None:
        if query_im_scale_factors is None:
            query_im_scale_factors = [1.0, 1.0]
        if index_im_scale_factors is None:
            index_im_scale_factors = [1.0, 1.0]
        inlier_idxs = np.nonzero(inliers)[0]
        _, ax = plt.subplots()
        ax.axis('off')
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1,
                            bottom=0,
                            right=1,
                            left=0,
                            hspace=0,
                            wspace=0)
        plt.margins(0, 0)
        feature.plot_matches(ax,
                             query_im_array,
                             index_im_array,
                             query_locations_to_use * query_im_scale_factors,
                             index_image_locations_to_use *
                             index_im_scale_factors,
                             np.column_stack((inlier_idxs, inlier_idxs)),
                             only_matches=True)

        match_viz_io = io.BytesIO()
        plt.savefig(match_viz_io,
                    format='jpeg',
                    bbox_inches='tight',
                    pad_inches=0)
        match_viz_bytes = match_viz_io.getvalue()

    return sum(inliers), match_viz_bytes
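
# --- A minimal usage sketch (not part of the original source): feeding random
# synthetic features into MatchFeatures. With unstructured random data the
# returned score is typically 0 (the "no match" result). Assumes the module
# constants _MIN_RANSAC_SAMPLES and _NUM_RANSAC_TRIALS are defined, and a SciPy
# version whose cKDTree.query still accepts the n_jobs argument.
rng = np.random.RandomState(0)
score, viz_bytes = MatchFeatures(
    query_locations=rng.rand(100, 2) * 500.0,
    query_descriptors=rng.rand(100, 40).astype('float32'),
    index_image_locations=rng.rand(120, 2) * 500.0,
    index_image_descriptors=rng.rand(120, 40).astype('float32'),
    ransac_seed=0,
    use_ratio_test=True)
print('Match score (inlier count):', score)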
Example #41
src = []
dst = []
for coord in coords_orig_subpix:
    src.append(coord)
    dst.append(match_corner(coord))
src = np.array(src)
dst = np.array(dst)

# estimate affine transform model using all coordinates
model = AffineTransform()
model.estimate(src, dst)

# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst),
                               AffineTransform,
                               min_samples=3,
                               residual_threshold=2,
                               max_trials=100)
outliers = ~inliers

# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)

# visualize correspondence
fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

inlier_idxs = np.nonzero(inliers)[0]
Example #42
        keypoints_right, scores_right, descriptor_right = extract_features(
            img_right)

        matches = match_descriptors(descriptor_left,
                                    descriptor_right,
                                    cross_check=True)

        print('Number of raw matches: %d.' % matches.shape[0])
        #print(keypoints_left)
        keypoints_ll = keypoints_left[matches[:, 0], :2]
        keypoints_rr = keypoints_right[matches[:, 1], :2]
        np.random.seed(0)
        best_model_, inliers = ransac(
            (keypoints_ll, keypoints_rr),
            ProjectiveTransform,
            min_samples=4,
            residual_threshold=4,
            max_trials=10000  # alternative tried: residual_threshold=2, max_trials=10000
        )

        n_inliers = np.sum(inliers)
        print('Number of inliers: %d.' % n_inliers)

        inlier_keypoints_left = [
            cv2.KeyPoint(point[0], point[1], 1)
            for point in keypoints_ll[inliers]
        ]
        inlier_keypoints_right = [
            cv2.KeyPoint(point[0], point[1], 1)
            for point in keypoints_rr[inliers]
        ]
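
        # --- A hedged continuation sketch (not in the original snippet):
        # visualize the geometrically verified matches with OpenCV.
        # `img_left` and `img_right` are assumed to be the source images.
        placeholder_matches = [cv2.DMatch(i, i, 0) for i in range(n_inliers)]
        img_matches = cv2.drawMatches(img_left, inlier_keypoints_left,
                                      img_right, inlier_keypoints_right,
                                      placeholder_matches, None)
        cv2.imwrite('verified_matches.jpg', img_matches)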
Example #43
def main():
    parser = argparse.ArgumentParser(
        description='COLMAP database building',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--sequence_root",
                        type=str,
                        required=True,
                        help='root of video sequence')
    parser.add_argument("--overwrite_database", action="store_true")

    args = parser.parse_args()
    sequence_root = Path(args.sequence_root)
    overwrite_database = args.overwrite_database
    feature_match_path = sequence_root / "feature_matches.hdf5"
    database_path = sequence_root / "database.db"

    if not overwrite_database:
        if database_path.exists():
            print("ERROR: database exists already")
            exit()

    if not feature_match_path.exists():
        print("ERROR: feature matches hdf5 file does not exist")
        exit()

    # Open the database.
    db = COLMAPDatabase.connect(str(database_path))

    # For convenience, try creating all the tables upfront.
    db.create_tables()

    images_root = sequence_root / "images"
    image_list = list(images_root.glob("0*.jpg"))
    image_list.sort()

    image = cv2.imread(str(image_list[0]))
    height, width, _ = image.shape

    # Create camera model -- PINHOLE CAMERA (fx fy cx cy)
    camera_intrinsics_path = sequence_root / "camera_intrinsics_per_view"
    with open(str(camera_intrinsics_path), "r") as f:
        temp = list()
        for i in range(4):
            temp.append(f.readline())
    model, intrinsics = 1, np.array([float(v) for v in temp])  # PINHOLE model id = 1
    camera_id = db.add_camera(model, width, height, intrinsics)

    # Create image ids
    image_id_list = list()
    for image_path in image_list:
        image_id_list.append(db.add_image(image_path.name, camera_id))

    # Create matches per image pair
    f_matches = h5py.File(str(feature_match_path), 'r')
    dataset_matches = f_matches['matches']
    start_index = 0

    tq = tqdm.tqdm(total=dataset_matches.shape[0])
    tq.set_description("Gathering keypoints")
    keypoints_dict = dict()
    # Keypoint gathering
    while start_index < dataset_matches.shape[0]:
        header = dataset_matches[start_index, :, 0]
        num_matches, id_1, id_2, _ = header
        # Convert header values to int first; '{:d}' fails on float values.
        num_matches, id_1, id_2 = int(num_matches), int(id_1), int(id_2)
        tq.set_postfix(source_frame_index='{:d}'.format(id_1),
                       target_frame_index='{:d}'.format(id_2))
        id_1 += 1
        id_2 += 1
        pair_matches = dataset_matches[start_index + 1:start_index + 1 +
                                       num_matches, :, 0]
        pair_matches = pair_matches.astype(np.int64)
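        # Pack each 2D keypoint (x, y) into a unique 1D index: x + y * width.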
        matches = np.concatenate(
            [(pair_matches[:, 0] + pair_matches[:, 1] * width).reshape(
                (-1, 1)),
             (pair_matches[:, 2] + pair_matches[:, 3] * width).reshape(
                 (-1, 1))],
            axis=1)

        if str(id_1) not in keypoints_dict:
            keypoints_dict[str(id_1)] = list(matches[:, 0])
        else:
            keypoints_dict[str(id_1)] += list(matches[:, 0])
            keypoints_dict[str(id_1)] = list(
                np.unique(keypoints_dict[str(id_1)]))

        if str(id_2) not in keypoints_dict:
            keypoints_dict[str(id_2)] = list(matches[:, 1])
        else:
            keypoints_dict[str(id_2)] += list(matches[:, 1])
            keypoints_dict[str(id_2)] = list(
                np.unique(keypoints_dict[str(id_2)]))
        tq.update(num_matches + 1)
        start_index += num_matches + 1

    tq.close()
    new_keypoints_dict = dict()
    # Keypoint indexing building
    # val -- 1D location of keypoint, key -- one-based frame index
    for key, value in keypoints_dict.items():
        value = np.unique(value)
        temp = dict()
        for idx, val in enumerate(value):
            temp[str(val)] = int(idx + 1)
        new_keypoints_dict[key] = temp

    tq = tqdm.tqdm(total=dataset_matches.shape[0])
    tq.set_description("Adding matches to database")
    start_index = 0
    while start_index < dataset_matches.shape[0]:
        header = dataset_matches[start_index, :, 0]
        num_matches, id_1, id_2, _ = header
        num_matches, id_1, id_2 = int(num_matches), int(id_1), int(id_2)
        id_1 += 1
        id_2 += 1
        # new_keypoint_dict -- one-based frame index
        keypoint_dict_1 = new_keypoints_dict[str(id_1)]
        keypoint_dict_2 = new_keypoints_dict[str(id_2)]

        pair_matches = dataset_matches[start_index + 1:start_index + 1 +
                                       num_matches, :, 0]
        model, inliers = ransac((pair_matches[:, :2], pair_matches[:, 2:]),
                                FundamentalMatrixTransform,
                                min_samples=8,
                                residual_threshold=10.0,
                                max_trials=10)

        pair_matches = pair_matches.astype(np.int64)
        pair_matches = np.concatenate(
            [(pair_matches[:, 0] + pair_matches[:, 1] * width).reshape(
                (-1, 1)),
             (pair_matches[:, 2] + pair_matches[:, 3] * width).reshape(
                 (-1, 1))],
            axis=1)

        idx_pair_matches = np.zeros_like(pair_matches).astype(np.int32)
        # one-based keypoint index per frame
        for i in range(pair_matches.shape[0]):
            idx_pair_matches[i, 0] = int(keypoint_dict_1[str(pair_matches[i,
                                                                          0])])
            idx_pair_matches[i, 1] = int(keypoint_dict_2[str(pair_matches[i,
                                                                          1])])
        db.add_matches(id_1, id_2, idx_pair_matches)

        # Fundamental matrix provided
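        # (config=3 corresponds to COLMAP's UNCALIBRATED two-view geometry,
        # assuming the standard COLMAP configuration enum.)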
        db.add_two_view_geometry(id_1,
                                 id_2,
                                 idx_pair_matches[inliers],
                                 F=model.params,
                                 config=3)

        tq.set_postfix(source_frame_index='{:d}'.format(id_1),
                       target_frame_index='{:d}'.format(id_2),
                       inliers_ratio='{:.3f}'.format(
                           np.sum(inliers) / idx_pair_matches.shape[0]))
        tq.update(num_matches + 1)
        start_index += num_matches + 1

    tq.close()
    # Adding keypoints per image to database
    for image_id in image_id_list:
        if str(image_id) not in keypoints_dict:
            continue
        temp = np.asarray(keypoints_dict[str(image_id)])
        temp_1 = np.floor(temp.reshape((-1, 1)) / width)
        temp_2 = np.mod(temp.reshape((-1, 1)), width)
        db.add_keypoints(
            int(image_id),
            np.concatenate([temp_2, temp_1], axis=1).astype(np.int32))

    # Write to the database file
    db.commit()
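
# Hypothetical entry point, assuming the usual guard that the snippet above
# does not show:
if __name__ == '__main__':
    main()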
Example #44
fig, ax = plt.subplots(1, 1, figsize=(15, 12))

# Best match subset for pano0 -> pano1
plot_matches(ax, I0, I1, keypoints0, keypoints1, matches01)
ax.axis('off');

from skimage.transform import ProjectiveTransform
from skimage.measure import ransac

# Select keypoints from
#   * source (image to be registered): pano1
#   * target (reference image): pano0
src = keypoints1[matches01[:, 1]][:, ::-1]
dst = keypoints0[matches01[:, 0]][:, ::-1]

model_robust01, inliers01 = ransac((src, dst), ProjectiveTransform,
                                   min_samples=4, residual_threshold=1, max_trials=300)

print(model_robust01)

fig, ax = plt.subplots(1, 1, figsize=(15, 12))

# Best match subset for pano0 -> pano1
plot_matches(ax, I0, I1, keypoints0, keypoints1, matches01[inliers01])

ax.axis('off');

from skimage.transform import SimilarityTransform

# Shape registration target
r, c = pano0.shape[:2]
    def read_LiDAR(self, data):
        #        sequence_dir = os.path.join(self.data_base_dir, self.sequence_idx, 'velodyne')
        #        sequence_manager = Ptutils.KittiScanDirManager(sequence_dir)
        #        scan_paths = sequence_manager.scan_fullpaths
        tic = time.time()
        rate = rospy.Rate(20)
        self.num_frames = 6000
        #        print('jhloi')
        # Pose Graph Manager (for back-end optimization) initialization

        # Result saver
        self.save_dir = "result/" + self.sequence_idx
        if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)

        # Scan Context Manager (for loop detection) initialization

        # for saving the results as a video
        fig_idx = 1

        num_frames_to_skip_to_show = 5
        #        num_frames_to_save = np.floor(num_frames/num_frames_to_skip_to_show)
        #        with writer.saving(fig, video_name, num_frames_to_save): # this video saving part is optional

        # @@@ MAIN @@@: data stream
        #        for for_idx, scan_path in tqdm(enumerate(scan_paths), total=num_frames, mininterval=5.0):

        # get current information
        self.curr_scan_pts = ptstoxyz.pointcloud2_to_xyz_array(
            data, remove_nans=True)

        #        print (len(self.curr_scan_pts), 'nubetotal')
        #        plane1 = pyrsc.Circle()
        #        best_eq, best_inliers = plane1.fit(self.curr_scan_pts, 0.01)
        #        print(best_inliers)
        #        print(len(self.curr_scan_pts), 'origin')
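        # Fit a dominant 3D line to the scan with RANSAC; its inliers are
        # discarded below so that only the off-line points are kept.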
        model_robust, inliers = ransac(self.curr_scan_pts,
                                       LineModelND,
                                       min_samples=2,
                                       residual_threshold=0.9,
                                       max_trials=3)
        #        print(type(inliers))
        #        pos_True = inliers == False
        #        print(len(inliers[pos_false]) , 'outliers')

        #        print(self.curr_scan_pts.shape)
        #        curr_scan_pts = Ptutils.readScan(scan_paths)

        # Keep only the RANSAC outliers (points not on the fitted line).
        self.curr_scan_pts = self.curr_scan_pts[~inliers, :]
        print(self.curr_scan_pts.shape)
        # save current node
        curr_node_idx = self.for_idx  # node indices start at 0
        self.curr_scan_down_pts = Ptutils.random_sampling(
            self.curr_scan_pts, num_points=self.num_icp_points)
        #        print(len(self.curr_scan_down_pts), 'random')
        if (curr_node_idx == 0):
            self.prev_node_idx = curr_node_idx
            self.prev_scan_pts = copy.deepcopy(self.curr_scan_pts)
            self.icp_initial = np.eye(4)
#            continue
        if self.for_idx == -10:
            self.curr_scan_pts1 = ptstoxyz.pointcloud2_to_xyz_array(
                data, remove_nans=True)
            x1 = self.curr_scan_pts1[:, 0]
            y1 = self.curr_scan_pts1[:, 1]
            z1 = self.curr_scan_pts1[:, 2]
            print(len(self.curr_scan_pts1), 'origin', len(x1))
            #           x2= self.curr_scan_pts[:,0]
            #           y2= self.curr_scan_pts[:,1]
            #           z2= self.curr_scan_pts[:,2]

            x3 = self.curr_scan_pts[:, 0]
            y3 = self.curr_scan_pts[:, 1]
            z3 = self.curr_scan_pts[:, 2]
            print(len(self.curr_scan_pts), 'ransac', len(x3))
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            ax.scatter(x1,
                       y1,
                       z1,
                       marker="*",
                       label="Nube de puntos",
                       s=0.02,
                       c='blue')
            #           ax.scatter(x3,y3,z3, marker = '*',label = "RANSAC outliers" ,s=0.02, c ='red' )

            ax.set_xlabel('X')
            ax.set_ylabel('Y')
            ax.set_zlabel('Z')
            plt.legend(loc=2, prop={'size': 10})
            ax.legend()
            plt.show()
#           fig2=plt.figure()

# calc odometry
#        print (self.prev_scan_down_pts)
        self.prev_scan_down_pts = Ptutils.random_sampling(
            self.prev_scan_pts, num_points=self.num_icp_points)
        self.odom_transform, _, _ = ICP.icp(self.curr_scan_down_pts,
                                            self.prev_scan_down_pts,
                                            init_pose=self.icp_initial,
                                            max_iterations=100)
        #        print (self.odom_transform)
        # update the current (moved) pose
        self.curr_se3 = np.matmul(self.curr_se3, self.odom_transform)

        self.icp_initial = self.odom_transform  # constant-velocity assumption (helps the next ICP converge)
        #        print(self.odom_transform)
        # add the odometry factor to the graph

        #        self.for_idx= self.for_idx+1
        # update the previous-frame information
        self.prev_node_idx = curr_node_idx
        self.prev_scan_pts = copy.deepcopy(self.curr_scan_pts)

        # loop detection and optimize the graph
        #        if(PGM.curr_node_idx > 1 and PGM.curr_node_idx % self.try_gap_loop_detection == 0):
        #            # 1/ loop detection
        #            loop_idx, loop_dist, yaw_diff_deg = SCM.detectLoop()
        #            if(loop_idx == None): # NOT FOUND
        #                pass
        #            else:
        #                print("Loop event detected: ", PGM.curr_node_idx, loop_idx, loop_dist)
        #                # 2-1/ add the loop factor
        #                loop_scan_down_pts = SCM.getPtcloud(loop_idx)
        #                loop_transform, _, _ = ICP.icp(curr_scan_down_pts, loop_scan_down_pts, init_pose=yawdeg2se3(yaw_diff_deg), max_iterations=20)
        #                PGM.addLoopFactor(loop_transform, loop_idx)
        #
        #                # 2-2/ graph optimization
        #                PGM.optimizePoseGraph()
        #
        #                # 2-2/ save optimized poses
        #                ResultSaver.saveOptimizedPoseGraphResult(PGM.curr_node_idx, PGM.graph_optimized)

        # save the ICP odometry pose result (no loop closure)
        #        ResultSaver.saveUnoptimizedPoseGraphResult(self.curr_se3, curr_node_idx)
        self.pose_list = np.vstack(
            (self.pose_list, np.reshape(self.curr_se3, (-1, 16))))
        #        if(curr_node_idx % self.save_gap == 0 or curr_node_idx == self.num_frames):
        # save odometry-only poses
        filename = "pose" + self.sequence_idx + "unoptimized_" + str(
            int(time.time())) + ".csv"
        filename = os.path.join(self.save_dir, filename)
        np.savetxt(filename, self.pose_list, delimiter=",")

        if (self.for_idx % num_frames_to_skip_to_show == 0):
            self.x = self.pose_list[:, 3]
            self.y = self.pose_list[:, 7]
            ##            z = self.pose_list[:,11]
            #
            fig = plt.figure(fig_idx)
            plt.clf()
            plt.plot(-self.y, self.x,
                     color='blue')  # kitti camera coord for clarity
            plt.axis('equal')
            plt.xlabel('x', labelpad=10)
            plt.ylabel('y', labelpad=10)
            plt.draw()
            plt.pause(0.01)  # necessary for the plot to update
#            ResultSaver.vizCurrentTrajectory(fig_idx=fig_idx)
        self.for_idx = self.for_idx + 1
        #                writer.grab_frame()
        msg.header.stamp = rospy.get_rostime()
        msg.header.frame_id = " UTM_COORDINATE "
        msg.pose.pose.position.x = -self.y[curr_node_idx - 4]
        msg.pose.pose.position.y = self.x[curr_node_idx - 4]

        self.pub.publish(msg)
        rate.sleep()
        toc = time.time()
def localize_using_landmarks(features, est_position, config):
    ''' Localize the robot by finding the transformation model (rotation +
        translation) that best matches the set of detected features to the
        known landmarks on the table. This needs an estimate of the position.

    Parameters
    ----------
    features: list of (N) OrientedCorner objects
    est_position: list of 3 floats: 1. position x; 2. position y; 3. heading
                  in radians

    Returns
    -------
    position: (x, y) position of the robot, or (None, None) if no model fits
    heading: heading in radians, or None if no model fits
    '''
    position = (None, None)
    heading = None

    if features is None:
        return (None, None), None

    features_list = np.array([(feature.x, feature.y) for feature in features])

    table_landmarks = np.array(
        [[0, 0], [config['TABLE_WIDTH'], 0],
         [config['TABLE_WIDTH'], config['TABLE_HEIGHT']],
         [0, config['TABLE_HEIGHT']]],
        dtype=float)

    if est_position[1] < config['TABLE_WIDTH'] / 2:
        table_landmarks = np.vstack(
            (table_landmarks,
             np.array([[
                 0, config['TABLE_HEIGHT'] / 2 -
                 config['CENTER_OBSTACLE_HALF_WIDTH']
             ],
                       [
                           config['TABLE_WIDTH'], config['TABLE_HEIGHT'] / 2 -
                           config['CENTER_OBSTACLE_HALF_WIDTH']
                       ]])))
    else:
        table_landmarks = np.vstack(
            (table_landmarks,
             np.array([[
                 0, config['TABLE_HEIGHT'] / 2 +
                 config['CENTER_OBSTACLE_HALF_WIDTH']
             ],
                       [
                           config['TABLE_WIDTH'], config['TABLE_HEIGHT'] / 2 +
                           config['CENTER_OBSTACLE_HALF_WIDTH']
                       ]])))

    table_rel_landmarks = table_landmarks - est_position[0:2]
    table_rel_landmarks = rotatePolygon(table_rel_landmarks,
                                        -est_position[2] + np.pi / 2)

    pair_idx = pair_points(table_rel_landmarks, features_list)
    src = table_rel_landmarks[pair_idx]
    dst = features_list

    model_robust, inliers = ransac(
        (src, dst),
        TransformationModel,
        min_samples=2,
        residual_threshold=config['MAX_RESIDUAL_LANDMARKS'],
        max_trials=100,
        is_data_valid=lambda cls, data: TransformationModel.is_data_valid(
            cls, data, config['MIN_DISTANCE_VALID_LANDMARKS']))
    outliers = ~inliers

    if model_robust is not None and any(inliers):
        model_robust.estimate(src[inliers], dst[inliers])

        heading = np.array(-model_robust.rotation + est_position[2])
        position = np.array(est_position[0:2]).T + rotatePolygon(
            model_robust.translation, est_position[2] + np.pi / 2).squeeze()

    return position, heading
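
# --- A hedged usage sketch (not from the original source). The config keys
# are taken from the function body; the numeric values are illustrative
# assumptions only.
config = {'TABLE_WIDTH': 3.0, 'TABLE_HEIGHT': 2.0,
          'CENTER_OBSTACLE_HALF_WIDTH': 0.15,
          'MAX_RESIDUAL_LANDMARKS': 0.05,
          'MIN_DISTANCE_VALID_LANDMARKS': 0.3}
# `detected_features` would come from the corner detector (OrientedCorner list):
# position, heading = localize_using_landmarks(detected_features,
#                                              [1.5, 1.0, np.pi / 2], config)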
src = []
dst = []
new_match = []
#center = (img1_series['original'].shape[0]/2,img1_series['original'].shape[1]/2)
for match in matches[10:]:
    # if np.linalg.norm(np.array(kp1[match.queryIdx].pt)-center)<250:
    #     new_match.append(match)
    src.append(kp1[match.queryIdx].pt)
    dst.append(kp2[match.trainIdx].pt)
src = np.array(src)
dst = np.array(dst)

model, inliers = ransac((src, dst),
                        PolyTF,
                        min_samples=6,
                        residual_threshold=0.8,
                        max_trials=4000)
sk_img1 = img1_series['sk_vessel_mask']
sk_img2 = img2_series['sk_vessel_mask']

sk_img2_warped = transform.warp(sk_img2, model)

img2_warped = skimage2opencv(sk_img2_warped)

print('fit complete.')

blend = (img2_warped * 0.5 + img1_series['vessel_mask'] * 0.5).astype(np.uint8)
cv2.namedWindow('Result', cv2.WINDOW_NORMAL)
# cv2.imshow('Result',blend )
# cv2.waitKey(1)
orb.detect_and_extract(image0)
keypoints1 = orb.keypoints
descriptors1 = orb.descriptors

orb.detect_and_extract(image1)
keypoints2 = orb.keypoints
descriptors2 = orb.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

src = keypoints2[matches12[:, 1]][:, ::-1]
dst = keypoints1[matches12[:, 0]][:, ::-1]

transform_model, inliers = \
    ransac((src, dst), ProjectiveTransform,
           min_samples=4, residual_threshold=2)

r, c = image1.shape[:2]

corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])

warped_corners = transform_model(corners)

all_corners = np.vstack((warped_corners, corners))

corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)

output_shape = (corner_max - corner_min)
output_shape = np.ceil(output_shape[::-1])
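
# --- A hedged continuation (following the standard scikit-image stitching
# recipe; not part of the snippet above): translate both images into the
# common output frame and warp image1 into image0's coordinates.
from skimage.transform import SimilarityTransform, warp

out_shape = tuple(np.asarray(output_shape, dtype=int))
offset = SimilarityTransform(translation=-corner_min)
image0_warped = warp(image0, offset.inverse, output_shape=out_shape)
image1_warped = warp(image1, (transform_model + offset).inverse,
                     output_shape=out_shape)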
Example #49
faulty = np.array(30 * [(180., -100)])
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty

# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]

# fit line using all data
model = LineModel()
model.estimate(data)

# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModel, min_samples=2,
                               residual_threshold=1, max_trials=1000)
outliers = ~inliers

# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)

plt.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
         label='Inlier data')
plt.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
         label='Outlier data')
plt.plot(line_x, line_y, '-k', label='Line model from all data')
plt.plot(line_x, line_y_robust, '-b', label='Robust line model')
plt.legend(loc='lower left')
plt.show()