Example #1
    def __init__(self, params, imgtopic):
        # Old and new imgs
        self.old_frame = np.array([])
        self.new_frame = np.array([])

        # Build the CSV log file name from the detector, matcher and a timestamp
        self.strfile = '~/catkin_ws/' + params['detector'] + params[
            'matcher'] + str(datetime.datetime.now()) + '.csv'

        log = pd.DataFrame(columns=[
            'nfeatures', 'nmatches', 'ngoodmatches', 'reproj_error', 'ex_time'
        ])
        log.to_csv(self.strfile)
        rospy.loginfo('saving log file in %s', self.strfile)

        # Create an image filtering instance (VideoCorrect) for video corrections
        self.videocorrect = VideoCorrect()

        # Create Visual Odometry instance
        self.vo = VisualOdometry(params)

        self.image_sub = rospy.Subscriber(imgtopic,
                                          CompressedImage,
                                          self.img_callback,
                                          queue_size=1)
        rospy.loginfo("Subscribed to %s topic", imgtopic)

        self.img_pub = rospy.Publisher('/BlueRov2/image_matches',
                                       Image,
                                       queue_size=1)
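
# The constructor above creates a per-run CSV log with the columns 'nfeatures',
# 'nmatches', 'ngoodmatches', 'reproj_error' and 'ex_time'. The helper below is
# a small optional sketch (not part of the original node) for inspecting such a
# log offline; it assumes pandas is available as pd, as in the code above.
def summarize_vo_log(csv_path):
    # Load the log written by the node and summarize accuracy and timing.
    log = pd.read_csv(csv_path, index_col=0)
    return log[['reproj_error', 'ex_time']].describe()
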
def run():
    match = Matcher()
    img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
    # img = CVImage('/home/cesar/Documentos/Computer_Vision/images_test')

    # Load images
    img.read_image()
    img.copy_image()
    img.acquire()
    # t = threading.Thread(target=plot_image, args=(img.new_image, ))
    # t.start()

    # Correlate

    p1, p2 = correlate_image(match, img, 2, 7)
    print ("Total number of keypoints in second image: \
           {}".format(len(match.global_kpts1)))

    print ("Total number of keypoints in first image: \
           {}".format(len(match.global_kpts2)))

    if not match.is_minmatches:
        print "There aren't matches after filtering. Iterate to next image..."
        return

    # Plot keypoints
    plot_matches(match, img)
    # t.__stop()

    # Now, estimate F
    vo = VisualOdometry()
    match.global_kpts1, match.global_kpts2 = \
        vo.EstimateF_multiprocessing(match.global_kpts2, match.global_kpts1)

    # Get structure of the scene, up to a projectivity
    scene = get_structure(match, img, vo)

    # Optimize F
    # param_opt, param_cov = vo.optimize_F(match.global_kpts1, match.global_kpts2)
    # vo.cam2.set_P(param_opt[:9].reshape((3, 3)))
    # scene = vo.recover_structure(param_opt)

    # Plot it
    plot_scene(scene)

    # Get the Essential matrix
    vo.E_from_F()
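    # E_from_F presumably applies the standard relation between the essential
    # and fundamental matrices, E = K2^T * F * K1 (K1, K2 camera intrinsics);
    # this is an assumption here, since the method body is not shown.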
    print(vo.F)
    print(vo.E)

    # Recover pose
    R, t = vo.get_pose(match.global_kpts1, match.global_kpts2,
                       vo.cam1.focal, vo.cam1.pp)
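    # get_pose presumably recovers the relative camera pose from the essential
    # matrix (as cv2.recoverPose does), returning a rotation R and a unit-norm
    # translation t; the absolute translation scale is not observable from two
    # views alone.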
    print(R)

    print(t)

    # Compute camera matrix 2
    print "CAM2", vo.cam2.P
    vo.cam2.compute_P(R, t)
    print "CAM2", vo.cam2.P

    # Get the scene
    scene = get_structure_normalized(match, img, vo)
    plot_scene(scene)

    # What have we stored?
    print ("Permanent Keypoints in the first image stored: \
           {}".format(type(match.curr_kp[0])))
    print ("Permanent descriptors in the first image stored: \
           {}".format(len(match.curr_dsc)))

    print ("Format of global keypoints: \
           {}".format(type(match.global_kpts1)))

    print ("Format of global keypoints: \
            {}".format(type(match.global_kpts1[0])))
    print ("Shape of global kpts1: {}".format(np.shape(match.global_kpts1)))

    # print ("global keypoint: \
    #       {}".format(match.global_kpts1[0]))
    # Acquire image
    img.copy_image()
    img.acquire()
    d, prev_points, points_tracked = match.lktracker(img.prev_image,
                                                     img.new_image,
                                                     match.global_kpts2)
    print("Points tracked: {}".format(len(points_tracked)))

    plot_two_points(np.reshape(match.global_kpts2,
                               (len(match.global_kpts2), 2)),
                    prev_points, img)
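    # 'd' presumably holds one validity flag per input point (e.g. from a
    # forward-backward Lucas-Kanade consistency check inside lktracker), so the
    # loop below keeps only the points that were tracked reliably.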
    test = []
    for (x, y), good_flag in zip(match.global_kpts2, d):
        if not good_flag:
            continue
        test.append((x, y))
    # plot_two_points(np.reshape(match.global_kpts2, (len(match.global_kpts2), 2)),
    #                 np.asarray(points_tracked), img)
    plot_two_points(np.asarray(test), np.asarray(points_tracked), img)
    # points, st, err = cv2.calcOpticalFlowPyrLK(img.prev_grey, img.new_image,
    #                                            match.global_kpts2, None,
    #                                            **lk_params)
    # print len(points)
    print "Shape of p1: {}".format(np.shape(p1))
    plane = vo.opt_triangulation(p1, p2,
                                 vo.cam1.P, vo.cam2.P)
    plot_scene(plane)
    print "Shpe of plane: {}".format(np.shape(plane))
    print "Type of plane: {}".format(type(plane))
    print np.transpose(plane[:, :3])
    plane = np.transpose(plane)
    print "shape plane: {}".format(np.shape(plane))
    plane_inhomogeneous = np.delete(plane, 3, 1)
    print "shape plane: {}".format(np.shape(plane_inhomogeneous))
    print plane_inhomogeneous[:3, :]
    # Use ransac to fit a plane
    debug = False
    plane_model = RansacModel(debug)
    ransac_fit, ransac_data = ransac.ransac(plane_inhomogeneous,
                                            plane_model,
                                            4, 1000, 1e-4, 50,
                                            debug=debug, return_all=True)
    print "Ransac fit: {}".format(ransac_fit)
    # PLot the plane
    X, Y = np.meshgrid(np.arange(-0.3, 0.7, 0.1), np.arange(0, 0.5, 0.1))
    Z = -(ransac_fit[0] * X - ransac_fit[1] * Y - ransac_fit[3]) / ransac_fit[2]
    plot_plane(X, Y, Z, plane_inhomogeneous[ransac_data['inliers']])
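
# The RansacModel class used inside run() is not shown in this listing. The
# class below is only a minimal sketch of a plane model compatible with the
# scipy-cookbook style ransac.ransac() interface (fit/get_error operating on
# rows of the data array); names and conventions here are assumptions, not the
# original implementation.
class PlaneRansacModel(object):
    def __init__(self, debug=False):
        self.debug = debug

    def fit(self, data):
        # Fit the plane a*x + b*y + c*z + d = 0 to an (n, 3) block of points.
        centroid = data.mean(axis=0)
        _, _, vt = np.linalg.svd(data - centroid)
        normal = vt[2]                    # direction of least variance
        d = -normal.dot(centroid)
        return np.append(normal, d)       # [a, b, c, d], with unit (a, b, c)

    def get_error(self, data, model):
        # Point-to-plane distance of each point for the given model.
        return np.abs(data.dot(model[:3]) + model[3])
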
match.match(roi, roi_prev)
print "good_matches bf", len(match.good_matches)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)

match.draw_matches(roi, match.good_matches)
cv2.namedWindow('roi', cv2.WINDOW_NORMAL)
cv2.imshow('roi', roi)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Compute F
vo = VisualOdometry()
[match.good_kp1, match.good_kp2] = vo.EstimateF_multiprocessing(match.good_kp1,
                                                                match.good_kp2)
print(vo.F)
# Obtain the camera matrices
vo.P_from_F(vo.F)
vo.create_P1()
print "P1", vo.cam1.P
print "P2", vo.cam2.P
# Triangulate points
vo.optimal_triangulation(match.good_kp2, match.good_kp1)
print "origianl points1", match.good_kp1[0:5, :]
print "corrected points1", vo.correctedkpts1[:, 0:5]
print "origianl points2", match.good_kp2[0:5, :]
print "corrected points2", vo.correctedkpts2[:, 0:5]
# print "structure", np.shape(vo.structure)
print "good_matches bf", len(match.good_matches)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)



match.draw_matches(roi, match.good_matches)
cv2.namedWindow('roi', cv2.WINDOW_NORMAL)
cv2.imshow('roi', roi)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Compute F
vo = VisualOdometry()
[match.good_kp1, match.good_kp2] = vo.EstimateF_multiprocessing(match.good_kp1, match.good_kp2)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)
#sk = np.array([[0, -vo.e[2], vo.e[1]], [vo.e[2], 0, -vo.e[0]],
#               [-vo.e[1], vo.e[0], 0]])
#vo.P_from_F(vo.F)

#vo.create_P1()

print "prev F", vo.F
#vo.optimal_triangulation(match.good_kp1, match.good_kp2)
# error = vo.functiontominimize(match.good_kp1, match.good_kp2)
vo.minimize_cost(match.good_kp1, match.good_kp2)
n = 2  # Number of roi's
size = np.array([[w / n], [h / n]], np.int32)
start = np.array([[0], [0]], np.int32)
# Create roi
roi = img.crop_image(start, size, img.new_image)
roi_prev = img.crop_image(start, size, img.prev_image)

match.match(roi, roi_prev)
print "good_matches bf", len(match.good_matches)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)

# Compute F
vo = VisualOdometry()
[match.good_kp1, match.good_kp2] = vo.EstimateF_multiprocessing(match.good_kp1,
                                                                match.good_kp2)
print "Estimated F",  vo.F
# Obtener matrices de cmara
vo.P_from_F(vo.F)
vo.create_P1()
print "P1", vo.cam1.P
print "P2", vo.cam2.P
# Triangulate points

scene = vo.opt_triangulation(match.good_kp1, match.good_kp2,
                             vo.cam1.P, vo.cam2.P)
point2d = vo.cam1.project(scene)
point2d_prime = vo.cam2.project(scene)
print(scene[:, :3])
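
# A rough optional sketch of turning the projections above into a mean
# reprojection error. The exact array layout returned by cam.project() is an
# assumption here (3 x n homogeneous image points, with the measured keypoints
# as n x 2 pixel coordinates); adapt the reshaping if the real shapes differ.
def mean_reprojection_error(projected_hom, measured_px):
    # Convert homogeneous image points to inhomogeneous pixels (n x 2) ...
    proj = (projected_hom[:2] / projected_hom[2]).T
    # ... and average the Euclidean distance to the measured keypoints.
    return np.mean(np.linalg.norm(proj - measured_px, axis=1))

# Hypothetical usage: mean_reprojection_error(point2d, match.good_kp1)
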
Example #6
class UWVisualOdometry(object):
    def __init__(self, params, imgtopic):
        # Old and new imgs
        self.old_frame = np.array([])
        self.new_frame = np.array([])

        # Build the CSV log file name from the detector, matcher and a timestamp
        self.strfile = '~/catkin_ws/' + params['detector'] + params[
            'matcher'] + str(datetime.datetime.now()) + '.csv'

        log = pd.DataFrame(columns=[
            'nfeatures', 'nmatches', 'ngoodmatches', 'reproj_error', 'ex_time'
        ])
        log.to_csv(self.strfile)
        rospy.loginfo('saving log file in %s', self.strfile)

        # Create an image filtering instance (VideoCorrect) for video corrections
        self.videocorrect = VideoCorrect()

        # Create Visual Odometry instance
        self.vo = VisualOdometry(params)

        self.image_sub = rospy.Subscriber(imgtopic,
                                          CompressedImage,
                                          self.img_callback,
                                          queue_size=1)
        rospy.loginfo("Subscribed to %s topic", imgtopic)

        self.img_pub = rospy.Publisher('/BlueRov2/image_matches',
                                       Image,
                                       queue_size=1)

    def img_callback(self, data):
        """ Receives the Image topic message and converts it from sensor_msgs
        to cv image using cv_bridge.
        """
        rospy.logdebug('Received image topic...')
        start = time.time()
        bridge = CvBridge()
        # Read and convert data
        #current_frame = bridge.imgmsg_to_cv2(data)
        np_arr = np.frombuffer(data.data, np.uint8)  # np.fromstring is deprecated for raw bytes
        current_frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        #corrected = self.videocorrect.filter(current_frame)
        corrected = current_frame
        end = time.time()
        rospy.logdebug('Time taken for homomorphic filtering: %s', end - start)

        self.new_frame = corrected

        # On the first iteration, use the incoming frame as both old and new
        if not self.old_frame.size:
            self.old_frame = self.new_frame.copy()

        # Prepare csv for log file
        saved = pd.read_csv(self.strfile, index_col=0, header=0)
        #self.vo.cam.K = self.videocorrect.K
        #try:
        # Do VO stuff.. If you can!!
        # try:
        self.vo.init_reconstruction(optimize=False,
                                    image1=self.old_frame,
                                    image2=self.new_frame)
        print(len(self.vo.matcher.good_matches))
        # Log the time taken
        end = time.time()
        ttaken = end - start

        #rospy.loginfo('VO done in %s s', ttaken)

        new = pd.DataFrame([[
            len(self.vo.matcher.kp1),
            len(self.vo.matcher.matches1),
            len(self.vo.matcher.good_matches), self.vo.reprojection_error_mean,
            ttaken
        ]],
                           columns=[
                               'nfeatures', 'nmatches', 'ngoodmatches',
                               'reproj_error', 'ex_time'
                           ])
        #except:
        # rospy.logwarn('Not enough matches in this pair of frames')
        # end = time.time()
        # ttaken = end-start
        # if not self.vo.matcher.kp1:
        #     nkp1 = 0
        # else:
        #     nkp1 = len(self.vo.matcher.kp1)
        # if not self.vo.matcher.matches1:
        #     nmatches = 0
        # else:
        #     nmatches = len(self.vo.matcher.matches1)
        # if not self.vo.matcher.good_matches:
        #     ngood = 0
        # else:
        #     ngood = len(self.vo.matcher.good_matches)
        # if not self.vo.reprojection_error_mean:
        #     repr_err = 0
        # else:
        #     repr_err = self.vo.reprojection_error_mean
        # new = pd.DataFrame([[ nkp1, nmatches,
        #                       ngood, repr_err ,ttaken]],
        #                        columns = ['nfeatures','nmatches',
        #                                   'ngoodmatches','reproj_error',
        #                                   'ex_time'])

        log = pd.concat([saved, new])
        log.to_csv(self.strfile)

        # Print things
        imgmatch = self.new_frame.copy()
        self.vo.matcher.draw_matches(img=imgmatch,
                                     matches=self.vo.matcher.good_matches)
        # rospy.logdebug("# good matches: {}".format(len(self.vo.matcher.good_matches)))
        # cv2.imshow('matches', imgmatch)
        # cv2.waitKey(3)
        # Now the new frame becomes the older one
        self.old_frame = self.new_frame.copy()

        image_message = bridge.cv2_to_imgmsg(imgmatch, encoding="passthrough")
        del imgmatch
        self.img_pub.publish(image_message)
        del image_message

        rospy.loginfo('EXITING IMG CALLBACK')
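
# A minimal sketch of how this node could be launched; the node name, the
# parameter values and the image topic below are placeholders, not taken from
# the original source.
if __name__ == '__main__':
    rospy.init_node('uw_visual_odometry')
    params = {'detector': 'ORB', 'matcher': 'BF'}  # placeholder settings
    node = UWVisualOdometry(params, '/BlueRov2/image_raw/compressed')
    rospy.spin()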