Example #1
    def __init__(self,
                 name,
                 rate_limiter,
                 map_frame="occupancy_grid",
                 laser_range=(np.pi / 3, np.pi / 3),  # tuple avoids a mutable default argument
                 laser_dist=3):
        self.pose_history = []
        # velocity-command publisher for this robot's namespace
        self.publisher = rospy.Publisher('/' + name + '/cmd_vel',
                                         Twist,
                                         queue_size=5)
        self.laser = Laser(name=name,
                           laser_range=laser_range,
                           max_range=laser_dist)
        self.slam = SLAM(name=name, map_frame=map_frame)
        self.name = name
        self.rate_limiter = rate_limiter
        # create (or truncate) this robot's log file
        with open('/tmp/gazebo_exercise_' + self.name + '.txt', 'w'):
            pass
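# Hedged usage sketch (the listing shows only __init__): the enclosing
# class name is unknown, so "RobotController" below is hypothetical;
# rospy.Rate is the usual rate-limiter object in this pattern.
#
#   rospy.init_node('slam_controller')
#   robot = RobotController(name='tb3_0', rate_limiter=rospy.Rate(20))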
Example #2
import numpy as np
from slam import SLAM
from renderer import Renderer
from display import Display2D, Display3D

if __name__ == "__main__":
    W, H = 640, 480
    F = H  # focal length in pixels (~53 degree vertical FoV)

    K = np.array([[F, 0, W // 2], [0, F, H // 2], [0, 0, 1]])
    Kinv = np.linalg.inv(K)

    disp3d = Display3D()
    disp2d = Display2D(W, H)

    slam = SLAM(W, H, K)
    r = Renderer(W, H)

    fn = 0
    pos_x = 0
    dir_x = True
    while True:
        fn += 1

        # render
        frame, verts = r.draw([pos_x, 0, 0])

        # add gaussian noise
        verts += np.random.normal(0.0, 0.1, verts.shape)

        # ground truth pose
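        # hedged continuation sketch -- the listing is cut off here:
        # build the ground-truth pose, hand the noisy frame to SLAM and
        # refresh both displays; the process_frame/paint/mapp names are
        # assumptions modelled on twitchslam-style APIs
        pose = np.eye(4)
        pose[0, 3] = pos_x
        slam.process_frame(frame, pose)
        disp2d.paint(frame)
        disp3d.paint(slam.mapp)

        # bounce the camera back and forth along x
        pos_x += 0.1 if dir_x else -0.1
        if abs(pos_x) > 1.0:
            dir_x = not dir_x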
Example #3
num_features = parameters.kNumFeatures
"""
select your feature tracker 
N.B.1: here you can use just ORB descriptors! ... at present time
N.B.2: ORB detector (not descriptor) does not work as expected!
"""
tracker_type = TrackerTypes.DES_BF
#tracker_type = TrackerTypes.DES_FLANN
feature_tracker = feature_tracker_factory(
    min_num_features=num_features,
    detector_type=FeatureDetectorTypes.SHI_TOMASI,
    descriptor_type=FeatureDescriptorTypes.ORB,
    tracker_type=tracker_type)
#feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 1, detector_type = FeatureDetectorTypes.FAST, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)
#feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 3, detector_type = FeatureDetectorTypes.BRISK, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)    
#feature_tracker = feature_tracker_factory(min_num_features=num_features, detector_type = FeatureDetectorTypes.ORB, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)


slam = SLAM(cam, feature_tracker, grountruth)

timer = Timer()

#------------- 

# N.B.: keep this coherent with the above forced camera settings 
img_ref = cv2.imread('kitti06-12.png', cv2.IMREAD_COLOR)
#img_cur = cv2.imread('kitti06-12-01.png', cv2.IMREAD_COLOR)
img_cur = cv2.imread('kitti06-13.png', cv2.IMREAD_COLOR)


slam.track(img_ref, frame_id=0)
slam.track(img_cur, frame_id=1)

f_ref = slam.map.frames[-2]
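# hedged continuation sketch -- the listing stops here; the natural next
# step is to grab the current frame and compare the two tracked frames
# (the .kps attribute follows pySLAM conventions but is an assumption)
f_cur = slam.map.frames[-1]
print('keypoints in reference frame:', len(f_ref.kps))
print('keypoints in current frame:  ', len(f_cur.kps))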
Example #4
    feature_tracker = feature_tracker_factory(
        min_num_features=num_features,
        num_levels=3,
        detector_type=FeatureDetectorTypes.SHI_TOMASI,
        descriptor_type=FeatureDescriptorTypes.ORB,
        tracker_type=tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 4, detector_type = FeatureDetectorTypes.FAST, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 4, detector_type = FeatureDetectorTypes.BRISK, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 4, detector_type = FeatureDetectorTypes.BRISK, descriptor_type = FeatureDescriptorTypes.BRISK, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 4, detector_type = FeatureDetectorTypes.AKAZE, descriptor_type = FeatureDescriptorTypes.AKAZE, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, num_levels = 4, detector_type = FeatureDetectorTypes.ORB, descriptor_type = FeatureDescriptorTypes.ORB, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, detector_type = FeatureDetectorTypes.SIFT, descriptor_type = FeatureDescriptorTypes.SIFT, tracker_type = tracker_type)
    #feature_tracker = feature_tracker_factory(min_num_features=num_features, detector_type = FeatureDetectorTypes.SURF, descriptor_type = FeatureDescriptorTypes.SURF, tracker_type = tracker_type)

    # create SLAM object
    slam = SLAM(cam, feature_tracker, grountruth)

    viewer3D = Viewer3D()
    display2d = Display2D(cam.width, cam.height)

    is_draw_matched_points = True
    matched_points_plt = Mplot2d(xlabel='img id',
                                 ylabel='# matches',
                                 title='# matches')

    img_id = 0  # you can start from a desired frame id if needed (e.g. 200)
    while dataset.isOk():

        print('..................................')
        print('frame: ', img_id)
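        # hedged sketch of the rest of a typical loop body: fetch the next
        # image, track it, refresh the viewers; the getImage/track/draw
        # names mirror pySLAM-style drivers but are assumptions here
        img = dataset.getImage(img_id)
        if img is not None:
            slam.track(img, frame_id=img_id)
            display2d.draw(img)
            viewer3D.draw_map(slam)
        img_id += 1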
Example #5
	parser.add_argument("source", nargs="?", type=str, help="path to the video")
	parser.add_argument("pose_path",  nargs="?", type=str, help="pose file path", default=None)
	parser.add_argument("--headless", type=bool, default=None)
	parser.add_argument("-f", type=int, default=525)
	parser.add_argument("--seek", type=int, default=False)
	parser.add_argument("--flip_colorspace",action ='store_true', default=False)
	args = parser.parse_args()
	stream = ImageStream(args.source)
	W, H, K, CNT = stream.create_camera_params(float(args.f))
	if args.seek:
		stream.ss.set_seek(args.seek)
	disp2d, disp3d = None, None
	if args.headless is None:
		disp2d = Display2D(W, H)
		disp3d = Display3D()
	slam = SLAM(W, H, K)

	"""
	mapp.deserialize(open('map.json').read())
	while 1:
	  disp3d.paint(mapp)
	  time.sleep(1)
	"""

	gt_pose = None
	if args.pose_path:
		gt_pose = np.load(args.pose_path)['pose']
		# add scale param?
		gt_pose[:, :3, 3] *= 50

	main(stream, slam, args.flip_colorspace)
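	# aside: a self-contained demonstration of the type=bool pitfall
	# mentioned above -- bool() applied to the raw string "False" is True,
	# which is why "--headless" is a store_true flag
	demo = argparse.ArgumentParser()
	demo.add_argument("--headless", type=bool, default=None)
	assert demo.parse_args(["--headless", "False"]).headless is True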
Example #6
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('path', metavar='path', type=str, help='path to the image directory')
    args = parser.parse_args()

    path = args.path

    image_names = sorted(os.listdir(path))

    w = 1280
    h = 1024
    calibration_matrix = np.array(
        [[0.535719308086809 * w, 0, 0.493248545285398 * w],
         [0, 0.669566858850269 * h, 0.500408664348414 * h], [0, 0, 1]])
    sigma = 0.897966326944875  # lens distortion parameter from the same calibration

    slam = SLAM(width=w, height=h, calibration_matrix=calibration_matrix)

    t = tqdm.tqdm(image_names, total=len(image_names))

    for name in t:
        # print(name)
        img = cv2.imread(os.path.join(path, name), cv2.IMREAD_GRAYSCALE)
        # debug: plt.imshow(img); plt.show()
        # cv2.undistort expects a distortion-coefficient vector, not a
        # scalar; treating sigma as k1 (other coefficients zero) is an
        # assumption
        img2 = cv2.undistort(img, calibration_matrix,
                             np.array([sigma, 0.0, 0.0, 0.0]))
        # debug: plt.imshow(img2); plt.show()
        slam.run(img2)  # run SLAM on the undistorted frame

    fig = plt.figure()
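    # aside: self-contained check of the cv2.undistort call pattern --
    # OpenCV wants the distortion coefficients as a vector (k1, k2, p1, p2);
    # the values below are made up purely for illustration
    K_demo = np.array([[500.0, 0.0, 320.0],
                       [0.0, 500.0, 240.0],
                       [0.0, 0.0, 1.0]])
    dist_demo = np.array([0.1, -0.05, 0.0, 0.0])
    out = cv2.undistort(np.zeros((480, 640), np.uint8), K_demo, dist_demo)
    assert out.shape == (480, 640)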
Example #7
        )
        sys.exit()

    if algorithm not in ["feature", "icp"]:
        print(
            "\nInvalid algorithm choice, choose between 'feature' or 'icp'. Correct usage of function:"
        )
        print("$ python3 main.py [algorithm] [path-to-IMU-data] [path-to-LiDAR-data]\n")
        sys.exit()

    try:
        slam = SLAM(algorithm, imu_path, lid_path)
        slam.get_imu_data()
        slam.get_lidar_data()
    except Exception:  # avoid a bare except: let SystemExit/KeyboardInterrupt through
        print("\nInvalid filename(s). Correct usage of function:")
        print("$ python3 main.py [algorithm] [path-to-IMU-data] [path-to-LiDAR-data]\n")
        sys.exit()

    # Visualize ground truth
    print("Plotting ground truth\n" + 50 * "=")
    slam.plot_ground_truth()

    # Processing data
    slam.set_params()
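    # aside: a hedged alternative sketch -- argparse could enforce the
    # algorithm choice and required paths up front, replacing the manual
    # checks above (argument names here are illustrative only):
    #
    #   parser = argparse.ArgumentParser()
    #   parser.add_argument("algorithm", choices=["feature", "icp"])
    #   parser.add_argument("imu_path")
    #   parser.add_argument("lid_path")
    #   args = parser.parse_args()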