Example 1
def main():
    odom_global = [
        [0, 0, 0], [-0.0300632963656, 0.0, 1.3448275862099877],
        [-0.06607260741470895, -0.0008453538851817006, 2.91379310344999],
        [-0.10714376028563645, -0.00293584149525012, 4.931034482759998],
        [-0.14218198242984106, -0.005958795717176256, 6.724137931039991]
    ]
    odom_relative = [[-0.0300632963656, 0, 1.34482758621],
                     [-0.036019232438, 0, 1.56896551724],
                     [-0.0411243205001, 0, 2.01724137931],
                     [-0.0351683844277, 0, 1.79310344828]]
    visual_global = [
        [0, 0, 0], [0.0, 0.0, 0.0],
        [0.18664165536919183, 0.0, -0.1036353915413315],
        [0.45068738580529732, -0.0004776008413494182, -0.099896609254045643],
        [0.71659621910361793, -0.00094121994138562253, 0.31771146544934936]
    ]
    visual_relative = [[0, 0, 0],
                       [0.18664165536919183, 0, -0.10363539154133775],
                       [0.26404616237336254, 0, 0.0037387822872720043],
                       [0.26590923746410905, 0, 0.41760807470338812]]

    try:
        s = Slam(odom_global, odom_relative, visual_global, visual_relative)
        estimated_global = s.estimate_global()
        print(estimated_global)
    except Exception as e:
        print("opencv test failed: {}".format(e))
Example 2
    def __init__(self, datadir, ppi, writeData=False):
        # Initialise data parameters
        self.ppi = ppi
        self.ppi.set_velocity(0, 0)
        self.img = np.zeros([240, 320, 3], dtype=np.uint8)
        self.aruco_img = np.zeros([240, 320, 3], dtype=np.uint8)
        self.previous_time = -1

        # Set up subsystems
        camera_matrix, dist_coeffs, scale, baseline = self.getCalibParams(
            datadir)

        # Control subsystem
        self.keyboard = Keyboard.Keyboard(self.ppi)
        # SLAM subsystem
        self.pibot = Robot.Robot(baseline, scale, camera_matrix, dist_coeffs)
        self.aruco_det = aruco.aruco_detector(self.pibot, marker_length=0.07)
        self.slam = Slam.Slam(self.pibot)

        # Optionally record input data to a dataset
        if writeData:
            self.data = dh.DatasetWriter('test')
        else:
            self.data = None

        self.output = dh.OutputWriter('system_output')
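getCalibParams is not shown in this constructor; a plausible sketch, assuming the calibration directory stores each parameter as a plain-text array (the file names below are guesses, not the project's real ones):

import numpy as np

def getCalibParams(self, datadir):
    # File names and layouts are assumptions; adapt them to your dataset.
    camera_matrix = np.loadtxt(datadir + "camera_matrix.txt", delimiter=',')  # 3x3 intrinsics
    dist_coeffs = np.loadtxt(datadir + "dist_coeffs.txt", delimiter=',')      # lens distortion
    scale = np.loadtxt(datadir + "scale.txt", delimiter=',')                  # metres per wheel tick
    baseline = np.loadtxt(datadir + "baseline.txt", delimiter=',')            # wheel separation (m)
    return camera_matrix, dist_coeffs, scale, baseline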
Example 3
    def __init__(self, datadir, ppi):
        # Initialise
        self.ppi = ppi
        self.ppi.set_velocity(0, 0)
        self.img = np.zeros([240, 320, 3], dtype=np.uint8)
        self.aruco_img = np.zeros([240, 320, 3], dtype=np.uint8)

        # Keyboard teleoperation components

        # Get camera / wheel calibration info for SLAM
        camera_matrix, dist_coeffs, scale, baseline = self.getCalibParams(
            datadir)

        # SLAM components
        self.pibot = Robot.Robot(baseline, scale, camera_matrix, dist_coeffs)
        self.aruco_det = aruco.aruco_detector(self.pibot, marker_length=0.1)
        self.slam = Slam.Slam(self.pibot)
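Compared with Example 2, this variant drops the keyboard teleoperation and dataset-writing pieces (the stray "Keyboard teleoperation components" comment is a leftover of that) and uses larger 0.1 m ArUco markers.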
Example 4
    def setUp(self):
        self.world = sim_framework.World()

        # Walls for RANSAC.
        self.world.add_obs(sim_framework.Wall(-15, '-x'))
        self.world.add_obs(sim_framework.Wall(15, '+x'))
        self.world.add_obs(sim_framework.Wall(-15, '-y'))
        self.world.add_obs(sim_framework.Wall(15, '+y'))

        # Small box for Spikes.
        self.world.add_obs(sim_framework.Box(3, 3.2, -3.2, -3))

        self.bot = sim_framework.CircleBot(1, 0, 0, 0)
        self.world.add_ent(self.bot)

        self.bot_control = sim_framework.SimBotControl(self.world, self.bot)

        self.slam = Slam(self.bot_control)
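The obstacles map onto the two landmark extractors named in the comments: RANSAC fits the long straight walls, while the spike heuristic picks out small isolated returns such as the box. A bare-bones RANSAC line fit over 2-D scan points, as a sketch of the first idea (not this Slam class's actual code):

import math
import random

def ransac_line(points, iters=200, tol=0.05, min_inliers=20):
    # Fit one line ax + by + c = 0 to 2-D points; return None if unsupported.
    best_line, best_count = None, 0
    for _ in range(iters):
        (px, py), (qx, qy) = random.sample(points, 2)
        a, b = qy - py, px - qx              # normal of the line through the sampled pair
        norm = math.hypot(a, b)
        if norm == 0:
            continue
        a, b = a / norm, b / norm
        c = -(a * px + b * py)
        count = sum(abs(a * x + b * y + c) < tol for x, y in points)
        if count > best_count:
            best_line, best_count = (a, b, c), count
    return best_line if best_count >= min_inliers else None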
Example 5
    def __init__(self):
        # robot parameters (Dutch identifiers: omtrek_wiel = wheel circumference,
        # afstand_wielen = wheel spacing, correctiefactor = correction factor,
        # scan_hoek = scan angle step)
        self.omtrek_wiel = 0.08 * np.pi
        self.afstand_wielen = 0.26
        self.correctiefactor = 0.95
        self.scan_hoek = 365 / 4 / 60
        self.Robot_parts = [
            'linkermotor', 'rechtermotor', 'Laser_Sensor', 'Laser_Sensor0',
            'Laser_Sensor1', 'Laser_Sensor2', 'Lidar_Motor', 'Dummy'
        ]
        self.motors = ['linkermotor', 'rechtermotor']
        self.sensors = [
            'Laser_Sensor', 'Laser_Sensor0', 'Laser_Sensor1', 'Laser_Sensor2'
        ]
        self.rejection_distance = 0.2

        # initialise SLAM
        self.ObjSlam = Slam()

        # startup parameters
        self.modus = 0  # mode: 0 = scan in a circle, 1 = drive a short stretch
        self.ticks = 0
        self.locatie = [0, 0, 0]  # locatie = location
        self.distance = {
            'Laser_Sensor': [],
            'Laser_Sensor0': [],
            'Laser_Sensor1': [],
            'Laser_Sensor2': []
        }
        self.map_landmark = []
        self.geschiedenis_locatie_x = []  # location history, x values
        self.geschiedenis_locatie_y = []  # location history, y values
        self.slam_position = [0, 0, 0]
        self.real_position = [0, 0]

        self.clientID = make_connection()
        self.handle = get_handle(self.clientID, self.Robot_parts)
        self.old_motor_position = position_motor(self.handle, self.motors,
                                                 self.clientID)
        vrep.simxSynchronousTrigger(self.clientID)
        plt.ion()
        plt.show()
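make_connection, get_handle and position_motor come from elsewhere in this project; a sketch of what they plausibly wrap, written against the legacy V-REP remote API (the port, host and option values are assumptions):

import vrep  # legacy V-REP/CoppeliaSim remote API bindings

def make_connection():
    vrep.simxFinish(-1)  # close any stale connections first
    client_id = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    if client_id == -1:
        raise RuntimeError('could not reach the V-REP remote API server')
    vrep.simxSynchronous(client_id, True)  # run the simulation in lock-step
    return client_id

def get_handle(client_id, part_names):
    # map each scene object name to its remote-API handle
    return {name: vrep.simxGetObjectHandle(client_id, name,
                                           vrep.simx_opmode_blocking)[1]
            for name in part_names}

def position_motor(handle, motors, client_id):
    # current joint angle of each drive motor
    return {m: vrep.simxGetJointPosition(client_id, handle[m],
                                         vrep.simx_opmode_blocking)[1]
            for m in motors}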
Example 6
tracker_type = FeatureTrackerTypes.DES_BF  # descriptor-based, brute force matching with knn
#tracker_type = FeatureTrackerTypes.DES_FLANN  # descriptor-based, FLANN-based matching

# select your tracker configuration (see the file feature_tracker_configs.py)
tracker_config = FeatureTrackerConfigs.TEST
tracker_config['num_features'] = num_features
tracker_config['match_ratio_test'] = 0.8  # 0.7 is the default
tracker_config['tracker_type'] = tracker_type

feature_tracker = feature_tracker_factory(**tracker_config)

#============================================
# create SLAM object
#============================================
slam = Slam(cam, feature_tracker)  #, groundtruth)

timer = Timer()

#============================================

# N.B.: keep this coherent with the above forced camera settings
img_ref = cv2.imread('../data/kitti06-12.png', cv2.IMREAD_COLOR)
#img_cur = cv2.imread('../data/kitti06-12-01.png',cv2.IMREAD_COLOR)
img_cur = cv2.imread('../data/kitti06-13.png', cv2.IMREAD_COLOR)

slam.track(img_ref, frame_id=0)
slam.track(img_cur, frame_id=1)

f_ref = slam.map.get_frame(-2)
f_cur = slam.map.get_frame(-1)
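Setting match_ratio_test to 0.8 loosens Lowe's ratio test from its 0.7 default, keeping more (and noisier) matches. For reference, the same test written out with plain OpenCV, as an illustration of the idea rather than pySLAM's internals:

import cv2

orb = cv2.ORB_create(nfeatures=2000)
kp_ref, des_ref = orb.detectAndCompute(img_ref, None)
kp_cur, des_cur = orb.detectAndCompute(img_cur, None)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

good = []
for pair in matcher.knnMatch(des_ref, des_cur, k=2):
    if len(pair) == 2 and pair[0].distance < 0.8 * pair[1].distance:
        good.append(pair[0])  # keep it only if the best match clearly beats the runner-up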
Example 7
    num_features = 2000

    tracker_type = FeatureTrackerTypes.DES_BF  # descriptor-based, brute force matching with knn
    #tracker_type = FeatureTrackerTypes.DES_FLANN  # descriptor-based, FLANN-based matching

    # select your tracker configuration (see the file feature_tracker_configs.py)
    # FeatureTrackerConfigs: SHI_TOMASI_ORB, FAST_ORB, ORB, ORB2, ORB2_FREAK, ORB2_BEBLID, BRISK, AKAZE, FAST_FREAK, SIFT, ROOT_SIFT, SURF, SUPERPOINT, FAST_TFEAT, CONTEXTDESC
    tracker_config = FeatureTrackerConfigs.TEST
    tracker_config['num_features'] = num_features
    tracker_config['tracker_type'] = tracker_type

    print('tracker_config: ', tracker_config)
    feature_tracker = feature_tracker_factory(**tracker_config)

    # create SLAM object
    slam = Slam(cam, feature_tracker, groundtruth)
    time.sleep(1)  # to show initial messages

    viewer3D = Viewer3D()

    if platform.system() == 'Linux':
        display2d = Display2D(cam.width, cam.height)  # pygame interface
    else:
        display2d = None  # enable this if you want to use opencv window

    matched_points_plt = Mplot2d(xlabel='img id',
                                 ylabel='# matches',
                                 title='# matches')

    do_step = False
    is_paused = False
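The do_step and is_paused flags typically gate the main loop that follows in scripts like this; a generic skeleton of the pattern (the frame source is a placeholder, not pySLAM's actual loop):

frame_id = 0
while True:
    if not is_paused or do_step:
        img = get_next_frame()        # placeholder for the dataset or camera feed
        slam.track(img, frame_id=frame_id)
        frame_id += 1
        do_step = False               # single-step mode consumes exactly one frame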
Example 8
# import setup_path
import airsim
from slam import Slam
import assistant
from clients import Clients
import numpy as np


tmp_dir = "./img"
assistant.create_directory(tmp_dir)

clients = Clients()
slam = Slam()

airsim.wait_key('Press any key to takeoff')

slam.calibrate_camera(clients.client_2)
clients.takeoff()

assistant.print_state(clients.client_1)

clients.move_and_take_photos()
slam.append(np.array(clients.queue.queue))
#print(slam.images)

#print(slam.images.shape)
slam.estimate_trajectory_dataset()
slam.plot_trajectory()


clients.end()
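Clients and the SLAM helpers here are project-specific, but the underlying AirSim calls a takeoff wrapper would make look roughly like this (a sketch against the public airsim Python API; the wrapper's real internals may differ):

import airsim

client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
client.takeoffAsync().join()  # block until the takeoff completes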