Code example #1
File: ekf_slam.py Project: wecacuee/articulated-slam
 def __init__(self, K, max_depth=4):
     # (z_view) The real-data K assumes Z along the viewing direction, Y-axis downwards
     # (x_view) The simulated-data format assumes X along the viewing direction, Z-axis upwards
     R_x_view_2_z_view = rodrigues([0, 1, 0], -np.pi / 2).dot(
         rodrigues([1, 0, 0], np.pi / 2))
     K_x_view = K.dot(R_x_view_2_z_view)
     self.camera_K_x_view = K_x_view
     self._first_gt_pose = None
     self._all_ldmks = np.zeros((2, 0))
     self._max_depth = max_depth
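The rodrigues helper that these snippets call everywhere is not part of the listing. A minimal sketch, assuming it implements the standard Rodrigues axis-angle formula (the name and signature are inferred from the call sites above):

import numpy as np

def rodrigues(axis, angle):
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2, where K is
    # the skew-symmetric cross-product matrix of the unit rotation axis.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * K.dot(K)

With this definition, rodrigues([0, 0, 1], theta) reproduces the planar rotation matrix that appears commented out in code example #2.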
Code example #2
def robot_to_world(robot_state, gen_obv, SIM):
    # Robot state consists of (x, y, \theta) where \theta is the heading angle
    x = robot_state[0]; y = robot_state[1]; theta = robot_state[2]

    # v2.0 Using inverse rotation matrix to retrieve landmark world frame coordinates
    #R = np.array([[np.cos(theta), -np.sin(theta),0],
    #            [np.sin(theta), np.cos(theta),0],
    #            [0,0,1]])
    if not SIM:
        R = rodrigues([0,0,1],theta)
    else:
        R = rodrigues([0,0,1],theta).T

    # v2.0 
    # State is (x,y,0) since we assume robot is on the ground
    return R.dot(gen_obv) + np.array([x, y, 0.0])
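A quick usage sketch with made-up numbers: a landmark observed 1 m straight ahead of a robot standing at (1, 2) with heading pi/2 should land at roughly (1, 3) in the world frame.

import numpy as np

robot_state = [1.0, 2.0, np.pi / 2]   # hypothetical (x, y, theta)
gen_obv = np.array([1.0, 0.0, 0.0])   # observation in the robot frame
print robot_to_world(robot_state, gen_obv, SIM=False)
# -> approximately [1., 3., 0.]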
Code example #3
File: ekf_slam.py Project: wecacuee/articulated-slam
 def prepare_ldmks_all_world(self, theta, ldmk_robot_obs, xy, track_ids):
     # prepare ldmks
     R_c2w = rodrigues([0, 0, 1], theta)
     ldmks_world = R_c2w.dot(ldmk_robot_obs) \
             + np.asarray([xy[0], xy[1], 0]).reshape(-1, 1)
     max_track_id = np.maximum(np.max(track_ids), self._all_ldmks.shape[1])
     all_ldmks = np.zeros((ldmks_world.shape[0], max_track_id + 1))
     if (self._all_ldmks.shape[1]):
         all_ldmks[:, :self._all_ldmks.shape[1]] = self._all_ldmks
     all_ldmks[:, track_ids] = ldmks_world
     self._all_ldmks = all_ldmks
     return self._all_ldmks
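The bookkeeping in this method can be exercised on its own. A toy sketch (values are arbitrary) of how the landmark store grows to cover the largest track id seen so far, overwriting the columns of re-observed tracks:

import numpy as np

all_ldmks = np.zeros((3, 0))
for track_ids, ldmks_world in [([0, 2], np.ones((3, 2))),
                               ([1, 4], 2.0 * np.ones((3, 2)))]:
    max_track_id = np.maximum(np.max(track_ids), all_ldmks.shape[1])
    grown = np.zeros((3, max_track_id + 1))
    grown[:, :all_ldmks.shape[1]] = all_ldmks   # keep previous landmarks
    grown[:, track_ids] = ldmks_world           # overwrite observed tracks
    all_ldmks = grown
print all_ldmks.shape   # (3, 5): one column per track id 0..4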
Code example #4
def articulated_slam(debug_inp=True):
    # Writing to file variables
    f_gt = open('gt.txt', 'w')
    f_slam = open('slam.txt', 'w')
    img_shape = (240, 320)
    f = 300
    camera_K_z_view = np.array([[f, 0, img_shape[1] / 2.],
                                [0, f, img_shape[0] / 2.], [0, 0, 1]])

    # Motion probability threshold
    m_thresh = 0.40  # Choose the articulation model whose probability exceeds this threshold

    ldmk_estimater = dict()
    # id -> mm.Estimate_Mm()
    ldmk_am = dict()
    # id->am Id here maps to the chosen Articulated model for the landmark
    ekf_map_id = dict()
    # Formulating consistent EKF mean and covariance vectors

    rev_color, pris_color, stat_color = [
        np.array(l) for l in ([255, 0, 0], [0, 255, 0], [0, 0, 255])
    ]

    # Handle simulated and real data , setup required variables
    motion_model = 'nonholonomic'
    art = ARtag()
    rob_obs_iter = art.processData()
    chk_F = False

    lmvis = landmarkmap.LandmarksVisualizer([0, 0], [7, 7],
                                            frame_period=10,
                                            imgshape=(700, 700))

    # EKF parameters for filtering
    first_traj_pt = dict()
    # Initially we only have the robot state
    (init_timestamp, _, rob_state_and_input, _, _) = rob_obs_iter.next()
    rob_state_and_input = rob_state_and_input[0]

    model = dict()
    slam_state = np.array(
        rob_state_and_input[:3])  # \mu_{t} state at current time step

    # Covariance following discussion on page 317
    # Assuming that position of the robot is exactly known
    slam_cov = np.diag(np.ones(
        slam_state.shape[0]))  # covariance at current time step
    ld_ids = [
    ]  # Landmark ids which will be used for EKF motion propagation step
    index_set = [
        slam_state.shape[0]
    ]  # To update only the appropriate indices of state and covariance
    # Observation noise
    Q_obs = np.array([[5.0, 0, 0], [0, 5.0, 0], [0, 0, 5.0]])
    #For plotting
    obs_num = 0
    # For error estimation in robot localization
    true_robot_states = []
    slam_robot_states = []
    ldmktracks = dict()

    # Plotting variables for simulated data
    if PLOTSIM:
        prob_plot1 = []
        prob_plot2 = []
        prob_plot3 = []
        traj_ldmk1 = []
        obs_ldmk1 = []

    # Processing all the observations
    for fidx, (timestamp, ids, rob_state_and_input, ldmks,
               ldmk_robot_obs) in enumerate(rob_obs_iter):

        # Frame-skipping hook to gather more data between model fits
        # (disabled: with a modulus of 1 every frame is processed)
        if fidx % 1 != 0:
            continue
        if len(ids) != 0:
            rob_state_and_input = rob_state_and_input[0]
        if len(ldmk_robot_obs) != 0:
            ldmk_robot_obs = ldmk_robot_obs[0]
        #if fidx < 200:
        #    continue
        dt = timestamp[0] - init_timestamp[0]
        init_timestamp = timestamp

        rob_state = rob_state_and_input[:3]
        robot_input = rob_state_and_input[3:]
        print '+++++++++++++ fidx = %d +++++++++++' % fidx
        print 'Robot true state and inputs:', rob_state, robot_input
        print 'Observations size:', ldmk_robot_obs.shape

        # Following EKF steps now

        # First step is propagate : both robot state and motion parameter of any active landmark
        slam_state[0:3], slam_cov[0:3, 0:3] = robot_motion_prop(
            slam_state[0:3],
            slam_cov[0:3, 0:3],
            robot_input,
            dt,
            motion_model=motion_model)
        # Active here means the landmark for which an articulation model has been associated
        if len(ld_ids) > 0:
            '''
            Step 2: When any landmark's motion model is estimated, we start doing EKF SLAM with 
            state as robot's pose and motion parameter of each of the landmarks currently estimated
            Here we propagate motion parameters of each model
            '''
            for (ld_id, start_ind, end_ind) in zip(ld_ids, index_set[:-1],
                                                   index_set[1:]):
                # Landmark with ld_id to propagate itself from the last state
                slam_state[start_ind:end_ind] = ldmk_am[ld_id].predict_motion_pars(\
                        slam_state[start_ind:end_ind])
                # Propagate the covariance of the landmark's motion parameters

                slam_cov[
                    start_ind:end_ind,
                    start_ind:end_ind] = ldmk_am[ld_id].prop_motion_par_cov(
                        slam_cov[start_ind:end_ind, start_ind:end_ind])
            # end of loop over ekf propagation

        # end of if

        colors = []
        mm_probs = []
        # Collecting all the predictions made by the landmark
        ld_preds = []
        ld_ids_preds = []
        ids_list = []
        lk_pred = None

        # Processing all the observations
        # v2.0
        if len(ids[0]) != 0:

            for id, ldmk_rob_obv in zip(ids, np.dstack(ldmk_robot_obs)[0]):
                id = id[0]
                if id not in first_traj_pt:

                    #first_traj_pt[id]=ldmk_rob_obv
                    Rc2w = rodrigues([0, 0, 1], slam_state[2])
                    first_traj_pt[id] = Rc2w.T.dot(ldmk_rob_obv) + np.asarray(
                        [slam_state[0], slam_state[1], 0.0])
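                    # First world-frame position of this track; passed later to
                    # process_inp_data() and observation_jac() as a reference point.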

                motion_class = ldmk_estimater.setdefault(id, mm.Estimate_Mm())
                # For storing the chosen articulated model
                chosen_am = ldmk_am.setdefault(id, None)
                '''
                Step 1: Process Observations to first determine the motion model of each landmark
                During this step we will use robot's internal odometry to get the best position of 
                external landmark as we can and try to get enough data to estimate landmark's motion
                model. 
                '''
                if PLOTSIM == 1 and id == 1:
                    obs_ldmk1.append(ldmk_rob_obv.copy())

                # For each landmark id, we want to check if the motion model has been estimated
                if ldmk_am[id] is None:

                    motion_class.process_inp_data([0, 0], slam_state[:3],
                                                  ldmk_rob_obv,
                                                  first_traj_pt[id])
                    # Still need to estimate the motion class
                    # Check if the model is estimated
                    print motion_class.prior
                    if sum(motion_class.prior > m_thresh) > 0:
                        model[id] = np.where(motion_class.prior > m_thresh)[0]
                        print("INFO: Model estimated %s " %
                              models_names[int(model[id])])
                        print id
                        ldmk_am[id] = motion_class.am[int(model[id])]
                        ld_ids.append(id)
                        curr_ld_state = ldmk_am[id].current_state()
                        curr_ld_cov = ldmk_am[id].current_cov()
                        index_set.append(index_set[-1] +
                                         curr_ld_state.shape[0])
                        # Extend Robot state by adding the motion parameter of the landmark
                        slam_state = np.append(slam_state, curr_ld_state)
                        # Extend robot covariance by adding the uncertainty of
                        # the new landmark's motion parameters
                        slam_cov = scipy.linalg.block_diag(
                            slam_cov, curr_ld_cov)
                else:
                    # This means this landmark is an actual observation that must be used for filtering
                    # the robot state as well as the motion parameter associated with the observed landmark
                    # Getting the predicted observation from the landmark articulated motion
                    # Getting the motion parameters associated with this landmark
                    curr_ind = ld_ids.index(id)
                    # Following steps from Table 10.2 from book Probabilistic Robotics
                    lk_pred = ldmk_am[id].predict_model(
                        slam_state[index_set[curr_ind]:index_set[curr_ind +
                                                                 1]])
                    print "Obs: ", ldmk_rob_obv
                    print "Pred: ", lk_pred
                    if PLOTSIM and id == 1:
                        traj_ldmk1.append(lk_pred.copy())

                    ld_preds.append(lk_pred)
                    ld_ids_preds.append(curr_ind)

                    # v2.0
                    R_temp = rodrigues([0, 0, 1], slam_state[2]).T
                    pos_list = np.ndarray.tolist(slam_state[0:2])
                    pos_list.append(0.0)
                    # To match R_w2c matrix
                    z_pred = R_temp.dot(lk_pred - np.array(pos_list))

                    H_mat = np.zeros((3, index_set[-1]))
                    curr_obs = ldmk_rob_obv
                    theta = slam_state[2]
                    H_mat[0, 0:3] = np.array([
                        -np.cos(theta), -np.sin(theta),
                        -np.sin(theta) * curr_obs[0] +
                        np.cos(theta) * curr_obs[1]
                    ])
                    H_mat[1, 0:3] = np.array([
                        np.sin(theta), -np.cos(theta),
                        (-np.cos(theta) * curr_obs[0]) -
                        (np.sin(theta) * curr_obs[1])
                    ])
                    H_mat[2, 0:3] = np.array([0, 0, 0])

                    H_mat[:, index_set[curr_ind]:index_set[
                        curr_ind + 1]] = np.dot(
                            R_temp, ldmk_am[id].observation_jac(
                                slam_state[
                                    index_set[curr_ind]:index_set[curr_ind +
                                                                  1]],
                                first_traj_pt[id]))

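                    # Standard EKF update (Probabilistic Robotics, Table 10.2):
                    #   S = H P H^T + Q_obs,  K = P H^T S^{-1},
                    #   x <- x + K (z - z_pred),  P <- (I - K H) P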
                    # Innovation covariance
                    inno_cov = np.dot(H_mat, np.dot(slam_cov, H_mat.T)) + Q_obs
                    # Kalman Gain
                    K_mat = np.dot(np.dot(slam_cov, H_mat.T),
                                   np.linalg.inv(inno_cov))

                    # Updating SLAM state
                    # v2.0
                    slam_state = slam_state + np.hstack(
                        (np.dot(K_mat, (np.vstack((ldmk_rob_obv - z_pred))))))
                    #print slam_state[0:3],id
                    # Updating SLAM covariance
                    slam_cov = np.dot(
                        np.identity(slam_cov.shape[0]) - np.dot(K_mat, H_mat),
                        slam_cov)
                # end of if else ldmk_am[id]

                if PLOTSIM:
                    # Assuming single landmark exists
                    if id == 1:
                        prob_plot1.append(motion_class.prior.copy())

                mm_probs.append(motion_class.prior.copy())

                motion_class = ldmk_estimater[id]
                p1, p2, p3 = motion_class.prior[:3]
                color = np.int64(
                    (p1 * rev_color + p2 * pris_color + p3 * stat_color))
                color = color - np.min(color)
                colors.append(color)
                ids_list.append(id)

        # end of loop over observations in single frame

        #if PLOTSIM:
        #    if 1 in ldmk_am.keys() and ldmk_am[1] is not None and not chk_F:
        #        import pdb; pdb.set_trace()
        #        motion_class1 = ldmk_estimater.setdefault(1, mm.Estimate_Mm())
        #        #lk_pred = ldmk_am[1].predict_model(slam_state[index_set[ld_ids.index(1)]:index_set[ld_ids.index(1)]+1])
        #        lk_pred = ldmk_am[1].predict_model(motion_class1.means[int(model[1])])
        #        traj_ldmk1.append(lk_pred.copy())
        #chk_F = False

        # Follow all the steps on
        #print "SLAM State for robot and landmarks is",slam_state
        #obs_num = obs_num+1
        #print 'motion_class.priors', mm_probs
        #print 'ids:', ids_list
        #print 'colors:', colors
        ##up.slam_cov_plot(slam_state,slam_cov,obs_num,rob_state,ld_preds,ld_ids_preds)
        #visualize_ldmks_robot_cov(lmvis, ldmks, robview, slam_state[:2],
        #                          slam_cov[:2, :2], colors,obs_num)

        #for i, id in enumerate(ids):
        #    ldmktracks.setdefault(id, []).append(
        #        TrackedLdmk(timestamp, ldmk_robot_obs[:, i:i+1]))

        ##Handle new robot view code appropriately
        #robview.set_robot_pos_theta((rob_state[0], rob_state[1], 0),
        #                            rob_state[2])
        #assert ldmk_robot_obs.shape[1] == len(colors), '%d <=> %d' % (
        #    ldmk_robot_obs.shape[1], len(colors))

        ##Img = lmvis.genframe(ldmks, ldmk_robot_obs=ldmk_robot_obs, robview = robview,colors=colors,SIMULATEDDATA=SIMULATEDDATA)
        ##Imgr = lmvis.drawrobot(robview, img)
        ##Imgrv = robview.drawtracks([ldmktracks[id] for id in ids],
        ##                           imgidx=robview.imgidx_by_timestamp(timestamp),
        ##                           colors=colors)

        ##if not SIMULATEDDATA:

        ##    for id in ld_ids:
        ##        if model[id] == 0:
        ##            config_pars = ldmk_am[id].config_pars
        ##            vec1 = config_pars['vec1']
        ##            vec2 = config_pars['vec2']
        ##            center = config_pars['center']
        ##            center3D = center[0]*vec1 + center[1]*vec2 + ldmk_am[id].plane_point
        ##            axis_vec = np.cross(vec1, vec2)
        ##            radius = config_pars['radius']
        ##            imgrv = robview.drawrevaxis(imgrv, center3D, axis_vec, radius, rev_color)
        ##            imgr = lmvis.drawrevaxis(imgr, center3D, axis_vec, radius,
        ##                                     rev_color)

        ##robview.visualize(imgrv)
        ##lmvis.imshow_and_wait(imgr)
        #

        quat_true = Rtoquat(rodrigues([0, 0, 1], rob_state[2]))
        quat_slam = Rtoquat(rodrigues([0, 0, 1], slam_state[2]))
        if fidx < 1000:
            f_gt.write(
                str(fidx + 1) + " " + str(rob_state[0]) + " " +
                str(rob_state[1]) + " " + str(0) + " " + str(quat_true[0]) +
                " " + str(quat_true[1]) + " " + str(quat_true[2]) + " " +
                str(quat_true[3]) + " " + "\n")
            f_slam.write(
                str(fidx + 1) + " " + str(slam_state[0]) + " " +
                str(slam_state[1]) + " " + str(0) + " " + str(quat_slam[0]) +
                " " + str(quat_slam[1]) + " " + str(quat_slam[2]) + " " +
                str(quat_slam[3]) + " " + "\n")
        true_robot_states.append(rob_state)
        slam_robot_states.append(slam_state[0:3].tolist())

        #obs_num = obs_num + 1
        print 'SLAM state:', slam_state[0:3]
    # end of loop over frames

    if PLOTSIM:
        # Plot experimental results
        plot_sim_res(PLOTSIM, prob_plot1, traj_ldmk1, obs_ldmk1,
                     true_robot_states, slam_robot_states, ldmk_am[1], model)

    # Debugging
    if debug_inp is True:
        # Going over all the landmarks
        for ldmk_id, ldmk in ldmk_estimater.iteritems():
            if ldmk.num_data > ldmk.min_samples:
                # This landmark has at least enough observations for estimating motion parameters
                if ldmk_am[ldmk_id] is None:
                    print "Could not estimate model for landmark ", ldmk_id,\
                            "with ", ldmk.num_data, " samples"
    f_gt.close()
    f_slam.close()
    return (true_robot_states, slam_robot_states)
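Rtoquat, used in the file-writing block above, is also external to the listing. A minimal sketch, assuming it returns (qx, qy, qz, qw) so that each line of gt.txt/slam.txt follows the TUM trajectory convention (index tx ty tz qx qy qz qw):

import numpy as np

def Rtoquat(R):
    # Rotation matrix -> quaternion; valid while 1 + trace(R) > 0, which
    # holds for the pure z-axis rotations used here (except theta = pi).
    qw = np.sqrt(1.0 + R[0, 0] + R[1, 1] + R[2, 2]) / 2.0
    qx = (R[2, 1] - R[1, 2]) / (4.0 * qw)
    qy = (R[0, 2] - R[2, 0]) / (4.0 * qw)
    qz = (R[1, 0] - R[0, 1]) / (4.0 * qw)
    return (qx, qy, qz, qw)

For R = rodrigues([0, 0, 1], theta) this evaluates to (0, 0, sin(theta/2), cos(theta/2)).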
Code example #5
File: ekf_slam.py Project: wecacuee/articulated-slam
def slam(debug_inp=True):
    # Writing to file
    f_gt = open('gt_orig.txt', 'w')
    f_slam = open('slam_orig.txt', 'w')
    f = 300
    img_shape = (240, 320)
    camera_K_z_view = np.array([[f, 0, img_shape[1] / 2],
                                [0, f, img_shape[0] / 2], [0, 0, 1]])
    # Getting the map
    #nframes, lmmap, lmvis, robtraj, maxangle, maxdist = threeptmap3d()
    nframes, lmmap, robtraj, maxangle, maxdist = threeptmap3d()

    ldmk_am = dict()
    # To keep a tab on the landmarks that have been previously seen

    rev_color, pris_color, stat_color = [
        np.array(l) for l in ([255, 0, 0], [0, 255, 0], [0, 0, 255])
    ]
    # to get the landmarks with ids that are being seen by robot
    if SIMULATEDDATA:
        rob_obs_iter = landmarkmap.get_robot_observations(lmmap,
                                                          robtraj,
                                                          maxangle,
                                                          maxdist,
                                                          img_shape,
                                                          camera_K_z_view,
                                                          lmvis=None)
        motion_model = 'nonholonomic'
        csv = np.genfromtxt('expt_noise.csv', delimiter=',')
        count = 0
        robview = landmarkmap.RobotView(img_shape, camera_K_z_view, maxdist)
    else:
        timeseries_data_file = "../mid/articulatedslam/2016-01-22/rev_2016-01-22-13-56-28/extracttrajectories_GFTT_SIFT_odom_gt_timeseries.txt"
        bag_file = "../mid/articulatedslam/2016-01-22/rev2_2016-01-22-14-32-13.bag"
        timeseries_dir = os.path.dirname(timeseries_data_file)
        image_file_fmt = os.path.join(timeseries_dir, "img/frame%04d.png")
        timestamps_file = os.path.join(timeseries_dir, 'img/timestamps.txt')

        robview = landmarkmap.RobotView(img_shape,
                                        camera_K_z_view,
                                        maxdist,
                                        image_file_format=image_file_fmt,
                                        timestamps_file=timestamps_file)
        use_bag = False
        if use_bag:
            data_iter = feature_odom_gt_pose_iter_from_bag(
                bag_file, "GFTT", "SIFT")
        else:
            data_iter = get_timeseries_data_iter(timeseries_data_file)

        VISDEBUG = 0
        if VISDEBUG:
            plotables = dict()
            for i, (ts, (features, odom, gt_pose)) in enumerate(data_iter):
                if i % 100 != 0:
                    continue
                (pos, quat), (linvel, angvel) = odom
                xyz, abg = gt_pose
                plotables.setdefault('linvel[0]', []).append(linvel[0])
                plotables.setdefault('linvel[1]', []).append(linvel[1])
                plotables.setdefault('linvel[2]', []).append(linvel[2])
                plotables.setdefault('angvel[0]', []).append(angvel[0])
                plotables.setdefault('angvel[1]', []).append(angvel[1])
                plotables.setdefault('angvel[2]', []).append(angvel[2])
                plotables.setdefault('xyz[0]', []).append(xyz[0])
                plotables.setdefault('xyz[1]', []).append(xyz[1])
                plotables.setdefault('xyz[2]', []).append(xyz[2])
                plotables.setdefault('abg[0]', []).append(abg[0])
                plotables.setdefault('abg[1]', []).append(abg[1])
                plotables.setdefault('abg[2]', []).append(abg[2])

            for (k, v), m in zip(plotables.items(), ',.1234_x|^vo'):
                plt.plot(v, label=k, marker=m)
            plt.legend(loc='best')
            plt.show()
            sys.exit(0)

        formatter = RealDataToSimulatedFormat(camera_K_z_view)
        rob_obs_iter_all = imap(formatter.real_data_to_required_format,
                                data_iter)
        filter_by_length = FilterTrackByLength()
        filter_by_length.count(islice(rob_obs_iter_all, 500))

        if use_bag:
            data_iter = feature_odom_gt_pose_iter_from_bag(
                bag_file, "GFTT", "SIFT")
        else:
            data_iter = get_timeseries_data_iter(timeseries_data_file)

        formatter = RealDataToSimulatedFormat(camera_K_z_view)
        rob_obs_iter_all = imap(formatter.real_data_to_required_format,
                                data_iter)
        rob_obs_iter = filter_by_length.filter(islice(rob_obs_iter_all, 500))

        motion_model = 'holonomic'

    lmvis = landmarkmap.LandmarksVisualizer([0, 0], [7, 7],
                                            frame_period=10,
                                            imgshape=(700, 700))

    #rob_obs_iter = list(rob_obs_iter)
    first_traj_pt = dict()
    #frame_period = lmvis.frame_period
    # EKF parameters for filtering

    # Initially we only have the robot state
    (init_timestamp, _, rob_state_and_input, LDD, _) = rob_obs_iter.next()
    model = dict()
    slam_state = np.array(
        rob_state_and_input[:3])  # \mu_{t} state at current time step
    # Covariance following discussion on page 317
    # Assuming that position of the robot is exactly known
    slam_cov = np.diag(np.ones(
        slam_state.shape[0]))  # covariance at current time step
    ld_ids = [
    ]  # Landmark ids which will be used for EKF motion propagation step
    index_set = [
        slam_state.shape[0]
    ]  # To update only the appropriate indices of state and covariance
    # Observation noise
    Q_obs = np.array([[5.0, 0, 0], [0, 5.0, 0], [0, 0, 5.0]])
    # For plotting
    obs_num = 0
    # Initial covariance for landmarks (x,y) position
    initial_cov = np.array([[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])
    # For error estimation in robot localization
    true_robot_states = []
    slam_robot_states = []
    ldmktracks = dict()

    if PLOTSIM:
        prob_plot1 = []
        prob_plot2 = []
        prob_plot3 = []
        traj_ldmk1 = []
        traj_ldmk2 = []
        traj_ldmk3 = []
    # Processing all the observations
    # We need to skip the first observation because it was used to initialize the SLAM state
    for fidx, (timestamp, ids, rob_state_and_input, ldmks,
               ldmk_robot_obs) in enumerate(rob_obs_iter):
        if not SIMULATEDDATA:
            if fidx < 0:
                continue
            dt = timestamp - init_timestamp
            init_timestamp = timestamp
        rob_state = rob_state_and_input[:3]
        robot_input = rob_state_and_input[3:]
        print '+++++++++++++ fidx = %d +++++++++++' % fidx
        print 'Robot true state:', rob_state
        print 'Observations:', ldmk_robot_obs.shape

        #posdir = map(np.array, ([rob_state[0], rob_state[1]],
        #                        [np.cos(rob_state[2]), np.sin(rob_state[2])]))
        #robview = landmarkmap.RobotView(posdir[0], posdir[1], maxangle, maxdist)

        # Following EKF steps now

        # First step is propagate : both robot state and motion parameter of any active landmark
        if not SIMULATEDDATA:
            slam_state[0:3], slam_cov[0:3, 0:3] = robot_motion_prop(
                slam_state[0:3],
                slam_cov[0:3, 0:3],
                robot_input,
                dt * 1e-9,
                motion_model=motion_model)
        else:
            slam_state[0:3], slam_cov[0:3, 0:3] = robot_motion_prop(
                slam_state[0:3], slam_cov[0:3, 0:3], robot_input)

        colors = []
        mm_probs = []
        # Collecting all the predictions made by the landmark
        ids_list = []
        # Processing all the observations
        for id, ldmk_rob_obv in zip(ids, np.dstack(ldmk_robot_obs)[0]):
            # Observation corresponding to current landmark is
            #obs = np.array([r, theta])
            if SIMULATEDDATA:
                # Adding noise to observations
                ldmk_rob_obv = ldmk_rob_obv + csv[count, :]
                count = count + 1

            #if id not in first_traj_pt:
            #    first_traj_pt[id] = robot_to_world(slam_state[0:3],ldmk_rob_obv)
            '''
            Step 1: Process Observations to first determine the motion model of each landmark
            During this step we will use robot's internal odometry to get the best position of 
            external landmark as we can and try to get enough data to estimate landmark's motion
            model. 
            '''
            # Setting default none value for the current landmark observation
            ldmk_am.setdefault(id, None)
            # For each landmark id, we want to check if the landmark has been previously seen
            if ldmk_am[id] is None:
                # Assign a static landmark id
                ldmk_am[id] = 2
                ld_ids.append(id)
                # Getting the current state to be added to the SLAM state (x,y) position of landmark
                curr_ld_state = robot_to_world(slam_state, ldmk_rob_obv,
                                               SIMULATEDDATA)
                curr_ld_cov = initial_cov
                index_set.append(index_set[-1] + curr_ld_state.shape[0])
                # Extend Robot state by adding the motion parameter of the landmark
                slam_state = np.append(slam_state, curr_ld_state)
                # Extend robot covariance by adding the uncertainty of the new
                # landmark's (x, y, z) position
                slam_cov = scipy.linalg.block_diag(slam_cov, curr_ld_cov)
            else:
                # This means this landmark is an actual observation that must be used for filtering
                # the robot state as well as the motion parameter associated with the observed landmark

                # Getting the predicted observation from the landmark articulated motion
                # Getting the motion parameters associated with this landmark
                curr_ind = ld_ids.index(id)
                # Following steps from Table 10.2 from book Probabilistic Robotics
                lk_pred = robot_to_world(slam_state[0:3], ldmk_rob_obv,
                                         SIMULATEDDATA)

                if not SIMULATEDDATA:
                    R_temp = rodrigues([0, 0, 1], slam_state[2]).T
                else:
                    R_temp = rodrigues([0, 0, 1], slam_state[2])
                #R_temp = np.array([[np.cos(slam_state[2]), -np.sin(slam_state[2]),0],
                #         [np.sin(slam_state[2]), np.cos(slam_state[2]),0],
                #         [0,0,1]])

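                # z_pred rotates the stored world-frame landmark estimate into
                # the robot/camera frame; the SIMULATEDDATA branch flips the
                # rotation convention, mirroring robot_to_world() above.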
                z_pred = R_temp.dot(
                    slam_state[index_set[curr_ind]:index_set[curr_ind + 1]] -
                    np.append(slam_state[0:2], [0]))

                #diff_vec = lk_pred-slam_state[0:2]
                #q_val = np.dot(diff_vec,diff_vec)
                #z_pred = np.array([np.sqrt(q_val),np.arctan2(diff_vec[1],diff_vec[0])-slam_state[2]])
                # Getting the jacobian matrix
                H_mat = np.zeros((3, index_set[-1]))
                # w.r.t robot state
                curr_obs = ldmk_rob_obv
                theta = slam_state[2]
                H_mat[0, 0:3] = np.array([
                    -np.cos(theta), -np.sin(theta),
                    -np.sin(theta) * curr_obs[0] + np.cos(theta) * curr_obs[1]
                ])
                H_mat[1, 0:3] = np.array([
                    np.sin(theta), -np.cos(theta),
                    (-np.cos(theta) * curr_obs[0]) -
                    (np.sin(theta) * curr_obs[1])
                ])
                H_mat[2, 0:3] = np.array([0, 0, 0])
                # z_pred is linear in the landmark's world position, so the
                # landmark block of the Jacobian is the rotation matrix itself
                H_mat[:, index_set[curr_ind]:index_set[curr_ind + 1]] = R_temp

                # w.r.t landmark associated states
                # Differentiation w.r.t landmark x and y first
                #H_mat[:,index_set[curr_ind]:index_set[curr_ind+1]] = (1.0/q_val)*np.array(\
                #        [[np.sqrt(q_val)*diff_vec[0],np.sqrt(q_val)*diff_vec[1]],\
                #        [-diff_vec[1],diff_vec[0]]])

                # Innovation covariance
                inno_cov = np.dot(H_mat, np.dot(slam_cov, H_mat.T)) + Q_obs
                # Kalman Gain
                K_mat = np.dot(np.dot(slam_cov, H_mat.T),
                               np.linalg.inv(inno_cov))
                # Updating SLAM state
                slam_state = slam_state + np.hstack(
                    (np.dot(K_mat, np.vstack((ldmk_rob_obv - z_pred)))))
                # Updating SLAM covariance
                slam_cov = np.dot(
                    np.identity(slam_cov.shape[0]) - np.dot(K_mat, H_mat),
                    slam_cov)
            # end of if else ldmk_am[id]

            #p1, p2, p3 = (0,0,1) # We are assuming everything to be static
            #color = np.int64((p1*rev_color
            #         + p2*pris_color
            #         + p3*stat_color))
            #color = color - np.min(color)
            #colors.append(color)
            ids_list.append(id)

        # end of loop over observations in single frame
        # Follow all the steps on
        for i, id in enumerate(ids):
            ldmktracks.setdefault(id, []).append(
                TrackedLdmk(timestamp, ldmk_robot_obs[:, i:i + 1]))

        robview.set_robot_pos_theta((rob_state[0], rob_state[1], 0),
                                    rob_state[2])

        #img = lmvis.genframe(ldmks, ldmk_robot_obs=ldmk_robot_obs, robview = robview,colors=colors,SIMULATEDDATA=SIMULATEDDATA)
        #imgr = lmvis.drawrobot(robview, img)
        #imgrv = robview.drawtracks([ldmktracks[id] for id in ids],
        #        imgidx=robview.imgidx_by_timestamp(timestamp),
        #        colors=colors)
        #robview.visualize(imgrv)
        #lmvis.imshow_and_wait(imgr)

        #print "SLAM State for robot and landmarks is",slam_state
        obs_num = obs_num + 1
        #up.slam_cov_plot(slam_state,slam_cov,obs_num,rob_state,ld_preds,ld_ids_preds)
        #visualize_ldmks_robot_cov(lmvis, ldmks, robview, slam_state[:2],
        #                          slam_cov[:2, :2], colors)
        R_temp_true = rodrigues([0, 0, 1], rob_state[2])
        R_temp = rodrigues([0, 0, 1], slam_state[2])
        #R_temp_true = np.array([[np.cos(rob_state[2]), -np.sin(rob_state[2]),0],
        #              [np.sin(rob_state[2]), np.cos(rob_state[2]),0],
        #              [0,0,1]])
        #R_temp = np.array([[np.cos(slam_state[2]), -np.sin(slam_state[2]),0],
        #              [np.sin(slam_state[2]), np.cos(slam_state[2]),0],
        #              [0,0,1]])

        quat_true = Rtoquat(R_temp_true)
        quat_slam = Rtoquat(R_temp)
        if (fidx < 100 and SIMULATEDDATA) or (fidx < 1000
                                              and not SIMULATEDDATA):
            f_gt.write(
                str(fidx + 1) + " " + str(rob_state[0]) + " " +
                str(rob_state[1]) + " " + str(0) + " " + str(quat_true[0]) +
                " " + str(quat_true[1]) + " " + str(quat_true[2]) + " " +
                str(quat_true[3]) + " " + "\n")
            f_slam.write(
                str(fidx + 1) + " " + str(slam_state[0]) + " " +
                str(slam_state[1]) + " " + str(0) + " " + str(quat_slam[0]) +
                " " + str(quat_slam[1]) + " " + str(quat_slam[2]) + " " +
                str(quat_slam[3]) + " " + "\n")

        true_robot_states.append(rob_state)
        slam_robot_states.append(slam_state[0:3].tolist())
    # end of loop over frames

    if PLOTSIM:
        plot_sim_res(PLOTSIM, prob_plot1, prob_plot2, prob_plot3, traj_ldmk1,
                     traj_ldmk2, traj_ldmk3, true_robot_states,
                     slam_robot_states)

    # Generating plots for paper

    #plt.figure('Trajectories')
    #true_robot_states = np.dstack(true_robot_states)[0]
    #slam_robot_states = np.dstack(slam_robot_states)[0]
    #plt.plot(true_robot_states[0],true_robot_states[1],'-k',linestyle='dashed',label='True Robot trajectory',markersize=15.0)
    #plt.plot(slam_robot_states[0],slam_robot_states[1],'^g',label='EKF SLAM trajectory',markersize=15.0)
    #plt.plot(prob_plot1[0],prob_plot1[1],'*g',label='Prismatic',markersize=15.0)
    #plt.plot(prob_plot2[0],prob_plot2[1],'ob',label='Revolute',markersize=15.0)
    #plt.plot(prob_plot3[0],prob_plot3[1],'^r',label='Static',markersize=15.0)
    #plt.yticks([-2,0,2,4,6],fontsize = 24)
    #plt.xticks([-2,0,2,4,6],fontsize = 24)
    #plt.legend(loc=4,fontsize=24)
    #plt.show()

    f_gt.close()
    f_slam.close()
    return (true_robot_states, slam_robot_states)
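robot_motion_prop is likewise not shown in the listing. A plausible sketch for the 'nonholonomic' case, assuming a unicycle model with input (v, w) and the usual EKF covariance propagation; the noise magnitude and all internals here are assumptions, not the project's actual implementation:

import numpy as np

def robot_motion_prop(state, cov, inp, dt=1.0, motion_model='nonholonomic'):
    # Unicycle propagation: state = (x, y, theta), inp = (v, w).
    x, y, theta = state
    v, w = inp[0], inp[1]
    state_new = np.array([x + v * dt * np.cos(theta),
                          y + v * dt * np.sin(theta),
                          theta + w * dt])
    # Jacobian G of the motion model w.r.t. the state, for P <- G P G^T + R
    G = np.array([[1.0, 0.0, -v * dt * np.sin(theta)],
                  [0.0, 1.0, v * dt * np.cos(theta)],
                  [0.0, 0.0, 1.0]])
    R_noise = 0.01 * np.eye(3)   # assumed process noise
    return state_new, G.dot(cov).dot(G.T) + R_noise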
Code example #6
def articulated_slam(debug_inp=True):
    # Writing to file variables
    f_gt = open('gt.txt', 'w')
    f_slam = open('slam.txt', 'w')
    img_shape = (240, 320)
    f = 300
    camera_K_z_view = np.array([[f, 0, img_shape[1] / 2.],
                                [0, f, img_shape[0] / 2.], [0, 0, 1]])

    # Motion probability threshold
    m_thresh = 0.60  # Choose the articulation model whose probability exceeds this threshold

    # Getting the map
    nframes, lmmap, robtraj, maxangle, maxdist = threeptmap3d()
    #nframes, lmmap, lmvis, robtraj, maxangle, maxdist = threeptmap()

    ldmk_estimater = dict()
    # id -> mm.Estimate_Mm()
    ldmk_am = dict()
    # id->am Id here maps to the chosen Articulated model for the landmark
    ekf_map_id = dict()
    # Formulating consistent EKF mean and covariance vectors

    rev_color, pris_color, stat_color = [
        np.array(l) for l in ([255, 0, 0], [0, 255, 0], [0, 0, 255])
    ]

    # Handle simulated and real data , setup required variables
    if SIMULATEDDATA:
        rob_obs_iter = landmarkmap.get_robot_observations(
            lmmap,
            robtraj,
            maxangle,
            maxdist,
            img_shape,
            camera_K_z_view,
            # Do not pass visualizer to
            # disable visualization
            lmvis=None)
        motion_model = 'nonholonomic'
        robview = landmarkmap.RobotView(img_shape, camera_K_z_view, maxdist)
        csv = np.genfromtxt('expt_noise.csv', delimiter=',')
        count = 0

    else:

        timeseries_data_file = "../mid/articulatedslam/2016-01-22/rev_2016-01-22-13-56-28/extracttrajectories_GFTT_SIFT_odom_gt_timeseries.txt"
        bag_file = "../mid/articulatedslam/2016-01-22/rev2_2016-01-22-14-32-13.bag"
        timeseries_dir = os.path.dirname(timeseries_data_file)
        image_file_fmt = os.path.join(timeseries_dir, "img/frame%04d.png")
        timestamps_file = os.path.join(timeseries_dir, 'img/timestamps.txt')

        robview = landmarkmap.RobotView(img_shape,
                                        camera_K_z_view,
                                        maxdist,
                                        image_file_format=image_file_fmt,
                                        timestamps_file=timestamps_file)
        use_bag = False
        if use_bag:
            data_iter = feature_odom_gt_pose_iter_from_bag(
                bag_file, "GFTT", "SIFT")
        else:
            data_iter = get_timeseries_data_iter(timeseries_data_file)

        VISDEBUG = 0
        if VISDEBUG:
            plotables = dict()
            for i, (ts, (features, odom, gt_pose)) in enumerate(data_iter):
                if i % 100 != 0:
                    continue
                (pos, quat), (linvel, angvel) = odom
                xyz, abg = gt_pose
                plotables.setdefault('linvel[0]', []).append(linvel[0])
                plotables.setdefault('linvel[1]', []).append(linvel[1])
                plotables.setdefault('linvel[2]', []).append(linvel[2])
                plotables.setdefault('angvel[0]', []).append(angvel[0])
                plotables.setdefault('angvel[1]', []).append(angvel[1])
                plotables.setdefault('angvel[2]', []).append(angvel[2])
                plotables.setdefault('xyz[0]', []).append(xyz[0])
                plotables.setdefault('xyz[1]', []).append(xyz[1])
                plotables.setdefault('xyz[2]', []).append(xyz[2])
                plotables.setdefault('abg[0]', []).append(abg[0])
                plotables.setdefault('abg[1]', []).append(abg[1])
                plotables.setdefault('abg[2]', []).append(abg[2])

            for (k, v), m in zip(plotables.items(), ',.1234_x|^vo'):
                plt.plot(v, label=k, marker=m)
            plt.legend(loc='best')
            plt.show()
            sys.exit(0)

        formatter = RealDataToSimulatedFormat(camera_K_z_view)
        rob_obs_iter_all = imap(formatter.real_data_to_required_format,
                                data_iter)
        filter_by_length = FilterTrackByLength()
        filter_by_length.count(islice(rob_obs_iter_all, 500))

        if use_bag:
            data_iter = feature_odom_gt_pose_iter_from_bag(
                bag_file, "GFTT", "SIFT")
        else:
            data_iter = get_timeseries_data_iter(timeseries_data_file)

        formatter = RealDataToSimulatedFormat(camera_K_z_view)
        rob_obs_iter_all = imap(formatter.real_data_to_required_format,
                                data_iter)
        rob_obs_iter = filter_by_length.filter(islice(rob_obs_iter_all, 500))

        motion_model = 'holonomic'

    lmvis = landmarkmap.LandmarksVisualizer([0, 0], [7, 7],
                                            frame_period=10,
                                            imgshape=(700, 700))

    # EKF parameters for filtering
    first_traj_pt = dict()
    # Initially we only have the robot state
    (init_timestamp, _, rob_state_and_input, _, _) = rob_obs_iter.next()
    model = dict()
    slam_state = np.array(
        rob_state_and_input[:3])  # \mu_{t} state at current time step

    # Covariance following discussion on page 317
    # Assuming that position of the robot is exactly known
    slam_cov = np.diag(np.ones(
        slam_state.shape[0]))  # covariance at current time step
    ld_ids = [
    ]  # Landmark ids which will be used for EKF motion propagation step
    index_set = [
        slam_state.shape[0]
    ]  # To update only the appropriate indices of state and covariance
    # Observation noise
    Q_obs = np.array([[5.0, 0, 0], [0, 5.0, 0], [0, 0, 5.0]])
    #For plotting
    obs_num = 0
    # For error estimation in robot localization
    true_robot_states = []
    slam_robot_states = []
    ldmktracks = dict()

    # Plotting variables for simulated data
    if PLOTSIM:
        prob_plot1 = []
        prob_plot2 = []
        prob_plot3 = []
        traj_ldmk1 = []
        traj_ldmk2 = []
        traj_ldmk3 = []

    # Processing all the observations
    for fidx, (timestamp, ids, rob_state_and_input, ldmks,
               ldmk_robot_obs) in enumerate(rob_obs_iter):
        if not SIMULATEDDATA:
            if fidx < 250:
                continue
            dt = timestamp - init_timestamp
            init_timestamp = timestamp
        rob_state = np.asarray(rob_state_and_input[:3])
        robot_input = rob_state_and_input[3:]
        print '+++++++++++++ fidx = %d +++++++++++' % fidx
        print 'Robot true state and inputs:', rob_state, robot_input
        print 'Observations size:', ldmk_robot_obs.shape

        # Following EKF steps now

        # First step is propagate : both robot state and motion parameter of any active landmark
        if not SIMULATEDDATA:
            slam_state[0:3], slam_cov[0:3, 0:3] = robot_motion_prop(
                slam_state[0:3],
                slam_cov[0:3, 0:3],
                robot_input,
                dt * 1e-9,
                motion_model=motion_model)
        else:
            slam_state[0:3], slam_cov[0:3, 0:3] = robot_motion_prop(
                slam_state[0:3], slam_cov[0:3, 0:3], robot_input)

        # Active here means the landmark for which an articulation model has been associated
        if len(ld_ids) > 0:
            '''
            Step 2: When any landmark's motion model is estimated, we start doing EKF SLAM with 
            state as robot's pose and motion parameter of each of the landmarks currently estimated
            Here we propagate motion parameters of each model
            '''
            for (ld_id, start_ind, end_ind) in zip(ld_ids, index_set[:-1],
                                                   index_set[1:]):
                # Landmark with ld_id to propagate itself from the last state
                slam_state[start_ind:end_ind] = ldmk_am[ld_id].predict_motion_pars(\
                        slam_state[start_ind:end_ind])
                # Propagate the covariance of the landmark's motion parameters

                slam_cov[
                    start_ind:end_ind,
                    start_ind:end_ind] = ldmk_am[ld_id].prop_motion_par_cov(
                        slam_cov[start_ind:end_ind, start_ind:end_ind])
            # end of loop over ekf propagation

        # end of if

        #if fidx == 13:
        #    pdb.set_trace()
        colors = []
        mm_probs = []
        # Collecting all the predictions made by the landmark
        ld_preds = []
        ld_ids_preds = []
        ids_list = []
        lk_pred = None

        # Processing all the observations
        # v2.0
        for id, ldmk_rob_obv in zip(ids, np.dstack(ldmk_robot_obs)[0]):
            # Adding noise
            if SIMULATEDDATA:
                ldmk_rob_obv = ldmk_rob_obv + csv[count, :]
                count = count + 1

            if id not in first_traj_pt:

                #first_traj_pt[id]=ldmk_rob_obv
                if not SIMULATEDDATA:
                    Rc2w = rodrigues([0, 0, 1], slam_state[2])
                else:
                    Rc2w = rodrigues([0, 0, 1], slam_state[2]).T
                first_traj_pt[id] = Rc2w.dot(ldmk_rob_obv) + np.asarray(
                    [slam_state[0], slam_state[1], 0.0])

            motion_class = ldmk_estimater.setdefault(id, mm.Estimate_Mm())
            # For storing the chosen articulated model
            chosen_am = ldmk_am.setdefault(id, None)
            '''
            Step 1: Process Observations to first determine the motion model of each landmark
            During this step we will use robot's internal odometry to get the best position of 
            external landmark as we can and try to get enough data to estimate landmark's motion
            model. 
            '''
            # For each landmark id, we want to check if the motion model has been estimated
            if ldmk_am[id] is None:
                motion_class.process_inp_data([0, 0], slam_state[:3],
                                              ldmk_rob_obv, first_traj_pt[id])
                # Still need to estimate the motion class
                # Check if the model is estimated
                if sum(motion_class.prior > m_thresh) > 0:
                    model[id] = np.where(motion_class.prior > m_thresh)[0]
                    print("INFO: Model estimated %s " %
                          models_names[int(model[id])])
                    print id
                    ldmk_am[id] = motion_class.am[int(model[id])]
                    ld_ids.append(id)
                    curr_ld_state = ldmk_am[id].current_state()
                    curr_ld_cov = ldmk_am[id].current_cov()
                    index_set.append(index_set[-1] + curr_ld_state.shape[0])
                    # Extend Robot state by adding the motion parameter of the landmark
                    slam_state = np.append(slam_state, curr_ld_state)
                    # Extend robot covariance by adding the uncertainty of
                    # the new landmark's motion parameters
                    slam_cov = scipy.linalg.block_diag(slam_cov, curr_ld_cov)
            else:
                # This means this landmark is an actual observation that must be used for filtering
                # the robot state as well as the motion parameter associated with the observed landmark
                # Getting the predicted observation from the landmark articulated motion
                # Getting the motion parameters associated with this landmark
                curr_ind = ld_ids.index(id)
                # Following steps from Table 10.2 from book Probabilistic Robotics
                lk_pred = ldmk_am[id].predict_model(motion_class.means[int(
                    model[id])])
                ld_preds.append(lk_pred)
                ld_ids_preds.append(curr_ind)

                # v2.0
                if not SIMULATEDDATA:
                    R_temp = rodrigues([0, 0, 1], slam_state[2]).T
                else:
                    R_temp = rodrigues([0, 0, 1], slam_state[2])

                # Predicted observation in the camera frame (matches R_w2c)
                z_pred = R_temp.dot(lk_pred -
                                    np.append(slam_state[0:2], [0.0]))

                H_mat = np.zeros((3, index_set[-1]))
                curr_obs = ldmk_rob_obv
                theta = slam_state[2]
                H_mat[0, 0:3] = np.array([
                    -np.cos(theta), -np.sin(theta),
                    -np.sin(theta) * curr_obs[0] + np.cos(theta) * curr_obs[1]
                ])
                H_mat[1, 0:3] = np.array([
                    np.sin(theta), -np.cos(theta),
                    (-np.cos(theta) * curr_obs[0]) -
                    (np.sin(theta) * curr_obs[1])
                ])
                H_mat[2, 0:3] = np.array([0, 0, 0])

                H_mat[:, index_set[curr_ind]:index_set[curr_ind + 1]] = np.dot(
                    R_temp, ldmk_am[id].observation_jac(
                        slam_state[index_set[curr_ind]:index_set[curr_ind +
                                                                 1]],
                        first_traj_pt[id]))

                # Innovation covariance
                inno_cov = np.dot(H_mat, np.dot(slam_cov, H_mat.T)) + Q_obs
                # Kalman Gain
                K_mat = np.dot(np.dot(slam_cov, H_mat.T),
                               np.linalg.inv(inno_cov))

                # Updating SLAM state
                # v2.0
                slam_state = slam_state + np.hstack(
                    (np.dot(K_mat, (np.vstack((ldmk_rob_obv - z_pred))))))
                #print slam_state[0:3],id
                # Updating SLAM covariance
                slam_cov = np.dot(
                    np.identity(slam_cov.shape[0]) - np.dot(K_mat, H_mat),
                    slam_cov)
            # end of if else ldmk_am[id]

            if PLOTSIM:
                if id == 40:
                    prob_plot1.append(motion_class.prior.copy())
                if id == 41:
                    prob_plot2.append(motion_class.prior.copy())
                if id == 13:
                    prob_plot3.append(motion_class.prior.copy())

            mm_probs.append(motion_class.prior.copy())

            motion_class = ldmk_estimater[id]
            p1, p2, p3 = motion_class.prior[:3]
            color = np.int64(
                (p1 * rev_color + p2 * pris_color + p3 * stat_color))
            color = color - np.min(color)
            colors.append(color)
            ids_list.append(id)

        # end of loop over observations in single frame
        if PLOTSIM:
            if 40 in ldmk_am and ldmk_am[40] is not None:
                lk_pred = ldmk_am[40].predict_model(slam_state[
                    index_set[ld_ids.index(40)]:index_set[ld_ids.index(40)] +
                    1])
                traj_ldmk1.append(lk_pred.copy())
            if 41 in ldmk_am and ldmk_am[41] is not None:
                lk_pred = ldmk_am[41].predict_model(slam_state[
                    index_set[ld_ids.index(41)]:index_set[ld_ids.index(41)] +
                    1])
                traj_ldmk2.append(lk_pred.copy())
            if 13 in ldmk_am and ldmk_am[13] is not None:
                lk_pred = ldmk_am[13].predict_model(slam_state[
                    index_set[ld_ids.index(13)]:index_set[ld_ids.index(13)] +
                    1])
                traj_ldmk3.append(lk_pred.copy())

        # Follow all the steps on
        #print "SLAM State for robot and landmarks is",slam_state
        #obs_num = obs_num+1
        #print 'motion_class.priors', mm_probs
        #print 'ids:', ids_list
        #print 'colors:', colors
        #up.slam_cov_plot(slam_state,slam_cov,obs_num,rob_state,ld_preds,ld_ids_preds)
        #visualize_ldmks_robot_cov(lmvis, ldmks, robview, slam_state[:2],
        #                          slam_cov[:2, :2], colors,obs_num)

        for i, id in enumerate(ids):
            ldmktracks.setdefault(id, []).append(
                TrackedLdmk(timestamp, ldmk_robot_obs[:, i:i + 1]))

        #Handle new robot view code appropriately
        robview.set_robot_pos_theta((rob_state[0], rob_state[1], 0),
                                    rob_state[2])
        assert ldmk_robot_obs.shape[1] == len(
            colors), '%d <=> %d' % (ldmk_robot_obs.shape[1], len(colors))

        img = lmvis.genframe(ldmks,
                             ldmk_robot_obs=ldmk_robot_obs,
                             robview=robview,
                             colors=colors,
                             SIMULATEDDATA=SIMULATEDDATA)
        imgr = lmvis.drawrobot(robview, img)
        imgrv = robview.drawtracks(
            [ldmktracks[id] for id in ids],
            imgidx=robview.imgidx_by_timestamp(timestamp),
            colors=colors)

        if not SIMULATEDDATA:
            fcenter3D = np.empty(3)
            faxis_vec = np.empty(3)
            fradius = np.empty(1)
            mode_count = 0
            for id in ld_ids:
                if model[id] == 0:
                    config_pars = ldmk_am[id].config_pars
                    vec1 = config_pars['vec1']
                    vec2 = config_pars['vec2']
                    center = config_pars['center']
                    center3D = center[0] * vec1 + center[1] * vec2 + ldmk_am[
                        id].plane_point
                    axis_vec = np.cross(vec1, vec2)
                    radius = config_pars['radius']
                    imgrv = robview.drawrevaxis(imgrv, center3D, axis_vec,
                                                radius, rev_color)
                    imgr = lmvis.drawrevaxis(imgr, center3D, axis_vec, radius,
                                             rev_color)

        robview.visualize(imgrv)
        lmvis.imshow_and_wait(imgr)

        quat_true = Rtoquat(rodrigues([0, 0, 1], rob_state[2]))
        quat_slam = Rtoquat(rodrigues([0, 0, 1], slam_state[2]))
        # Within 1 period = 100 for simulated
        if (fidx < 100 and SIMULATEDDATA) or (fidx < 1000
                                              and not SIMULATEDDATA):
            f_gt.write(
                str(fidx + 1) + " " + str(rob_state[0]) + " " +
                str(rob_state[1]) + " " + str(0) + " " + str(quat_true[0]) +
                " " + str(quat_true[1]) + " " + str(quat_true[2]) + " " +
                str(quat_true[3]) + " " + "\n")
            f_slam.write(
                str(fidx + 1) + " " + str(slam_state[0]) + " " +
                str(slam_state[1]) + " " + str(0) + " " + str(quat_slam[0]) +
                " " + str(quat_slam[1]) + " " + str(quat_slam[2]) + " " +
                str(quat_slam[3]) + " " + "\n")
        true_robot_states.append(rob_state)
        slam_robot_states.append(slam_state[0:3].tolist())

        #obs_num = obs_num + 1
        print 'SLAM state:', slam_state[0:3]
    # end of loop over frames

    if PLOTSIM:
        # Plot experimental results
        plot_sim_res(PLOTSIM, prob_plot1, prob_plot2, prob_plot3, traj_ldmk1,
                     traj_ldmk2, traj_ldmk3, true_robot_states,
                     slam_robot_states)

    # Debugging
    if debug_inp is True:
        # Going over all the landmarks
        for ldmk_id, ldmk in ldmk_estimater.iteritems():
            if ldmk.num_data > ldmk.min_samples:
                # This landmark has at least enough observations for estimating motion parameters
                if ldmk_am[ldmk_id] is None:
                    print "Could not estimate model for landmark ", ldmk_id,\
                            "with ", ldmk.num_data, " samples"
    f_gt.close()
    f_slam.close()
    return (true_robot_states, slam_robot_states)
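The state-augmentation pattern shared by all three SLAM loops above (appending landmark parameters to slam_state, growing slam_cov block-diagonally, and tracking per-landmark slices through index_set) can be exercised standalone. A toy sketch:

import numpy as np
import scipy.linalg

slam_state = np.zeros(3)             # robot pose occupies indices 0..2
slam_cov = np.eye(3)
index_set = [slam_state.shape[0]]    # slice boundaries of landmark blocks
for ld_pars, ld_cov in [(np.array([0.5]), np.eye(1)),
                        (np.array([1.0, 2.0]), 4.0 * np.eye(2))]:
    index_set.append(index_set[-1] + ld_pars.shape[0])
    slam_state = np.append(slam_state, ld_pars)
    slam_cov = scipy.linalg.block_diag(slam_cov, ld_cov)
for start, end in zip(index_set[:-1], index_set[1:]):
    print start, end, slam_state[start:end]   # one slice per landmark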