Example #1
    def predict_pose(self,
                     timestamp,
                     prev_position=None,
                     prev_orientation=None):
        '''
        Predict the next camera pose.
        '''
        if prev_position is not None:
            self.position = prev_position
        if prev_orientation is not None:
            self.orientation = prev_orientation

        if not self.initialized:
            return (g2o.Isometry3d(self.orientation,
                                   self.position), self.covariance)

        dt = timestamp - self.timestamp

        delta_angle = g2o.AngleAxis(self.v_angular_angle * dt * self.damp,
                                    self.v_angular_axis)
        delta_orientation = g2o.Quaternion(delta_angle)

        orientation = delta_orientation * self.orientation
        position = self.position + delta_orientation * (self.v_linear * dt *
                                                        self.damp)

        return (g2o.Isometry3d(orientation, position), self.covariance)
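A minimal, self-contained sketch of the same constant-velocity prediction step, using made-up velocity values and the g2o types already used above (g2o.AngleAxis, g2o.Quaternion, g2o.Isometry3d); the numbers are illustrative only:

import numpy as np
import g2o

dt = 0.1                                    # time step in seconds (hypothetical)
damp = 0.95                                 # damping factor, as in these examples
v_linear = np.array([1.0, 0.0, 0.0])        # linear velocity (hypothetical)
v_angular_angle = 0.2                       # angular speed in rad/s (hypothetical)
v_angular_axis = np.array([0.0, 0.0, 1.0])  # rotation axis

orientation = g2o.Quaternion()              # identity orientation
position = np.zeros(3)

# compose the damped rotation increment and apply it to orientation and position
delta_q = g2o.Quaternion(g2o.AngleAxis(v_angular_angle * dt * damp, v_angular_axis))
orientation = delta_q * orientation
position = position + delta_q * (v_linear * dt * damp)

print(g2o.Isometry3d(orientation, position).matrix())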
def pose_to_se3quat(pose):
    """Convert a pose vector to a :class: g2o.Isometry3d instance.

    Args:
        pose: A 7 element 1-d numpy array encoding x, y, z, qx, qy, qz, and qw respectively.
    Returns:
        A :class: g2o.Isometry3d instance encoding the same information as the input pose.
    """
    return g2o.SE3Quat(g2o.Quaternion(*np.roll(pose[3:7], 1)), pose[:3])
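A quick check of the quaternion reordering performed by np.roll above: the input pose stores the quaternion as (qx, qy, qz, qw), while g2o.Quaternion is constructed as (qw, qx, qy, qz), so rolling by one moves qw to the front. A small illustrative example with hypothetical values:

import numpy as np
import g2o

# x, y, z, qx, qy, qz, qw  (identity rotation, translation (1, 2, 3))
pose = np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0])

# np.roll([qx, qy, qz, qw], 1) -> [qw, qx, qy, qz], matching g2o.Quaternion's (w, x, y, z) order
se3 = g2o.SE3Quat(g2o.Quaternion(*np.roll(pose[3:7], 1)), pose[:3])
print(se3.translation())   # -> [1. 2. 3.]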
Example #3
 def __init__(self, 
         timestamp=None, 
         initial_position=None, 
         initial_orientation=None, 
         initial_covariance=None):
     super().__init__(timestamp, initial_position, initial_orientation, initial_covariance)
     
     self.delta_position = np.zeros(3)    # delta translation 
     self.delta_orientation = g2o.Quaternion()
Example #4
    def __init__(self, params):
        self.params = params

        self.timestamp = None
        self.position = np.zeros(3)
        self.orientation = g2o.Quaternion()
        self.covariance = None  # pose covariance

        self.v_linear = np.zeros(3)  # linear velocity
        self.v_angular_angle = 0
        self.v_angular_axis = np.array([1, 0, 0])

        self.initialized = False
        self.damp = 0.95  # damping factor
Example #5
    def process(self, imu_msg):
        while self.is_measuring:
            time.sleep(1e-4)
        self.is_processing = True

        timestamp, data = imu_msg
        state_new = ESKFState(self.config)
        state_new.wm = data[0]
        state_new.am = data[1]
        state_new.timestamp = timestamp

        if len(self.state_buffer) == 0:
            self.initialize(state_new)
            return

        # ========================
        # === ESKF Propagation ===
        # ========================

        state_old = self.state_buffer[-1]
        dt = state_new.timestamp - state_old.timestamp

        if dt < 0:
            return

        if dt >= self.config.imu_max_reading_time:
            print('[Warning] The interval between readings is too big')
            raise RuntimeError
            # self.initialize(state_new, True)
            # return

        if np.linalg.norm(state_new.am) >= self.config.imu_max_reading_accel:
            print('[Warning] The accelerometer reading is too large')
            state_new.am = state_old.am

        if np.linalg.norm(state_new.wm) >= self.config.imu_max_reading_gyro:
            print('[Warning] The gyroscope reading is too large')
            state_new.wm = state_old.wm

        self.propagate(state_old, state_new)
        self.state_buffer.append(state_new)
        self.shrink_buffer()

        print(state_new.timestamp,
              '\n p   ', state_new.p, state_new.p - self.state_buffer[0].p,
              '\n q   ', state_new.q,
              '\n ba  ', state_new.ba,
              '\n bw  ', state_new.bw)
        print(' v   ', state_new.v)

        self.current_pose = g2o.Isometry3d(
            g2o.Quaternion(state_new.q), state_new.p)
        self.is_processing = False
    def add_edge(self, v0, v1, measurement=None,
                 information=np.identity(6),
                 robust_kernel=None):

        edge = g2o.EdgeSE3()
        # edge = g2o.EdgeProjectXYZ2UV()

        edge.set_vertex(0, v0)
        edge.set_vertex(1, v1)
        if measurement is None:
            measurement = g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1),
                                         np.array([0, 1, 0]))
        edge.set_measurement(measurement)  # relative pose
        edge.set_information(information)
        # edge.set_parameter_id(0, 0)
        if robust_kernel is None:
            # default: Huber kernel with the 95% chi-square threshold (5.991)
            robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))
        edge.set_robust_kernel(robust_kernel)
        super().add_edge(edge)
Example #7
    def __init__(self,
                 timestamp=None,
                 initial_position=np.zeros(3),
                 initial_orientation=g2o.Quaternion(),
                 initial_covariance=None):

        self.timestamp = timestamp
        self.position = initial_position
        self.orientation = initial_orientation
        self.covariance = initial_covariance  # pose covariance

        self.v_linear = np.zeros(3)  # linear velocity
        self.v_angular_angle = 0
        self.v_angular_axis = np.array([1, 0, 0])

        self.initialized = False
        # damping factor
        self.damp = 0.95
Example #8
    def predict_pose(self, timestamp):
        '''
        Predict the next camera pose.
        '''
        if not self.initialized:
            return (g2o.Isometry3d(self.orientation,
                                   self.position), self.covariance)

        dt = timestamp - self.timestamp

        # Use quaternion instead of rotation matrix due to performance
        delta_angle = g2o.AngleAxis(self.v_angular_angle * dt * self.damp,
                                    self.v_angular_axis)
        delta_orientation = g2o.Quaternion(delta_angle)

        position = self.position + self.v_linear * dt * self.damp
        orientation = self.orientation * delta_orientation

        return (g2o.Isometry3d(orientation, position), self.covariance)
 def optimize(self, max_iterations=20):
     optimizer = G2OPoseGraphOptimizer()
     id_to_name = {}
     name_to_id = {}
     # populate optimizer
     with self._lock:
         # add vertices
         for nname, ndata in self.nodes.items():
             # compute node ID
             node_id = len(id_to_name)
             id_to_name[node_id] = nname
             name_to_id[nname] = node_id
             # get node pose and other attributes
             pose = g2o.Isometry3d(g2o.Quaternion(ndata["pose"].Q('wxyz')), ndata["pose"].t) \
                 if "pose" in ndata else None
             fixed = ndata["fixed"] if "fixed" in ndata else False
             # add vertex
             optimizer.add_vertex(node_id, pose=pose, fixed=fixed)
         # add edges
         for u, v, edata in self.edges.data():
             # get nodes IDs
             iu = name_to_id[u]
             iv = name_to_id[v]
             # get edge measurement and other attributes
             measurement = edata["measurement"]
             T = tr.compose_matrix(
                 translate=measurement.t,
                 angles=tr.euler_from_quaternion(measurement.q)
             )
             # add edge
             optimizer.add_edge([iu, iv], g2o.Isometry3d(T))
     # optimize
     optimizer.optimize(max_iterations=max_iterations)
     # update graph
     with self._lock:
         for nid, nname in id_to_name.items():
             npose = optimizer.get_pose(nid)
             T = tr.compose_matrix(
                 translate=npose.t,
                 angles=tr.euler_from_matrix(npose.R)
             )
             q = tr.quaternion_from_matrix(T)
             self.nodes[nname]["pose"] = TF(t=npose.t, q=q)
Example #10
    def __init__(self, 
            timestamp=None, 
            initial_position=None, 
            initial_orientation=None, 
            initial_covariance=None):

        self.timestamp = timestamp
        if initial_position is not None: 
            self.position = initial_position
        else:
            self.position = np.zeros(3)
        if initial_orientation is not None: 
            self.orientation = initial_orientation
        else:
            self.orientation = g2o.Quaternion()
        self.covariance = initial_covariance    # pose covariance

        self.is_ok = False 
        self.initialized = False
Example #11
def transform_to_SE3Quat(transformer, cur_frame, target_frame, pose):
    (tpose, tquat) = transform_frame(transformer, cur_frame, target_frame,
                                     pose)

    return g2o.SE3Quat(g2o.Quaternion(tquat.w, tquat.x, tquat.y, tquat.z),
                       np.array([tpose.x, tpose.y, tpose.z]))
Example #12
                markerIDset = False

        unique_list = []
        bag_num += 1
        bag.close()

    msg_point = geometry_msgs.msg.PoseStamped()
    trans = geometry_msgs.msg.PoseStamped()

    # Create nodes in graph for sensors
    print("Generate graph for optimization...")
    for snode in sensor_list:
        spose = snode.pose
        add_frame(t, "s" + str(snode.id), origin_frame, spose)
        pose = g2o.SE3Quat(
            g2o.Quaternion(spose['qw'], spose['qx'], spose['qy'], spose['qz']),
            np.array([spose['x'], spose['y'], spose['z']]))
        optimizer.add_vertex(snode.id, pose.Isometry3d(),
                             snode.id == fixed_sensor)

    # Create nodes in graph for tracking points (transformed in origin frame)
    assocs = []
    graph_id = sensor_num + 1
    for tnode in tracking_points:
        if tnode.sensor_id == fixed_sensor:
            tpose = tnode.pose
            add_frame(t, "t" + str(tnode.tracker_id),
                      "s" + str(tnode.sensor_id), tpose)

            pose = transform_to_SE3Quat(t, "s" + str(tnode.sensor_id),
                                        origin_frame, tpose)
Example #13
    def measure(self, vo_msg):
        timestamp_from, timestamp_to, vo_T = vo_msg
        assert timestamp_from < timestamp_to

        # if timestamp_from < self.state_buffer[0].timestamp + 3:  # test
        #     return

        vo_q = vo_T.rotation()
        vo_t = vo_T.translation()

        tic = time.time()
        while (self.is_processing or len(self.state_buffer) < 2 or 
            self.state_buffer[-1].timestamp < timestamp_to):
            time.sleep(1e-4)
            if time.time() - tic > 1:
                print('[Update] Something went wrong with IMU !!')
                self.is_measuring = False
                return

        self.is_measuring = True
        if self.state_buffer[0].timestamp > timestamp_from:
            print('[Update] Keyframe time is older than the first state, '
                  'drop this update')
            self.is_measuring = False
            return

        if self.state_buffer[-1].timestamp - timestamp_to > self.config.max_vo_imu_delay:
            print('[Update] Delay between imu and vo is too large, drop this update')
            self.is_measuring = False
            return
            
        state_from = self.get_closest_state(timestamp_from)
        state_to = self.get_closest_state(timestamp_to)
        if state_from is None or state_to is None:
            print('[Update] Nearest state not found')
            self.is_measuring = False
            return

        from_q = quaternion(state_from.q)
        to_q = quaternion(state_to.q)

        odom_q = self.cam2imu_q * vo_q * self.cam2imu_q.conjugate()
        odom_q.normalize()
        odom_t = self.cam2imu_q * vo_t + self.cam2imu_t

        meas_q = from_q * odom_q
        meas_q.normalize()
        meas_p = from_q * odom_t + state_from.p

        R = np.identity(6)
        R[:3, :3] = self.config.vo_fixedcov_p * np.identity(3)
        R[3:, 3:] = self.config.vo_fixedcov_q * np.identity(3)

        # ========================
        # === Update procedure ===
        # ========================
        # ## Step1: Calculate the residual

        residual_p = meas_p - state_to.p
        residual_q = meas_q * to_q.conjugate()
        
        residual_th = 2 * residual_q.vec() / residual_q.w()   # q ~= [1, 0.5*theta]
        residual = np.concatenate([residual_p, residual_th])

        # ## Step2: Compute the Innovation, Kalman gain and Correction state
        S = self.H @ state_to.P @ self.H.T + R    # (6, 6)
        S_inv = np.linalg.inv(S)

        K = state_to.P @ self.H.T @ S_inv    # (15, 6)
        update = K.dot(residual)

        # ##Step3: Update the state
        state_to.p  += update[0:3]
        state_to.v  += update[3:6]
        state_to.ba += update[9:12]
        state_to.bw += update[12:15]

        q_update = np.array([1, *(0.5 * update[6:9])])
        state_to.q = quatmat(state_to.q).dot(q_update)
        state_to.q /= np.linalg.norm(state_to.q)

        # covariance update (Joseph form): P_{k+1|k+1} = (I - K H) P_{k+1|k} (I - K H)^T + K R K^T
        KH = np.identity(15) - K @ self.H    # (15, 15)
        state_to.P = KH @ state_to.P @ KH.T + K @ R @ K.T
        # Make sure P stays symmetric
        state_to.P = (state_to.P + state_to.P.T) / 2.

        # ##Step4: Repropagate State & Covariance
        assert state_to in self.state_buffer
        idx = self.state_buffer.index(state_to)
        if idx < len(self.state_buffer) - 1:
            for i in range(idx, len(self.state_buffer) - 1):
                self.propagate(self.state_buffer[i], self.state_buffer[i+1])

        self.current_pose = g2o.Isometry3d(
            g2o.Quaternion(self.state_buffer[-1].q), self.state_buffer[-1].p)
        self.is_measuring = False
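A small standalone check (not part of the filter) of the small-angle approximation used in Step 1 above: for a quaternion built from angle theta about a unit axis, 2 * q.vec() / q.w() is approximately theta * axis when theta is small. This assumes the same g2o.Quaternion vec()/w() accessors already used in the residual computation:

import numpy as np
import g2o

theta = 0.01
axis = np.array([0.0, 0.0, 1.0])
q = g2o.Quaternion(g2o.AngleAxis(theta, axis))
print(2 * q.vec() / q.w())   # approximately [0, 0, 0.01] = theta * axis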
pose_graph = PoseGraphOptimization()
pose_file = open('sphere.g2o')
line = pose_file.readline()

while line:

    data = line.split(' ')

    if (data[0] == 'VERTEX_SE3:QUAT'):

        pose_id = int(data[1])
        pose_info = np.array([float(i) for i in data[2:9]])

        # print(pose_id,pose_info)

        q = g2o.Quaternion(pose_info[6], pose_info[3], pose_info[4],
                           pose_info[5])
        t = g2o.Isometry3d(q, pose_info[0:3])

        pose_graph.add_vertex(pose_id, t)

    if (data[0] == 'EDGE_SE3:QUAT'):

        pose_id_left = int(data[1])
        pose_id_righ = int(data[2])
        pose_info = np.array([float(i) for i in data[3:10]])

        q = g2o.Quaternion(pose_info[6], pose_info[3], pose_info[4],
                           pose_info[5])
        t = g2o.Isometry3d(q, pose_info[0:3])

        pose_graph.add_edge([pose_id_left, pose_id_righ], t)

    # advance to the next line of the file (without this the loop never terminates)
    line = pose_file.readline()
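Once the file has been parsed, the graph can be optimized and the refined poses read back. A minimal sketch, assuming the same PoseGraphOptimization interface used in the later examples (optimize() and get_pose()):

pose_file.close()

pose_graph.optimize(20)
print(pose_graph.get_pose(0).translation())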
def main():
    # camera matrix
    fx = 3551.342810
    fy = 3522.689669
    cx = 2033.513326
    cy = 1455.489194

    # fx = 718.8560
    # fy = 718.8560
    # cx = 607.1928
    # cy = 185.2157

    K = np.float64([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    D = np.float64([-0.276796, 0.113400, -0.000349, -0.000469])

    #load images
    # dataset1_dir = '/home/linjian/datasets/Data_trajectory/2018-08-21/22_47_20_load/'
    # dataset2_dir = '/home/linjian/datasets/Data_trajectory/2018-08-22/'
    # dataset1_dir ='/home/linjian/dataset/docking_dataset/image/Data_trajectory/2018-08-21/22_47_20_load/'
    # dataset1_dir = '/home/linjian/dataset/docking_dataset/image/Data_trajectory/2018-08-22/16h-26m-42s load/'
    # dataset1_dir = '/home/linjian/dataset/docking_dataset/image/raw_data/2018-08-22/17h-21m-57s load/'
    # dataset1_dir = '/home/linjian/dataset/docking_dataset/image/raw_data/2018-08-22/17h-24m-21s unload/'
    # dataset1_dir = '/home/linjian/dataset/tum_image/sequence_30/resized_images/'
    dataset1_dir = '/home/linjian/dataset/docking_dataset/image/Data_trajectory/load_unload/'
    filelist1 = glob.glob(dataset1_dir + '*.jpg')
    # filelist1 = sorted(filelist1)
    filelist1.sort(key=lambda f: float(re.findall(r'(\d+\.\d)', f)[0]))
    print(filelist1)
    img_num = len(filelist1)

    #load the scale from the wheel speed
    # loaded_data = load_data(dataset1_dir)
    # scale = loaded_data.get_speed()

    #initialization

    #poses
    rotation_array = []
    transformation_array = []
    pose_array = []
    R = np.eye(3)
    t = np.zeros((1, 3))
    rotation_array.append(R)
    transformation_array.append(t)
    pose_array.append(t)

    #optimization initialization
    optimization_class = PoseGraphOptimization()
    #keep the first vertex fixed
    pose = g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1),
                          np.transpose(pose_array[0]))
    optimization_class.add_vertex(0, pose, True)

    #bag of visual words (BoVW) init
    detector = cv2.ORB_create()
    bovw_class = bovw(detector)

    #init loop closure class
    # loopclosure_class = loopclosure()
    loopclosure_pairs = []
    max_num_lc = 50

    #init keypoints and descriptors
    keypoints_list = []
    descriptors_list = []

    #keyframe init
    keyframe_file_list = []
    keyframe_file_list.append(filelist1[0])
    keyframe_number = 1
    frame_skipped = 0
    frame_skipped_floor = 5
    frame_skipped_ceil = 20
    #initialize input images
    img1 = cv2.imread(filelist1[0])
    img2 = cv2.imread(filelist1[1])
    keyframe_index = 1

    #init the relative scale list
    relative_scale_list = []
    relative_scale_list.append(1)

    #initialize matching class with camera parameters
    matching_class = matching(K, D)
    #create a detector
    detector = cv2.ORB_create(nfeatures=800)
    for i in range(0, 170):
        # for i in range(1,img_num):
        # for i in range(0,img_num):
        #insert images
        matching_class.load_image(img1, img2)

        #scan matching
        goodkf, matches = matching_class.match_images(detector)
        #keyframe selection conditions:
        #a. do not skip more than the skipped-frame threshold
        #b. the real scale is never 0
        #c. there are enough matches
        #d. the frames are adjacent
        # if (enough_match == False or scale[i-1] < 0.001) and (frame_skipped < frame_skipped_threshold):
        if (goodkf == False or frame_skipped < frame_skipped_floor) and (
                frame_skipped < frame_skipped_ceil):
            # print("for the ",i,"image absolute scale is ",scale[i-1])
            # print('not a good keyframe')
            frame_skipped = frame_skipped + 1
            keyframe_index = keyframe_index + 1
            print("keyframe index is ", keyframe_index, "skipped ",
                  frame_skipped)
            if keyframe_index > img_num - 1:
                break
            img2 = cv2.imread(filelist1[keyframe_index])
            continue

        #get keypoints
        kp1_match, kp2_match = matches
        keypoints_list.append(matching_class.kp1)
        descriptors_list.append(matching_class.des1)

        #calculate the relative scale
        relative_scale = comput_relative_scale(kp1_match, kp2_match)
        #reject a clearly wrong scale
        if relative_scale > 3:
            keyframe_index = keyframe_index + 1
            print("keyframe index is ", keyframe_index)
            if keyframe_index > img_num - 1:
                break
            img2 = cv2.imread(filelist1[keyframe_index])
            continue
        #
        cumulate_relative_scale = relative_scale_list[-1] * relative_scale
        relative_scale_list.append(cumulate_relative_scale)
        # print("for the ",i,"image relative scale between two keyframe is ",relative_scale)
        print(
            "for the ", i,
            "image calculated relative scale relate to the first keyframe is ",
            cumulate_relative_scale)

        #reinit frame_skipped
        frame_skipped = 0

        #append keyframe files
        keyframe_file_list.append(filelist1[keyframe_index])

        #add into bovw
        bovw_class.add_histogram(matching_class.des1)

        #calculate rotation and transformation
        dR = matching_class.getRotation()
        rotation_array.append(dR)
        dt = np.transpose(matching_class.getTransformation())
        dt = dt.dot(R) * relative_scale_list[-1]
        transformation_array.append(dt)
        R = dR.dot(R)
        t = t + dt
        pose_array.append(t)
        #plot
        # plot_pose(np.asarray(pose_array))
        # plt.show()
        # cv2.waitKey(1)
        # plt.close()
        ###############optimization###############
        #####add vertex
        vertex_id = len(pose_array) - 1
        pose = g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1),
                              np.transpose(pose_array[vertex_id]))

        optimization_class.add_vertex(vertex_id, pose)

        #####add edge between adjacent frames
        constraint = dt + np.append(
            np.random.normal(0, 0, 2), np.array([0]), axis=0)
        constraint = np.transpose(constraint)
        optimization_class.add_edge(optimization_class.vertex(vertex_id),
                                    optimization_class.vertex(vertex_id - 1),
                                    measurement=g2o.Isometry3d(
                                        g2o.Quaternion(0, 0, 0, 1),
                                        constraint))

        #find loop closure
        lc_index, lc_cost = bovw_class.find_lc(matching_class.des2)
        lc_indices = bovw_class.get_lowest_costs_index(
            max_num_lc)  # number of lowest cost indices
        print(lc_cost)
        # img_lc = cv2.imread(keyframe_file_list[lc_index])
        # cv2.imshow('Loop closure matched',img_lc)
        for lc_i in lc_indices:
            if (lc_cost < 0.15) and (lc_i < keyframe_number - 5):
                img_lc = cv2.imread(keyframe_file_list[lc_i])
                cv2.imshow('Loop closure matched', img_lc)
                #add the loopclosure kf to list
                loopclosure_pairs.append([lc_i, keyframe_number])
                #scale calculation: first compute the good matches, then the relative scale
                # lc_scale = comput_relative_scale(,)
                # print('lc scale is ', scale[i-2]/relative_scale_list[i-1]*lc_scale)

        cv2.waitKey(1)
        #assign new keyframe image
        keyframe_number = keyframe_number + 1
        img1 = img2
        keyframe_index = keyframe_index + 1
        if keyframe_index > img_num - 1:
            break
        #plot lc matched image
        img2 = cv2.imread(filelist1[keyframe_index])

    print("loopclosure pairs are:", loopclosure_pairs)
    ##############optimization
    #add vertices
    # opt = PoseGraphOptimization()
    # for vertex_id in range(len(keyframe_file_list)):
    #     pose = g2o.Isometry3d(g2o.Quaternion(0,0,0,1),np.transpose(pose_array[vertex_id]))
    #     if vertex_id == 0:
    #         opt.add_vertex(vertex_id,pose,True)
    #     else:
    #         opt.add_vertex(vertex_id,pose)

    #get measurement
    print(keyframe_file_list)
    print(len(keyframe_file_list))

    save_to_pickle(keyframe_file_list, "keyframe_file_list.pkl")
    save_to_pickle(loopclosure_pairs, "loopclosure_pairs.pkl")

    # def compute_scale(file_list, id1, id2):
    #     img1 = cv2.imread(file_list[id1])
    #     img2 = cv2.imread(file_list[id2])
    #     matching_class.load_image(img1,img2)
    #     #scan matching
    #     enough_match,matches = matching_class.match_images(detector)
    #     cv2.waitKey(1)
    #     kp1_match,kp2_match = matches

    #     #calculate the relative scale
    #     relative_scale = comput_relative_scale(kp1_match,kp2_match)
    #     #remove absolute wrong scale
    #     if relative_scale >2:
    #         print('bad scale')
    #         # continue
    #     measurement_scale = relative_scale_list[id1]*relative_scale
    #     dt = np.transpose(pose_array[id2]- pose_array[id1])
    #     # dt = matching_class.getTransformation()
    #     # print("scale, ",measurement_scale)
    #     # print("dt, ", dt)
    #     # print("dt, ", dt.shape)
    #     measurement= measurement_scale*dt

    for pairs in loopclosure_pairs:
        # load loopclosed keyframe pairs
        keyframe_id = pairs[0]
        keyframe_id2 = pairs[1]

        img1 = cv2.imread(keyframe_file_list[keyframe_id])
        img2 = cv2.imread(keyframe_file_list[keyframe_id2])
        matching_class.load_image(img1, img2)
        #scan matching
        enough_match, matches = matching_class.match_images(detector)
        cv2.waitKey(1)
        kp1_match, kp2_match = matches

        #calculate the relative scale
        relative_scale = comput_relative_scale(kp1_match, kp2_match)
        #reject a clearly wrong scale
        if relative_scale > 2:
            print('bad scale')
            # continue
        measurement_scale = relative_scale_list[keyframe_id] * relative_scale
        dt = np.transpose(pose_array[keyframe_id2] - pose_array[keyframe_id])
        # dt = matching_class.getTransformation()
        # print("scale, ",measurement_scale)
        # print("dt, ", dt)
        # print("dt, ", dt.shape)
        measurement = measurement_scale * dt
        #add to edge
        optimization_class.add_edge(optimization_class.vertex(keyframe_id2),
                                    optimization_class.vertex(keyframe_id),
                                    measurement=g2o.Isometry3d(
                                        g2o.Quaternion(0, 0, 0, 1),
                                        measurement))

    # pair_id = 0
    # keyframe_id = loopclosure_pairs[pair_id][0]
    # keyframe_id2 = loopclosure_pairs[pair_id][1]
    # img1 = cv2.imread(keyframe_file_list[keyframe_id])
    # img2 = cv2.imread(keyframe_file_list[keyframe_id2])
    # matching_class.load_image(img1,img2)
    # #scan matching
    # enough_match,matches = matching_class.match_images(detector)
    # kp1_match,kp2_match = matches

    # #calculate the relative scale
    # relative_scale = comput_relative_scale(kp1_match,kp2_match)
    # #remove absolute wrong scale
    # if relative_scale >5:
    #     print('bad scale')
    #     # continue
    # measurement_scale = relative_scale_list[keyframe_id]*relative_scale
    # dt = np.transpose(pose_array[keyframe_id2]- pose_array[keyframe_id])
    # # dt = matching_class.getTransformation()
    # print("scale, ",measurement_scale)
    # print("dt, ", dt)
    # print("dt, ", dt.shape)
    # measurement= measurement_scale*dt
    # #add to edge
    # optimization_class.add_edge(optimization_class.vertex(keyframe_id2),optimization_class.vertex(keyframe_id),measurement=g2o.Isometry3d(g2o.Quaternion(0,0,0,1),measurement))

    # print("original pose0 is, ",pose_array[keyframe_id])
    # print("original pose1 is, ",pose_array[keyframe_id2])
    # print("measurement is ,", measurement, "between vertex ",keyframe_id," and ",keyframe_id2)

    print("error", optimization_class.compute_active_errors())

    # optimization_class.optimize(0)
    # print("optimized first pose is ", optimization_class.get_pose(0).translation())
    # print("optimized first pose is ", optimization_class.get_pose(1).translation())
    # print("optimized first pose is ", optimization_class.get_pose(2).translation())
    # optimization_class.optimize(2)
    # print("optimized first pose is ", optimization_class.get_pose(0).translation())
    # print("optimized first pose is ", optimization_class.get_pose(1).translation())
    # print("optimized first pose is ", optimization_class.get_pose(2).translation())

    ##############end optimization

    # bovw_class.save_bovw_lib()
    save_to_pickle(keyframe_file_list, "image_file_list.pkl")
    #convert lists to array
    #plot origin
    rotation_array = np.asarray(rotation_array)
    transformation_array = np.asarray(transformation_array)
    pose_array = np.asarray(pose_array)
    plot_pose(pose_array)
    print(pose_array.shape)
    plt.show()

    optimization_class.optimize(10)
    #plot opt
    opt_pose = []
    for i in range(len(pose_array)):
        arr = optimization_class.get_pose(i).translation()
        opt_pose.append(np.expand_dims(arr, axis=0))
    opt_pose = np.asarray(opt_pose)
    plot_pose(opt_pose)

    plt.show()
Example #16
def rotmat(q):
    q = g2o.Quaternion(q)
    q.normalize()
    return q.matrix()
        super().add_edge(edge)


    def test_add_edge(self,edge):
        super().add_edge(edge)

if __name__ == "__main__":
    pose0 = np.array([0,0,0]) #+ np.random.normal(0,0.1,3)
    pose1 = np.array([0,1,0]) + np.random.normal(0,0.1,3)
    pose2 = np.array([0,2,0]) + np.random.normal(0,0.1,3)
    print("poses:",'\n',pose0,'\n',pose1,'\n',pose2,'\n')
    opt = PoseGraphOptimization()
    # pose0 = g2o.SE3Quat(np.identity(6), pose0)
    # pose1 = g2o.SE3Quat(np.identity(6), pose1)
    # pose2 = g2o.SE3Quat(np.identity(6), pose2)
    pose0 = g2o.Isometry3d(g2o.Quaternion(0,0,0,1),pose0)
    pose1 = g2o.Isometry3d(g2o.Quaternion(0,0,0,1),pose1)
    pose2 = g2o.Isometry3d(g2o.Quaternion(0,0,0,1),pose2)

    opt.add_vertex(0,pose0,True)
    opt.add_vertex(1,pose1)
    opt.add_vertex(2,pose2)
    
    vertices = opt.get_vertices()

    opt.add_edge(vertices[0], vertices[1],
                 measurement=g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1), np.array([0, -1, 0])))
    opt.add_edge(vertices[1], vertices[2],
                 measurement=g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1), np.array([0, -1, 0])))
    opt.add_edge(vertices[0], vertices[2],
                 measurement=g2o.Isometry3d(g2o.Quaternion(0, 0, 0, 1), np.array([0, -2, 0])))

    opt.initialize_optimization()
    print("error", opt.compute_active_errors())
Example #18
def quaternion(q):
    q = g2o.Quaternion(q)
    q.normalize()
    return q
Example #19
 def set_from_rotation_and_translation(self, Rcw, tcw):
     self.set(g2o.Isometry3d(g2o.Quaternion(Rcw), tcw))
Example #20
 def set_rotation_matrix(self, Rcw):
     self.set(g2o.Isometry3d(g2o.Quaternion(Rcw), self._pose.position()))
Example #21
 def create_pose(self, orientation: np.ndarray, translation: np.ndarray) -> g2o.Isometry3d:
     pose = g2o.Isometry3d()
     pose.set_translation(translation)
     q = g2o.Quaternion(orientation)
     pose.set_rotation(q)
     return pose
Example #22
import sys
import numpy as np
import math

sys.path.append("../../")
from config import Config

import g2o

position = np.zeros(3)  # translation
orientation = g2o.Quaternion()

pose = g2o.Isometry3d(orientation, position)

print('pose: ', pose.matrix())

position[0] = 1

print('pose: ', pose.matrix())

orientation2 = g2o.Quaternion(g2o.AngleAxis(math.pi, [0, 0, 1]))

print('position', position)
print('orientation2*position', orientation2 * position)
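A small follow-up cross-check, continuing the script above (illustrative only): the quaternion-times-vector product printed above should match applying the equivalent rotation matrix, obtained with the same .matrix() accessor used in Example #16.

# continuing the script above: rotate the position with the explicit rotation matrix
print('orientation2.matrix() @ position', orientation2.matrix() @ position)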