Example #1
    def get_farthest_in_range(self, start_point: Matrix,
                              path: List[Matrix]) -> Tuple[int, Matrix]:
        farthest = 0

        for i, point in enumerate(path):
            d = dist(tf.translation_from_matrix(point),
                     tf.translation_from_matrix(start_point))

            if d > self.max_stride:
                break
            else:
                farthest = i

        return farthest, path[farthest].copy()
Example #2
def interpolate(start: np.ndarray, stop: np.ndarray, phase: float) -> np.ndarray:
    start_trans = tf.translation_from_matrix(start)
    stop_trans = tf.translation_from_matrix(stop)
    shift_trans = phase * (stop_trans - start_trans)
    inter_trans = start_trans + shift_trans
    trans_matrix = tf.translation_matrix(inter_trans)

    start_rot = np.array(tf.euler_from_matrix(start, axes='sxyz'))
    stop_rot = np.array(tf.euler_from_matrix(stop, axes='sxyz'))
    shift_rot = phase * (stop_rot - start_rot)
    inter_rot = start_rot + shift_rot
    rot_matrix = tf.euler_matrix(*inter_rot, axes='sxyz')

    result = tf.concatenate_matrices(trans_matrix, rot_matrix)
    return result
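A minimal usage sketch for interpolate above (hedged: assumes `tf` aliases the transformations module, e.g. the pip package `transformations` or ROS's tf.transformations, and `np` aliases numpy):

import numpy as np
import transformations as tf

start = tf.translation_matrix([0.0, 0.0, 0.0])
stop = tf.concatenate_matrices(tf.translation_matrix([1.0, 0.0, 0.0]),
                               tf.euler_matrix(0.0, 0.0, np.pi / 2, axes='sxyz'))
halfway = interpolate(start, stop, 0.5)
print(tf.translation_from_matrix(halfway))         # approx. [0.5, 0.0, 0.0]
print(tf.euler_from_matrix(halfway, axes='sxyz'))  # approx. (0.0, 0.0, pi/4)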
Example #3
    def updateData(self, feature, prop):
        r"""If a property of the handled feature has changed,
        we have the chance to handle this here
        
        See Also
        --------
        https://www.freecadweb.org/wiki/Scripted_objects

        """
        debug("ViewProviderAnchor/updateData")
        p = feature.getPropertyByName("p")
        u = feature.getPropertyByName("u")
        v = feature.getPropertyByName("v")

        self.transform.translation.setValue((p[0], p[1], p[2]))

        at = anchor_transformation(p0=(0, 0, 0),
                                   u0=(0, -1, 0),
                                   v0=(0, 0, 1),
                                   p1=(p[0], p[1], p[2]),
                                   u1=(u[0], u[1], u[2]),
                                   v1=(v[0], v[1], v[2]))

        t = translation_from_matrix(at)

        self.transform.translation.setValue((t[0], t[1], t[2]))

        angle, direction, point = rotation_from_matrix(at)
        # print("angle : %f" % angle)
        # print("direction : %s" % str(direction))
        # print("point : %s" % str(point))

        self.transform.rotation.setValue(coin.SbVec3f(direction), angle)
Example #4
	def determine_obj_frame(self):
		if self.detected_artags is not None:

			eng_poses = []
			ar_tfs = []
			for da in self.detected_artags.markers:
				ar_pose = th.aruco_to_pose_stamp(da)

				eng_pose = self.get_engine_pose_from_ps(ar_pose)

				eng_poses.append(eng_pose)
				ar_tfs.append(th.pose_stamp_to_tf(eng_pose))

			ar_tf_sum = np.zeros((4,4))
			for tf in ar_tfs:
				ar_tf_sum = ar_tf_sum + tf/len(ar_tfs)
				
			engine_frame_combined = ar_tf_sum
			
			tran = transformations.translation_from_matrix(engine_frame_combined)
			eul = transformations.euler_from_matrix(engine_frame_combined)

			self.updated_engine_pose = str(tran[0]) + ' ' + str(tran[1]) + ' ' + str(tran[2]) + ' ' + str(eul[0]) + ' ' + str(eul[1]) + ' ' + str(eul[2])
			
			ps = th.tf_to_pose_stamp(engine_frame_combined, 'engine_frame_combined')
			
			th.broadcast_transform(ps, global_frame)
			return engine_frame_combined
		else: 
			return None
Example #5
def back_front_thread():
    global published_transform
    rate = rospy.Rate(10)
    while True:
        time = rospy.Time.now()
        num = 0
        sum_mat = None
        #print("Time diff = %s" %(str(time.to_sec() - front_transform["time"].to_sec()) if front_transform["time"] is not None else "??"))
        if (front_transform["transform"] is not None
                and time.to_sec() - front_transform["time"].to_sec() < 1):
            sum_mat = front_transform["transform"]
            num = 1.0
        # BRS Let's not use averages - seems to cause normalization errors?
        if (back_transform["transform"] is not None
                and time.to_sec() - back_transform["time"].to_sec() < 1):
            sum_mat = (back_transform["transform"] if sum_mat is None
                       else sum_mat + back_transform["transform"])
            num = num + 1.0
        if num > 0:
            transform = sum_mat / num
            published_transform = transform
            trans = tr.translation_from_matrix(transform)
            rot = tr.quaternion_from_matrix(transform)
            r, p, y = tr.euler_from_quaternion(rot)
            rot = tr.quaternion_from_euler(r, p, y)
            br.sendTransform(trans, rot, time, "odom", "map")
        # elif published_transform is not None:
        #     transform = published_transform
        #     trans = tr.translation_from_matrix(transform)
        #     rot = tr.quaternion_from_matrix(transform)
        #     br.sendTransform(trans, rot, time, "odom", "map")
        rate.sleep()
Example #6
    def inframe(self, pose, frame):
        """ Transform a pose from one frame to another one.

        Uses transformation matrices. Could be refactored to use directly
        quaternions.
        """
        pose = self.get(pose)

        if pose['frame'] == "map":
            orig = numpy.identity(4)
        else:
            orig = self._to_mat4(self.get(pose["frame"]))

        if frame == "map":
            dest = numpy.identity(4)
        else:
            dest = numpy.linalg.inv(self._to_mat4(self.get(frame)))

        poseMatrix = self._to_mat4(pose)

        transf = numpy.dot(dest, orig)
        transformedPose = numpy.dot(transf, poseMatrix)

        qx,qy,qz,qw = transformations.quaternion_from_matrix(transformedPose)
        x,y,z = transformations.translation_from_matrix(transformedPose)

        return {"x":x,
                "y":y,
                "z":z,
                "qx":qx,
                "qy":qy,
                "qz":qz,
                "qw":qw,
                "frame": frame}
Example #7
    def draw(self, surface, baseTransform, parameters):
        armLength = tr.translation_matrix([self.length, 0, 0])
        rPitch = tr.rotation_matrix(parameters[0], yaxis)
        rYaw = tr.rotation_matrix(parameters[1], zaxis)
        transform = tr.concatenate_matrices(baseTransform, rYaw, rPitch,
                                            armLength)

        start = tr.translation_from_matrix(baseTransform)
        end = tr.translation_from_matrix(transform)
        # Only use the y and z coords for a quick and dirty orthogonal projection
        pygame.draw.line(surface, (0, 0, 0), start[::2], end[::2], 5)

        # Draw angle constraints
        #pygame.draw.aaline(surface, (100, 0, 0), position, position + to_vector(self.pitch.absolute_min(base_angle), 30))
        #pygame.draw.aaline(surface, (100, 0, 0), position, position + to_vector(self.pitch.absolute_max(base_angle), 30))
        if self.after is not None:
            self.after.draw(surface, transform, parameters[2:])
Example #8
    def error(self, target_pos, baseTransform, parameters):
        transform = np.dot(baseTransform,
                           self.transform(parameters[0], parameters[1]))

        if self.after is None:
            end = tr.translation_from_matrix(transform)
            return distance(target_pos, end)
        else:
            return self.after.error(target_pos, transform, parameters[2:])
Example #9
    def _joints_impl(self, baseTransform, parameters):
        transform = np.dot(baseTransform,
                           self.transform(parameters[0], parameters[1]))
        end = tr.translation_from_matrix(transform)

        if self.after is not None:
            return [end] + self.after._joints_impl(transform, parameters[2:])
        else:
            return [end]
Example #10
def tf_to_dof(T):
    if T.shape == (12, ):
        T.shape = (3, 4)
    if T.shape == (3, 4):
        T = np.concatenate((T, np.array([[0.0, 0.0, 0.0, 1.0]])), axis=0)
    assert T.shape == (4, 4), 'T.shape is {}, not (4,4)'.format(T.shape)
    translation = tr.translation_from_matrix(T)
    # euler_angles = np.array(tr.euler_from_matrix(T[0:3, 0:3]))
    euler_angles = np.array(tr.euler_from_matrix(T))
    return np.concatenate((translation, euler_angles), axis=0)
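A hedged usage sketch for tf_to_dof (assumes `tr`/`np` alias the transformations and numpy modules, as in the snippet); a 3x4 matrix is padded to 4x4 and converted to [x, y, z, ai, aj, ak]:

import numpy as np
import transformations as tr

T = tr.concatenate_matrices(tr.translation_matrix([1.0, 2.0, 3.0]),
                            tr.euler_matrix(0.1, 0.2, 0.3, axes='sxyz'))
print(tf_to_dof(T))         # 4x4 input  -> [1.  2.  3.  0.1 0.2 0.3]
print(tf_to_dof(T[:3, :]))  # 3x4 input is padded with [0, 0, 0, 1] first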
Example #11
def tf_to_quat(T):
    if T.shape == (12, ):
        T.shape = (3, 4)
    if T.shape == (3, 4):
        T = np.concatenate((T, np.array([[0.0, 0.0, 0.0, 1.0]])), axis=0)
    assert T.shape == (4, 4)
    translation = tr.translation_from_matrix(T)
    # quaternions = np.array(tr.quaternion_from_matrix(T[0:3, 0:3]))
    quaternions = np.array(tr.quaternion_from_matrix(T))
    return np.concatenate((translation, quaternions), axis=0)
Example #12
    def error(self, target_pos, baseTransform, parameters):
        armLength = tr.translation_matrix([self.length, 0, 0])
        rPitch = tr.rotation_matrix(parameters[0], yaxis)
        rYaw = tr.rotation_matrix(parameters[1], zaxis)
        transform = tr.concatenate_matrices(baseTransform, rYaw, rPitch,
                                            armLength)

        if self.after is None:
            end = tr.translation_from_matrix(transform)
            return distance(target_pos, end)
        else:
            return self.after.error(target_pos, transform, parameters[2:])
Example #13
def generate_leg(leg: str, params: list):
    front_signs = {'front': '', 'rear': '-'}
    side_signs = {'left': '', 'right': '-'}
    lower_limits = {'left': '0', 'right': str(-pi)}
    upper_limits = {'left': str(pi), 'right': '0'}
    with open('../urdf/leg.urdf.template', 'r') as file:
        leg_template = Template(file.read())

    mappings = {'l_width': 0.02, 'leg': leg}
    front, side = leg.split('_')
    mappings['front_sign'] = front_signs[front]
    mappings['side_sign'] = side_signs[side]
    mappings['lower_limit'] = lower_limits[side]
    mappings['upper_limit'] = upper_limits[side]

    for param in params:
        try:
            d = float(param['d'])
        except ValueError:
            d = 0.0
        try:
            th = float(param['th'])
        except ValueError:
            th = 0.0
        try:
            a = float(param['a'])
        except ValueError:
            a = 0.0
        try:
            al = float(param['al'])
        except ValueError:
            al = 0.0

        tz = translation_matrix((0, 0, d))
        rz = rotation_matrix(th, zaxis)
        tx = translation_matrix((a, 0, 0))
        rx = rotation_matrix(al, xaxis)

        matrix = concatenate_matrices(tz, rz, tx, rx)

        rpy = euler_from_matrix(matrix)
        xyz = translation_from_matrix(matrix)

        name = param['joint']

        mappings[f'{name}_j_xyz'] = '{} {} {}'.format(*xyz)
        mappings[f'{name}_j_rpy'] = '{} {} {}'.format(*rpy)
        mappings[f'{name}_l_xyz'] = '{} 0 0'.format(xyz[0] / 2.)
        mappings[f'{name}_l_rpy'] = '0 0 0'
        mappings[f'{name}_l_len'] = str(a)

    return leg_template.substitute(mappings)
Example #14
def build_joint(index, parent_index):
    matrix = model['bone_original_matrix'][index]
    x, y, z = tf.translation_from_matrix(matrix.T)
    bone_pool.append(
        pmx.Bone(name=model['bone_name'][index],
                 english_name=model['bone_name'][index],
                 position=common.Vector3(-x, y, -z),
                 parent_index=parent_index,
                 layer=0,
                 flag=0))
    bone_pool[-1].setFlag(pmx.BONEFLAG_CAN_ROTATE, True)
    bone_pool[-1].setFlag(pmx.BONEFLAG_IS_VISIBLE, True)
    bone_pool[-1].setFlag(pmx.BONEFLAG_CAN_MANIPULATE, True)
Example #15
def matrix_to_pose(matrix):
    pose = Pose()
    trans = tf.translation_from_matrix(matrix)
    quat = tf.quaternion_from_matrix(matrix)

    pose.position.x = trans[0]
    pose.position.y = trans[1]
    pose.position.z = trans[2]
    pose.orientation.w = quat[0]
    pose.orientation.x = quat[1]
    pose.orientation.y = quat[2]
    pose.orientation.z = quat[3]

    return pose
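A minimal sketch for matrix_to_pose (hedged: requires geometry_msgs). Note that transformations returns quaternions in [w, x, y, z] order, which is exactly what the field mapping above relies on:

import transformations as tf
from geometry_msgs.msg import Pose

pose = matrix_to_pose(tf.translation_matrix([0.1, 0.2, 0.3]))
print(pose.position.x, pose.position.z, pose.orientation.w)  # 0.1 0.3 1.0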
Example #16
def errors(reference, target):
	if len(reference) != len(target):
		raise ValueError("reference and target should be of same length! len(reference): %d, len(target): %d" % (len(reference), len(target)) )

	F_ref = to_homogeneous(reference)
	F_tar = to_homogeneous(target)

	F_ref_inv = inverse_matrix(F_ref)

	T_tar_ref = concatenate_matrices(F_ref_inv, F_tar)

	trans = translation_from_matrix(T_tar_ref)
	ang, foo, bar = rotation_from_matrix(T_tar_ref)

	return (numpy.sum( numpy.square(trans) ), ang*ang)
Example #17
def scale_and_translate(Tlistgroundtruth, Tlistartoolkit):
    ground_truth_pointcloud = np.asarray([utils.apply_transform(T, np.zeros(3))
                                          for T in Tlistgroundtruth])
    artoolkit_pointcloud = np.asarray([utils.apply_transform(T, np.zeros(3))
                                       for T in Tlistartoolkit])
    Tscale = tf.affine_matrix_from_points(artoolkit_pointcloud.T,
                                          ground_truth_pointcloud.T,
                                          shear=False,
                                          scale=True)
    #print("Scaling by {0}".format(tf.scale_from_matrix(Tscale)[0]))
    try:
        print("translation by {0}".format(tf.translation_from_matrix(Tscale)))
        print("rotation by {0}".format(tf.rotation_from_matrix(Tscale)[0]))
    except ValueError:
        pass
    Tlistartoolkit = [tf.concatenate_matrices(Tscale, T) for T in Tlistartoolkit]
    return Tlistartoolkit
Example #18
    def publish_transform(src: str, dst: str, pose: np.ndarray):
        trans = tf.translation_from_matrix(pose)
        rot = tf.quaternion_from_matrix(pose)

        transform = TransformStamped()
        transform.header.stamp = header_stamp(clock.now())
        transform.header.frame_id = src
        transform.child_frame_id = dst
        transform.transform.translation.x = trans[0]
        transform.transform.translation.y = trans[1]
        transform.transform.translation.z = trans[2]
        transform.transform.rotation.w = rot[0]
        transform.transform.rotation.x = rot[1]
        transform.transform.rotation.y = rot[2]
        transform.transform.rotation.z = rot[3]

        msg = TFMessage(transforms=[transform])
        broadcaster.publish(msg)
Example #19
def from_homogeneous(M,n):
	if n != 7 and n != 3:
		raise ValueError("Only poses with 3 or 7 elements supported! this one has %d" % n)

	trans = translation_from_matrix(M)
	if n == 7:
		quat = quaternion_from_matrix(M)
		out = list(trans)
		out.extend(quaternion_imag(quat))
		out.append(quaternion_real(quat))
	else:
		ang, foo, bar = rotation_from_matrix(M)
		out = list(trans[0:2])
		out.append(ang)

	if len(out) != n:
		raise ValueError("Wrong output length: %d should be %d" %(len(out), n))

	return out
Example #20
    def inframe(self, pose, frame):
        """ Transform a pose from one frame to another one.

        Uses transformation matrices. Could be refactored to use directly
        quaternions.
        """
        pose = self.get(pose)

        if pose["frame"] == frame:
            return pose

        if pose['frame'] == "map":
            orig = numpy.identity(4)
        else:
            orig = self._to_mat4(self.get(pose["frame"]))

        if frame == "map":
            dest = numpy.identity(4)
        else:
            dest = numpy.linalg.inv(self._to_mat4(self.get(frame)))

        pose_matrix = self._to_mat4(pose)

        transf = numpy.dot(dest, orig)
        transformedPose = numpy.dot(transf, pose_matrix)

        qx, qy, qz, qw = transformations.quaternion_from_matrix(
            transformedPose)
        x, y, z = transformations.translation_from_matrix(transformedPose)

        return {
            "x": float(x),
            "y": float(y),
            "z": float(z),
            "qx": float(qx),
            "qy": float(qy),
            "qz": float(qz),
            "qw": float(qw),
            "frame": frame
        }
Example #21
    def calc_legs(self, pose: ndarray, base_pose: ndarray,
                  left_side: bool) -> Dict[str, float]:
        if len(self.base_to_hip) == 0:
            self.init_transforms()
        names = []
        positions = []

        legs_positions = self.triangle(pose, left_side)
        legs = ['l1', 'r2', 'l3'] if left_side else ['r1', 'l2', 'r3']

        for leg, position in zip(legs, legs_positions):
            base_to_hip = self.base_to_hip[leg]
            relative = tf.concatenate_matrices(tf.inverse_matrix(base_to_hip),
                                               tf.inverse_matrix(base_pose),
                                               position)
            point = tf.translation_from_matrix(relative)
            a, b, g = self.inverse_kinematics(*point)
            positions += [a, b, g]
            names += [
                f'coxa_joint_{leg}', f'femur_joint_{leg}', f'tibia_joint_{leg}'
            ]

        return {name: position for name, position in zip(names, positions)}
Example #22
def process_markers(markers):
    global listener, br, marker_trans_mat

    sum_mat = None
    num = 0
    t_latest = rospy.Time(0)
    for marker in markers.markers:
        print("Seeing marker %s" % marker.id)
        marker_link = '/Marker' + str(marker.id) + "__link"
        marker_id = "/Marker" + str(marker.id)
        tw = listener.getLatestCommonTime(marker_link, 'world')
        to = listener.getLatestCommonTime(marker_id, 'odom')
        t = tw if tw > to else to
        t_latest = t if t > t_latest else t_latest
        (trans_w, rot_w) = listener.lookupTransform(marker_link, 'world', tw)
        (trans_o, rot_o) = listener.lookupTransform(marker_id, 'odom', to)

        w_mat = numpy.dot(tr.translation_matrix(trans_w),
                          tr.quaternion_matrix(rot_w))
        t_o = tr.concatenate_matrices(tr.translation_matrix(trans_o),
                                      tr.quaternion_matrix(rot_o))
        # Need to do the transform equivalent to 0.25 0 0 0 0.5 -0.5 -0.5 0.5 on marker_id
        #t_o = numpy.dot(t_o, marker_trans_mat);
        o_mat_i = tr.inverse_matrix(t_o)
        mat3 = numpy.dot(w_mat, o_mat_i)
        mat3 = tr.inverse_matrix(mat3)
        if sum_mat is None:
            sum_mat = mat3
        else:
            sum_mat = sum_mat + mat3
        num = num + 1

    avg_mat = sum_mat / num

    trans = tr.translation_from_matrix(avg_mat)
    rot = tr.quaternion_from_matrix(avg_mat)
    br.sendTransform(trans, rot, t_latest, "odom", "map")
Example #23
def marker_hover(vehicle, marker_tracker, rs=None, hover_alt=None, debug=0):

    if hover_alt is None:
        hover_alt = vehicle.location.global_relative_frame.alt

    # Set the PID setpoint to the desired hover altitude
    # Negative because of NED coord. system
    PID_Z.setpoint = -hover_alt

    # Queue to contain running average for Aruco markers
    pose_queue = np.zeros((RUNNING_AVG_LENGTH, 3), dtype=float)
    count = 0

    if debug > 0:
        vehicle_pose_file = file_utils.open_file(VEHICLE_POSE_FILE)

    # Hover until manual override
    start_time = time.time()
    time_found = time.time()
    print("Tracking marker...")

    while dronekit_utils.is_guided(vehicle):

        # Track marker
        marker_tracker.track_marker(
            alt=vehicle.location.global_relative_frame.alt)

        if marker_tracker.is_marker_found():

            # Note the most recent time marker was found
            time_found = time.time()

            # Get the marker pose relative to the camera origin
            marker_pose = marker_tracker.get_pose()

            # Transform the marker pose into UAV body frame (NED)
            marker_pose_body_ref = marker_ref_to_body_ref(
                marker_pose, marker_tracker, vehicle)

            # PID control; input is the UAV pose relative to the marker (hence the negatives)
            control_pose = np.array([[
                PID_X(-marker_pose_body_ref[0]),
                PID_Y(-marker_pose_body_ref[1]),
                PID_Z(-marker_pose_body_ref[2])
            ]])

            # Keep a running average if using Aruco markers
            # Yellow marker processing is too slow to use a running average
            if marker_tracker.get_marker_type() == "aruco":

                pose_queue[count % len(pose_queue)] = control_pose
                count += 1

                # Take the average
                if count < RUNNING_AVG_LENGTH - 1:
                    pose_avg = np.sum(pose_queue, axis=0) / count
                else:
                    pose_avg = np.average(pose_queue, axis=0)
            else:

                # Yellow marker
                pose_avg = control_pose[0]

            # Approach the marker at the current altitude until above the marker, then navigate to the hover altitude.
            if (np.abs(pose_avg[0]) < HOVER_THRESHOLD
                    and np.abs(pose_avg[1]) < HOVER_THRESHOLD):
                #print("Marker Hover")
                alt_hover(vehicle, pose_avg)
            else:
                #print("Approaching Marker")
                marker_approach(vehicle, pose_avg)

            if debug > 0 and rs is not None:
                data = rs.read()
                rs_pose = tf.quaternion_matrix([
                    data.rotation.w, data.rotation.x, data.rotation.y,
                    data.rotation.z
                ])

                rs_pose[0][3] = data.translation.x
                rs_pose[1][3] = data.translation.y
                rs_pose[2][3] = data.translation.z
                vehicle_pose = realsense_localization.rs_to_body(rs_pose)
                vehicle_trans = np.array(
                    tf.translation_from_matrix(vehicle_pose))

                vehicle_pose_file.write("{} {} {} {} 0 0 0 0\n".format(
                    time.time() - start_time, vehicle_trans[0],
                    vehicle_trans[1], vehicle_trans[2]))

            if debug > 2:
                print("Nav command: {} {} {}".format(pose_avg[0], pose_avg[1],
                                                     pose_avg[2]))
        else:
            if time.time() - time_found > 5:
                return False

        # Maintain frequency of sending commands
        marker_tracker.wait()
Example #24
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)
                    H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system('clear') # This helps in displaying the messages to be more readable
                    progress("DEBUG: Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
                    progress("DEBUG: NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
                    progress("DEBUG: Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z])))
                    progress("DEBUG: NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody))))

except Exception as e:
    progress(e)

except:
    send_msg_to_gcs('ERROR IN SCRIPT')  
    progress("Unexpected error: %s" % sys.exc_info()[0])

finally:
    progress('Closing the script...')
    # start a timer in case stopping everything nicely doesn't work.
    signal.setitimer(signal.ITIMER_REAL, 5)  # seconds...
    pipe.stop()
    mavlink_thread_should_exit = True
    mavlink_thread.join()
Example #25
def matrix_dist(a: Matrix, b: Matrix):
    return dist(tf.translation_from_matrix(a), tf.translation_from_matrix(b))
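A minimal sketch for matrix_dist, assuming `dist` is math.dist (or an equivalent Euclidean distance helper) and `Matrix` is just an alias for a 4x4 numpy array:

from math import dist
import transformations as tf

a = tf.translation_matrix([0.0, 0.0, 0.0])
b = tf.translation_matrix([3.0, 4.0, 0.0])
print(matrix_dist(a, b))  # 5.0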
Example #26
    def __init__(self, cfg, base_dir, sequences, frames=None):
        self.cfg = cfg
        self.sequences = sequences
        self.curr_batch_sequence = 0
        self.current_batch = 0

        if (self.cfg.bidir_aug == True) and (self.cfg.batch_size % 2 != 0):
            raise ValueError("Batch size must be even")

        if self.cfg.data_type != "cam" and self.cfg.data_type != "lidar":
            raise ValueError("lidar or camera for data type!")

        frames_data_type = np.uint8
        if self.cfg.data_type == "lidar":
            frames_data_type = np.float16

        if not frames:
            frames = [None] * len(sequences)

        self.input_frames = {}
        self.poses = {}

        # Mirror in y-z plane
        self.H = np.identity(3, dtype=np.float32)
        self.H[1][1] = -1.0

        self.se3_ground_truth = {}
        self.se3_mirror_ground_truth = {}
        self.fc_ground_truth = {}
        self.fc_reverse_ground_truth = {}
        self.fc_mirror_ground_truth = {}
        self.fc_reverse_mirror_ground_truth = {}

        self.imu_measurements = {}
        self.imu_measurements_mirror = {}
        self.imu_measurements_reverse = {}
        self.imu_measurements_reverse_mirror = {}

        # This keeps track of the number of batches in each sequence
        self.batch_counts = {}
        # this holds the batch ids for each sequence. It is randomized and the order determines the order in
        # which the batches are used
        self.batch_order = {}
        # this keeps track of the starting offsets in the input frames for batch id 0 in each sequence
        self.batch_offsets = {}
        # contains batch_cnt counters, each sequence is represented by its index repeated for
        # however many batches are in that sequence
        self.sequence_ordering = None
        # contains batch_size counters. Keeps track of how many batches from each sequence
        # have already been used for the current epoch
        self.sequence_batch = {}

        self.batch_cnt = 0
        self.total_frames = 0

        for i_seq, seq in enumerate(sequences):
            seq_loader = DataLoader(self.cfg, base_dir, seq, frames=frames[i_seq])
            num_frames = seq_loader.get_num_frames()

            self.input_frames[seq] = np.zeros(
                    [num_frames, self.cfg.input_channels, self.cfg.input_height, self.cfg.input_width],
                    dtype=frames_data_type)
            self.poses[seq] = seq_loader.get_poses_in_corresponding_frame()
            self.se3_ground_truth[seq] = np.zeros([num_frames, 7], dtype=np.float32)
            self.se3_mirror_ground_truth[seq] = np.zeros([num_frames, 7], dtype=np.float32)
            self.fc_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_reverse_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_mirror_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_reverse_mirror_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)

            self.imu_measurements[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_mirror[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_reverse[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_reverse_mirror[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)

            for i_img in range(num_frames):
                if i_img % 100 == 0:
                    tools.printf("Loading sequence %s %.1f%% " % (seq, (i_img / num_frames) * 100))

                img = seq_loader.get_img(i_img)
                self.input_frames[seq][i_img, :] = img

                # now convert all the ground truth from 4x4 to xyz + quat, this is after the SE3 layer
                translation = transformations.translation_from_matrix(self.poses[seq][i_img])
                quat = transformations.quaternion_from_matrix(self.poses[seq][i_img])

                mirror_pose = np.identity(4, dtype=np.float32)
                mirror_pose[0:3, 0:3] = np.dot(self.H, np.dot(self.poses[seq][i_img][0:3, 0:3], self.H))
                mirror_pose[0:3, 3] = np.dot(self.H, translation)

                mirror_quat = transformations.quaternion_from_matrix(mirror_pose[0:3, 0:3])
                self.se3_ground_truth[seq][i_img] = np.concatenate([translation, quat])
                self.se3_mirror_ground_truth[seq][i_img] = np.concatenate([mirror_pose[0:3, 3], mirror_quat])

                # relative transformation labels
                if i_img + 1 < num_frames:
                    mirror_pose_next = np.identity(4, dtype=np.float32)
                    mirror_pose_next[0:3, 0:3] = np.dot(self.H, np.dot(self.poses[seq][i_img + 1][0:3, 0:3], self.H))
                    trans_next = transformations.translation_from_matrix(self.poses[seq][i_img + 1])
                    mirror_pose_next[0:3, 3] = np.dot(self.H, trans_next)

                    m_forward = np.dot(np.linalg.inv(self.poses[seq][i_img]), self.poses[seq][i_img + 1])
                    m_forward_mirror = np.dot(np.linalg.inv(mirror_pose), mirror_pose_next)
                    m_reverse = np.dot(np.linalg.inv(self.poses[seq][i_img + 1]), self.poses[seq][i_img])
                    m_reverse_mirror = np.dot(np.linalg.inv(mirror_pose_next), mirror_pose)

                    trans_forward = transformations.translation_from_matrix(m_forward)
                    ypr_forward = transformations.euler_from_matrix(m_forward, axes="rzyx")
                    trans_forward_mirror = transformations.translation_from_matrix(m_forward_mirror)
                    ypr_forward_mirror = transformations.euler_from_matrix(m_forward_mirror, axes="rzyx")
                    trans_reverse = transformations.translation_from_matrix(m_reverse)
                    ypr_reverse = transformations.euler_from_matrix(m_reverse, axes="rzyx")
                    trans_reverse_mirror = transformations.translation_from_matrix(m_reverse_mirror)
                    ypr_reverse_mirror = transformations.euler_from_matrix(m_reverse_mirror, axes="rzyx")

                    self.fc_ground_truth[seq][i_img] = np.concatenate([trans_forward, ypr_forward])
                    self.fc_mirror_ground_truth[seq][i_img] = np.concatenate([trans_forward_mirror, ypr_forward_mirror])
                    self.fc_reverse_ground_truth[seq][i_img] = np.concatenate([trans_reverse, ypr_reverse])
                    self.fc_reverse_mirror_ground_truth[seq][i_img] = np.concatenate(
                            [trans_reverse_mirror, ypr_reverse_mirror])

                    get_imu = seq_loader.get_averaged_imu_in_corresponding_frame
                    self.imu_measurements[seq][i_img] = get_imu(i_img, mirror=False, reverse=False)
                    self.imu_measurements_mirror[seq][i_img] = get_imu(i_img, mirror=True, reverse=False)
                    self.imu_measurements_reverse[seq][i_img] = get_imu(i_img, mirror=False, reverse=True)
                    self.imu_measurements_reverse_mirror[seq][i_img] = get_imu(i_img, mirror=True, reverse=True)

            # How many examples the sequence contains, were it to be processed using a batch size of 1
            sequence_examples = np.ceil((num_frames - self.cfg.timesteps) / self.cfg.sequence_stride).astype(
                    np.int32)
            # how many batches in the sequence, cutting off any extra
            if self.cfg.bidir_aug:
                self.batch_counts[seq] = np.floor(2 * sequence_examples / self.cfg.batch_size).astype(np.int32)
            else:
                self.batch_counts[seq] = np.floor(sequence_examples / self.cfg.batch_size).astype(np.int32)

            self.batch_cnt += self.batch_counts[seq].astype(np.int32)
            self.batch_order[seq] = np.arange(0, self.batch_counts[seq], dtype=np.uint32)
            self.batch_offsets[seq] = np.zeros([self.cfg.batch_size], dtype=np.uint32)
            self.sequence_batch[seq] = 0

            self.total_frames += num_frames

        tools.printf("All data loaded, batch_size=%d, timesteps=%d, num_batches=%d" % (
            self.cfg.batch_size, self.cfg.timesteps, self.batch_cnt))

        gc.collect()
        self.sequence_ordering = np.zeros([self.batch_cnt], dtype=np.uint16)
        self.set_batch_offsets()
        self.next_epoch(False)
Example #27
def translation_from_matrix(m):
    return tf.translation_from_matrix(m)
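For reference, translation_from_matrix simply returns the first three entries of the last column of a 4x4 homogeneous matrix:

import numpy as np
import transformations as tf

M = tf.translation_matrix([1.0, 2.0, 3.0])
print(translation_from_matrix(M))        # [1. 2. 3.]
print(np.allclose(M[:3, 3], [1, 2, 3]))  # True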
Example #28
def runVIO(veh, callback):
    global vehicle, body_offset_enabled, body_offset_x, body_offset_y, body_offset_z
    global compass_enabled, scale_calib_enable, scale_factor, camera_orientation, pipe, is_vehicle_connected
    global home_lat, home_lon, home_alt, auto_set_ekf_home_enable, debug_enable
    global H_aeroRef_T265Ref, H_T265body_aeroBody, data, current_confidence, H_aeroRef_aeroBody, heading_north_yaw
    global current_time
    print("runVIO() started")
    vehicle = veh
    is_vehicle_connected = True
    if body_offset_enabled == 1:
        print("INFO: Using camera position offset: Enabled, x y z is",
              body_offset_x, body_offset_y, body_offset_z)
    else:
        print("INFO: Using camera position offset: Disabled")

    if compass_enabled == 1:
        print(
            "INFO: Using compass: Enabled. Heading will be aligned to north.")
    else:
        print("INFO: Using compass: Disabled")

    if scale_calib_enable == True:
        print(
            "\nINFO: SCALE CALIBRATION PROCESS. DO NOT RUN DURING FLIGHT.\nINFO: TYPE IN NEW SCALE IN FLOATING POINT FORMAT\n"
        )
    else:
        if scale_factor == 1.0:
            print("INFO: Using default scale factor", scale_factor)
        else:
            print("INFO: Using scale factor", scale_factor)

    print("INFO: Using camera orientation", camera_orientation)

    # Transformation to convert different camera orientations to NED convention. Replace camera_orientation_default for your configuration.
    #   0: Forward, USB port to the right
    #   1: Downfacing, USB port to the right
    #   2: Forward, 45 degree tilted down
    # Important note for downfacing camera: you need to tilt the vehicle's nose up a little - not flat - before you run the script, otherwise the initial yaw will be randomized, read here for more details: https://github.com/IntelRealSense/librealsense/issues/4080. Tilt the vehicle to any other sides and the yaw might not be as stable.

    if camera_orientation == 0:
        # Forward, USB port to the right
        H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0],
                                      [0, -1, 0, 0], [0, 0, 0, 1]])
        H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
    elif camera_orientation == 1:
        # Downfacing, USB port to the right
        H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0],
                                      [0, -1, 0, 0], [0, 0, 0, 1]])
        H_T265body_aeroBody = np.array([[0, 1, 0, 0], [1, 0, 0, 0],
                                        [0, 0, -1, 0], [0, 0, 0, 1]])
    elif camera_orientation == 2:
        # 45degree forward
        H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0],
                                      [0, -1, 0, 0], [0, 0, 0, 1]])
        H_T265body_aeroBody = np.array([[0., 1., 0., 0.],
                                        [-0.70710676, -0., -0.70710676, -0.],
                                        [-0.70710676, 0., 0.70710676, 0.],
                                        [0., 0., 0., 1.]])
    else:
        # Default is facing forward, USB port to the right
        H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0],
                                      [0, -1, 0, 0], [0, 0, 0, 1]])
        H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)

    if auto_set_ekf_home_enable == False:
        print("INFO: Automatically set EKF home: DISABLED")
    else:
        print("INFO: Automatically set EKF home: Enabled")

    if not debug_enable:
        debug_enable = 0
    else:
        debug_enable = 1
        np.set_printoptions(precision=4,
                            suppress=True)  # Format output on terminal
        print("INFO: Debug messages enabled.")

    #######################################
    # Main code starts here
    #######################################

    print("INFO: Connecting to Realsense camera.")
    realsense_connect()
    print("INFO: Realsense connected.")

    # Listen to the mavlink messages that will be used as trigger to set EKF home automatically
    if auto_set_ekf_home_enable == True:
        vehicle.add_message_listener('STATUSTEXT', statustext_callback)

    if compass_enabled == 1:
        # Listen to the attitude data in aeronautical frame
        vehicle.add_message_listener('ATTITUDE', att_msg_callback)

    data = None
    current_confidence = None
    H_aeroRef_aeroBody = None
    heading_north_yaw = None

    # Send MAVlink messages in the background
    sched = BackgroundScheduler()

    sched.add_job(send_vision_position_message,
                  'interval',
                  seconds=1 / vision_msg_hz)
    sched.add_job(send_confidence_level_dummy_message,
                  'interval',
                  seconds=1 / confidence_msg_hz)

    # For scale calibration, we will use a thread to monitor user input
    if scale_calib_enable == True:
        scale_update_thread = threading.Thread(target=scale_update)
        scale_update_thread.daemon = True
        scale_update_thread.start()

    sched.start()

    if compass_enabled == 1:
        # Wait a short while for yaw to be correctly initiated
        time.sleep(1)

    print("INFO: Sending VISION_POSITION_ESTIMATE messages to FCU.")
    callback()
    try:
        while True:
            # Wait for the next set of frames from the camera
            frames = pipe.wait_for_frames()
            # Fetch pose frame
            pose = frames.get_pose_frame()
            if pose:
                # Store the timestamp for MAVLink messages
                current_time = int(round(time.time() * 1000000))

                # Pose data consists of translation and rotation
                data = pose.get_pose_data()

                # In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!
                H_T265Ref_T265body = tf.quaternion_matrix([
                    data.rotation.w, data.rotation.x, data.rotation.y,
                    data.rotation.z
                ])
                H_T265Ref_T265body[0][3] = data.translation.x * scale_factor
                H_T265Ref_T265body[1][3] = data.translation.y * scale_factor
                H_T265Ref_T265body[2][3] = data.translation.z * scale_factor

                # Transform to aeronautic coordinates (body AND reference frame!)
                H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(
                    H_T265Ref_T265body.dot(H_T265body_aeroBody))

                # Take offsets from body's center of gravity (or IMU) to camera's origin into account
                if body_offset_enabled == 1:
                    H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_body_camera[0][3] = body_offset_x
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)
                    H_aeroRef_aeroBody = H_body_camera.dot(
                        H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot(
                        tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system(
                        'clear'
                    )  # This helps in displaying the messages to be more readable
                    print("DEBUG: Raw RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_T265Ref_T265body, 'sxyz')) *
                        180 / m.pi))
                    print("DEBUG: NED RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz')) *
                        180 / m.pi))
                    print("DEBUG: Raw pos xyz : {}".format(
                        np.array([
                            data.translation.x, data.translation.y,
                            data.translation.z
                        ])))
                    print("DEBUG: NED pos xyz : {}".format(
                        np.array(
                            tf.translation_from_matrix(H_aeroRef_aeroBody))))

    except KeyboardInterrupt:
        print("T265: INFO: KeyboardInterrupt has been caught. Cleaning up...")

    finally:
        pipe.stop()
        print("INFO: Realsense pipeline and vehicle object closed.")
        sys.exit()
Example #29
	def position(self):
		'''
		@brief Extract branch origin position from the transformation matrix
		@return a 3D position vector
		'''
		return tr.translation_from_matrix(self.frame)
Example #30
def marker_hover(vehicle, marker_tracker, rs=None):

    pid_x = PID(1, 0, 0, setpoint=0)
    pid_y = PID(1, 0, 0, setpoint=0)
    pid_z = PID(1, 0, 0, setpoint=-0.5)

    pid_x.sample_time = 0.01
    pid_y.sample_time = 0.01
    pid_z.sample_time = 0.01

    # Hover until manual override
    pose_queue = np.zeros((RUNNING_AVG_LENGTH, 3), dtype=float)
    count = 0

    if args["debug"] > 0:
        vehicle_pose_file = file_utils.open_file(VEHICLE_POSE_FILE)

    start_time = time.time()
    print("Tracking marker...")
    while True:
        # while vehicle.mode == VehicleMode("GUIDED"):

        # Track marker
        marker_tracker.track_marker(
            alt=vehicle.location.global_relative_frame.alt)

        if marker_tracker.is_marker_found():

            marker_pose = marker_tracker.get_pose()

            # Transform pose to UAV body frame; proportional gain
            marker_pose_body_ref = marker_ref_to_body_ref(marker_pose)

            # Pid response, input is UAV rel. to marker
            control_pose = np.array([[
                pid_x(-marker_pose_body_ref[0]),
                pid_y(-marker_pose_body_ref[1]),
                pid_z(-marker_pose_body_ref[2])
            ]])

            # Keep a queue of recent marker poses
            pose_queue[count % len(pose_queue)] = control_pose
            count += 1

            if count < RUNNING_AVG_LENGTH - 1:
                pose_avg = np.sum(pose_queue, axis=0) / count
            else:
                pose_avg = np.average(pose_queue, axis=0)

            # Send position command to the vehicle
            dronekit_utils.goto_position_target_body_offset_ned(
                vehicle,
                forward=pose_avg[0],
                right=pose_avg[1],
                down=-HOVER_ALTITUDE - pose_avg[2])

            if args["debug"] > 0 and rs is not None:
                data = rs.read()
                rs_pose = tf.quaternion_matrix([
                    data.rotation.w, data.rotation.x, data.rotation.y,
                    data.rotation.z
                ])

                rs_pose[0][3] = data.translation.x
                rs_pose[1][3] = data.translation.y
                rs_pose[2][3] = data.translation.z
                vehicle_pose = realsense_localization.rs_to_body(rs_pose)
                vehicle_trans = np.array(
                    tf.translation_from_matrix(vehicle_pose))

                vehicle_pose_file.write("{} {} {} {} 0 0 0 0\n".format(
                    time.time() - start_time, vehicle_trans[0],
                    vehicle_trans[1], vehicle_trans[2]))

            if args["debug"] > 2:
                print("Nav command: {} {} {}".format(pose_avg[0], pose_avg[1],
                                                     pose_avg[2]))

        # Maintain frequency of sending commands
        marker_tracker.wait()
Example #31
def saveiqe(model, filename):
    model['bone_translate'] = []
    model['bone_rotation'] = []
    bone_count = len(model['bone_parent'])
    for i in range(bone_count):
        matrix = tf.identity_matrix()
        parent_node = model['bone_parent'][i]
        if parent_node >= 0:
            matrix = model['bone_original_matrix'][parent_node]
        matrix = np.dot(model['bone_original_matrix'][i],
                        np.linalg.inv(matrix))
        model['bone_translate'].append(tf.translation_from_matrix(matrix.T))
        model['bone_rotation'].append(tf.quaternion_from_matrix(matrix.T))
    with open(filename + '.iqe', 'w') as f:
        f.write('# Inter-Quake Export\n')
        f.write('\n')
        parent_child_dict = {}
        old2new = {}
        index_pool = [-1]
        for i in range(len(model['bone_parent'])):
            p = model['bone_parent'][i]
            if p not in parent_child_dict:
                parent_child_dict[p] = []
            parent_child_dict[p].append(i)

        def print_joint(index, parent_index):
            f.write('joint "{}" {}\n'.format(model['bone_name'][index],
                                             parent_index))
            x, y, z = model['bone_translate'][index]
            r, i, j, k = model['bone_rotation'][index]
            f.write('pq {} {} {} {} {} {} {}\n'.format(-x, y, z, i, -j, -k, r))

        def deep_first_search(index, index_pool, parent_index):
            index_pool[0] += 1
            current_node_index = index_pool[0]
            old2new[index] = current_node_index
            print_joint(index, parent_index)
            if index in parent_child_dict:
                for child in parent_child_dict[index]:
                    deep_first_search(child, index_pool, current_node_index)

        deep_first_search(model['bone_parent'].index(-1), index_pool, -1)

        f.write('\n')

        mesh_vertex_counter = 0
        mesh_face_counter = 0

        for mesh_i in range(len(model['mesh'])):

            mesh_vertex_counter_end = mesh_vertex_counter + model['mesh'][mesh_i][0]
            mesh_face_counter_end = mesh_face_counter + model['mesh'][mesh_i][1]

            f.write('mesh mesh{}\n'.format(mesh_i))
            f.write('material "mesh{}Mat"\n'.format(mesh_i))
            f.write('\n')

            for i in range(mesh_vertex_counter, mesh_vertex_counter_end):
                x, y, z = model['position'][i]
                f.write('vp {} {} {}\n'.format(-x, y, z))
            f.write('\n')

            for i in range(mesh_vertex_counter, mesh_vertex_counter_end):
                x, y, z = model['normal'][i]
                f.write('vn {} {} {}\n'.format(-x, y, z))
            f.write('\n')

            for i in range(mesh_vertex_counter, mesh_vertex_counter_end):
                u, v = model['uv'][i]
                f.write('vt {} {}\n'.format(u, 1 - v))
            f.write('\n')

            for i in range(mesh_vertex_counter, mesh_vertex_counter_end):
                f.write('vb')
                for j in range(4):
                    v = model['vertex_joint'][i][j]
                    if v == 255:
                        break
                    v = old2new[v]
                    w = model['vertex_joint_weight'][i][j]
                    f.write(' {} {}'.format(v, w))
                f.write('\n')
            f.write('\n')

            for i in range(mesh_face_counter, mesh_face_counter_end):
                v1, v2, v3 = model['face'][i]
                v1 -= mesh_vertex_counter
                v2 -= mesh_vertex_counter
                v3 -= mesh_vertex_counter
                f.write('fm {} {} {}\n'.format(v3, v1, v2))
            f.write('\n')

            mesh_vertex_counter = mesh_vertex_counter_end
            mesh_face_counter = mesh_face_counter_end
Example #32
    def __init__(self, config, base_dir, sequences, frames=None):
        self.truncated_seq_sizes = []
        self.end_of_sequence_indices = []
        self.curr_batch_idx = 0
        self.unused_batch_indices = []
        self.cfg = config

        if not frames:
            frames = [None] * len(sequences)

        total_num_examples = 0

        for i_seq, seq in enumerate(sequences):
            seq_data = pykitti.odometry(base_dir, seq, frames=frames[i_seq])
            num_frames = len(seq_data.poses)

            # less than timesteps number of frames will be discarded
            num_examples = (num_frames - 1) // self.cfg.timesteps
            self.truncated_seq_sizes.append(num_examples * self.cfg.timesteps + 1)
            total_num_examples += num_examples

        # less than batch size number of examples will be discarded
        self.total_batch_count = total_num_examples // self.cfg.batch_size
        # +1 adjusts for the extra image in the last time step
        total_timesteps = self.total_batch_count * (self.cfg.timesteps + 1)

        # since some examples will be discarded, readjust the truncated_seq_sizes
        deleted_frames = (total_num_examples - self.total_batch_count * self.cfg.batch_size) * self.cfg.timesteps
        for i in range(len(self.truncated_seq_sizes) - 1, -1, -1):
            if self.truncated_seq_sizes[i] > deleted_frames:
                self.truncated_seq_sizes[i] -= deleted_frames
                break
            else:
                # subtract this sequence's frames before zeroing it out
                deleted_frames -= self.truncated_seq_sizes[i]
                self.truncated_seq_sizes[i] = 0

        # for storing all training
        self.input_frames = np.zeros(
            [total_timesteps, self.cfg.batch_size, self.cfg.input_channels, self.cfg.input_height,
             self.cfg.input_width],
            dtype=np.uint8)
        poses_wrt_g = np.zeros([total_timesteps, self.cfg.batch_size, 4, 4], dtype=np.float32)  # ground truth poses

        num_image_loaded = 0
        for i_seq, seq in enumerate(sequences):
            seq_data = pykitti.odometry(base_dir, seq, frames=frames[i_seq])
            length = self.truncated_seq_sizes[i_seq]

            i = -1
            j = -1
            for i_img in range(length):

                if i_img % 100 == 0:
                    tools.printf("Loading sequence %s %.1f%% " % (seq, (i_img / length) * 100))

                i = num_image_loaded % total_timesteps
                j = num_image_loaded // total_timesteps

                # swap axis to channels first
                img = seq_data.get_cam2(i_img)
                img = img.resize((self.cfg.input_width, self.cfg.input_height))
                img = np.array(img)
                if img.shape[2] == 3:  # convert to bgr if colored image
                    img = img[..., [2, 1, 0]]
                img = np.reshape(img, [img.shape[0], img.shape[1], self.cfg.input_channels])
                img = np.moveaxis(np.array(img), 2, 0)
                pose = seq_data.poses[i_img]

                self.input_frames[i, j] = img
                poses_wrt_g[i, j] = pose
                num_image_loaded += 1

                # insert the extra image at the end of the batch, note the number of
                # frames per batch of batch size 1 is timesteps + 1
                if i_img != 0 and i_img != length - 1 and i_img % self.cfg.timesteps == 0:
                    i = num_image_loaded % total_timesteps
                    j = num_image_loaded // total_timesteps
                    self.input_frames[i, j] = img
                    poses_wrt_g[i, j] = pose

                    num_image_loaded += 1

            # If the batch has the last frame in a sequence, the following frame
            # in the next batch must have a reset for lstm state
            self.end_of_sequence_indices.append((i + 1, j,))

        # make sure the all of examples are fully loaded, just to detect bugs
        assert (num_image_loaded == total_timesteps * self.cfg.batch_size)

        # now convert all the ground truth from 4x4 to xyz + quat, this is after the SE3 layer
        self.se3_ground_truth = np.zeros([total_timesteps, self.cfg.batch_size, 7], dtype=np.float32)
        for i in range(0, self.se3_ground_truth.shape[0]):
            for j in range(0, self.se3_ground_truth.shape[1]):
                translation = transformations.translation_from_matrix(poses_wrt_g[i, j])
                quat = transformations.quaternion_from_matrix(poses_wrt_g[i, j])
                self.se3_ground_truth[i, j] = np.concatenate([translation, quat])

        # extract the relative transformation between frames after the fully connected layer
        self.fc_ground_truth = np.zeros([total_timesteps, self.cfg.batch_size, 6], dtype=np.float32)
        # going through rows, then columns
        for i in range(0, self.fc_ground_truth.shape[0]):
            for j in range(0, self.fc_ground_truth.shape[1]):

                # always identity at the beginning of the sequence
                if i % (self.cfg.timesteps + 1) == 0:
                    m = transformations.identity_matrix()
                else:
                    m = np.dot(np.linalg.inv(poses_wrt_g[i - 1, j]), poses_wrt_g[i, j])  # double check

                translation = transformations.translation_from_matrix(m)
                ypr = transformations.euler_from_matrix(m, axes="rzyx")
                assert (np.all(np.abs(ypr) < np.pi))
                self.fc_ground_truth[i, j] = np.concatenate([translation, ypr])  # double check

        tools.printf("All data loaded, batch_size=%d, timesteps=%d, num_batches=%d" % (
            self.cfg.batch_size, self.cfg.timesteps, self.total_batch_count))

        self.next_epoch()
        gc.collect()  # force garbage collection
Example #33
def localize(rs, sched=None):

    # Global variables are usually not good style, but are required here (for now)
    global vehicle, H0_2, data, current_time

    # Listen to the mavlink messages that will be used as trigger to set the EKF home automatically
    # The Pixhawk will not allow arming if the EKF and Home origins have not been set. Once data is
    # being received from the Realsense, this callback will set the origins.
    # NOTE: There has been some trouble with this; sometimes it fails to set the origin. If this happens,
    # reboot the Pixhawk until the issue resolves itself (or find a better solution!)
    vehicle.add_message_listener('STATUSTEXT', statustext_callback)

    if compass_enabled == 1:

        # Listen to the attitude data in aeronautical frame
        vehicle.add_message_listener('ATTITUDE', att_msg_callback)

    # Send MAVlink messages in the background
    if sched is None:
        sched = BackgroundScheduler()

    # Add message senders for vision pose message and confidence message
    sched.add_job(send_vision_position_message,
                  'interval',
                  seconds=1 / vision_msg_hz,
                  max_instances=5)
    sched.add_job(send_confidence_level_dummy_message,
                  'interval',
                  seconds=1 / confidence_msg_hz)

    # For scale calibration, we will use a thread to monitor user input
    # NOTE: not tested by VADL
    if scale_calib_enable:
        scale_update_thread = threading.Thread(target=scale_update)
        scale_update_thread.daemon = True
        scale_update_thread.start()

    # Start sending background messages to Mavlink
    sched.start()

    if compass_enabled == 1:
        # Wait a short while for yaw to be correctly initiated
        time.sleep(1)

    # This function is contained in its own thread and runs indefinitely.
    start_time = time.time()
    print("Sending VISION_POSITION_ESTIMATE messages to FCU.")
    try:
        while True:

            # Obtain pose from the Realsense
            data = rs.read()

            if data:

                # Store the timestamp for MAVLink messages
                current_time = int(round(time.time() * 1000000))

                # Extract pose info and format as a homogeneous matrix.
                # In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]
                H1_3 = tf.quaternion_matrix([
                    data.rotation.w, data.rotation.x, data.rotation.y,
                    data.rotation.z
                ])
                H1_3[0][3] = data.translation.x * scale_factor
                H1_3[1][3] = data.translation.y * scale_factor
                H1_3[2][3] = data.translation.z * scale_factor

                # Transform to NED coordinates (body AND reference frame!)
                H0_2 = rs_to_body(H1_3)

                # Account for offsets from body's center of gravity (or IMU) to camera's origin
                if body_offset_enabled == 1:
                    H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_body_camera[0][3] = body_offset_x
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)

                    # Shift the NED pose by an offset
                    H0_2 = H_body_camera.dot(H0_2.dot(H_camera_body))

                # Record time
                timestamp = time.time() - start_time

                # Convert to NED & quaternion for data logging
                pose_ned = np.array(tf.translation_from_matrix(H0_2))

                # Write pose and accelerations to file
                # These logs follow the .tum format so they can be processed by
                # the EVO SLAM comparisons package:
                #
                # https://github.com/MichaelGrupp/evo
                rs_pose_file.write(
                    str(timestamp) + " " + str(pose_ned[0]) + " " +
                    str(pose_ned[1]) + " " + str(pose_ned[2]) + " " +
                    str(data.rotation.w) + " " + str(data.rotation.x) + " " +
                    str(data.rotation.y) + " " + str(data.rotation.z) + "\n")

                rs_accel_file.write(
                    str(timestamp) + " " + str(data.acceleration.x) + " " +
                    str(data.acceleration.y) + " " + str(data.acceleration.z) +
                    "\n")

                # Show debug messages here
                if debug_enable == 1:
                    os.system(
                        'clear'
                    )  # This helps in displaying the messages to be more readable

                    print("DEBUG: Raw RPY[deg]: {}".format(
                        np.array(tf.euler_from_matrix(H1_3, 'sxyz')) * 180 /
                        m.pi))

                    print("DEBUG: NED RPY[deg]: {}".format(
                        np.array(tf.euler_from_matrix(H0_2, 'sxyz')) * 180 /
                        m.pi))

                    print("DEBUG: Raw pos xyz : {}".format(
                        np.array([
                            data.translation.x, data.translation.y,
                            data.translation.z
                        ])))

                    print("DEBUG: NED pos xyz : {}".format(
                        np.array(tf.translation_from_matrix(H0_2))))

    except KeyboardInterrupt:
        print("EXCEPTION: KeyboardInterrupt has been caught. Cleaning up...")

    finally:

        # Clean up Realsense and Vehicle objects
        rs.stop()
        vehicle.close()

        print("Realsense pipeline and vehicle object closed.")
        sys.exit()
Example #34
def _pack(T):
    eulxyz = tf.euler_from_matrix(T, axes='sxyz')
    trans = tf.translation_from_matrix(T)
    return np.hstack((trans, eulxyz))
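A hedged usage sketch for _pack (assumes `tf`/`np` alias the transformations and numpy modules, as in the snippet):

import numpy as np
import transformations as tf

T = tf.concatenate_matrices(tf.translation_matrix([1.0, 2.0, 3.0]),
                            tf.euler_matrix(0.1, 0.2, 0.3, axes='sxyz'))
print(_pack(T))  # -> [1.  2.  3.  0.1 0.2 0.3]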
Example #35
                    progress("DEBUG: Raw RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_T265Ref_T265body, 'sxyz')) *
                        180 / m.pi))
                    progress("DEBUG: NED RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz')) *
                        180 / m.pi))
                    progress("DEBUG: Raw pos xyz : {}".format(
                        np.array([
                            data.translation.x, data.translation.y,
                            data.translation.z
                        ])))
                    progress("DEBUG: NED pos xyz : {}".format(
                        np.array(
                            tf.translation_from_matrix(H_aeroRef_aeroBody))))

        conn.home_location = LocationGlobal(home_lat, home_lon, home_alt)
        arm_and_takeoff(0)

except Exception as e:
    progress(e)

except:
    send_msg_to_gcs('ERROR IN SCRIPT')
    progress("Unexpected error: %s" % sys.exc_info()[0])

finally:
    progress('Closing the script...')
    # start a timer in case stopping everything nicely doesn't work.
    signal.setitimer(signal.ITIMER_REAL, 5)  # seconds...
Example #36
def tf_to_pose_stamp(tf, id):
    q = transformations.quaternion_from_matrix(tf)
    t = transformations.translation_from_matrix(tf)
    return create_pose_stamp_msg(id, [t[0], t[1], t[2], q[0], q[1], q[2], q[3]])