Example #1
import numpy as np
import matplotlib.pyplot as plt
# project-specific helpers (read_points_data, Rotation, Plane3D, RANSAC, vis)
# are assumed to be imported from this repository's own modules


def main():
    filenum = 1
    lidar_pitch = 9.5/180*np.pi
    
    filename = "/home/henry/Documents/projects/avl/Detection/ground/data/points_data_"+repr(filenum)+".txt"
    planes, pcds = read_points_data(filename)
    
    tf = Rotation(roll=0.0, pitch=lidar_pitch, yaw=0.0)
    for plane in planes:
        plane.rotate_around_axis(axis="y", angle=-lidar_pitch)
    for pcd in pcds:
        pcd.rotate(tf)
    
    
    pcd = pcds[0]
    plane = planes[1]
    # print(np.min(pcd.data[0,:]), np.max(pcd.data[0,:]))
    vis(plane, pcd, dim_2d=True)

    # the floor-only points
    # pcd = pcds[2]
    # plane = planes[2]
    # re-estimate the plane with MSAC-based RANSAC and visualize the result
    sac = RANSAC(Plane3D, min_sample_num=3, threshold=0.1, iteration=50, method="MSAC")
    plane2, _, _, _ = sac.ransac(pcd.data.T)
    vis(plane2, pcd, dim_2d=True)
    plt.show()
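
The MSAC fit at the end of Example #1 returns a refined Plane3D; the same distance-based split used by estimate_plane in Example #2 could then separate road from non-road points. A minimal sketch, assuming the Plane3D.distance_to_plane method shown in Example #2 and an illustrative 0.15 m threshold:

# distances of all points to the refitted plane (pcd.data is 3xN, hence the transpose)
distance = plane2.distance_to_plane(pcd.data.T)
threshold_inlier = 0.15  # illustrative threshold in metres
pcd_inlier = pcd.data[:, distance <= threshold_inlier]
pcd_outlier = pcd.data[:, distance > threshold_inlier]
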
Example #2
class RoadEstimation:
    def __init__(self):
        # specify topic and data type
        self.sub_bbox_1 = rospy.Subscriber("camera1/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_bbox_6 = rospy.Subscriber("camera6/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_pcd = rospy.Subscriber("/points_raw", PointCloud2,
                                        self.pcd_callback)
        # self.sub_pose = rospy.Subscriber("/current_pose", PoseStamped, self.pose_callback)

        # Publishers
        self.pub_intersect_markers = rospy.Publisher(
            "/vision_objects_position_rviz", MarkerArray, queue_size=10)
        self.pub_plane_markers = rospy.Publisher("/estimated_plane_rviz",
                                                 MarkerArray,
                                                 queue_size=10)
        self.pub_plane = rospy.Publisher("/estimated_plane",
                                         Plane,
                                         queue_size=10)
        self.pub_pcd_inlier = rospy.Publisher("/points_inlier",
                                              PointCloud2,
                                              queue_size=10)
        self.pub_pcd_outlier = rospy.Publisher("/points_outlier",
                                               PointCloud2,
                                               queue_size=10)

        self.cam1 = camera_setup_1()
        self.cam6 = camera_setup_6()
        self.plane = Plane3D(-0.157, 0, 0.988, 1.9)  # initial ground-plane estimate
        self.plane_world = None
        self.plane_tracker = None
        self.sac = RANSAC(Plane3D,
                          min_sample_num=3,
                          threshold=0.22,
                          iteration=200,
                          method="MSAC")

        self.tf_listener = TransformListener()
        self.tfmr = Transformer()
        self.tf_ros = TransformerROS()

    def display_bboxes_in_world(self, cam, bboxes):
        vis_array = MarkerArray()
        xlist, ylist = [], []
        for box_id, bbox in enumerate(bboxes):
            d, C = cam.bounding_box_to_ray(bbox)
            intersection = self.plane.plane_ray_intersection(d, C)
            # print(intersection[0,0],'\t', intersection[1,0],'\t', intersection[2,0])
            marker = visualize_marker(intersection,
                                      mkr_id=box_id,
                                      scale=0.5,
                                      frame_id="velodyne",
                                      mkr_color=[0.0, 0.8, 0.0, 1.0])
            vis_array.markers.append(marker)
            xlist.append(intersection[0, 0])
            ylist.append(intersection[1, 0])

        self.pub_intersect_markers.publish(vis_array)

        # plt.pause(0.001)

    def bbox_array_callback(self, msg):
        if msg.header.frame_id == "camera1":
            cam = self.cam1
        elif msg.header.frame_id == "camera6":
            cam = self.cam6
        else:
            rospy.logwarn(
                "unrecognized frame id %s, bounding box callback in road estimation failed",
                msg.header.frame_id)
            return

        # rospy.loginfo("camera {:d} message received!!".format(camera.id))
        bboxes = []
        for obj in msg.objects:
            # rospy.loginfo("{}".format(obj.label))
            # rospy.loginfo("x:{} y:{} width:{} height:{} angle:{}".format(obj.x, obj.y, obj.width, obj.height, obj.angle))
            bbox = BoundingBox(obj.x,
                               obj.y,
                               obj.width,
                               obj.height,
                               obj.angle,
                               label=obj.label)
            bboxes.append(bbox)
        self.display_bboxes_in_world(cam, bboxes)

    def estimate_plane(self, pcd):
        # threshold_z = [2.0, -0.5]
        # pcd_z = clip_pcd_by_distance_plane(pcd, self.plane, threshold_z, in_only=True)
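        # vec1, vec2 and pt1 span the x-z plane through the origin; clip_pcd_by_distance_plane
        # presumably keeps only points within the [-3, 6] signed-distance band of that plane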
        vec1 = np.array([1, 0, 0])
        vec2 = np.array([0, 0, 1])
        pt1 = np.array([0, 0, 0])
        threshold = [-3.0, 6.0]
        plane_from_vec = Plane3D.create_plane_from_vectors_and_point(
            vec1, vec2, pt1)
        pcd_close = clip_pcd_by_distance_plane(pcd,
                                               plane_from_vec,
                                               threshold,
                                               in_only=True)

        pcd_close = pcd_close.extract_low()

        seed = 0
        np.random.seed(seed)
        plane1, _, _, _ = self.sac.ransac(pcd_close.data.T,
                                          constraint=self.plane.param,
                                          constraint_threshold=0.5)
        # vis(plane1, pcd, dim_2d=True)

        # normal = vec1.reshape([-1,1]) / np.linalg.norm(vec1)
        # depth = np.matmul(pcd_close.data.T , normal).reshape([-1])
        distance = plane1.distance_to_plane(pcd_close.data.T)
        # threshold_outer = 0.3
        # threshold_inner = 0.1
        # mask_outer = distance < threshold_outer
        # mask_inner = distance < threshold_inner
        # bin_dist = 5.0
        # depth_min = np.min(depth)
        # bin_num = int((np.max(depth) -  depth_min)/ bin_dist) + 1
        # for i in range(bin_num):
        #     depth_thred_min, depth_thred_max = i*bin_dist+depth_min, (i+1)*bin_dist+depth_min
        #     mask_depth = np.logical_and(depth > depth_thred_min, depth < depth_thred_max)
        #     part_inner = np.logical_and(mask_depth, mask_inner)
        #     part_outer = np.logical_and(mask_depth, mask_outer)
        #     sum_outer = np.sum(part_outer)
        #     sum_inner = np.sum(part_inner)
        #     if sum_outer == 0:
        #         ratio = 1
        #     else:
        #         ratio = float(sum_inner) / sum_outer
        #     if not ratio == 1:
        #         print(i, "{:.1f}".format(depth_thred_min), "{:.1f}".format(depth_thred_max), sum_inner, sum_outer, "{:.4f}".format(ratio))

        print('Plane params:', plane1.param.T)
        threshold_inlier = 0.15
        pcd_inlier = pcd_close.data[:, distance <= threshold_inlier]
        pcd_outlier = pcd_close.data[:, distance > threshold_inlier]
        return plane1, pcd_inlier, pcd_outlier

    def create_and_publish_plane_markers(self,
                                         plane,
                                         frame_id='velodyne',
                                         center=None,
                                         normal=None):
        if normal is None:
            v1 = np.array([[0, 0, 1.0]]).T
        else:
            v1 = normal
        v2 = np.array([[plane.a, plane.b, plane.c]]).T
        q_self = Quaternion_self.create_quaternion_from_vector_to_vector(
            v1, v2)
        q = Quaternion(q_self.x, q_self.y, q_self.z, q_self.w)
        euler = np.array(euler_from_quaternion(
            (q.x, q.y, q.z, q.w))) * 180 / np.pi
        print("Euler: ", euler)
        marker_array = MarkerArray()
        if center is None:
            center = [10, 0, (-plane.a * 10 - plane.d) / plane.c]
        marker = visualize_marker(center,
                                  frame_id=frame_id,
                                  mkr_type='cube',
                                  orientation=q,
                                  scale=[20, 10, 0.05],
                                  lifetime=30,
                                  mkr_color=[0.0, 0.8, 0.0, 0.8])
        marker_array.markers.append(marker)
        return marker_array

    # @profile # profiling for analysis
    def pcd_callback(self, msg):
        rospy.logwarn("Getting pcd at: %d.%09ds, (%d,%d)",
                      msg.header.stamp.secs, msg.header.stamp.nsecs,
                      msg.height, msg.width)

        # convert the PointCloud2 message to an (N, 3) array of xyz points
        pcd_original = pointcloud2_to_xyz_array(msg)

        pcd = PointCloud(pcd_original.T)

        self.plane, pcd_inlier, pcd_outlier = self.estimate_plane(pcd)
        transform_matrix, trans, rot, euler = get_transformation(
            frame_from='/velodyne',
            frame_to='/world',
            time_from=msg.header.stamp,
            time_to=msg.header.stamp,
            static_frame='/world',
            tf_listener=self.tf_listener,
            tf_ros=self.tf_ros)
        if transform_matrix is not None:
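            # plane coefficients map with the inverse transpose of the point transform,
            # hence np.linalg.inv(transform_matrix).T below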
            plane_world_param = np.matmul(
                np.linalg.inv(transform_matrix).T,
                np.array(
                    [[self.plane.a, self.plane.b, self.plane.c,
                      self.plane.d]]).T)
            plane_world_param = plane_world_param / np.linalg.norm(
                plane_world_param[0:3])

            if self.plane_tracker is None:
                self.plane_tracker = Tracker(msg.header.stamp,
                                             plane_world_param)
            else:
                self.plane_tracker.predict(msg.header.stamp)
                self.plane_tracker.update(plane_world_param)
            print("plane_world:", plane_world_param.T)
            print("plane_traker:", self.plane_tracker.filter.x_post.T)

            # self.plane_world = Plane3D(plane_world_param[0,0], plane_world_param[1,0], plane_world_param[2,0], plane_world_param[3,0])
            self.plane_world = Plane3D(self.plane_tracker.filter.x_post[0, 0],
                                       self.plane_tracker.filter.x_post[1, 0],
                                       self.plane_tracker.filter.x_post[2, 0],
                                       self.plane_tracker.filter.x_post[3, 0])
            center_pos = np.matmul(
                transform_matrix,
                np.array([[
                    10, 0, (-self.plane.a * 10 - self.plane.d) / self.plane.c,
                    1
                ]]).T)
            center_pos = center_pos[0:3].flatten()
            # normal = np.matmul( transform_matrix, np.array([[0., 0., 1., 0.]]).T)
            # normal = normal[0:3]
            normal = None
            marker_array = self.create_and_publish_plane_markers(
                self.plane_world,
                frame_id='world',
                center=center_pos,
                normal=normal)
            self.pub_plane_markers.publish(marker_array)

        plane_msg = Plane()
        plane_msg.coef[0] = self.plane.a
        plane_msg.coef[1] = self.plane.b
        plane_msg.coef[2] = self.plane.c
        plane_msg.coef[3] = self.plane.d

        self.pub_plane.publish(plane_msg)

        # pcd_msg_inlier = create_point_cloud(pcd_inlier.T, frame_id='velodyne')
        # pcd_msg_outlier = create_point_cloud(pcd_outlier.T, frame_id='velodyne')
        pcd_msg_inlier = xyz_array_to_pointcloud2(pcd_inlier.T,
                                                  stamp=msg.header.stamp,
                                                  frame_id='velodyne')
        pcd_msg_outlier = xyz_array_to_pointcloud2(pcd_outlier.T,
                                                   stamp=msg.header.stamp,
                                                   frame_id='velodyne')
        self.pub_pcd_inlier.publish(pcd_msg_inlier)
        self.pub_pcd_outlier.publish(pcd_msg_outlier)

        rospy.logwarn("Finished plane estimation")

    def pose_callback(self, msg):
        rospy.logdebug("Getting pose at: %d.%09ds", msg.header.stamp.secs,
                       msg.header.stamp.nsecs)
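
The class registers its subscribers and publishers in __init__, but the snippet does not show a node entry point. A minimal driver might look like this (the node name 'road_estimation' is an assumption):

if __name__ == '__main__':
    rospy.init_node('road_estimation')  # node name is an assumption
    estimator = RoadEstimation()        # wires up all subscribers and publishers
    rospy.spin()                        # hand control to the ROS callback loop
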
class LandmarkClassifier(object):
    """Class for classifying landmarks
    """
    def __init__(self):
        #Subscribers
        self.image_sub = rospy.Subscriber('/ardrone/bottom/image_raw', Image,
                                          self.image_callback)
        self.sub_vicon = rospy.Subscriber('/vicon/ARDroneCarre/ARDroneCarre',
                                          TransformStamped,
                                          self.update_quadrotor_state)

        #Global variables
        self.image = Image()  # placeholder until the first image message arrives
        self.bridge = CvBridge()
        self.quad_state = TransformStamped()
        self.T_body2vicon = np.identity(4)

        #wait for the first image message before starting processing
        rospy.wait_for_message('/ardrone/bottom/image_raw', Image)

        #time flags for detecting when bag file is done publishing
        self.current_time = rospy.Time.now().to_sec()
        self.last_msg_time = rospy.Time.now().to_sec()

        #Define landmark locations
        self.landmarks = np.array([[4.32, 8.05], [0.874, 5.49], [7.68, 4.24],
                                   [4.27, 1.23]])
        #closest distance at which each landmark has been saved so far
        self.landmark_prev_save_rad = [
            float('inf'),
            float('inf'),
            float('inf'),
            float('inf')
        ]
        self.save_radius = 0.5

        #pose graph
        self.G = {}

        #load images for classification
        img1 = cv2.imread('cn_tower.png')
        img2 = cv2.imread('casa_loma.png')
        img3 = cv2.imread('nathan_philips_square.png')
        img4 = cv2.imread('princes_gates.png')
        self.landmark_ref_images = [img1, img2, img3, img4]
        self.landmark_names = [
            'cn_tower', 'casa_loma', 'nathan_philips_square', 'princes_gates'
        ]

        #Initialize ransac
        self.ransac = RANSAC()

    def image_callback(self, msg):
        """Callback for image topic
        """

        #store the current pose which will be attached to the current
        #camera frame
        current_quad_state = copy.deepcopy(self.quad_state)
        self.image = msg
        self.T_body2vicon[0, 3] = current_quad_state.transform.translation.x
        self.T_body2vicon[1, 3] = current_quad_state.transform.translation.y
        self.T_body2vicon[2, 3] = current_quad_state.transform.translation.z

        q = [
            current_quad_state.transform.rotation.x,
            current_quad_state.transform.rotation.y,
            current_quad_state.transform.rotation.z,
            current_quad_state.transform.rotation.w
        ]

        R = quaternion_matrix(q)

        self.T_body2vicon[0:3, 0:3] = R[0:3, 0:3]

        self.last_msg_time = rospy.Time.now().to_sec()

    def update_quadrotor_state(self, msg):
        """Callback for vicon topic
        """
        self.quad_state = msg

    def save_image(self, image, image_name):
        """Function for saving images to file
        """
        im_pil = IM.fromarray(image)
        b, g, r = im_pil.split()
        im_pil = IM.merge("RGB", (r, g, b))
        im_pil.save(image_name + '.png')

    def save_landmark_images(self):
        """Function that saves landmark images from project.bag, images
        that minimizes the distance between the drone and landmarks are
        saved
        """
        #run ros loop to save images
        while not rospy.is_shutdown():

            #Exit the ROS loop once project.bag has finished publishing
            self.current_time = rospy.Time.now().to_sec()
            if (self.current_time - self.last_msg_time) > 2:
                break

            #Store the current pose and image
            T_b2vic = copy.deepcopy(self.T_body2vicon)
            img = copy.deepcopy(self.image)

            #convert image to opencv format
            cv2_img = self.bridge.imgmsg_to_cv2(img, "bgr8")

            #current drone position in the world (vicon) frame
            pos = T_b2vic[0:2, 3]

            for i in range(self.landmarks.shape[0]):
                #landmark position
                l_pos = self.landmarks[i, :]

                rad = np.linalg.norm(pos - l_pos)
                #save the image if this frame is closer to the landmark than any previously saved one
                if rad < self.save_radius and rad < self.landmark_prev_save_rad[i]:
                    self.save_image(cv2_img, str(i))
                    self.G[i] = T_b2vic  #save pose for that landmark
                    self.landmark_prev_save_rad[i] = rad

    def classify_landmarks(self):
        """Function that classifies the landmarks based on the largest
        set of inliers from ransac
        """

        #Classify Landmarks
        for i in range(self.landmarks.shape[0]):
            #load saved images
            imgq = cv2.imread(str(i) + '.png')

            max_inliers = 0
            best_count, best_angle, best_idx = 0, 0.0, 0  # defaults in case no reference yields inliers
            for j in range(len(self.landmark_ref_images)):
                #load reference image
                imgt = self.landmark_ref_images[j]

                #perform ransac outlier rejection
                inlier_count, avg_angle = self.ransac.ransac(imgq, imgt)

                #keep the match with the largest inlier set
                if inlier_count > max_inliers:
                    best_count = inlier_count
                    best_angle = avg_angle
                    max_inliers = inlier_count
                    best_idx = j

            #calculate landmark orientation
            R = self.G[i][0:3, 0:3]
            (phi, theta, psi) = euler_from_matrix(R, 'rxyz')
            landmark_orientation = psi + best_angle

            #print results
            print('Image: ' + str(i) + '.png')
            print('Location: ', self.landmarks[i])
            print('Landmark: ' + self.landmark_names[best_idx])
            print('Landmark orientation: ' + str(landmark_orientation))
            print('Inlier count: ' + str(best_count))
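
The snippet does not show how LandmarkClassifier is driven; based on the methods above, a typical run might be (the node name is an assumption):

if __name__ == '__main__':
    rospy.init_node('landmark_classifier')  # node name is an assumption
    classifier = LandmarkClassifier()
    classifier.save_landmark_images()       # record the closest view of each landmark from the bag
    classifier.classify_landmarks()         # match the saved views against the reference images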