Example #1
    def img_callback(self):
        while True:
            if not self.cap.isOpened():
                print("Camera can't open\nexit")
                return -1

            # Grab a frame; skip this iteration if the read failed.
            ok, img = self.cap.read()
            if not ok:
                continue

            # Debayer and resize the frame before displaying it.
            img = cv2.cvtColor(img, cv2.COLOR_BayerGB2RGB)
            img = cv2.resize(img, dsize=(args.width, args.height))
            height, width, channels = img.shape
            cv2.imshow("fuck_flir", img)

            # Convert the OpenCV image to a sensor_msgs/Image message.
            temp = CvBridge().cv2_to_imgmsg(img, encoding='bgr8')

            # Alternatively, copy the converted fields into a pre-allocated msg.Image():
            # ret = msg.Image()
            # ret.data = temp.data
            # ret.encoding = temp.encoding
            # ret.header.frame_id = temp.header.frame_id
            # ret.step = temp.step
            # ret.height = height
            # ret.width = width

            self.publisher_.publish(temp)

            key = cv2.waitKey(1)
            if key == ord("q"):
                break
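
For reference, a minimal subscriber-side counterpart is sketched below; it is an assumption, not part of the example above. It converts the received sensor_msgs/Image back to an OpenCV array with CvBridge. The node name and the '/camera/image' topic are placeholders and should match whatever self.publisher_ actually publishes on.

# Minimal sketch of a receiving node (ROS 2 / rclpy), assuming the publisher
# above sends bgr8 frames on a hypothetical '/camera/image' topic.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2


class ImageViewer(Node):
    def __init__(self):
        super().__init__('image_viewer')
        self.bridge = CvBridge()
        # Topic name is an assumption; match it to the publisher's topic.
        self.sub = self.create_subscription(Image, '/camera/image', self.on_image, 10)

    def on_image(self, msg):
        # Convert the sensor_msgs/Image back to an OpenCV BGR array and show it.
        frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        cv2.imshow('received', frame)
        cv2.waitKey(1)


def main():
    rclpy.init()
    rclpy.spin(ImageViewer())


if __name__ == '__main__':
    main()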
Example #2
    def make_goal(self, ud, goal):
        occ_grid_map = rospy.wait_for_message('/map', nav_msgs.OccupancyGrid)
        goal.input_map = sensor_msgs.Image()
        goal.input_map.header = occ_grid_map.header
        goal.input_map.height = occ_grid_map.info.height
        goal.input_map.width = occ_grid_map.info.width
        goal.input_map.step = occ_grid_map.info.width
        goal.input_map.encoding = 'mono8'
        # Convert map into a black and white 8bit single-channel image (format 8UC1), which is 0 (black)
        # for obstacles and unknown space, and 255 (white) for free space
        for i in range(occ_grid_map.info.height):
            for j in range(occ_grid_map.info.width):
                pixel_value = b'\x00' if occ_grid_map.data[
                    i * occ_grid_map.info.width + j] in [-1, 100] else b'\xFF'
                goal.input_map.data += pixel_value


        # [/exploration/room_sequence_planning_server  INFO 1592274444.350931252, 465.900000000]: ********Sequence planning started************
        # terminate called after throwing an instance of 'cv_bridge::Exception'
        #   what():  [32SC1] is not a color format. but [mono8] is. The conversion does not make sense
        # (the image has the wrong format for planning, and it seems slow)

        # and fix   [exploration/room_segmentation_server-17] escalating to SIGTERM
        # [move_base-10] escalating to SIGTERM
        # ALTERNATIVE: read image from file
        # import cv2
        # from cv_bridge import CvBridge
        # bridge = CvBridge()
        # cv_image = cv2.imread('/home/jorge/catkin_ws/thorp/src/thorp/thorp_simulation/worlds/maps/cat_house.png',
        #                       cv2.IMREAD_UNCHANGED)
        # for i in range(cv_image.shape[0]):
        #     for j in range(cv_image.shape[1]):
        #         if cv_image[i][j] < 250:
        #             cv_image[i][j] = 0
        #         else:
        #             cv_image[i][j] = 255
        # goal.input_map = bridge.cv2_to_imgmsg(cv_image, encoding='mono8')
        rospy.Publisher('/exploration/img',
                        sensor_msgs.Image,
                        queue_size=1,
                        latch=True).publish(goal.input_map)
        goal.map_origin = occ_grid_map.info.origin
        goal.map_resolution = occ_grid_map.info.resolution
        goal.return_format_in_meter = True
        goal.return_format_in_pixel = True
        goal.robot_radius = rospy.get_param(
            '/move_base_flex/global_costmap/robot_radius', 0.18)
        # those values are also needed by PlanRoomSequence and PlanRoomExploration, so share them as output keys
        # the segmented map comes with its own origin and resolution, but are the same as for the input map
        ud['map_image'] = goal.input_map
        ud['map_origin'] = goal.map_origin
        ud['map_resolution'] = goal.map_resolution
        ud['robot_radius'] = goal.robot_radius
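
The per-pixel loop above appends one byte at a time, which is likely what the "seems slow" note refers to. A possible numpy-based alternative (an assumption, not taken from the original code) builds the same 0/255 mono8 buffer in one pass:

# Possible vectorized replacement for the per-pixel conversion loop (assumption).
import numpy as np

def occupancy_grid_to_mono8_bytes(occ_grid_map):
    """Return mono8 data: 0 for obstacles/unknown (100 or -1), 255 for free space."""
    grid = np.asarray(occ_grid_map.data, dtype=np.int16).reshape(
        occ_grid_map.info.height, occ_grid_map.info.width)
    free = (grid != -1) & (grid != 100)
    return (free.astype(np.uint8) * 255).tobytes()

# Usage sketch:
# goal.input_map.data = occupancy_grid_to_mono8_bytes(occ_grid_map)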
Example #3
 def __init__(
     self,
     param={
         "low_H": 110,
         "high_H": 120,
         "low_S": 0,
         "high_S": 254,
         "low_V": 220,
         "high_V": 230,
     },
 ):
     # Seven-segment lookup table: keys are 7-tuples of segment on/off flags,
     # values are the decoded character (-1 marks an unreadable pattern).
     self.DIGITS_LOOKUP = {
         (0, 0, 1, 1, 0, 1, 0): -1,
         (0, 0, 0, 1, 0, 0, 0): "-",
         (1, 1, 1, 0, 1, 1, 1): 0,
         (0, 0, 1, 0, 0, 1, 0): 1,
         (1, 0, 1, 1, 1, 0, 1): 2,
         (1, 0, 1, 1, 0, 1, 1): 3,
         (0, 1, 1, 1, 0, 1, 0): 4,
         (1, 1, 0, 1, 0, 1, 1): 5,
         (1, 1, 0, 1, 1, 1, 1): 6,
         (1, 0, 1, 0, 0, 1, 0): 7,
         (1, 1, 1, 1, 1, 1, 1): 8,
         (1, 1, 1, 1, 0, 1, 1): 9,
     }
     self.heading_data = mrs_msgs.Float64Stamped()
     self.nav = Position_Info()
     self.camera_setting = sensor.CameraInfo()
     self.camera_frame = sensor.Image()
     self.sub_Camera = rospy.Subscriber(
         "/uav1/bluefox_optflow/camera_info",
         sensor.CameraInfo,
         self.setting_callback,
     )
     self.sub = rospy.Subscriber("/uav1/bluefox_optflow/image_raw",
                                 sensor.Image, self.camera_callback)
     self.sub_hdg = rospy.Subscriber(
         "/uav1/control_manager/heading",
         mrs_msgs.Float64Stamped,
         self.heading_callback,
     )
     # Block until the first image and valid camera intrinsics have arrived.
     while not self.camera_frame.data:
         rospy.sleep(0.01)
     while self.camera_setting.K[0] <= 0:
         rospy.sleep(0.01)
     # K is the row-major 3x3 intrinsic matrix: K[0] = fx, K[4] = fy.
     self.fx = self.camera_setting.K[0]
     self.fy = self.camera_setting.K[4]
     self.height = self.camera_setting.height
     self.width = self.camera_setting.width
     # Field of view from the pinhole model: fov = 2 * atan(size / (2 * focal)).
     self.hfov = 2 * np.arctan2(self.width, 2 * self.fx)
     self.vfov = 2 * np.arctan2(self.height, 2 * self.fy)
     self.parameters = param
     self.kernel = np.ones((5, 5), np.uint8)
     self.bridge = CvBridge()
     self.img = ""
Example #4
 def make_goal(self, ud, goal):
     # Listen for current map
     occ_grid_map = rospy.wait_for_message('/map', nav_msgs.OccupancyGrid)
     goal.input_map = sensor_msgs.Image()
     goal.input_map.header = occ_grid_map.header
     goal.input_map.height = occ_grid_map.info.height
     goal.input_map.width = occ_grid_map.info.width
     goal.input_map.step = occ_grid_map.info.width
     goal.input_map.encoding = 'mono8'
     # Convert map into a black and white 8bit single-channel image (format 8UC1), which is 0 (black)
     # for obstacles and unknown space, and 255 (white) for free space
     for i in range(occ_grid_map.info.height):
         for j in range(occ_grid_map.info.width):
             pixel_value = b'\x00' if occ_grid_map.data[
                 i * occ_grid_map.info.width + j] in [-1, 100] else b'\xFF'
             goal.input_map.data += pixel_value
     # ALTERNATIVE: read image from file
     # import cv2
     # from cv_bridge import CvBridge
     # bridge = CvBridge()
     # cv_image = cv2.imread('/home/jorge/catkin_ws/thorp/src/thorp/thorp_simulation/worlds/maps/fun_house.png',
     #                       cv2.IMREAD_UNCHANGED)
     # for i in range(cv_image.shape[0]):
     #     for j in range(cv_image.shape[1]):
     #         if cv_image[i][j] < 250:
     #             cv_image[i][j] = 0
     #         else:
     #             cv_image[i][j] = 255
     # goal.input_map = bridge.cv2_to_imgmsg(cv_image, encoding='mono8')
     rospy.Publisher('/exploration/img',
                     sensor_msgs.Image,
                     queue_size=1,
                     latch=True).publish(goal.input_map)
     goal.map_origin = occ_grid_map.info.origin
     goal.map_resolution = occ_grid_map.info.resolution
     goal.return_format_in_meter = True
     goal.return_format_in_pixel = True
     goal.robot_radius = rospy.get_param(
         '/move_base_flex/global_costmap/robot_radius', 0.18)
     # those values are also needed by PlanRoomSequence and PlanRoomExploration, so share them as output keys
     # the segmented map comes with its own origin and resolution, but are the same as for the input map
     ud['map_image'] = goal.input_map
     ud['map_origin'] = goal.map_origin
     ud['map_resolution'] = goal.map_resolution
     ud['robot_radius'] = goal.robot_radius
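
As a quick way to inspect the generated map image, the conversion can also be run in the opposite direction of the commented ALTERNATIVE block above. This is a sketch under the assumption that cv_bridge and OpenCV are available; the output path is a placeholder.

import cv2
from cv_bridge import CvBridge

def save_input_map(input_map, path='/tmp/input_map.png'):
    """Write a sensor_msgs/Image with mono8 encoding to disk as a grayscale PNG."""
    cv_image = CvBridge().imgmsg_to_cv2(input_map, desired_encoding='mono8')
    cv2.imwrite(path, cv_image)

# Usage sketch:
# save_input_map(goal.input_map)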
Example #5
 def __init__(self, engine):
     super().__init__('IProc_TestDisplayNode')
     self.__window_name = "ROS2 PoseNet"
     # Newer rclpy releases require an explicit history/depth when constructing a QoSProfile.
     profile = qos.QoSProfile(depth=10)
     self.sub = self.create_subscription(msg.Image,
                                         '/image',
                                         self.msg_callback,
                                         qos_profile=profile)
     self.pub = self.create_publisher(msg.Image, '/recognized_image',
                                      qos_profile=profile)
     self.engine = engine
     self.fps = ""
     self.detectfps = ""
     self.framecount = 0
     self.detectframecount = 0
     self.time1 = 0
     self.time2 = 0
     self.t1 = time.perf_counter()
     self.t2 = time.perf_counter()
     self.msg = msg.Image()
     self.cv_bridge = CvBridge()
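
The counters set up above (fps, framecount, t1, t2) suggest an FPS overlay in the image callback, which is not included in the snippet. The method below is a hypothetical sketch of such a msg_callback; the bgr8 encodings, the overlay position, and the skipped inference step are assumptions, and it relies on time and cv2 being imported.

 def msg_callback(self, image_msg):
     # Convert the incoming sensor_msgs/Image to an OpenCV BGR array.
     frame = self.cv_bridge.imgmsg_to_cv2(image_msg, desired_encoding='bgr8')

     # ... run self.engine on `frame` here (inference step omitted) ...

     # Refresh the FPS estimate roughly once per second.
     self.framecount += 1
     self.t2 = time.perf_counter()
     elapsed = self.t2 - self.t1
     if elapsed >= 1.0:
         self.fps = "{:.1f} FPS".format(self.framecount / elapsed)
         self.framecount = 0
         self.t1 = self.t2

     # Overlay the FPS string and republish the annotated frame.
     cv2.putText(frame, self.fps, (10, 30),
                 cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
     self.pub.publish(self.cv_bridge.cv2_to_imgmsg(frame, encoding='bgr8'))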
Example #6
 def result_cb(self, ud, status, result):
     # make a random color rooms map to show in RViz
     rooms_color_map = sensor_msgs.Image()
     rooms_color_map.header = result.segmented_map.header
     rooms_color_map.width = result.segmented_map.width
     rooms_color_map.height = result.segmented_map.height
     rooms_color_map.step = rooms_color_map.width * 3
     rooms_color_map.encoding = 'rgb8'
     for i in range(result.segmented_map.height):
         for j in range(result.segmented_map.width):
             idx = i * result.segmented_map.width * 4 + j * 4
             # segmented_map is 32SC1: each pixel is a little-endian int32 room label
             val = result.segmented_map.data[idx:idx + 4]
             room = struct.unpack('<i', val)[0]
             if room > 0:
                 random.seed(room)
                 r, g, b = random.randint(0, 255), random.randint(
                     0, 255), random.randint(0, 255)
                 rooms_color_map.data += struct.pack('<BBB', r, g, b)
             else:
                 rooms_color_map.data += struct.pack('<BBB', 0, 0, 0)
     self.img_pub.publish(rooms_color_map)
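
The nested loops above read the 32SC1 segmented map one pixel at a time. A possible numpy-based variant (an assumption, not part of the original callback) decodes all room labels at once and colors them with the same per-room seeding:

import random
import numpy as np

def segmented_map_to_labels(segmented_map):
    """Return an (height, width) int32 array of room ids from a 32SC1 Image."""
    labels = np.frombuffer(bytes(segmented_map.data), dtype='<i4')
    return labels.reshape(segmented_map.height, segmented_map.width)

def labels_to_rgb8_bytes(labels):
    """Color each room id with a deterministic pseudo-random RGB; id 0 stays black."""
    rgb = np.zeros(labels.shape + (3,), dtype=np.uint8)
    for room in np.unique(labels):
        if room == 0:
            continue
        random.seed(int(room))  # same seeding as the loop above, so colors agree
        rgb[labels == room] = (random.randint(0, 255),
                               random.randint(0, 255),
                               random.randint(0, 255))
    return rgb.tobytes()

# Usage sketch:
# labels = segmented_map_to_labels(result.segmented_map)
# rooms_color_map.data = labels_to_rgb8_bytes(labels)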