def make_msg(self, Depth1Image, Depth2Image, detection_data, camera1_param, camera2_param):
    """Split detections between two vertically stacked depth cameras and
    update the aggregated obstacle/marker messages.

    Parameters
    ----------
    Depth1Image, Depth2Image : sensor_msgs.Image
        Depth frames from the upper (camera1) and lower (camera2) cameras.
    detection_data : BoundingBoxes
        Detections computed on the stacked image (camera1 on top).
    camera1_param, camera2_param
        Per-camera parameters forwarded to bbox_to_position_in_odom.
    """
    bboxes_from_camera1 = BoundingBoxes()
    bboxes_from_camera2 = BoundingBoxes()
    bboxes_from_camera1.header = Depth1Image.header
    bboxes_from_camera2.header = Depth2Image.header
    self.now = rospy.get_rostime()
    self.timebefore = detection_data.header.stamp

    # Add point obstacle
    self.obstacle_msg.header.stamp = detection_data.header.stamp
    self.obstacle_msg.header.frame_id = "odom"  # CHANGE HERE: odom/map

    # Convert ROS depth images to OpenCV arrays.
    bridge = CvBridge()
    try:
        Depth1image = bridge.imgmsg_to_cv2(Depth1Image, desired_encoding="passthrough")
        Depth2image = bridge.imgmsg_to_cv2(Depth2Image, desired_encoding="passthrough")
    except CvBridgeError as e:
        # BUG FIX: previously fell through after the failed conversion and
        # raised NameError on Depth1image below; bail out instead.
        print(e)
        return

    # Assign each bbox to the camera containing its vertical center; boxes
    # for camera2 are shifted into that camera's own pixel coordinates.
    for bbox in detection_data.bounding_boxes:
        if (bbox.ymin + bbox.ymax) / 2 < Depth1image.shape[0]:
            if bbox.ymax > Depth1image.shape[0]:
                bbox.ymax = Depth1image.shape[0]  # clamp to camera1's frame
            bboxes_from_camera1.bounding_boxes.append(bbox)
        else:
            bbox.ymin = bbox.ymin - Depth1image.shape[0]
            bbox.ymax = bbox.ymax - Depth1image.shape[0]
            if bbox.ymin < 0:
                bbox.ymin = 0  # clamp to camera2's frame
            bboxes_from_camera2.bounding_boxes.append(bbox)

    # Camera2's results are appended onto camera1's (index offset passed in).
    camera1_obstacle_msg, camera1_marker_data = self.bbox_to_position_in_odom(
        bboxes_from_camera1, Depth1image, camera1_param)
    obstacle_msg, marker_data = self.bbox_to_position_in_odom(
        bboxes_from_camera2, Depth2image, camera2_param,
        len(camera1_obstacle_msg.obstacles), camera1_obstacle_msg, camera1_marker_data)
    self.obstacle_msg.obstacles, self.marker_data.markers = self.update_obstacles(
        self.obstacle_msg, obstacle_msg, self.marker_data, marker_data)
def msgDN(self, image, boxes, scores, classes):
    """Build the Darknet BoundingBoxes message from detector output.

    Keeps only detections whose score exceeds self.threshold; box
    coordinates are scaled from normalized [ymin, xmin, ymax, xmax]
    to pixel coordinates of *image*.
    """
    out_msg = BoundingBoxes()
    out_msg.header = image.header
    # Column indices of scores above threshold (scores has shape [1, N]).
    for idx in np.where(scores > self.threshold)[1]:
        box = boxes[0, idx, :]
        detection = BoundingBox()
        detection.Class = self.category_index[int(classes[0, idx])]['name']
        detection.probability = scores[0, idx]
        detection.xmin = int((image.width - 1) * box[1])
        detection.ymin = int((image.height - 1) * box[0])
        detection.xmax = int((image.width - 1) * box[3])
        detection.ymax = int((image.height - 1) * box[2])
        out_msg.boundingBoxes.append(detection)
    return out_msg
def process_frame(self):
    """Dequeue the next image message, run inference, publish detections
    (and, optionally, the visualization image)."""
    next_msg = self.msg_queue.get()
    frame = None
    # Convert the ROS image to OpenCV BGR format.
    try:
        frame = self._bridge.imgmsg_to_cv2(next_msg, "bgr8")
        rospy.logdebug("[trt_yolo_ros] image converted for processing")
    except CvBridgeError as e:
        rospy.logdebug("Failed to convert image %s", str(e))
    if frame is None:
        return  # conversion failed; nothing to process
    rospy.logdebug("[trt_yolo_ros] processing frame")
    boxes, classes, scores, visualization = self.model(frame)
    results = BoundingBoxes()
    results.header = next_msg.header
    results.image_header = next_msg.header
    # construct message
    self._write_message(results, boxes, scores, classes)
    # send message
    try:
        rospy.logdebug("[trt_yolo_ros] publishing")
        self._pub.publish(results)
        if self.publish_image:
            self._pub_viz.publish(
                self._bridge.cv2_to_imgmsg(visualization, "bgr8")
            )
    except CvBridgeError as e:
        rospy.logdebug("[trt_yolo_ros] Failed to convert image %s", str(e))
def camera_image_callback(self, image, camera):
    """Generate fake YOLO detections for a camera frame.

    Projects the tracked objects into the image, publishes a darknet-style
    BoundingBoxes message when any detections exist, and optionally
    publishes a debug image with the detections rendered on it.

    Parameters
    ----------
    image : sensor_msgs.Image
        The image from the camera to create detections for
    camera : Camera
        Holds camera parameters for projecting points
    """
    frame = self.bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")
    if camera.is_ready():
        # Fake darknet yolo detections message
        fake_boxes = BoundingBoxes()
        fake_boxes.header = image.header
        fake_boxes.image_header = image.header
        fake_boxes.image = image
        fake_boxes.bounding_boxes = []
        for _, obj in self.objects.iteritems():
            detections = self.get_detections(obj, camera, image.header.stamp)
            if detections is None:
                continue
            if camera.debug_image_pub:
                self.render_debug_context(frame, detections, camera)
            for det in detections:
                box = BoundingBox()
                box.Class = det[2]
                box.probability = 1.0
                box.xmin = int(det[0][0])
                box.xmax = int(det[0][1])
                box.ymin = int(det[0][2])
                box.ymax = int(det[0][3])
                fake_boxes.bounding_boxes.append(box)
        # Only publish detection if there are boxes in it
        if fake_boxes.bounding_boxes:
            self.darknet_detection_pub.publish(fake_boxes)
        if camera.debug_image_pub:
            debug_msg = self.bridge.cv2_to_imgmsg(frame, encoding="rgb8")
            camera.debug_image_pub.publish(debug_msg)
def publish_d_msgs(self, tracks, img_msg):
    """Convert *tracks* to a BoundingBoxes message, print each box for
    debugging, and publish the result on self.pub."""
    out = BoundingBoxes()
    out.header = img_msg.header
    out.bounding_boxes = [self.create_d_msgs_box(t) for t in tracks]
    print("boxes--------------------")
    for box in out.bounding_boxes:
        print(box)
    print("\n\n")
    self.pub.publish(out)
def msg(image, boxes):
    """Create the Darknet BoundingBox[es] messages

    *boxes* is an iterable of (x, y, w, h) tuples; every box is labelled
    "human" with probability 1.
    """
    result = BoundingBoxes()
    result.header = image.header
    for (left, top, width, height) in boxes:
        box = BoundingBox()
        box.Class = "human"
        box.probability = 1  # Not really...
        box.xmin = left
        box.ymin = top
        box.xmax = left + width
        box.ymax = top + height
        result.boundingBoxes.append(box)
    return result
def process_frame(self):
    """Dequeue the next image message, run inference, and publish the
    detections; optionally publish a JPEG-compressed visualization.

    BUG FIX: the compressed visualization was previously published TWICE
    per frame (once via the prepared `compressed_image` and once more via a
    second `cv2_to_compressed_imgmsg` call); it is now published exactly
    once. The large block of commented-out CompressedImage code was
    removed.
    """
    current_msg = self.msg_queue.get()
    current_image = None
    # Convert the ROS image to OpenCV BGR format.
    try:
        current_image = self._bridge.imgmsg_to_cv2(current_msg, "bgr8")
        rospy.logdebug("[trt_yolo_ros] image converted for processing")
    except CvBridgeError as e:
        rospy.logdebug("Failed to convert image %s", str(e))
    # Initialize detection results
    if current_image is not None:
        boxes, classes, scores, visualization = self.model(current_image)
        detection_results = BoundingBoxes()
        detection_results.header = current_msg.header
        detection_results.image_header = current_msg.header
        rospy.logdebug("[trt_yolo_ros] processing frame")
        # construct message
        self._write_message(detection_results, boxes, scores, classes)
        # send message
        try:
            rospy.logdebug("[trt_yolo_ros] publishing")
            self._pub.publish(detection_results)
            if self.publish_image and boxes is not None:
                compressed_image = self._bridge.cv2_to_compressed_imgmsg(
                    visualization, dst_format='jpg')
                compressed_image.header.stamp = current_msg.header.stamp
                self._pub_viz.publish(compressed_image)
        except CvBridgeError as e:
            rospy.logdebug("Failed to convert image %s", str(e))
def BoundingBoxes_Filtered(self, bounding_boxes, labels):
    """Return a BoundingBoxes message containing only the boxes whose
    translated class name appears in *labels* (all boxes when *labels*
    is empty).

    Translation comes from self.Trad(); classes without a translation
    fall back to a "not found" placeholder word.
    """
    filtered = BoundingBoxes()
    filtered.header = bounding_boxes.header
    translations = self.Trad()
    print(translations)
    print(" ")
    for box in bounding_boxes.bounding_boxes:
        if box.Class in translations:
            word = translations[box.Class]
        else:
            word = 'There are no object found'
        print(word)
        if len(labels) == 0 or word in labels:
            filtered.bounding_boxes.append(box)
    return filtered