Example #1
    def msgDN(self, image, boxes, scores, classes):
        """
        Create the Object Detector message to publish with ROS

        This uses the Darknet BoundingBox[es] messages
        """
        msg = BoundingBoxes()
        msg.header = image.header
        scores_above_threshold = np.where(scores > self.threshold)[1]

        for s in scores_above_threshold:
            # Get the properties
            bb = boxes[0, s, :]
            sc = scores[0, s]
            cl = classes[0, s]

            # Create the bounding box message
            detection = BoundingBox()
            detection.Class = self.category_index[int(cl)]['name']
            detection.probability = sc
            detection.xmin = int((image.width - 1) * bb[1])
            detection.ymin = int((image.height - 1) * bb[0])
            detection.xmax = int((image.width - 1) * bb[3])
            detection.ymax = int((image.height - 1) * bb[2])

            msg.boundingBoxes.append(detection)

        return msg
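
The indexing `np.where(scores > self.threshold)[1]` implies the detector returns batched arrays of shape (1, N), with boxes as normalized [ymin, xmin, ymax, xmax], as the TensorFlow Object Detection API does. A minimal sketch with toy NumPy arrays, assuming that layout (all values here are illustrative):

import numpy as np

# Toy detector output in the batched (1, N) layout assumed by msgDN.
scores = np.array([[0.9, 0.2, 0.7]])            # shape (1, 3)
boxes = np.array([[[0.1, 0.2, 0.5, 0.6],        # [ymin, xmin, ymax, xmax]
                   [0.0, 0.0, 1.0, 1.0],
                   [0.3, 0.3, 0.8, 0.9]]])      # shape (1, 3, 4)

kept = np.where(scores > 0.5)[1]                # column indices: detections 0 and 2
for s in kept:
    ymin, xmin, ymax, xmax = boxes[0, s, :]
    print(s, xmin, ymin, xmax, ymax)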
Example #2
 def process_frame(self):
     """ Main function to process the frame and run the infererence """
     # Deque the next image msg
     current_msg = self.msg_queue.get()
     current_image = None
     # Convert to image to OpenCV format
     try:
         current_image = self._bridge.imgmsg_to_cv2(current_msg, "bgr8")
         rospy.logdebug("[trt_yolo_ros] image converted for processing")
     except CvBridgeError as e:
         rospy.logdebug("Failed to convert image %s", str(e))
     # Initialize detection results
     if current_image is not None:
         rospy.logdebug("[trt_yolo_ros] processing frame")
         boxes, classes, scores, visualization = self.model(current_image)
         detection_results = BoundingBoxes()
         detection_results.header = current_msg.header
         detection_results.image_header = current_msg.header
         # construct message
         self._write_message(detection_results, boxes, scores, classes)
         # send message
         try:
             rospy.logdebug("[trt_yolo_ros] publishing")
             self._pub.publish(detection_results)
             if self.publish_image:
                 self._pub_viz.publish(
                     self._bridge.cv2_to_imgmsg(visualization, "bgr8")
                 )
         except CvBridgeError as e:
             rospy.logdebug("[trt_yolo_ros] Failed to convert image %s", str(e))
Example #3
def test():
    rospy.init_node('localizer_client')
    l = Localizer()

    # Fake data
    bb = BoundingBoxes()
    bb.bounding_boxes = []
    bb.image = Image()

    b1 = BoundingBox()
    b1.Class = "start_gate_pole"

    b2 = BoundingBox()
    b2.Class = "dice1"

    b3 = BoundingBox()
    b3.Class = "dice2"

    b4 = BoundingBox()
    b4.Class = "random"

    bb.bounding_boxes.append(b1)
    bb.bounding_boxes.append(b2)
    bb.bounding_boxes.append(b3)
    bb.bounding_boxes.append(b4)
    l.boxes_received(bb)

    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        sys.exit()
Example #4
def get_test_bounding_boxes():

    Helipad = BoundingBox()
    Helipad.probability = 0.5
    Helipad.xmin = 312
    Helipad.ymin = 120
    Helipad.xmax = 337
    Helipad.ymax = 148
    Helipad.id = 2
    Helipad.Class = "Helipad"

    H = BoundingBox()
    H.probability = 0.5
    H.xmin = 320
    H.ymin = 128
    H.xmax = 330
    H.ymax = 138
    H.id = 0
    H.Class = "H"

    Arrow = BoundingBox()
    Arrow.probability = 0.5
    Arrow.xmin = 333
    Arrow.ymin = 140
    Arrow.xmax = 335
    Arrow.ymax = 143
    Arrow.id = 1
    Arrow.Class = "Arrow"

    bbs = BoundingBoxes()
    bbs.bounding_boxes = [Helipad, H, Arrow]
    return bbs
Example #5
    def image_callback(self, msg):
        # print("Received an image!")
        try:
            # Convert your ROS Image message to numpy image data type
            cv2_img = np.frombuffer(msg.data, dtype=np.uint8).reshape(
                msg.height, msg.width, -1)
            image_ori = cv2.resize(cv2_img, (512, 512))
            # remove alpha channel if it exists
            if image_ori.shape[-1] == 4:
                image_ori = image_ori[..., :3]
            # normalize the image
            image = self.normalize(image_ori)

            with torch.no_grad():
                image = torch.Tensor(image)
                if torch.cuda.is_available():
                    image = image.cuda()
                boxes = self.model_.get_boxes(
                    image.permute(2, 0, 1).unsqueeze(0))

            Boxes_msg = BoundingBoxes()
            Boxes_msg.image_header = msg.header
            Boxes_msg.header.stamp = rospy.Time.now()
            for box in boxes[0]:
                # print(box.confidence)
                confidence = float(box.confidence)
                box = (box.box * torch.Tensor([512] * 4)).int().tolist()
                if confidence > 0.35:
                    cv2.rectangle(image_ori, (box[0], box[1]),
                                  (box[2], box[3]), (255, 0, 0), 2)
                    cv2.putText(image_ori,
                                str(confidence)[:4], (box[0] - 2, box[1] - 2),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                    # msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
                    detection_box = BoundingBox()
                    # detection_box.Class=str(box.class_id)
                    detection_box.xmin = box[0]
                    detection_box.ymin = box[1]
                    detection_box.xmax = box[2]
                    detection_box.ymax = box[3]
                    detection_box.probability = confidence
                    Boxes_msg.bounding_boxes.append(detection_box)

            msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
            self.img_pub.publish(msg_frame)
            self.ret_pub.publish(Boxes_msg)

        except CvBridgeError as e:
            print("CvBridgeError:", e)
        if self.savefigure:
            cv2.imwrite('new_image.jpeg', cv2_img)
            print('save picture')
            # self.detected_msg.data="Take a photo"
            self.savefigure = False
Example #6
    def publish_d_msgs(self, tracks, img_msg):

        boxes = BoundingBoxes()
        boxes.header = img_msg.header

        for track in tracks:
            boxes.bounding_boxes.append(self.create_d_msgs_box(track))

        print("boxes--------------------")
        for box_print in boxes.bounding_boxes:
            print(box_print)
        print("\n\n")

        self.pub.publish(boxes)
Example #7
    def __init__(self):
        self.node_name = "dark_net_object_detection"
        rospy.init_node(self.node_name)

        self.conf_thresh = 0.25
        self.boxes_list  = BoundingBoxes()

        # Create unique and somewhat visually distinguishable bright
        # colors for the different classes.
        # Depends on the detector
        self.num_classes = 20
        self.class_colors = []
        for i in range(0, self.num_classes):
            # This can probably be written in a more elegant manner
            hue = 255*i/self.num_classes
            col = np.zeros((1,1,3)).astype("uint8")
            col[0][0][0] = hue
            col[0][0][1] = 128 # Saturation
            col[0][0][2] = 255 # Value
            cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
            col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
            self.class_colors.append(col)

        self.bridge             = CvBridge() # Create the cv_bridge object
        self.to_draw            = cv2.imread(pkg_path+ '/resource/start.jpg')
        self.image_sub          = rospy.Subscriber("/usb_cam/image_raw", Image, self.detect_image,queue_size=1)
        self.bounding_boxes_sub = rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, self.overlay_boxes,queue_size=1)
Example #8
    def __init__(self, max_age=100, min_hits=10, iou_threshold=0.3):
        """
        ROS IoU Tracker
        :param max_age: Maximum number of frames to keep alive a track without associated detections.
        :param min_hits: Minimum number of associated detections before track is initialised.
        :param iou_threshold: Minimum IOU for match.
        """
        self.iou_threshold = iou_threshold
        self.bridge = CvBridge()
        self.tracked_img_pub = rospy.Publisher("/iou_tracker/detection_image",
                                               Image,
                                               queue_size=1)
        self.new_bboxes = []
        self.bboxes = []
        self.bboxes_msg = BoundingBoxes()
        self.traces = dict()
        self.mot_tracker = Sort(
            max_age=max_age, min_hits=min_hits,
            iou_threshold=iou_threshold)  # create instance of the SORT tracker
        self.image = np.zeros(1)
        self.raw_image_sub = rospy.Subscriber('/darknet_ros/detection_image',
                                              Image,
                                              self.__raw_image_callback,
                                              queue_size=1)
        #self.raw_image_sub = rospy.Subscriber('/r200/depth/image_raw', Image, self.__raw_image_callback, queue_size=1)

        self.bbox_pub = rospy.Publisher("/iou_tracker/bounding_boxes",
                                        BoundingBoxes,
                                        queue_size=1)
        self.bbox_nn_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                            BoundingBoxes,
                                            self.__bbox_nn_callback,
                                            queue_size=1)
        rospy.loginfo("iou_tracker has been initialized!")
Example #9
    def callback(self, data):
        print('\n', 'Predicting')
        # try:
        #   cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        # except CvBridgeError as e:
        #   print(e)
        # cv_image = cv2.copyMakeBorder(cv_image, 100, 100, 100, 100, cv2.BORDER_CONSTANT, value=[85, 120, 68])
        # cv_image = cv2.imread("pic_buffer/R.png")
        # cv2.imwrite("pic_buffer/R.png", cv_image)

        # im = array_to_image(cv_image)
        # dn.rgbgr_image(im)
        # # print(dn.detect(net, meta, im))
        # print("===")
        r = []
        img = cv2.imread("pic_buffer/1_R.png")
        sp = img.shape
        r += self.subpredict(0, int(sp[0] / 2), 0, sp[1])
        r += self.subpredict(int(sp[0] / 2), sp[0], 0, sp[1])
        r += self.subpredict(int(sp[0] / 4), int(sp[0] * 3 / 4), 0, sp[1])
        r += self.subpredict(0, sp[0], 0, int(sp[1] / 2))
        r += self.subpredict(0, sp[0], int(sp[1] / 2), sp[1])
        r += self.subpredict(0, sp[0], int(sp[1] / 3), int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], 0, int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], int(sp[1] / 3), sp[1])
        r += self.subpredict(0, int(sp[0] * 2 / 3), 0, sp[1])
        r += self.subpredict(int(sp[0] / 3), sp[0], 0, sp[1])
        r = self.removeDuplicate(r)
        print()
        img = self.drawCrossings(img, r)
        cv2.imwrite("pic_buffer/2_D.png", img)

        boxes = BoundingBoxes()
        print('\n', 'Predict result:')
        for i in range(len(r)):
            box = BoundingBox()
            print('    ', r[i][0], r[i][1] * 100, '%')
            box.Class = r[i][0].decode('utf-8')
            box.probability = r[i][1]
            box.id = i
            box.xmin = int(r[i][2][0])
            box.ymin = int(r[i][2][1])
            box.xmax = int(r[i][2][2])
            box.ymax = int(r[i][2][3])
            boxes.bounding_boxes.append(box)
            # if b'endpoint' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            print('    ', int(r[i][2][0]), int(r[i][2][1]), int(r[i][2][2]),
                  int(r[i][2][3]))
            # if b'cross' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            #     # print('\t', r[i][2])
        print('\n', 'Darknet waiting for rgb_img')
        time.sleep(0.5)
        try:
            self.boxes_pub.publish(boxes)
        except rospy.ROSException as e:  # publish() raises ROS errors, not CvBridgeError
            print(e)
Example #10
def msg(image, boxes):
    """
    Create the Darknet BoundingBox[es] messages
    """
    msg = BoundingBoxes()
    msg.header = image.header

    for (x, y, w, h) in boxes:
        detection = BoundingBox()
        detection.Class = "human"
        detection.probability = 1  # Not really...
        detection.xmin = x
        detection.ymin = y
        detection.xmax = x + w
        detection.ymax = y + h
        msg.boundingBoxes.append(detection)

    return msg
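
The (x, y, w, h) tuples and the hard-coded "human" class match the output of OpenCV's HOG people detector; a minimal usage sketch, assuming that detector is the source of `boxes` (the snippet above does not show where they come from):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

frame = cv2.imread('frame.jpg')               # any BGR image
boxes, weights = hog.detectMultiScale(frame)  # each box is (x, y, w, h)
# bounding_boxes_msg = msg(image, boxes)      # `image` is the matching ROS Image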
Example #11
 def make_msg(self, Depth1Image, Depth2Image, detection_data, camera1_param, camera2_param):
     bboxes_from_camera1 = BoundingBoxes()
     bboxes_from_camera2 = BoundingBoxes()
     bboxes_from_camera1.header = Depth1Image.header
     bboxes_from_camera2.header = Depth2Image.header
     self.now = rospy.get_rostime()
     self.timebefore = detection_data.header.stamp
     # Add point obstacle
     self.obstacle_msg.header.stamp = detection_data.header.stamp
     self.obstacle_msg.header.frame_id = "odom" # CHANGE HERE: odom/map
      # convert to OpenCV format
     bridge = CvBridge()
     try:
         Depth1image = bridge.imgmsg_to_cv2(Depth1Image, desired_encoding="passthrough")
         Depth2image = bridge.imgmsg_to_cv2(Depth2Image, desired_encoding="passthrough")
     except CvBridgeError as e:
         print(e)
     for bbox in detection_data.bounding_boxes:
         if (bbox.ymin + bbox.ymax)/2 < Depth1image.shape[0]:
             if bbox.ymax > Depth1image.shape[0]:
                 bbox.ymax = Depth1image.shape[0]
             bboxes_from_camera1.bounding_boxes.append(bbox)
         else:
             bbox.ymin = bbox.ymin - Depth1image.shape[0]
             bbox.ymax = bbox.ymax - Depth1image.shape[0]
             if bbox.ymin < 0:
                 bbox.ymin = 0
             bboxes_from_camera2.bounding_boxes.append(bbox)
     camera1_obstacle_msg, camera2_obstacle_msg = ObstacleArrayMsg(), ObstacleArrayMsg()
     camera1_marker_data, camera2_marker_data = MarkerArray(), MarkerArray()
     camera1_obstacle_msg, camera1_marker_data = self.bbox_to_position_in_odom(bboxes_from_camera1, Depth1image, camera1_param)
     obstacle_msg, marker_data = self.bbox_to_position_in_odom(bboxes_from_camera2, Depth2image, camera2_param, len(camera1_obstacle_msg.obstacles), camera1_obstacle_msg, camera1_marker_data)
     self.obstacle_msg.obstacles, self.marker_data.markers = self.update_obstacles(self.obstacle_msg, obstacle_msg, self.marker_data, marker_data)
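
The loop above routes each detection to one of two vertically stacked camera images: a box whose center lies within the first `Depth1image.shape[0]` rows belongs to camera 1 (with ymax clamped), otherwise its y-coordinates are shifted down by that row count into camera 2's frame. A toy illustration of the arithmetic, assuming 480-row images:

# Toy values only: a box centered at y = 600 in the stacked image.
rows = 480
ymin, ymax = 500, 700
if (ymin + ymax) / 2 < rows:
    ymax = min(ymax, rows)                  # camera 1, clamped
else:
    ymin, ymax = ymin - rows, ymax - rows   # camera 2 coords: (20, 220)
    ymin = max(ymin, 0)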
Example #12
 def process_frame(self):
     """ Main function to process the frame and run the infererence """
     # Deque the next image msg
     current_msg = self.msg_queue.get()
     current_image = None
     # Convert to image to OpenCV format
     try:
         current_image = self._bridge.imgmsg_to_cv2(current_msg, "bgr8")
         rospy.logdebug("[trt_yolo_ros] image converted for processing")
     except CvBridgeError as e:
         rospy.logdebug("Failed to convert image %s" , str(e))
     # Initialize detection results
     if current_image is not None:
         boxes, classes, scores, visualization = self.model(current_image)
         detection_results = BoundingBoxes()
         detection_results.header = current_msg.header
         detection_results.image_header = current_msg.header
         rospy.logdebug("[trt_yolo_ros] processing frame")
         # construct message
         self._write_message(detection_results, boxes, scores, classes)
         # send message
         try:
             rospy.logdebug("[trt_yolo_ros] publishing")
             self._pub.publish(detection_results)
             if self.publish_image and boxes is not None:
                 #compressed_msg = CompressedImage()
                 #compressed_msg.header.stamp = current_msg.header.stamp
                 #compressed_msg.format = "jpeg"
                 #print("Generating data")
                 #compressed_msg.data = np.array(cv2.imencode('.jpg', visualization)).tostring()
                 #print("Going to publish")
                 #self._pub_viz.publish(compressed_msg)
                 compressed_image = self._bridge.cv2_to_compressed_imgmsg(visualization, dst_format='jpg')
                 compressed_image.header.stamp = current_msg.header.stamp
                 self._pub_viz.publish(compressed_image)
                 #self._pub_viz.publish(self._bridge.cv2_to_imgmsg(visualization, "bgr8"))
         except CvBridgeError as e:
             rospy.logdebug("Failed to convert image %s" , str(e))
Example #13
    def BoundingBoxes_Filtered(self, bounding_boxes, labels):
        bbs = BoundingBoxes()

        bbs.header = bounding_boxes.header
        words = self.Trad()

        print(words)
        print(" ")

        for bounding_box in bounding_boxes.bounding_boxes:

            if bounding_box.Class in words:
                word = words[bounding_box.Class]
            else:
                word = 'No matching object found'

            print(word)

            if len(labels) == 0 or word in labels:

                bbs.bounding_boxes.append(bounding_box)

        return bbs
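
`self.Trad()` is not included in this snippet; judging by its use, it returns a dict mapping detector class names to translated labels. A hypothetical stand-in, purely for illustration:

def Trad(self):
    # Hypothetical stand-in (the real method is not shown): map darknet
    # class names to the translated labels used for filtering.
    return {
        'person': 'persona',
        'chair': 'silla',
        'bottle': 'botella',
    }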
Example #14
    def camera_image_callback(self, image, camera):
        """Gets images from camera to generate detections on

        Computes where the bounding boxes should be in the image, and
        fakes YOLO bounding boxes output as well as publishing a debug
        image showing where the detections are if turned on

        Parameters
        ----------
        image : sensor_msgs.Image
            The image from the camera to create detections for
        camera : Camera
            Holds camera parameters for projecting points

        """

        cv_image = self.bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")

        if camera.is_ready():

            # Fake darknet yolo detections message
            bounding_boxes = BoundingBoxes()

            bounding_boxes.header = image.header
            bounding_boxes.image_header = image.header
            bounding_boxes.image = image
            bounding_boxes.bounding_boxes = []

            for _, obj in self.objects.items():

                detections = self.get_detections(obj, camera, image.header.stamp)

                if detections is not None:

                    if camera.debug_image_pub:

                        self.render_debug_context(cv_image, detections, camera)

                    for det in detections:

                        bounding_box = BoundingBox()

                        bounding_box.Class = det[2]
                        bounding_box.probability = 1.0
                        bounding_box.xmin = int(det[0][0])
                        bounding_box.xmax = int(det[0][1])
                        bounding_box.ymin = int(det[0][2])
                        bounding_box.ymax = int(det[0][3])
                        bounding_boxes.bounding_boxes.append(bounding_box)

            # Only publish detection if there are boxes in it
            if bounding_boxes.bounding_boxes:
                self.darknet_detection_pub.publish(bounding_boxes)

        if camera.debug_image_pub:
            image_message = self.bridge.cv2_to_imgmsg(cv_image, encoding="rgb8")
            camera.debug_image_pub.publish(image_message)
Example #15
    def __init__(self):
        # set up global variables
        self.bridge = CvBridge()
        self.bounding_boxes = BoundingBoxes()
        self.image = None

        # publisher setup
        self.compressed_detection_image_pub = rospy.Publisher(
            "/detection_image/compressed", CompressedImage, queue_size=1)

        # subscriber setup
        compressed_color_image_sub = rospy.Subscriber(
            "camera/color/image_raw/compressed", CompressedImage,
            self.bridge_color_image)
        bounding_boxes_sub = rospy.Subscriber('darknet_ros/bounding_boxes',
                                              BoundingBoxes,
                                              self.update_bounding_boxes)
Example #16
 def GetBBoxesMsg(self, label_file: str):
     output_bboxes_msg = BoundingBoxes()
     with open(label_file, 'r') as file:
         id_ = 0
         for line in file:
             gt = line.split()
             bbox_msg = BoundingBox()
             #print(gt)
             bbox_msg.xmin = int(gt[5])
             bbox_msg.ymin = int(gt[6])
             bbox_msg.xmax = int(gt[7])
             bbox_msg.ymax = int(gt[8])
             bbox_msg.id = id_
             bbox_msg.Class = gt[0]
             id_ += 1
             output_bboxes_msg.bounding_boxes.append(bbox_msg)
     return output_bboxes_msg
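
The parser above assumes whitespace-separated label lines with the class name in column 0 and pixel coordinates xmin/ymin/xmax/ymax in columns 5-8; columns 1-4 are ignored. A line in that layout (values illustrative):

# One label line in the layout GetBBoxesMsg expects:
#   class  ?    ?  ?    ?    xmin ymin xmax ymax
line = "car 0.0 0 0.0 0.0 100 50 200 150"
gt = line.split()
assert gt[0] == "car"
assert [int(v) for v in gt[5:9]] == [100, 50, 200, 150]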
Example #17
    def __init__(self):
        bbox_nn_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                       BoundingBoxes,
                                       self.bbox_nn_callback,
                                       queue_size=1)
        raw_image_sub = rospy.Subscriber('/r200/rgb/image_raw',
                                         Image,
                                         self.raw_image_callback,
                                         queue_size=1)
        #raw_image_sub = rospy.Subscriber('/r200/depth/image_raw', Image, self.raw_image_callback, queue_size=1)
        #raw_image_sub = rospy.Subscriber('/darknet_ros/detection_image', Image, self.raw_image_callback, queue_size=1)

        self.bridge = CvBridge()
        self.pub = rospy.Publisher("/bbox_tracker/detection_image",
                                   Image,
                                   queue_size=1)
        self.refined_bboxes_msg = BoundingBoxes()
        self.pub1 = rospy.Publisher("/bbox_tracker/bounding_boxes",
                                    BoundingBoxes,
                                    queue_size=1)

        self.bboxes_new = []
        self.X = []  # a list of darknet_ros_msgs/BoundingBox
        self.Cov = []  # this is in Euclidean coordinate system
        self.image_new = np.zeros(1)
        self.image_old = np.zeros(1)
        self.startXs = np.empty((20, 0), int)
        self.startYs = np.empty((20, 0), int)
        self.bboxes_klt = np.empty((0, 4, 2), float)
        self.traces = []

        self.observation_done = False

        self.Q = np.diag(
            (.2, .2, .2, .2)
        )  # observation model noise covariance in Euclidean coordinate system
        self.R = np.diag(
            (10., 10., 0., 10., 10., 0.)
        )  # motion model noise covariance in Homogeneous coordinate system

        self.tracking_thread = Thread(target=self.tracking, args=())
        self.tracking_thread.daemon = True
        self.tracking_thread.start()

        rospy.loginfo("bbox_tracker initialized!")
Example #18
    def test_tracker_callback(self):
        args = None
        rclpy.init(args=args)
        node = ObjectTrackingNode()

        bboxes_msg = BoundingBoxes()

        for track in self.tracks:
            bbox_msg = BoundingBox()

            bbox_msg.xmin = track["x1"]
            bbox_msg.ymin = track["y1"]
            bbox_msg.xmax = track["x2"]
            bbox_msg.ymax = track["y2"]

            bboxes_msg.bounding_boxes.append(bbox_msg)

        node.tracker_callback(bboxes_msg)
Example #19
    def boxcallback(self, msg):
        dets = []
        for i in range(len(msg.bounding_boxes)):
            bbox = msg.bounding_boxes[i]
            dets.append(
                np.array([
                    bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax,
                    bbox.probability
                ]))
        dets = np.array(dets)
        start_time = time.time()
        trackers = self.update(dets)
        cycle_time = time.time() - start_time
        print(cycle_time)

        r = BoundingBoxes()
        for d in range(len(trackers)):
            # Create a fresh BoundingBox per track: reusing a single
            # instance would append the same object repeatedly.
            rb = BoundingBox()
            rb.probability = 1
            rb.xmin = int(trackers[d, 0])
            rb.ymin = int(trackers[d, 1])
            rb.xmax = int(trackers[d, 2])
            rb.ymax = int(trackers[d, 3])
            rb.id = int(trackers[d, 4])
            rb.Class = 'tracked'
            r.bounding_boxes.append(rb)
            if self.img_in == 1 and self.display:
                res = trackers[d].astype(np.int32)
                rgb = self.colours[res[4] % 32, :] * 255
                cv2.rectangle(self.img, (res[0], res[1]), (res[2], res[3]),
                              (rgb[0], rgb[1], rgb[2]), 2)
                cv2.putText(self.img, "ID : %d" % (res[4]), (res[0], res[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 85, 200), 2)
        if self.img_in == 1 and self.display:
            try:
                self.image = self.bridge.cv2_to_imgmsg(self.img, "bgr8")
                self.image.header.stamp = rospy.Time.now()
                self.pubimage.publish(self.image)
            except CvBridgeError as e:
                pass
        r.header.stamp = rospy.Time.now()
        self.pubb.publish(r)
        return
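
`self.update` here follows the SORT contract: it takes an (N, 5) array of [xmin, ymin, xmax, ymax, score] detections and returns an (M, 5) array whose last column is the assigned track ID. A toy call, assuming the reference SORT implementation:

import numpy as np
# from sort import Sort  # reference implementation by Bewley et al. (assumed)

tracker = Sort(max_age=30, min_hits=3, iou_threshold=0.3)
dets = np.array([[100., 120., 200., 260., 0.9]])  # [x1, y1, x2, y2, score]
tracks = tracker.update(dets)                     # rows: [x1, y1, x2, y2, id]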
Example #20
    def __init__(self):

        # Variables 
        self.bbox = BoundingBoxes()
        self.sonar_data = LaserScan()
        self.image = Image()
        self.ekf_data = Odometry()
        self.bootlegger = object_class.LOCATION_OF_OBJECT("Bootlegger")
        self.gman = object_class.LOCATION_OF_OBJECT("G-man")
        self.tommygun = object_class.LOCATION_OF_OBJECT("Tommygun")
        self.badge = object_class.LOCATION_OF_OBJECT("Badge")
        # Publishers
        self.pub = rospy.Publisher('/manta/object_position', ObjectPlacement, queue_size=10)

        # Subscribers
        rospy.Subscriber('manta/sonar', LaserScan, self.sonarCallback)
        rospy.Subscriber('/odometry/filtered', Odometry, self.ekfCallback)
        rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.objectCallback)
        rospy.Timer(rospy.Duration(1.0), self.publishCallback)
Example #21

if __name__ == '__main__':
    colours = np.random.rand(32, 3)  # used only for display
    mot_tracker = Sort(max_age=200, min_hits=1)  # create instance of the SORT tracker

    while True:
        try:
            start_time = time.time()
            if mot_tracker.bbox_checkin == 1:
                trackers = mot_tracker.update(mot_tracker.dets)
                mot_tracker.bbox_checkin = 0
            else:
                trackers = mot_tracker.update(np.empty((0, 5)))

            r = BoundingBoxes()
            for d in range(len(trackers)):
                rb = BoundingBox()
                rb.probability = 1
                rb.xmin = int(trackers[d, 0])
                rb.ymin = int(trackers[d, 1])
                rb.xmax = int(trackers[d, 2])
                rb.ymax = int(trackers[d, 3])
                rb.id = int(trackers[d, 4])
                rb.Class = 'tracked'
                r.bounding_boxes.append(rb)
                if mot_tracker.img_in == 1 and mot_tracker.display:
                    res = trackers[d].astype(np.int32)
                    rgb = colours[res[4] % 32, :] * 255
                    cv2.rectangle(mot_tracker.img, (res[0], res[1]), (res[2], res[3]), (rgb[0], rgb[1], rgb[2]), 6)
                    cv2.putText(mot_tracker.img, "ID : %d" % (res[4]), (res[0], res[1]), cv2.FONT_HERSHEY_SIMPLEX, 1.6, (200, 85, 200), 6)
Example #22
    def process_frames(self):
        print("\nMining Frames for Flicker Detection ......\o/ \o/ \o/ \o/....... \n")
        # Init extraction loop
        msg_count = 0

        # Iterate through bags
        rosbag_files = sorted(glob.glob(os.path.join(self.bag_dir, "*.bag")))
        for bag_file in tqdm(rosbag_files, unit='bag'):
            self.frame_imgs, self.trk_table, self.frame_count = self.load_cache(bag_file)
            self.bag_keypath = '_'.join(os.path.normpath(bag_file).split(os.sep))

            if not self.frame_imgs or not self.trk_table:
                # Open bag file. If corrupted, skip the file
                try:
                    with rosbag.Bag(bag_file, 'r') as bag:
                        # Check if desired topics exists in bags
                        recorded_topics = bag.get_type_and_topic_info()[1]
                        if not all(topic in recorded_topics for topic in (DETECTION_TOPIC, CAR_STATE_TOPIC, IMAGE_STREAM_TOPIC)):
                            print("ERROR: Specified topics not in bag file:", bag_file, ". Skipping bag!")
                            continue

                        gps = ""
                        v_ego = 0.0
                        self.cv_image = None
                        # Get Detections
                        time_nsecs = []
                        self.trk_table = OrderedDict()
                        self.frame_imgs = OrderedDict()
                        self.frame_trks = OrderedDict()
                        self.flickering_frame_imgs = OrderedDict()
                        self.flickering_frame_trks = OrderedDict()
                        for topic, msg, t in bag.read_messages():
                            if topic == CAR_STATE_TOPIC:
                                self.gps = msg.GPS
                                self.v_ego = msg.v_ego
                            if topic == IMAGE_STREAM_TOPIC:
                                if msg_count % self.skip_rate == 0:

                                    self.cv_image = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")
                                    self.img_msg_header = msg.header
                                    image_name = "frame%010d_%s.jpg" % (self.frame_count, str(msg.header.stamp.to_nsec()))
                                    img_path = os.path.join(self.frames_dir, image_name)
                                    cv2.imwrite(img_path, self.cv_image)

                                    uri = img_path
                                    img_key = int(msg.header.stamp.to_nsec())
                                    fname = self.path_leaf(uri)
                                    vid_name = ''
                                    # parse the hour from the human-readable time
                                    # (assumes a "date time" layout from format_from_nanos)
                                    readable_time = self.format_from_nanos(img_key)
                                    time_part = readable_time.split(' ')[-1]
                                    if ':' in time_part:
                                        hour = int(time_part.split(':')[0])
                                    else:
                                        hour = 12

                                    if (hour > 4 and hour < 6) or (hour > 17 and hour < 19):
                                        timeofday = 'dawn/dusk'
                                    elif hour > 6 and hour < 17:
                                        timeofday = 'daytime'
                                    else:
                                        timeofday = 'night'


                                    scene = 'highway'
                                    timestamp = img_key
                                    dataset_path = img_path
                                    if self.gps:
                                        gps = pynmea2.parse(self.gps)
                                        lat = gps.lat
                                        long = gps.lon
                                    else:
                                        lat = None
                                        long = None
                                    height, width, depth = self.cv_image.shape

                                    self.frame_imgs[img_key] = {'url': img_path,
                                                             'name': img_path,
                                                             'width': width,
                                                             'height': height,
                                                             'index': self.frame_count,
                                                             'timestamp': timestamp,
                                                             'videoName':vid_name,
                                                             'attributes': {'weather': 'clear', 'scene': scene, 'timeofday': timeofday},
                                                             'labels': []
                                                            }
                                    self.frame_trks[img_key] = []

                                    msg_count += 1
                                    self.frame_count += 1

                        ## Get Tracking Data ##
                        if self.use_bag_detections:
                            for topic, msg, t in bag.read_messages():
                                if topic == DETECTION_TOPIC:
                                    # Find corresponding frame for detections message
                                    found_frame = False
                                    for frame in sorted(self.frame_imgs.keys()):
                                        if int(msg.header.stamp.to_nsec()) > frame-3.333e7 and int(msg.header.stamp.to_nsec()) < frame+3.333e7:
                                            found_frame = True

                                            # Collect tracker_msgs
                                            detections, tracks = self.tracking_callback(msg)
                                            # Append to frame annotations table
                                            self.append_frame_trks(frame, tracks, detections)
                                            # Append to track annotations table
                                            self.append_trk_table(frame, tracks)
                                            # Debugger statement to make monitor data extraction
                                            print("FRAME TIMESTAMP:",frame)
                                            print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                            print('IMG PATH:',self.frame_imgs[frame]['url'])
                                            break

                                    if not found_frame: # Try a wider time window and try to shuffle detection to appropriate frame
                                        for i, frame in enumerate(sorted(self.frame_imgs.keys())):
                                            if int(msg.header.stamp.to_nsec()) > frame-3.783e7 and int(msg.header.stamp.to_nsec()) < frame+3.783e7:
                                                found_frame = True
                                                # Collect tracks
                                                detections, tracks = self.tracking_callback(msg)
                                                # Append to buffer
                                                if len(self.frame_imgs[frame]) < 2: # Check if already assigned detections to this frame
                                                    idx = frame
                                                elif i > 0 and len(self.frame_imgs[self.frame_imgs.keys()[i-1]]) < 2: # Assign detections to the previous frame if empty
                                                    idx = self.frame_imgs.keys()[i-1]
                                                elif i < len(self.frame_imgs.keys())-1 and len(self.frame_imgs[self.frame_imgs.keys()[i+1]]) < 2: # Assign detections to the next frame if empty
                                                    idx = self.frame_imgs.keys()[i+1]
                                                else:
                                                    idx = frame
                                                # Append to frame annotations table
                                                self.append_frame_trks(idx, tracks, detections)
                                                # Append to track annotations table
                                                self.append_trk_table(idx, tracks)
                                                # Debugger statement to make monitor data extraction
                                                print("FRAME TIMESTAMP:",idx)
                                                print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                                print('IMG PATH:', self.frame_imgs[idx]['url'])
                                                break
                        else:
                            anns = darknet_annotator.annotate(os.path.abspath(self.frames_dir), os.path.abspath(self.current_model_cfg_path), os.path.abspath(self.inference_model), os.path.abspath(self.current_data_cfg_path))
                            img_stamps = OrderedDict()

                            for uri, img_detections in sorted(anns):
                                img_timestamp = int(uri.split('_')[-1].replace('.jpg', '').replace('.png', '')) # Get from fpath
                                img_stamps[img_timestamp] = (uri, img_detections)


                            ann_idx = 0
                            for img_timestamp in sorted(img_stamps.keys()):
                                dets = []
                                # Get corresponding image
                                uri = img_stamps[img_timestamp][0]
                                bboxes = BoundingBoxes()
                                bboxes.header.stamp = datetime.now()
                                bboxes.image_header = self.img_msg_header

                                for detection in img_stamps[img_timestamp][1]:
                                    # Build Detections
                                    bbox = BoundingBox()
                                    bbox.xmin, bbox.ymin = detection['box2d']['x1'], detection['box2d']['y1']
                                    bbox.xmax, bbox.ymax = detection['box2d']['x2'], detection['box2d']['y2']
                                    bbox.Class = detection['category']
                                    bbox.class_id = 0
                                    bbox.probability = 0.99
                                    bboxes.bounding_boxes.append(bbox)

                                # Collect tracker_msgs
                                self.cv_image = cv2.imread(os.path.abspath(os.path.join(self.frames_dir, uri)))
                                detections, tracks = self.tracking_callback(bboxes)
                                # Append to frame annotations table
                                self.append_frame_trks(img_timestamp, tracks, detections)
                                # Append to track annotations table
                                self.append_trk_table(img_timestamp, tracks)
                                # Debugger statement to make monitor data extraction
                                # print("FRAME TIMESTAMP:",img_timestamp)
                                # print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                # print('IMG PATH:',self.frame_imgs[img_timestamp]['url'])

                        self.save_cache(bag_file)
                    msg_count = 0
                except ROSBagException as e:
                    print("\n", bag_file, "Failed!  || ")
                    print(str(e), '\n')
                    continue

            ## Generate JSON Object in BDD Format ##
            self.generate_flickering_visualizations()

            ## Generate Ground Truth Annotations in BDD Format ##
            self.generate_annotations()

            # Export to Scalabel
            self.export(bag_file)

            # Print Summary
            print("\nFrames Extracted:", len(self.flickering_frame_imgs.keys()))
            print("=================================================================\n\n")
Example #23
    def __init__(self):
        print("init start")
        rospy.init_node('detect_tracking', anonymous=True)

        #bebop info
        #sub
        #self.opencv_img_sub = rospy.Subscriber('/image_raw', Image, self.callback_opencv)
        self.opencv_img_sub = rospy.Subscriber('/bebop/image_raw', Image,
                                               self.callback_opencv)

        #with bebop
        #sub
        self.bebop_mode_sub = rospy.Subscriber('/bebop_mode', UInt8,
                                               self.callback_bebop_mode)
        self.bebop_status_sub = rospy.Subscriber('/bebop_status',
                                                 Int32,
                                                 self.callback_bebop_status,
                                                 queue_size=5)
        self.bebop_req_save_image_sub = rospy.Subscriber(
            '/bebop_req_save_image',
            Int8,
            self.callback_bebop_req_save_image,
            queue_size=5)
        #pub
        self.person_to_drone_Alignment_pub = rospy.Publisher(
            '/person_to_drone_Alignment', String, queue_size=5)
        self.isyn_status_pub = rospy.Publisher('/status_isyn',
                                               Int32,
                                               queue_size=1)
        self.found_person_pub = rospy.Publisher("/found_person",
                                                Int8,
                                                queue_size=1)
        self.isyn_save_image_clear_pub = rospy.Publisher(
            '/isyn_save_image_clear', Int8, queue_size=1)

        #with darknet
        #sub
        self.found_object_sub = rospy.Subscriber('/darknet_ros/found_object',
                                                 Int8,
                                                 self.callback_found_object)
        self.bounding_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                             BoundingBoxes,
                                             self.callback_darknet)
        #pub
        self.image_pub = rospy.Publisher("/send_to_image", Image, queue_size=5)

        #init
        self.selecting_sub_image = "raw"  # you can choose image type "compressed", "raw"
        self.bridge = CvBridge()
        self.box = BoundingBoxes()
        self.detect_object_mid = []
        self.curr_bebop_req_save_image = 0
        #init isyn
        self.curr_isyn_status_msg = 0
        self.curr_bebop_status_msg = 0
        self.found_person_msg = 0
        self.found_object = 0
        #init flag
        self.scshot_clear_msg = 0
        self.error_check_num = 0

        # detect_face_init
        self.known_face_encodings = []
        self.known_face_names = []
        dirname = '/home/ksshin/knowns'
        files = os.listdir(dirname)
        for filename in files:
            self.name, ext = os.path.splitext(filename)
            if ext == '.jpg':
                print(self.name)
                self.known_face_names.append(self.name)
                pathname = os.path.join(dirname, filename)
                img = face_recognition.load_image_file(pathname)
                face_encoding = face_recognition.face_encodings(img)[0]
                self.known_face_encodings.append(face_encoding)

        # Initialize some variables
        self.face_locations = []
        self.face_encodings = []
        self.face_names = []
        self.process_this_frame = True

        # height,width
        self.height = 480
        self.width = 856

        # point center
        self.point_x_center = self.width / 2
        self.point_y_center = self.height / 2

        print("init clear")
Example #24
    def __init__(self):
        # parameter for calculation
        self.min_line_length = 0.5
        self.max_line_length = 7.0
        self.line_step = 0.02

        # initialize ROS node
        rospy.init_node('localize_plant_node', anonymous=True)
        # get node name
        self.ns_node = 'localize_plant/'   # rospy.get_namespace()

        # define variables
        self.current_pose = PoseStamped()
        self.current_yaw = 0.0
        self.slam_control = False
        self.first_pointcould = PointCloud()
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = 'map'
        self.first_pointcould.header = header
        self.points = None
        self.second_pointcould = PointCloud()
        self.second_pointcould.header.frame_id = 'map'
        self.line_point = Point32()
        self.ns_tello = 'tello/'
        self.ns_darknet = 'darknet_ros/'
        self.ns_plant_loc = 'plant_localizer/'

        # 90 deg around y-axis
        self.pos_listener = TransformListener()
        self.rot_camera = np.array([[0.0, 0.0, -1.0],
                                   [0.0, 1.0, 0.0],
                                   [1.0, 0.0, 0.0]],dtype=np.float32)

        self.slam_pose_topic = 'orb_slam_2_ros/pose'

        self.msg_boundingBoxes = BoundingBoxes()

        # ROS Subs und Pubs initialize
        rospy.Subscriber(self.ns_tello+'odom', Odometry, self.cb_odom, queue_size=1)
        rospy.Subscriber(self.ns_darknet+'found_object', ObjectCount, self.cb_objects, queue_size=1)
        rospy.Subscriber(self.ns_darknet+'bounding_boxes', BoundingBoxes, self.cb_bounding_boxes, queue_size=1)

        self.pub_first_pointcloud = rospy.Publisher(self.ns_plant_loc + "/label_1_pointcloud", PointCloud, queue_size=10)
        self.pub_second_pointcloud = rospy.Publisher(self.ns_plant_loc + "/label_2_pointcloud", PointCloud, queue_size=10)

        # container for detected plant locations
        plant_locations = []

        # camera matrix for tello single cam
        """              f  x  0 
         camera_matrix = 0  f  y
                         cx cy 1       
        
        f: focal length
        c: optical center     
        """
        self.camera_matrix = np.array([[924.873180, 0.000000  , 486.997346],
                                        [0.000000  , 923.504522, 364.308527],
                                        [0.000000  , 0.000000  , 1.000000]], dtype=np.float32)
        self.dist_coeffs = np.array([-0.034749, 0.071514, 0.000363, 0.003131, 0.000000], dtype=np.float32)

        # invert camera matrix for projection to 3D coordinates
        self.camera_matrix_inv = np.linalg.inv(self.camera_matrix)


        test = np.zeros((3, 1, 2), dtype=np.float32)
        test[0,:] = [486.99, 364.30]
        test[1, :] = [0.0, 0.0]
        test[2, :] = [600.0, 50.0]
        print(test)
        test = self.undistort_and_project(test)
        print(test)
        print('print init done')
        self.pub_first_pointcloud.publish(self.first_pointcould)
        # keep process alive
        rospy.spin()
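
`undistort_and_project` is not shown in this snippet. Given the inverted camera matrix prepared above, a plausible sketch removes lens distortion with cv2.undistortPoints and back-projects each pixel to a normalized 3D ray (this is an assumption about the real method, not its actual body):

def undistort_and_project(self, points):
    # Hypothetical sketch: undistort (N, 1, 2) pixel coordinates, then
    # back-project each to a 3D ray via K^-1 * [u, v, 1].
    undistorted = cv2.undistortPoints(
        points, self.camera_matrix, self.dist_coeffs, P=self.camera_matrix)
    rays = []
    for u, v in undistorted.reshape(-1, 2):
        rays.append(self.camera_matrix_inv.dot(
            np.array([u, v, 1.0], dtype=np.float32)))
    return np.array(rays)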
Example #25
                        2)  # Draw label text

            boundingbox = BoundingBox(xmin=xmin,
                                      xmax=xmax,
                                      ymin=ymin,
                                      ymax=ymax,
                                      id=id_count,
                                      Class=object_name,
                                      probability=scores[i])
            Boxes.append(boundingbox)
            id_count = id_count + 1

    ################################################################
    #
    # BoundingBox Publish Start
    #
    ################################################################

    array_forPublish = BoundingBoxes(bounding_boxes=Boxes)
    motor.publish(array_forPublish)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
cap.release()
Example #26
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from darknet_ros_msgs.msg import BoundingBox
from darknet_ros_msgs.msg import BoundingBoxes
from geometry_msgs.msg import Twist

detectedBoxes = BoundingBoxes()
detectedBoxes.header.seq = 1

corridorBox = BoundingBox()


def callback(data):
    global automode
    if not automode:
        pub_pidEn.publish(0)
        return None

    global detectedBoxes
    detectedBoxes = data
    #rospy.loginfo(detectedBoxes.header.seq)


def timercb(event):
    global seq_prev
    global detectedBoxes
    global automode
    global state_prev
    global state_pp