Example #1
def get_test_bounding_boxes():
    """Return a BoundingBoxes message holding three hand-built test boxes."""
    Helipad = BoundingBox()
    Helipad.probability = 0.5
    Helipad.xmin = 312
    Helipad.ymin = 120
    Helipad.xmax = 337
    Helipad.ymax = 148
    Helipad.id = 2
    Helipad.Class = "Helipad"

    H = BoundingBox()
    H.probability = 0.5
    H.xmin = 320
    H.ymin = 128
    H.xmax = 330
    H.ymax = 138
    H.id = 0
    H.Class = "H"

    Arrow = BoundingBox()
    Arrow.probability = 0.5
    Arrow.xmin = 333
    Arrow.ymin = 140
    Arrow.xmax = 335
    Arrow.ymax = 143
    Arrow.id = 1
    Arrow.Class = "Arrow"

    bbs = BoundingBoxes()
    bbs.bounding_boxes = [Helipad, H, Arrow]
    return bbs
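
All of the snippets in this collection assume darknet_ros_msgs-style BoundingBox/BoundingBoxes messages. As a minimal sketch of how the fixture above might be exercised (the node name, topic, and publish rate are assumptions, not part of the original):

import rospy
from darknet_ros_msgs.msg import BoundingBoxes  # assumed message package

rospy.init_node("bbox_test_publisher")  # hypothetical node name
pub = rospy.Publisher("/darknet_ros/bounding_boxes", BoundingBoxes, queue_size=1)

rate = rospy.Rate(1)
while not rospy.is_shutdown():
    msg = get_test_bounding_boxes()
    msg.header.stamp = rospy.Time.now()  # BoundingBoxes carries a std_msgs Header
    pub.publish(msg)
    rate.sleep()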
Example #2
    def msgDN(self, image, boxes, scores, classes):
        """
        Create the Object Detector message to publish with ROS

        This uses the Darknet BoundingBox[es] messages
        """
        msg = BoundingBoxes()
        msg.header = image.header
        scores_above_threshold = np.where(scores > self.threshold)[1]  # scores has shape (1, N)

        for s in scores_above_threshold:
            # Get the properties
            bb = boxes[0, s, :]
            sc = scores[0, s]
            cl = classes[0, s]

            # Create the bounding box message
            detection = BoundingBox()
            detection.Class = self.category_index[int(cl)]['name']
            detection.probability = sc
            detection.xmin = int((image.width - 1) * bb[1])
            detection.ymin = int((image.height - 1) * bb[0])
            detection.xmax = int((image.width - 1) * bb[3])
            detection.ymax = int((image.height - 1) * bb[2])

            msg.bounding_boxes.append(detection)  # darknet_ros_msgs names this field bounding_boxes

        return msg
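
The index shuffling above matches the TensorFlow Object Detection API convention of normalized [ymin, xmin, ymax, xmax] boxes, which is why bb[1]/bb[3] scale to x and bb[0]/bb[2] to y. The same conversion as a standalone helper (the box convention is an assumption carried over from that API):

def to_pixel_box(bb, width, height):
    # bb is assumed to be a normalized [ymin, xmin, ymax, xmax] box.
    xmin = int((width - 1) * bb[1])
    ymin = int((height - 1) * bb[0])
    xmax = int((width - 1) * bb[3])
    ymax = int((height - 1) * bb[2])
    return xmin, ymin, xmax, ymax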
Example #3
def process(msg):
    """Forward a ROS image to the darksocket server and publish any detections."""
    t_begin = rospy.get_time()
    cv_img = bridge.imgmsg_to_cv2(msg, "bgr8")
    cv2.imwrite(img_path, cv_img)

    with open(img_path, "rb") as img:
        img_bytes = img.read()
        server.send(darksocket.pack(darksocket.Packet.IMAGE, img_bytes))

    res = server.recv()
    t_end = rospy.get_time()

    if telemetry:
        cls()
        rospy.loginfo("%s FPS" % (1 / (t_end - t_begin)))

    if res == darksocket.StreamEvent.DETECTIONS:
        dets = Detections()
        dets.image = msg

        for det in server.stream.detections:
            bbox = BoundingBox()
            bbox.Class = det[0]
            bbox.probability = det[1]
            bbox.xmin = det[2]
            bbox.ymin = det[3]
            bbox.xmax = det[2] + det[4]
            bbox.ymax = det[3] + det[5]

            dets.bboxes.append(bbox)

            if telemetry:
                rospy.loginfo("%s %s" % (bbox.Class, bbox.probability))

        pub_detections.publish(dets)
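
The corner arithmetic implies each det tuple is (class, probability, x, y, width, height) with a top-left origin, since the max corners are formed by adding the extents. That assumed layout, spelled out as a helper:

def xywh_to_corners(x, y, w, h):
    # Top-left corner plus extents -> opposite corners.
    return x, y, x + w, y + h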
Example #4
    def callback(self, data):
        print('\n', 'Predicting')
        # try:
        #   cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        # except CvBridgeError as e:
        #   print(e)
        # cv_image = cv2.copyMakeBorder(cv_image, 100, 100, 100, 100, cv2.BORDER_CONSTANT, value=[85, 120, 68])
        # cv_image = cv2.imread("pic_buffer/R.png")
        # cv2.imwrite("pic_buffer/R.png", cv_image)

        # im = array_to_image(cv_image)
        # dn.rgbgr_image(im)
        # # print(dn.detect(net, meta, im))
        # print("===")
        r = []
        img = cv2.imread("pic_buffer/1_R.png")
        sp = img.shape
        # Run detection over overlapping crops so objects that straddle a
        # tile boundary are still found; removeDuplicate() merges overlaps.
        r += self.subpredict(0, int(sp[0] / 2), 0, sp[1])
        r += self.subpredict(int(sp[0] / 2), sp[0], 0, sp[1])
        r += self.subpredict(int(sp[0] / 4), int(sp[0] * 3 / 4), 0, sp[1])
        r += self.subpredict(0, sp[0], 0, int(sp[1] / 2))
        r += self.subpredict(0, sp[0], int(sp[1] / 2), sp[1])
        r += self.subpredict(0, sp[0], int(sp[1] / 3), int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], 0, int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], int(sp[1] / 3), sp[1])
        r += self.subpredict(0, int(sp[0] * 2 / 3), 0, sp[1])
        r += self.subpredict(int(sp[0] / 3), sp[0], 0, sp[1])
        r = self.removeDuplicate(r)
        print()
        img = self.drawCrossings(img, r)
        cv2.imwrite("pic_buffer/2_D.png", img)

        boxes = BoundingBoxes()
        print('\n', 'Predict result:')
        for i in range(len(r)):
            box = BoundingBox()
            print('    ', r[i][0], r[i][1] * 100, '%')
            box.Class = r[i][0].decode('utf-8')
            box.probability = r[i][1]
            box.id = i
            box.xmin = int(r[i][2][0])
            box.ymin = int(r[i][2][1])
            box.xmax = int(r[i][2][2])
            box.ymax = int(r[i][2][3])
            boxes.bounding_boxes.append(box)
            # if b'endpoint' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            print('    ', int(r[i][2][0]), int(r[i][2][1]), int(r[i][2][2]),
                  int(r[i][2][3]))
            # if b'cross' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            #     # print('\t', r[i][2])
        print('\n', 'Darknet waiting for rgb_img')
        time.sleep(0.5)
        try:
            self.boxes_pub.publish(boxes)
        except rospy.ROSException as e:  # publish() raises ROS errors, not CvBridgeError
            print(e)
Example #5
    def camera_image_callback(self, image, camera):
        """Gets images from camera to generate detections on

        Computes where the bounding boxes should be in the image, and
        fakes YOLO bounding boxes output as well as publishing a debug
        image showing where the detections are if turned on

        Parameters
        ----------
        image : sensor_msgs.Image
            The image from the camera to create detections for
        camera : Camera
            Holds camera parameters for projecting points

        """

        cv_image = self.bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")

        if camera.is_ready():

            # Fake darknet yolo detections message
            bounding_boxes = BoundingBoxes()

            bounding_boxes.header = image.header
            bounding_boxes.image_header = image.header
            bounding_boxes.image = image
            bounding_boxes.bounding_boxes = []

            for _, obj in self.objects.items():

                detections = self.get_detections(obj, camera, image.header.stamp)

                if detections is not None:

                    if camera.debug_image_pub:

                        self.render_debug_context(cv_image, detections, camera)

                    for det in detections:

                        bounding_box = BoundingBox()

                        bounding_box.Class = det[2]
                        bounding_box.probability = 1.0
                        bounding_box.xmin = int(det[0][0])
                        bounding_box.xmax = int(det[0][1])
                        bounding_box.ymin = int(det[0][2])
                        bounding_box.ymax = int(det[0][3])
                        bounding_boxes.bounding_boxes.append(bounding_box)

            # Only publish detection if there are boxes in it
            if bounding_boxes.bounding_boxes:
                self.darknet_detection_pub.publish(bounding_boxes)

        if camera.debug_image_pub:
            image_message = self.bridge.cv2_to_imgmsg(cv_image, encoding="rgb8")
            camera.debug_image_pub.publish(image_message)
Example #6
    def image_callback(self, msg):
        # print("Received an image!")
        try:
            # Convert your ROS Image message to numpy image data type
            cv2_img = np.frombuffer(msg.data, dtype=np.uint8).reshape(
                msg.height, msg.width, -1)
            image_ori = cv2.resize(cv2_img, (512, 512))
            #remove alpha channel if it exists
            if image_ori.shape[-1] == 4:
                image_ori = image_ori[..., :3]
            #normalize the image
            image = self.normalize(image_ori)

            with torch.no_grad():
                image = torch.Tensor(image)
                if torch.cuda.is_available():
                    image = image.cuda()
                boxes = self.model_.get_boxes(
                    image.permute(2, 0, 1).unsqueeze(0))

            Boxes_msg = BoundingBoxes()
            Boxes_msg.image_header = msg.header
            Boxes_msg.header.stamp = rospy.Time.now()
            for box in boxes[0]:
                # print(box.confidence)
                confidence = float(box.confidence)
                box = (box.box * torch.Tensor([512] * 4)).int().tolist()
                if confidence > 0.35:
                    cv2.rectangle(image_ori, (box[0], box[1]),
                                  (box[2], box[3]), (255, 0, 0), 2)
                    cv2.putText(image_ori,
                                str(confidence)[:4], (box[0] - 2, box[1] - 2),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                    # msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
                    detection_box = BoundingBox()
                    # detection_box.Class=str(box.class_id)
                    detection_box.xmin = box[0]
                    detection_box.ymin = box[1]
                    detection_box.xmax = box[2]
                    detection_box.ymax = box[3]
                    detection_box.probability = confidence
                    Boxes_msg.bounding_boxes.append(detection_box)

            msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
            self.img_pub.publish(msg_frame)
            self.ret_pub.publish(Boxes_msg)

        except CvBridgeError as e:
            print(e)
        if self.savefigure:
            cv2.imwrite('new_image.jpeg', cv2_img)
            print('save picture')
            # self.detected_msg.data="Take a photo"
            self.savefigure = False
Example #7
    def create_d_msgs_box(self, track):
        """Build a single BoundingBox message from a tracker track."""
        one_box = BoundingBox()

        one_box.id = int(track.id[:3], 16)
        one_box.class_id = "face"
        one_box.probability = float(track.score)
        one_box.xmin = int(track.box[0])
        one_box.ymin = int(track.box[1])
        one_box.xmax = int(track.box[2])
        one_box.ymax = int(track.box[3])

        return one_box
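
int(track.id[:3], 16) only succeeds when track.id is a hexadecimal string of at least three characters; a quick illustration with a made-up id:

track_hex_id = "a3f9c2b1"               # hypothetical hex tracker id
numeric_id = int(track_hex_id[:3], 16)  # "a3f" -> 2623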
Example #8
def test1():
    g = GateLocalizer()
    image_dims = (480, 752)
    # image is 480x752

    # does x go along the rows or columns??
    
    boxes = []
    boxLeft = BoundingBox()
    boxLeft.xmin = 607
    boxLeft.ymin = 154
    boxLeft.xmax = 629
    boxLeft.ymax = 410

    boxRight = BoundingBox()
    boxRight.xmin = 206
    boxRight.ymin = 171
    boxRight.xmax = 231
    boxRight.ymax = 391
    
    boxes.append(boxLeft)
    boxes.append(boxRight)
    print(g.midpoint(boxes, image_dims))
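
To the question in the comment above: in OpenCV/NumPy images, rows run along y (height) and columns along x (width), so a (480, 752) shape means 480 rows by 752 columns. A quick sanity check:

import numpy as np

img = np.zeros((480, 752, 3), dtype=np.uint8)
height, width = img.shape[:2]  # shape is (rows, columns, channels)
assert (height, width) == (480, 752)
# Pixels are addressed as img[y, x]; xmin/xmax index columns, ymin/ymax rows.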
Example #9
    def _write_message(self, detection_results, boxes, scores, classes):
        """Populate the output message with the input header and bounding-box information."""
        if boxes is None:
            return None
        for box, score, category in zip(boxes, scores, classes):
            # Populate darknet message
            left, bottom, right, top = box
            detection_msg = BoundingBox()
            detection_msg.xmin = left
            detection_msg.xmax = right
            detection_msg.ymin = top
            detection_msg.ymax = bottom
            detection_msg.probability = score
            detection_msg.Class = category
            detection_results.bounding_boxes.append(detection_msg)
        return detection_results
Example #10
    def GetBBoxesMsg(self, label_file: str):
        output_bboxes_msg = BoundingBoxes()
        with open(label_file, 'r') as file:
            id_ = 0
            for line in file:
                gt = line.split()
                bbox_msg = BoundingBox()
                bbox_msg.xmin = int(gt[5])
                bbox_msg.ymin = int(gt[6])
                bbox_msg.xmax = int(gt[7])
                bbox_msg.ymax = int(gt[8])
                bbox_msg.id = id_
                bbox_msg.Class = gt[0]
                id_ += 1
                output_bboxes_msg.bounding_boxes.append(bbox_msg)
        return output_bboxes_msg
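
The reader assumes a whitespace-separated label format with the class name in token 0 and the pixel corners in tokens 5-8; tokens 1-4 are never read here. A hypothetical line matching those indices:

# Hypothetical label line; only the marked tokens are read by GetBBoxesMsg.
line = "car 0 0 0 0 120 80 240 200"
gt = line.split()
assert gt[0] == "car"                                    # Class
assert [int(v) for v in gt[5:9]] == [120, 80, 240, 200]  # xmin, ymin, xmax, ymax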
Example #11
def msg(image, boxes):
    """
    Create the Darknet BoundingBox[es] messages
    """
    msg = BoundingBoxes()
    msg.header = image.header

    for (x, y, w, h) in boxes:
        detection = BoundingBox()
        detection.Class = "human"
        detection.probability = 1  # Not really...
        detection.xmin = x
        detection.ymin = y
        detection.xmax = x + w
        detection.ymax = y + h
        msg.bounding_boxes.append(detection)  # darknet_ros_msgs names this field bounding_boxes

    return msg
Example #12
    def test_tracker_callback(self):
        args = None
        rclpy.init(args=args)
        node = ObjectTrackingNode()

        bboxes_msg = BoundingBoxes()

        for track in self.tracks:
            bbox_msg = BoundingBox()

            bbox_msg.xmin = track["x1"]
            bbox_msg.ymin = track["y1"]
            bbox_msg.xmax = track["x2"]
            bbox_msg.ymax = track["y2"]

            bboxes_msg.bounding_boxes.append(bbox_msg)

        node.tracker_callback(bboxes_msg)
Example #13
    def boxcallback(self, msg):
        dets = []
        for i in range(len(msg.bounding_boxes)):
            bbox = msg.bounding_boxes[i]
            dets.append(
                np.array([
                    bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax,
                    bbox.probability
                ]))
        dets = np.array(dets)
        start_time = time.time()
        trackers = self.update(dets)
        cycle_time = time.time() - start_time
        print(cycle_time)

        r = BoundingBoxes()
        for d in range(len(trackers)):
            rb = BoundingBox()  # fresh message per track; reusing one object would alias every entry
            rb.probability = 1.0
            rb.xmin = int(trackers[d, 0])
            rb.ymin = int(trackers[d, 1])
            rb.xmax = int(trackers[d, 2])
            rb.ymax = int(trackers[d, 3])
            rb.id = int(trackers[d, 4])
            rb.Class = 'tracked'
            r.bounding_boxes.append(rb)
            if self.img_in == 1 and self.display:
                res = trackers[d].astype(np.int32)
                rgb = self.colours[res[4] % 32, :] * 255
                cv2.rectangle(self.img, (res[0], res[1]), (res[2], res[3]),
                              (rgb[0], rgb[1], rgb[2]), 2)
                cv2.putText(self.img, "ID : %d" % (res[4]), (res[0], res[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 85, 200), 2)
        if self.img_in == 1 and self.display:
            try:
                self.image = self.bridge.cv2_to_imgmsg(self.img, "bgr8")
                self.image.header.stamp = rospy.Time.now()
                self.pubimage.publish(self.image)
            except CvBridgeError as e:
                pass
        r.header.stamp = rospy.Time.now()
        self.pubb.publish(r)
        return
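
In this and the following SORT-based snippets, each row of trackers is [xmin, ymin, xmax, ymax, track_id], which is the layout the reference SORT implementation returns from update(); both the message fields and the res[0]..res[4] drawing code rely on that ordering.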
Example #14
    mot_tracker = Sort(max_age=200, min_hits=1)  # create instance of the SORT tracker

    while True:
        try:
            start_time = time.time()
            if mot_tracker.bbox_checkin == 1:
                trackers = mot_tracker.update(mot_tracker.dets)
                mot_tracker.bbox_checkin = 0
            else:
                trackers = mot_tracker.update(np.empty((0, 5)))

            r = BoundingBoxes()
            for d in range(len(trackers)):
                rb = BoundingBox()
                rb.probability = 1.0
                rb.xmin = int(trackers[d, 0])
                rb.ymin = int(trackers[d, 1])
                rb.xmax = int(trackers[d, 2])
                rb.ymax = int(trackers[d, 3])
                rb.id = int(trackers[d, 4])
                rb.Class = 'tracked'
                r.bounding_boxes.append(rb)
                if mot_tracker.img_in == 1 and mot_tracker.display:
                    res = trackers[d].astype(np.int32)
                    rgb = colours[res[4] % 32, :] * 255
                    cv2.rectangle(mot_tracker.img, (res[0], res[1]),
                                  (res[2], res[3]), (rgb[0], rgb[1], rgb[2]), 6)
                    cv2.putText(mot_tracker.img, "ID : %d" % (res[4]),
                                (res[0], res[1]), cv2.FONT_HERSHEY_SIMPLEX,
                                1.6, (200, 85, 200), 6)
            if mot_tracker.img_in == 1 and mot_tracker.display:
                try:
                    mot_tracker.image = mot_tracker.bridge.cv2_to_imgmsg(mot_tracker.img, "bgr8")
                    mot_tracker.image.header.stamp = rospy.Time.now()
Example #15
    def __raw_image_callback(self, data):
        self.image_header = data.header
        raw_image = self.bridge.imgmsg_to_cv2(data).astype(np.uint8)
        if len(raw_image.shape) <= 2:
            return
        self.image = raw_image

        # 1. get SORT bounding boxes

        new_bboxes_sort = []
        if len(self.new_bboxes) != 0:
            new_bboxes = copy.deepcopy(self.new_bboxes)
            self.new_bboxes = []
            for bbox in new_bboxes:
                new_bboxes_sort.append(
                    np.asarray((bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax,
                                bbox.probability)))
            new_bboxes_sort = np.asarray(new_bboxes_sort)
            # 2. update tracker
            trackers = self.mot_tracker.update(new_bboxes_sort)
            matched, _, _ = associate_detections_to_trackers(
                new_bboxes_sort, trackers, 0.3)

        else:
            # 2. update tracker
            trackers = self.mot_tracker.update()
            if len(self.bboxes) == 0:
                matched = np.empty((0, 2), dtype=int)
            else:
                new_bboxes = copy.deepcopy(self.bboxes)
                for bbox in new_bboxes:
                    new_bboxes_sort.append(
                        np.asarray((bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax,
                                    bbox.probability)))
                new_bboxes_sort = np.asarray(new_bboxes_sort)
                matched, _, _ = associate_detections_to_trackers(
                    new_bboxes_sort, trackers, 0.3)

        # 3. update current bounding boxes & extract tracking trace
        # 1). id is the id from tracker;
        # 2). class is the latest class from detection;
        # 3). probability is the latest probability from detection;
        self.bboxes = []
        ids = []
        if trackers.shape[0] != 0:
            for tracker in trackers:
                bbox = BoundingBox()
                print(tracker)
                bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax = int(
                    tracker[0]), int(tracker[1]), int(tracker[2]), int(
                        tracker[3])
                bbox.id = int(tracker[4])
                self.bboxes.append(bbox)
                track_id = int(bbox.id)
                center = (int((bbox.xmin + bbox.xmax) / 2.),
                          int((bbox.ymin + bbox.ymax) / 2.))
                if self.traces.get(track_id) is None:
                    self.traces[track_id] = [center]
                else:
                    self.traces[track_id].append(center)
                ids.append(track_id)

        if matched.shape[0] != 0:
            for pair in matched:
                # pair is (detection index, tracker index); self.bboxes was
                # filled in tracker order, so pair[1] indexes it directly.
                original_id = pair[0]
                new_id = pair[1]
                original_bbox = new_bboxes[original_id]
                self.bboxes[new_id].Class = original_bbox.Class
                self.bboxes[new_id].probability = original_bbox.probability

        del_ids = []
        for k in self.traces:
            if k not in ids:
                del_ids.append(k)

        for k in del_ids:
            del self.traces[k]

        # 4. publish current bounding boxes
        self.__publish_bbox()

        # 5. publish tracking image
        self.__publish_tracking_image(self.image, self.bboxes)
Example #16
    def process_frames(self):
        print("\nMining Frames for Flicker Detection ...... \\o/ \\o/ \\o/ \\o/ ....... \n")
        # Init extraction loop
        msg_count = 0

        # Iterate through bags
        rosbag_files = sorted(glob.glob(os.path.join(self.bag_dir, "*.bag")))
        for bag_file in tqdm(rosbag_files, unit='bag'):
            self.frame_imgs, self.trk_table, self.frame_count = self.load_cache(bag_file)
            self.bag_keypath = '_'.join(os.path.normpath(bag_file).split(os.sep))

            if not self.frame_imgs or not self.trk_table:
                # Open bag file. If corrupted, skip the file
                try:
                    with rosbag.Bag(bag_file, 'r') as bag:
                        # Check if desired topics exists in bags
                        recorded_topics = bag.get_type_and_topic_info()[1]
                        if not all(topic in recorded_topics for topic in (DETECTION_TOPIC, CAR_STATE_TOPIC, IMAGE_STREAM_TOPIC)):
                            print("ERROR: Specified topics not in bag file %s; skipping bag!" % bag_file)
                            continue

                        gps = ""
                        v_ego = 0.0
                        self.cv_image = None
                        # Get Detections
                        time_nsecs = []
                        self.trk_table = OrderedDict()
                        self.frame_imgs = OrderedDict()
                        self.frame_trks = OrderedDict()
                        self.flickering_frame_imgs = OrderedDict()
                        self.flickering_frame_trks = OrderedDict()
                        for topic, msg, t in bag.read_messages():
                            if topic == CAR_STATE_TOPIC:
                                self.gps = msg.GPS
                                self.v_ego = msg.v_ego
                            if topic == IMAGE_STREAM_TOPIC:
                                if msg_count % self.skip_rate == 0:

                                    self.cv_image = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")
                                    self.img_msg_header = msg.header
                                    image_name = "frame%010d_%s.jpg" % (self.frame_count, str(msg.header.stamp.to_nsec()))
                                    img_path = os.path.join(self.frames_dir, image_name)
                                    cv2.imwrite(img_path, self.cv_image)

                                    uri = img_path
                                    img_key = int(msg.header.stamp.to_nsec())
                                    fname = self.path_leaf(uri)
                                    vid_name = ''
                                    readable_time = self.format_from_nanos(img_key).split(' ')
                                    # Assumes format_from_nanos() yields "<date> <HH:MM:SS>";
                                    # fall back to noon when no clock component is present.
                                    time_part = readable_time[-1]
                                    if ':' in time_part:
                                        hour = int(time_part.split(':')[0])
                                    else:
                                        hour = 12

                                    if (hour > 4 and hour < 6) or (hour > 17 and hour < 19):
                                        timeofday = 'dawn/dusk'
                                    elif hour > 6 and hour < 17:
                                        timeofday = 'daytime'
                                    else:
                                        timeofday = 'night'


                                    scene = 'highway'
                                    timestamp = img_key
                                    dataset_path = img_path
                                    if self.gps:
                                        gps = pynmea2.parse(self.gps)
                                        lat = gps.lat
                                        long = gps.lon
                                    else:
                                        lat = None
                                        long = None
                                    height, width, depth = self.cv_image.shape

                                    self.frame_imgs[img_key] = {'url': img_path,
                                                             'name': img_path,
                                                             'width': width,
                                                             'height': height,
                                                             'index': self.frame_count,
                                                             'timestamp': timestamp,
                                                             'videoName':vid_name,
                                                             'attributes': {'weather': 'clear', 'scene': scene, 'timeofday': timeofday},
                                                             'labels': []
                                                            }
                                    self.frame_trks[img_key] = []

                                    msg_count += 1
                                    self.frame_count += 1

                        ## Get Tracking Data ##
                        if self.use_bag_detections:
                            for topic, msg, t in bag.read_messages():
                                if topic == DETECTION_TOPIC:
                                    # Find corresponding frame for detections message
                                    found_frame = False
                                    for frame in sorted(self.frame_imgs.keys()):
                                        if int(msg.header.stamp.to_nsec()) > frame-3.333e7 and int(msg.header.stamp.to_nsec()) < frame+3.333e7:
                                            found_frame = True

                                            # Collect tracker_msgs
                                            detections, tracks = self.tracking_callback(msg)
                                            # Append to frame annotations table
                                            self.append_frame_trks(frame, tracks, detections)
                                            # Append to track annotations table
                                            self.append_trk_table(frame, tracks)
                                            # Debugger statement to make monitor data extraction
                                            print("FRAME TIMESTAMP:",frame)
                                            print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                            print('IMG PATH:',self.frame_imgs[frame]['url'])
                                            break

                                    if not found_frame: # Try a wider time window and try to shuffle detection to appropriate frame
                                        for i, frame in enumerate(sorted(self.frame_imgs.keys())):
                                            if int(msg.header.stamp.to_nsec()) > frame-3.783e7 and int(msg.header.stamp.to_nsec()) < frame+3.783e7:
                                                found_frame = True
                                                # Collect tracks
                                                detections, tracks = self.tracking_callback(msg)
                                                # Append to buffer
                                                frame_keys = sorted(self.frame_imgs.keys())  # timestamp-ordered, indexable
                                                if len(self.frame_imgs[frame]) < 2:  # Check if already assigned detections to this frame
                                                    idx = frame
                                                elif i > 0 and len(self.frame_imgs[frame_keys[i - 1]]) < 2:  # Assign detections to the previous frame if empty
                                                    idx = frame_keys[i - 1]
                                                elif i < len(frame_keys) - 1 and len(self.frame_imgs[frame_keys[i + 1]]) < 2:  # Assign detections to the next frame if empty
                                                    idx = frame_keys[i + 1]
                                                else:
                                                    idx = frame
                                                # Append to frame annotations table
                                                self.append_frame_trks(idx, tracks, detections)
                                                # Append to track annotations table
                                                self.append_trk_table(idx, tracks)
                                                # Debugger statement to make monitor data extraction
                                                print("FRAME TIMESTAMP:",idx)
                                                print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                                print('IMG PATH:', self.frame_imgs[idx]['url'])
                                                break
                        else:
                            anns = darknet_annotator.annotate(
                                os.path.abspath(self.frames_dir),
                                os.path.abspath(self.current_model_cfg_path),
                                os.path.abspath(self.inference_model),
                                os.path.abspath(self.current_data_cfg_path))
                            img_stamps = OrderedDict()

                            for uri, img_detections in sorted(anns):
                                img_timestamp = int(uri.split('_')[-1].replace('.jpg', '').replace('.png', '')) # Get from fpath
                                img_stamps[img_timestamp] = (uri, img_detections)


                            ann_idx = 0
                            for img_timestamp in sorted(img_stamps.keys()):
                                dets = []
                                # Get corresponding image
                                uri = img_stamps[img_timestamp][0]
                                bboxes = BoundingBoxes()
                                bboxes.header.stamp = rospy.Time.now()  # ROS header stamps are rospy.Time, not datetime
                                bboxes.image_header = self.img_msg_header

                                for detection in img_stamps[img_timestamp][1]:
                                    # Build Detections
                                    bbox = BoundingBox()
                                    bbox.xmin, bbox.ymin = detection['box2d']['x1'], detection['box2d']['y1']
                                    bbox.xmax, bbox.ymax = detection['box2d']['x2'], detection['box2d']['y2']
                                    bbox.Class = detection['category']
                                    bbox.class_id = 0
                                    bbox.probability = 0.99
                                    bboxes.bounding_boxes.append(bbox)

                                # Collect tracker_msgs
                                self.cv_image = cv2.imread(os.path.abspath(os.path.join(self.frames_dir, uri)))
                                detections, tracks = self.tracking_callback(bboxes)
                                # Append to frame annotations table
                                self.append_frame_trks(img_timestamp, tracks, detections)
                                # Append to track annotations table
                                self.append_trk_table(img_timestamp, tracks)
                                # Debugger statement to make monitor data extraction
                                # print("FRAME TIMESTAMP:",img_timestamp)
                                # print("DETECTION TIMESTAMP:",int(msg.header.stamp.to_nsec()))
                                # print('IMG PATH:',self.frame_imgs[img_timestamp]['url'])

                        self.save_cache(bag_file)
                    msg_count = 0
                except ROSBagException as e:
                    print("\n", bag_file, "Failed!  || ")
                    print(str(e), '\n')
                    continue

            ## Generate JSON Object in BDD Format ##
            self.generate_flickering_visualizations()

            ## Generate Ground Truth Annotations in BDD Format ##
            self.generate_annotations()

            # Export to Scalabel
            self.export(bag_file)

            # Print Summary
            print("\nFrames Extracted:", len(self.flickering_frame_imgs.keys()))
            print("=================================================================\n\n")