def test():
    rospy.init_node('localizer_client')
    l = Localizer()

    # Fake data
    bb = BoundingBoxes()
    bb.bounding_boxes = []
    bb.image = Image()
    b1 = BoundingBox()
    b1.Class = "start_gate_pole"
    b2 = BoundingBox()
    b2.Class = "dice1"
    b3 = BoundingBox()
    b3.Class = "dice2"
    b4 = BoundingBox()
    b4.Class = "random"
    bb.bounding_boxes.append(b1)
    bb.bounding_boxes.append(b2)
    bb.bounding_boxes.append(b3)
    bb.bounding_boxes.append(b4)
    l.boxes_received(bb)

    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        sys.exit()
def get_test_bounding_boxes():
    Helipad = BoundingBox()
    Helipad.probability = 0.5
    Helipad.xmin = 312
    Helipad.ymin = 120
    Helipad.xmax = 337
    Helipad.ymax = 148
    Helipad.id = 2
    Helipad.Class = "Helipad"

    H = BoundingBox()
    H.probability = 0.5
    H.xmin = 320
    H.ymin = 128
    H.xmax = 330
    H.ymax = 138
    H.id = 0
    H.Class = "H"

    Arrow = BoundingBox()
    Arrow.probability = 0.5
    Arrow.xmin = 333
    Arrow.ymin = 140
    Arrow.xmax = 335
    Arrow.ymax = 143
    Arrow.id = 1
    Arrow.Class = "Arrow"

    bbs = BoundingBoxes()
    bbs.bounding_boxes = [Helipad, H, Arrow]
    return bbs
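# Usage sketch (hypothetical node and topic names, not taken from the snippet
# above): publish the fixture boxes once so a downstream consumer can be
# exercised without a running detector. Assumes the BoundingBox/BoundingBoxes
# messages come from darknet_ros_msgs.
import rospy
from darknet_ros_msgs.msg import BoundingBoxes

def publish_test_boxes():
    rospy.init_node('bbox_fixture_publisher')
    pub = rospy.Publisher('/darknet_ros/bounding_boxes', BoundingBoxes, queue_size=1)
    rospy.sleep(0.5)  # give subscribers time to connect
    bbs = get_test_bounding_boxes()
    bbs.header.stamp = rospy.Time.now()
    pub.publish(bbs)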
def msgDN(self, image, boxes, scores, classes):
    """
    Create the Object Detector message to publish with ROS

    This uses the Darknet BoundingBox[es] messages
    """
    msg = BoundingBoxes()
    msg.header = image.header
    scores_above_threshold = np.where(scores > self.threshold)[1]

    for s in scores_above_threshold:
        # Get the properties
        bb = boxes[0, s, :]
        sc = scores[0, s]
        cl = classes[0, s]

        # Create the bounding box message
        detection = BoundingBox()
        detection.Class = self.category_index[int(cl)]['name']
        detection.probability = sc
        detection.xmin = int((image.width - 1) * bb[1])
        detection.ymin = int((image.height - 1) * bb[0])
        detection.xmax = int((image.width - 1) * bb[3])
        detection.ymax = int((image.height - 1) * bb[2])

        msg.boundingBoxes.append(detection)

    return msg
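# Note on the scaling in msgDN: the detector is assumed to return boxes
# normalized to [0, 1] in [ymin, xmin, ymax, xmax] order (the TensorFlow
# Object Detection API convention), which is why bb[1]/bb[3] map to x and
# bb[0]/bb[2] map to y. A minimal sketch of that conversion for one box:
def normalized_box_to_pixels(bb, width, height):
    # bb = (ymin, xmin, ymax, xmax), all normalized to [0, 1]
    xmin = int((width - 1) * bb[1])
    ymin = int((height - 1) * bb[0])
    xmax = int((width - 1) * bb[3])
    ymax = int((height - 1) * bb[2])
    return xmin, ymin, xmax, ymax

# e.g. normalized_box_to_pixels((0.25, 0.1, 0.75, 0.6), 640, 480)
# returns (63, 119, 383, 359)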
def process(msg):
    t_begin = rospy.get_time()

    cv_img = bridge.imgmsg_to_cv2(msg, "bgr8")
    cv2.imwrite(img_path, cv_img)

    with open(img_path, "rb") as img:
        img_bytes = img.read()

    server.send(darksocket.pack(darksocket.Packet.IMAGE, img_bytes))
    res = server.recv()

    t_end = rospy.get_time()

    if telemetry:
        cls()
        rospy.loginfo("%s FPS" % (1 / (t_end - t_begin)))

    if res == darksocket.StreamEvent.DETECTIONS:
        dets = Detections()
        dets.image = msg

        for det in server.stream.detections:
            bbox = BoundingBox()
            bbox.Class = det[0]
            bbox.probability = det[1]
            bbox.xmin = det[2]
            bbox.ymin = det[3]
            bbox.xmax = det[2] + det[4]
            bbox.ymax = det[3] + det[5]
            dets.bboxes.append(bbox)

            if telemetry:
                rospy.loginfo("%s %s" % (bbox.Class, bbox.probability))

        pub_detections.publish(dets)
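# The tuples in server.stream.detections are assumed to be
# (class_name, probability, x, y, w, h) with (x, y) the top-left corner,
# which is why xmax/ymax are reconstructed as x + w and y + h above.
# A tiny self-check of that corner arithmetic:
det = ("person", 0.87, 40, 60, 100, 220)  # hypothetical detection tuple
xmin, ymin = det[2], det[3]
xmax, ymax = det[2] + det[4], det[3] + det[5]
assert (xmax - xmin, ymax - ymin) == (det[4], det[5])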
def callback(self, data):
    print('\n', 'Predicting')
    # try:
    #     cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    # except CvBridgeError as e:
    #     print(e)
    # cv_image = cv2.copyMakeBorder(cv_image, 100, 100, 100, 100, cv2.BORDER_CONSTANT, value=[85, 120, 68])
    # cv_image = cv2.imread("pic_buffer/R.png")
    # cv2.imwrite("pic_buffer/R.png", cv_image)
    # im = array_to_image(cv_image)
    # dn.rgbgr_image(im)
    # print(dn.detect(net, meta, im))
    # print("===")

    r = []
    img = cv2.imread("pic_buffer/1_R.png")
    sp = img.shape

    # Run the detector over several overlapping crops of the image and merge
    # the results, so objects near crop borders are still picked up
    r += self.subpredict(0, int(sp[0] / 2), 0, sp[1])
    r += self.subpredict(int(sp[0] / 2), sp[0], 0, sp[1])
    r += self.subpredict(int(sp[0] / 4), int(sp[0] * 3 / 4), 0, sp[1])
    r += self.subpredict(0, sp[0], 0, int(sp[1] / 2))
    r += self.subpredict(0, sp[0], int(sp[1] / 2), sp[1])
    r += self.subpredict(0, sp[0], int(sp[1] / 3), int(sp[1] * 2 / 3))
    r += self.subpredict(0, sp[0], 0, int(sp[1] * 2 / 3))
    r += self.subpredict(0, sp[0], int(sp[1] / 3), sp[1])
    r += self.subpredict(0, int(sp[0] * 2 / 3), 0, sp[1])
    r += self.subpredict(int(sp[0] / 3), sp[0], 0, sp[1])
    r = self.removeDuplicate(r)
    print()

    img = self.drawCrossings(img, r)
    cv2.imwrite("pic_buffer/2_D.png", img)

    boxes = BoundingBoxes()
    print('\n', 'Predict result:')
    for i in range(len(r)):
        box = BoundingBox()
        print(' ', r[i][0], r[i][1] * 100, '%')
        box.Class = r[i][0].decode('utf-8')
        box.probability = r[i][1]
        box.id = i
        box.xmin = int(r[i][2][0])
        box.ymin = int(r[i][2][1])
        box.xmax = int(r[i][2][2])
        box.ymax = int(r[i][2][3])
        boxes.bounding_boxes.append(box)
        # if b'endpoint' == r[i][0]:
        #     print('\t', r[i][0], r[i][1]*100, '%')
        print(' ', int(r[i][2][0]), int(r[i][2][1]), int(r[i][2][2]), int(r[i][2][3]))
        # if b'cross' == r[i][0]:
        #     print('\t', r[i][0], r[i][1]*100, '%')
        #     print('\t', r[i][2])

    print('\n', 'Darknet waiting for rgb_img')
    time.sleep(0.5)
    try:
        self.boxes_pub.publish(boxes)
    except CvBridgeError as e:
        print(e)
def camera_image_callback(self, image, camera):
    """Gets images from camera to generate detections on

    Computes where the bounding boxes should be in the image, and fakes
    YOLO bounding boxes output, as well as publishing a debug image
    showing where the detections are if turned on

    Parameters
    ----------
    image : sensor_msgs.Image
        The image from the camera to create detections for
    camera : Camera
        Holds camera parameters for projecting points

    """
    cv_image = self.bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")

    if camera.is_ready():
        # Fake darknet yolo detections message
        bounding_boxes = BoundingBoxes()
        bounding_boxes.header = image.header
        bounding_boxes.image_header = image.header
        bounding_boxes.image = image
        bounding_boxes.bounding_boxes = []

        # .iteritems() implies Python 2; use .items() on Python 3
        for _, obj in self.objects.iteritems():
            detections = self.get_detections(obj, camera, image.header.stamp)
            if detections is not None:
                if camera.debug_image_pub:
                    self.render_debug_context(cv_image, detections, camera)

                for det in detections:
                    bounding_box = BoundingBox()
                    bounding_box.Class = det[2]
                    bounding_box.probability = 1.0
                    bounding_box.xmin = int(det[0][0])
                    bounding_box.xmax = int(det[0][1])
                    bounding_box.ymin = int(det[0][2])
                    bounding_box.ymax = int(det[0][3])
                    bounding_boxes.bounding_boxes.append(bounding_box)

        # Only publish detection if there are boxes in it
        if bounding_boxes.bounding_boxes:
            self.darknet_detection_pub.publish(bounding_boxes)

    if camera.debug_image_pub:
        image_message = self.bridge.cv2_to_imgmsg(cv_image, encoding="rgb8")
        camera.debug_image_pub.publish(image_message)
def _write_message(self, detection_results, boxes, scores, classes):
    """ populate output message with input header and bounding boxes information """
    if boxes is None:
        return None
    for box, score, category in zip(boxes, scores, classes):
        # Populate darknet message
        left, bottom, right, top = box

        detection_msg = BoundingBox()
        detection_msg.xmin = left
        detection_msg.xmax = right
        detection_msg.ymin = top
        detection_msg.ymax = bottom
        detection_msg.probability = score
        detection_msg.Class = category

        detection_results.bounding_boxes.append(detection_msg)
    return detection_results
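# Caller-side sketch (the publisher and image names are hypothetical):
# _write_message mutates and returns the message it is given, so the caller
# supplies a BoundingBoxes with the header already set. Note the
# (left, bottom, right, top) unpacking: in image coordinates the bottom edge
# has the larger row index, so it lands in ymax and the top edge in ymin.
detection_results = BoundingBoxes()
detection_results.header = image_msg.header  # header of the frame being processed
detection_results = self._write_message(detection_results, boxes, scores, classes)
if detection_results is not None:
    self.detection_pub.publish(detection_results)  # assumed publisher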
def GetBBoxesMsg(self, label_file: str):
    output_bboxes_msg = BoundingBoxes()
    with open(label_file, 'r') as file:
        id_ = 0
        for line in file:
            gt = line.split()
            bbox_msg = BoundingBox()
            #print(gt)
            bbox_msg.xmin = int(gt[5])
            bbox_msg.ymin = int(gt[6])
            bbox_msg.xmax = int(gt[7])
            bbox_msg.ymax = int(gt[8])
            bbox_msg.id = id_
            bbox_msg.Class = gt[0]
            id_ += 1
            output_bboxes_msg.bounding_boxes.append(bbox_msg)
    return output_bboxes_msg
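# GetBBoxesMsg assumes a whitespace-separated label line whose first token is
# the class name and whose 6th-9th tokens are xmin ymin xmax ymax. With a
# hypothetical line in that layout, the indexing checks out:
line = "car 0 0 0.0 0.9 100 150 220 310"
gt = line.split()
assert gt[0] == "car"
assert (int(gt[5]), int(gt[6]), int(gt[7]), int(gt[8])) == (100, 150, 220, 310)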
def msg(image, boxes):
    """
    Create the Darknet BoundingBox[es] messages
    """
    msg = BoundingBoxes()
    msg.header = image.header
    for (x, y, w, h) in boxes:
        detection = BoundingBox()
        detection.Class = "human"
        detection.probability = 1  # Not really...
        detection.xmin = x
        detection.ymin = y
        detection.xmax = x + w
        detection.ymax = y + h
        msg.boundingBoxes.append(detection)
    return msg
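# Usage sketch, assuming the (x, y, w, h) tuples come from an OpenCV detector
# such as cv2.HOGDescriptor.detectMultiScale, which returns boxes in exactly
# that format (the blank test frame below is a stand-in for a camera image):
import cv2
import numpy as np

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
frame = np.zeros((480, 640, 3), dtype=np.uint8)
boxes, weights = hog.detectMultiScale(frame)
# each entry of 'boxes' is (x, y, w, h), matching what msg() expects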
def boxcallback(self, msg):
    dets = []
    for i in range(len(msg.bounding_boxes)):
        bbox = msg.bounding_boxes[i]
        dets.append(np.array([bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax, bbox.probability]))
    dets = np.array(dets)

    start_time = time.time()
    trackers = self.update(dets)
    cycle_time = time.time() - start_time
    print(cycle_time)

    r = BoundingBoxes()
    for d in range(len(trackers)):
        # Create a fresh message per track; reusing one BoundingBox instance
        # would append the same (repeatedly mutated) object every iteration
        rb = BoundingBox()
        rb.probability = 1
        rb.xmin = int(trackers[d, 0])
        rb.ymin = int(trackers[d, 1])
        rb.xmax = int(trackers[d, 2])
        rb.ymax = int(trackers[d, 3])
        rb.id = int(trackers[d, 4])
        rb.Class = 'tracked'
        r.bounding_boxes.append(rb)
        if self.img_in == 1 and self.display:
            res = trackers[d].astype(np.int32)
            rgb = self.colours[res[4] % 32, :] * 255
            cv2.rectangle(self.img, (res[0], res[1]), (res[2], res[3]),
                          (rgb[0], rgb[1], rgb[2]), 2)
            cv2.putText(self.img, "ID : %d" % (res[4]), (res[0], res[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 85, 200), 2)

    if self.img_in == 1 and self.display:
        try:
            self.image = self.bridge.cv2_to_imgmsg(self.img, "bgr8")
            self.image.header.stamp = rospy.Time.now()
            self.pubimage.publish(self.image)
        except CvBridgeError:
            pass

    r.header.stamp = rospy.Time.now()
    self.pubb.publish(r)
    return
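# The dets array built in boxcallback follows the SORT-style convention of one
# row per detection, [xmin, ymin, xmax, ymax, score]; self.update is then
# expected to return rows of [xmin, ymin, xmax, ymax, track_id]. A minimal
# shape check with made-up values:
import numpy as np

dets = np.array([
    [100, 120, 180, 260, 0.91],  # detection 1
    [300, 80, 360, 190, 0.74],   # detection 2
])
assert dets.shape == (2, 5)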
if mot_tracker.bbox_checkin == 1:
    trackers = mot_tracker.update(mot_tracker.dets)
    mot_tracker.bbox_checkin = 0
else:
    # No new detections this cycle; step the tracker with an empty (0, 5) array
    trackers = mot_tracker.update(np.empty((0, 5)))

r = BoundingBoxes()
for d in range(len(trackers)):
    rb = BoundingBox()
    rb.probability = 1
    rb.xmin = int(trackers[d, 0])
    rb.ymin = int(trackers[d, 1])
    rb.xmax = int(trackers[d, 2])
    rb.ymax = int(trackers[d, 3])
    rb.id = int(trackers[d, 4])
    rb.Class = 'tracked'
    r.bounding_boxes.append(rb)
    if mot_tracker.img_in == 1 and mot_tracker.display:
        res = trackers[d].astype(np.int32)
        rgb = colours[res[4] % 32, :] * 255
        cv2.rectangle(mot_tracker.img, (res[0], res[1]), (res[2], res[3]),
                      (rgb[0], rgb[1], rgb[2]), 6)
        cv2.putText(mot_tracker.img, "ID : %d" % (res[4]), (res[0], res[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.6, (200, 85, 200), 6)

if mot_tracker.img_in == 1 and mot_tracker.display:
    try:
        mot_tracker.image = mot_tracker.bridge.cv2_to_imgmsg(mot_tracker.img, "bgr8")
        mot_tracker.image.header.stamp = rospy.Time.now()
        mot_tracker.pubimage.publish(mot_tracker.image)
    except CvBridgeError:
        pass

cycle_time = time.time() - start_time
def process_frames(self):
    print("\nMining Frames for Flicker Detection ......\o/ \o/ \o/ \o/....... \n")

    # Init extraction loop
    msg_count = 0
    rosbag_files = sorted(glob.glob(os.path.join(self.bag_dir, "*.bag")))

    # Iterate through bags
    for bag_file in tqdm(rosbag_files, unit='bag'):
        self.frame_imgs, self.trk_table, self.frame_count = self.load_cache(bag_file)
        self.bag_keypath = '_'.join(os.path.normpath(bag_file).split(os.sep))
        if not self.frame_imgs or not self.trk_table:
            # Open bag file. If corrupted, skip the file
            try:
                with rosbag.Bag(bag_file, 'r') as bag:
                    # Check that the desired topics exist in the bag
                    recorded_topics = bag.get_type_and_topic_info()[1]
                    if not all(topic in recorded_topics for topic in (
                            DETECTION_TOPIC, CAR_STATE_TOPIC, IMAGE_STREAM_TOPIC)):
                        print("ERROR: Specified topics not in bag file:", bag_file, ". Skipping bag!")
                        continue

                    gps = ""
                    v_ego = 0.0
                    self.cv_image = None

                    # Get Detections
                    time_nsecs = []
                    self.trk_table = OrderedDict()
                    self.frame_imgs = OrderedDict()
                    self.frame_trks = OrderedDict()
                    self.flickering_frame_imgs = OrderedDict()
                    self.flickering_frame_trks = OrderedDict()

                    for topic, msg, t in bag.read_messages():
                        if topic == CAR_STATE_TOPIC:
                            self.gps = msg.GPS
                            self.v_ego = msg.v_ego
                        if topic == IMAGE_STREAM_TOPIC:
                            if msg_count % self.skip_rate == 0:
                                self.cv_image = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")
                                self.img_msg_header = msg.header
                                image_name = "frame%010d_%s.jpg" % (self.frame_count, str(msg.header.stamp.to_nsec()))
                                img_path = os.path.join(self.frames_dir, image_name)
                                cv2.imwrite(img_path, self.cv_image)
                                uri = img_path
                                img_key = int(msg.header.stamp.to_nsec())
                                fname = self.path_leaf(uri)
                                vid_name = ''

                                # Derive time of day from the timestamp; assumes
                                # format_from_nanos() yields a "<date> <H:M:S>" string
                                readable_time = self.format_from_nanos(img_key).split(' ')[-1]
                                if ':' in readable_time and ' ' not in readable_time.split(':')[0]:
                                    hour = int(readable_time.split(':')[0])
                                else:
                                    hour = 12
                                if (hour > 4 and hour < 6) or (hour > 17 and hour < 19):
                                    timeofday = 'dawn/dusk'
                                elif hour > 6 and hour < 17:
                                    timeofday = 'daytime'
                                else:
                                    timeofday = 'night'

                                scene = 'highway'
                                timestamp = img_key
                                dataset_path = img_path
                                if self.gps:
                                    gps = pynmea2.parse(self.gps)
                                    lat = gps.lat
                                    long = gps.lon
                                else:
                                    lat = None
                                    long = None
                                height, width, depth = self.cv_image.shape
                                self.frame_imgs[img_key] = {
                                    'url': img_path,
                                    'name': img_path,
                                    'width': width,
                                    'height': height,
                                    'index': self.frame_count,
                                    'timestamp': timestamp,
                                    'videoName': vid_name,
                                    'attributes': {'weather': 'clear',
                                                   'scene': scene,
                                                   'timeofday': timeofday},
                                    'labels': []
                                }
                                self.frame_trks[img_key] = []
                                self.frame_count += 1
                            # Count every image message so skip_rate thins the stream
                            msg_count += 1

                    ## Get Tracking Data ##
                    if self.use_bag_detections:
                        for topic, msg, t in bag.read_messages():
                            if topic == DETECTION_TOPIC:
                                # Find the corresponding frame for this detections message;
                                # +/-3.333e7 ns is about one frame period at 30 FPS
                                found_frame = False
                                frames = sorted(self.frame_imgs.keys())
                                for frame in frames:
                                    if frame - 3.333e7 < int(msg.header.stamp.to_nsec()) < frame + 3.333e7:
                                        found_frame = True
                                        # Collect tracker_msgs
                                        detections, tracks = self.tracking_callback(msg)
                                        # Append to frame annotations table
                                        self.append_frame_trks(frame, tracks, detections)
                                        # Append to track annotations table
                                        self.append_trk_table(frame, tracks)
                                        # Debug output to monitor data extraction
                                        print("FRAME TIMESTAMP:", frame)
                                        print("DETECTION TIMESTAMP:", int(msg.header.stamp.to_nsec()))
                                        print('IMG PATH:', self.frame_imgs[frame]['url'])
                                        break
                                if not found_frame:
                                    # Try a wider time window and shuffle the detection
                                    # to an appropriate neighbouring frame
                                    for i, frame in enumerate(frames):
                                        if frame - 3.783e7 < int(msg.header.stamp.to_nsec()) < frame + 3.783e7:
                                            found_frame = True
                                            # Collect tracks
                                            detections, tracks = self.tracking_callback(msg)
                                            if len(self.frame_imgs[frame]) < 2:
                                                # Check if already assigned detections to this frame
                                                idx = frame
                                            elif i > 0 and len(self.frame_imgs[frames[i - 1]]) < 2:
                                                # Assign detections to the previous frame if empty
                                                idx = frames[i - 1]
                                            elif i < len(frames) - 1 and len(self.frame_imgs[frames[i + 1]]) < 2:
                                                # Assign detections to the next frame if empty
                                                idx = frames[i + 1]
                                            else:
                                                idx = frame
                                            # Append to frame annotations table
                                            self.append_frame_trks(idx, tracks, detections)
                                            # Append to track annotations table
                                            self.append_trk_table(idx, tracks)
                                            # Debug output to monitor data extraction
                                            print("FRAME TIMESTAMP:", idx)
                                            print("DETECTION TIMESTAMP:", int(msg.header.stamp.to_nsec()))
                                            print('IMG PATH:', self.frame_imgs[idx]['url'])
                                            break
                    else:
                        # Run the detector offline over the extracted frames
                        anns = darknet_annotator.annotate(os.path.abspath(self.frames_dir),
                                                          os.path.abspath(self.current_model_cfg_path),
                                                          os.path.abspath(self.inference_model),
                                                          os.path.abspath(self.current_data_cfg_path))
                        img_stamps = OrderedDict()
                        for uri, img_detections in sorted(anns):
                            img_timestamp = int(uri.split('_')[-1].replace('.jpg', '').replace('.png', ''))  # Get from fpath
                            img_stamps[img_timestamp] = (uri, img_detections)
                        ann_idx = 0
                        for img_timestamp in sorted(img_stamps.keys()):
                            dets = []
                            # Get corresponding image
                            uri = img_stamps[img_timestamp][0]
                            bboxes = BoundingBoxes()
                            bboxes.header.stamp = rospy.Time.from_sec(time.time())  # stamp must be a rospy.Time, not a datetime
                            bboxes.image_header = self.img_msg_header
                            for detection in img_stamps[img_timestamp][1]:
                                # Build Detections
                                bbox = BoundingBox()
                                bbox.xmin, bbox.ymin = detection['box2d']['x1'], detection['box2d']['y1']
                                bbox.xmax, bbox.ymax = detection['box2d']['x2'], detection['box2d']['y2']
                                bbox.Class = detection['category']
                                bbox.class_id = 0
                                bbox.probability = 0.99
                                bboxes.bounding_boxes.append(bbox)
                            # Collect tracker_msgs
                            self.cv_image = cv2.imread(os.path.abspath(os.path.join(self.frames_dir, uri)))
                            detections, tracks = self.tracking_callback(bboxes)
                            # Append to frame annotations table
                            self.append_frame_trks(img_timestamp, tracks, detections)
                            # Append to track annotations table
                            self.append_trk_table(img_timestamp, tracks)
                            # Debug output to monitor data extraction
                            # print("FRAME TIMESTAMP:", img_timestamp)
                            # print("DETECTION TIMESTAMP:", int(msg.header.stamp.to_nsec()))
                            # print('IMG PATH:', self.frame_imgs[img_timestamp]['url'])

                    self.save_cache(bag_file)
                    msg_count = 0
            except ROSBagException as e:
                print("\n", bag_file, "Failed! || ")
                print(str(e), '\n')
                continue

        ## Generate JSON Object in BDD Format ##
        self.generate_flickering_visualizations()
        ## Generate Ground Truth Annotations in BDD Format ##
        self.generate_annotations()
        # Export to Scalabel
        self.export(bag_file)

        # Print Summary
        print("\nFrames Extracted:", len(self.flickering_frame_imgs.keys()))
        print("=================================================================\n\n")