def main():
    example = tf.train.Example()
    for record_path in args.records:
        if not os.path.isfile(record_path):
            print("Record %s does not exist!" % record_path)
            continue

        for record in tf.io.tf_record_iterator(record_path):
            example.ParseFromString(record)
            # Dict mapping feature names to their Feature protos.
            features = example.features.feature

            image_bytes = np.frombuffer(
                features["image/encoded"].bytes_list.value[0], dtype=np.uint8)
            image = cv2.imdecode(image_bytes, cv2.IMREAD_COLOR)
            im_height, im_width, _ = image.shape

            # Box coordinates are stored normalized; convert them to pixels.
            xmin = np.array(
                features['image/object/bbox/xmin'].float_list.value) * im_width
            xmax = np.array(
                features['image/object/bbox/xmax'].float_list.value) * im_width
            ymin = np.array(
                features['image/object/bbox/ymin'].float_list.value) * im_height
            ymax = np.array(
                features['image/object/bbox/ymax'].float_list.value) * im_height

            text = features['image/object/class/text'].bytes_list.value
            text = [label.decode('utf-8') for label in text]

            width = xmax - xmin
            height = ymax - ymin
            bboxes = np.column_stack((xmin, ymin, width, height))
            print("Bounding Boxes:", bboxes)

            image = cv2.resize(image, (0, 0), fx=args.scale, fy=args.scale)
            scaled_bboxes = bboxes * args.scale
            drawing_utils.draw_bboxes(image, scaled_bboxes, text)

            lines = []
            filename = features['image/filename'].bytes_list.value[0]
            filename = filename.decode('utf-8')
            filename = "/".join(filename.split("/")[-3:])
            lines.append("Record: %s" % record_path)
            lines.append(filename)
            lines.append("Num boxes: %d" % len(text))
            for i, line in enumerate(lines):
                drawing_utils.shadow_text(image, line, (5, 20 * i + 15),
                                          font_scale=0.5, font_weight=1)

            cv2.imshow("window", image)
            if (cv2.waitKey(0) & 0xFF) == ord('q'):
                return
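The function above relies on an `args` namespace with `records` and `scale` fields plus the usual imports. A minimal, hypothetical scaffold for running it directly might look like the following; the flag names, defaults, and help strings are assumptions, not the project's actual CLI.

import argparse
import os

import cv2
import numpy as np
import tensorflow as tf

import drawing_utils  # project-local drawing helpers used throughout

# Hypothetical CLI; only `records` and `scale` are implied by the code above.
parser = argparse.ArgumentParser(
    description="Visualize bounding boxes stored in TFRecord files.")
parser.add_argument("records", nargs="+", help="Paths to TFRecord files")
parser.add_argument("--scale", type=float, default=1.0,
                    help="Display scale applied to images and boxes")
args = parser.parse_args()

if __name__ == "__main__":
    main()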
def verify_bboxes(frame, bboxes, classes, yes):
    frame_to_draw = frame.copy()
    drawing_utils.draw_bboxes(frame_to_draw, bboxes, classes, args.scale)
    drawing_utils.shadow_text(frame_to_draw, "Do boxes look okay (y/n)?",
                              (100, 80))
    show_scaled(window, frame_to_draw)

    if yes:
        return

    # Confirm sanity check
    key = cv2.waitKey(0) & 0xFF
    if key == ord('n'):
        print("Poor bounding boxes. Quitting!")
        sys.exit()
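`show_scaled` and the global `window` name are defined elsewhere in the project. A plausible sketch of such a helper, assuming it simply resizes the frame by the display scale before showing it:

def show_scaled(window_name, frame):
    # Assumed behavior: scale the frame for display, then show it in the
    # named OpenCV window. The real helper may differ.
    scaled = cv2.resize(frame, (0, 0), fx=args.scale, fy=args.scale)
    cv2.imshow(window_name, scaled)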
def draw_text(image, text, location):
    drawing_utils.shadow_text(image, text, location, font_scale=0.5,
                              font_weight=1)
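`drawing_utils.shadow_text` itself is project code not shown here. As an illustrative sketch only (not the module's actual implementation), a shadowed label is typically drawn by rendering the text twice with a small offset:

def shadow_text(image, text, location, font_scale=0.5, font_weight=1):
    # Illustrative only: dark offset pass first, then the bright
    # foreground pass on top, so the label stays readable on any background.
    x, y = location
    cv2.putText(image, text, (x + 1, y + 1), cv2.FONT_HERSHEY_SIMPLEX,
                font_scale, (0, 0, 0), font_weight + 1, cv2.LINE_AA)
    cv2.putText(image, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                font_scale, (255, 255, 255), font_weight, cv2.LINE_AA)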
    # Inside the per-detection loop: convert normalized box corners to pixels.
    box = result['detection_boxes'][i]
    score = result['detection_scores'][i]
    y1, x1 = int(box[0] * h), int(box[1] * w)
    y2, x2 = int(box[2] * h), int(box[3] * w)
    boxes.append((class_, score, x1, y1, x2, y2))

# Back at the per-frame level: convert corners to (x, y, width, height) boxes.
bboxes = [
    np.array([x1, y1, x2 - x1, y2 - y1])
    for (cls, score, x1, y1, x2, y2) in boxes
]
classes = [objdet.category_index[int(cls)]['name'] for cls, *_ in boxes]
drawing_utils.draw_bboxes(img, bboxes, classes)

print("Frame:", counter, end="\r")
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
drawing_utils.shadow_text(
    img, "Frame Rate: %0.2f" % (1.0 / (det_end - det_start)), (0, 20))

if not args.headless:
    cv2.imshow('image', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

if args.write_movie:
    writer.write(img)

if args.write_images:
    print("[%d] Writing original to %s" % (counter, images_dir))
    cv2.imwrite(os.path.join(images_dir, "orig_%05d.png" % counter), frame)
    print("[%d] Writing boxes to %s" % (counter, images_dir))
    cv2.imwrite(os.path.join(images_dir, "box_%05d.png" % counter), img)

counter += 1
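The `writer`, `images_dir`, and `counter` used above are set up earlier in the script. A hedged sketch of that setup, assuming a standard `cv2.VideoWriter`; the output path, codec, frame rate, and directory name are assumptions:

# Hypothetical output setup; the real script may use different paths, codecs,
# or frame rates.
frame_height, frame_width = frame.shape[:2]  # from the first decoded frame
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
writer = cv2.VideoWriter("detections.mp4", fourcc, 30.0,
                         (frame_width, frame_height))

images_dir = "frames"
os.makedirs(images_dir, exist_ok=True)
counter = 0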
    # Inside the loop that prefixes each class label with its box index.
    annotated_classes.append("%d:%s" % (i, classes[i]))

if args.refine:
    refine_bboxes(bboxes, classes, frame, trackers)

end = time.time()
fps = 1.0 / (end - start)

original = frame.copy()
drawing_utils.draw_bboxes(frame, bboxes, annotated_classes, args.scale)

# Potentially save the frame to disk using @dek's format
if args.frames > 0 and frame_count % args.frames == 0:
    save_frame(original, frame, bboxes, classes, run_path, frame_count)

drawing_utils.shadow_text(frame, tracker_name, (100, 20))
drawing_utils.shadow_text(frame, "FPS: " + str(int(fps)), (100, 50))
drawing_utils.shadow_text(frame, "Frame: " + str(frame_count), (100, 80))

# Display result
show_scaled(window, frame)
if not args.experiment:
    writer.write(frame)

k = cv2.waitKey(1) & 0xff
if k == ord('q'):
    break
elif k == ord('p') or k == ord(' '):
    # Let the user correct tracks
    correction_mode(original, trackers, bboxes, classes,