def eval_ufo():
    # Make all four GPUs visible to TensorFlow.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

    is_tf1 = True    # toggle between the TF1 and TF2 detector
    on_test = False  # evaluate on the test split instead of train

    main_dir = "/home/jsteeg/MRP_Implementations/tf1/out"
    model_path = os.path.join(main_dir, "exported_model")
    output_dir = os.path.join(
        main_dir, "validate_results" + ("" if on_test else "_train"))

    # Select the dataset split; the label map is shared by both splits.
    split = "test" if on_test else "train"
    dataset_file = f"/home/jsteeg/ufo_data/yolo_no_crop_vc/{split}.txt"
    label_map = "/home/jsteeg/ufo_data/yolo_no_crop_vc/label_map.txt"

    save_output = True
    id_list = None    # None means detect all classes
    threshold = 0.05  # low score threshold for evaluation

    if is_tf1:
        detector = DetectorTF1(model_path, label_map,
                               class_id=id_list, threshold=threshold)
    else:
        detector = DetectorTF2(model_path, label_map,
                               class_id=id_list, threshold=threshold)

    validate(detector=detector, dataset_file=dataset_file,
             output_dir=output_dir, save_output=save_output)
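# A minimal sketch of a module entry point, assuming eval_ufo and the
# DetectorTF1/DetectorTF2 classes are defined or imported in this file;
# the __main__ guard is illustrative and not part of the original snippet.
if __name__ == '__main__':
    eval_ufo()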
def __init__(self):
    # Corner detector: locates the four corners of the identity card.
    self.corner_detection_model = Detector(
        path_to_model='./models/identity_corner/model.tflite',
        path_to_labels='./models/identity_corner/label_map.pbtxt',
        nms_threshold=0.2, score_threshold=0.3)
    # Text-region detector for identity cards.
    self.text_detection_model = Detector(
        path_to_model='./models/identity_card/model.tflite',
        path_to_labels='./models/identity_card/label_map.pbtxt',
        nms_threshold=0.2, score_threshold=0.2)
    # Text-region detector for discharge records (TF2 SavedModel directory).
    self.text_detection_discharge = DetectorTF2(
        path_to_model='./models/discharge_record',
        path_to_labels='./models/discharge_record/label_map.pbtxt',
        nms_threshold=0.33, score_threshold=0.33)
    # OCR model that reads the text inside each detected region.
    self.text_recognition_model = TextRecognition(
        path_to_checkpoint='./models/text_recogintion/transformerocr.pth')
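# Hedged usage sketch: the enclosing class name CompletedModel and the
# detectors' predict() method are assumptions for illustration; only the
# attribute names above come from the original code.
import cv2

pipeline = CompletedModel()                     # hypothetical class name
image = cv2.imread('data/samples/id_card.jpg')  # hypothetical sample image
corners = pipeline.corner_detection_model.predict(image)  # assumed interface
regions = pipeline.text_detection_model.predict(image)    # assumed interface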
                    default='models/mscoco_label_map.pbtxt')
parser.add_argument('--class_ids',
                    help='IDs of classes to detect, expects a string of ids delimited by ","',
                    type=str, default=None)  # example input "1,3" to detect person and car
parser.add_argument('--threshold', help='Detection threshold',
                    type=float, default=0.4)
parser.add_argument('--images_dir', help='Directory of input images',
                    default='data/samples/images/')
parser.add_argument('--video_path', help='Path to input video',
                    default='data/samples/pedestrian_test.mp4')
parser.add_argument('--output_directory', help='Path for output images and video',
                    default='data/samples/output')
parser.add_argument('--video_input',
                    help='Flag for video input, default: False',
                    action='store_true')  # default is False
parser.add_argument('--save_output',
                    help='Flag to save images and video with detections visualized, default: False',
                    action='store_true')  # default is False
args = parser.parse_args()

# Parse the comma-delimited class-id string into a list of ints.
id_list = None
if args.class_ids is not None:
    id_list = [int(item) for item in args.class_ids.split(',')]

# Create the output directory if results are to be saved.
if args.save_output:
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

# Instantiate the DetectorTF2 class with the chosen model and label map.
detector = DetectorTF2(args.model_path, args.path_to_labelmap,
                       class_id=id_list, threshold=args.threshold)

# Run detection on a video stream or on a folder of images.
if args.video_input:
    DetectFromVideo(detector, args.video_path,
                    save_output=args.save_output, output_dir=args.output_directory)
else:
    DetectImagesFromFolder(detector, args.images_dir,
                           save_output=args.save_output, output_dir=args.output_directory)

print("Done ...")
#cv2.destroyAllWindows()
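# Example invocations (the script name detect_objects.py is an assumption;
# the flags and default paths come from the argparse setup above):
#   python detect_objects.py --save_output
#   python detect_objects.py --video_input --video_path data/samples/pedestrian_test.mp4 \
#       --class_ids "1,3" --threshold 0.4 --save_output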