def setUpClass(cls) -> None:
    """Build shared inference fixtures once for the whole test class.

    Sets up filesystem paths (ONNX export, compiled C output, checkpoint,
    frozen graph), loads a single sample image/annotation pair into an
    AnnotationDatasetTool, constructs the model in inference mode, and
    finishes by calling ``cls._infer_prepare()``.
    """
    cls.current_path = os.path.dirname(os.path.abspath(__file__))
    cls.input_path = os.path.join(cls.current_path, "input")
    cls.image_path = os.path.join(cls.input_path, "images")
    cls.onnx_path = os.path.join(cls.current_path, "onnx")
    cls.onnx_file = os.path.join(cls.onnx_path, "tiny_yolo_2.onnx")
    cls.out_c_path = os.path.join(cls.current_path, "out_c")
    cls.out_c_file = os.path.join(cls.out_c_path, "qumico.so")
    cls.ckpt_file = os.path.join(cls.current_path, "input", "tiny_yolo2.ckpt")
    cls.pb_file = os.path.join(cls.current_path, "input", "tiny_yolo2.pb")
    # BUG FIX: these two assignments were duplicated verbatim in the
    # original; assign them exactly once.
    cls.classes = tiny_yolo2_infer.voc2007_classes
    cls.num_classes = len(cls.classes)
    cls.batch_size = 1
    pic_num = 0  # index of the single sample the inference tests use
    data_list_path = os.path.join(cls.input_path, "images")
    cls.label_list_path = os.path.join(cls.input_path, "annotations")
    data_list = numpy.asarray(
        pre_process_tool.get_data_path_list(data_list_path)[pic_num:pic_num + 1])
    label_list = numpy.asarray(
        pre_process_tool.get_data_path_list(cls.label_list_path)[pic_num:pic_num + 1])
    cls.annotation_dataset_tool = AnnotationDatasetTool(
        training_flag=True,
        data_list=data_list,
        label_list=label_list,
        category_class=cls.classes,
        one_hot_classes=cls.num_classes,
        resize_flag=True,
        target_h=416,
        target_w=416,
        label_file_type="voc_xml",
        format="NCHW",
        data_rescale=True,
        label_resclar=True)
    cls.model = tiny_yolo2_model.TINY_YOLO_v2(
        output_op_name="output",
        num_classes=cls.num_classes,
        is_train=False,
        width=416,
        height=416)
    cls._infer_prepare()
def test_get_data_path_list_depth_2(self):
    """A depth-2 listing must contain every test directory and its files."""
    depth = 2
    paths = pre_process_tool.get_data_path_list(self.input_path, depth=depth)
    # Every directory name (prefix + level) should appear in some returned path.
    for prefix in self.dir_test_str:
        for level in range(depth):
            hits = [p for p in paths if f"{prefix}{level}" in p]
            self.assertTrue(hits, msg=f"Directory {prefix}{level} not found")
    # Files are expected only at the deepest level.
    for prefix in self.dir_test_str:
        for level in range(depth - 1, depth):
            hits = [p for p in paths if f"{prefix}{level}.txt" in p]
            self.assertTrue(hits, msg=f"File {prefix}{level}.txt not found")
def setUpClass(cls) -> None:
    """Build shared training fixtures once for the whole test class.

    Resolves input/model paths, loads the full image + annotation lists
    into an AnnotationDatasetTool, constructs the model in training mode,
    and finishes by calling ``cls._prepare()``.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    cls.current_path = here
    cls.input_path = os.path.join(here, "input")
    cls.model_path = os.path.join(here, "model")
    cls.data_list_path = os.path.join(cls.input_path, "images")
    cls.label_list_path = os.path.join(cls.input_path, "annotations")
    cls.batch_size = 1
    cls.classes = tiny_yolo2_infer.voc2007_classes
    # Use every available sample (full-slice copy of each path list).
    cls.data_list = numpy.asarray(
        pre_process_tool.get_data_path_list(cls.data_list_path)[:])
    cls.label_list = numpy.asarray(
        pre_process_tool.get_data_path_list(cls.label_list_path)[:])
    cls.transformer = None  # no data augmentation in tests
    cls.dataset = annotation_dataset_tool.AnnotationDatasetTool(
        training_flag=True,
        data_list=cls.data_list,
        label_list=cls.label_list,
        category_class=cls.classes,
        one_hot_classes=len(cls.classes),
        resize_flag=True,
        target_h=416,
        target_w=416,
        label_file_type="voc_xml",
        format="NCHW",
        data_rescale=True,
        label_resclar=True,
        transformer=cls.transformer)
    cls.model = tiny_yolo2_model.TINY_YOLO_v2(
        height=416,
        width=416,
        output_op_name="output",
        num_classes=len(cls.classes),
        is_train=True,
        batch_size=cls.batch_size)
    cls.epochs = 999
    cls._prepare()
def setUpClass(cls) -> None:
    """Build shared fixtures for box/feature post-processing tests.

    Prepares small hand-written box and feature arrays, a one-sample
    AnnotationDatasetTool, an inference-mode model, and the checkpoint path.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    cls.current_path = here
    cls.input_path = os.path.join(here, "input")
    cls.image_path = os.path.join(cls.input_path, "images")
    # Small fixed arrays with mixed-sign coordinates for box utilities.
    cls.boxes_test = numpy.asarray([
        [0, 1, 2, 3],
        [-4, -5, -6, -7],
        [8, 9, -10, 11],
        [12, -13, -14, 15],
    ])
    cls.feature_test = numpy.asarray([
        [0, 1, 2, 3, 0],
        [-4, -5, -6, -7, 0],
        [8, 9, -10, 11, 0],
        [12, -13, -14, 15, 0],
        [16, -17, 18, 19, 0],
    ])
    cls.classes = tiny_yolo2_infer.voc2007_classes
    cls.num_classes = len(cls.classes)
    cls.batch_size = 1
    cls.data_list_path = os.path.join(cls.input_path, "images")
    cls.label_list_path = os.path.join(cls.input_path, "annotations")
    pic_num = 0  # single sample is enough for these tests
    sample = slice(pic_num, pic_num + 1)
    data_list = numpy.asarray(
        pre_process_tool.get_data_path_list(cls.data_list_path)[sample])
    label_list = numpy.asarray(
        pre_process_tool.get_data_path_list(cls.label_list_path)[sample])
    cls.annotation_dataset_tool = AnnotationDatasetTool(
        training_flag=True,
        data_list=data_list,
        label_list=label_list,
        category_class=cls.classes,
        one_hot_classes=cls.num_classes,
        resize_flag=True,
        target_h=416,
        target_w=416,
        label_file_type="voc_xml",
        format="NCHW",
        data_rescale=True,
        label_resclar=True)
    cls.model = tiny_yolo2_model.TINY_YOLO_v2(
        output_op_name="output",
        num_classes=cls.num_classes,
        is_train=False,
        width=416,
        height=416)
    cls.ckpt_file = os.path.join(here, "input", "tiny_yolo2.ckpt")
if __name__ == '__main__':
    # VOC2007 class labels in the order this project expects.
    voc2007_classes = [
        'chair', 'bird', 'sofa', 'bicycle', 'cat', 'motorbike', 'bus',
        'boat', 'sheep', 'bottle', 'cow', 'person', 'horse', 'diningtable',
        'pottedplant', 'aeroplane', 'car', 'train', 'dog', 'tvmonitor',
    ]
    num_classes = len(voc2007_classes)

    # Load the first 50 image/annotation path pairs from the mini dataset.
    root_path = "train_data_mini/"
    data_list_path = root_path + "images"
    label_list_path = root_path + "annotations"
    data_list = np.asarray(
        list_reader.get_data_path_list(data_list_path)[:50])
    label_list = np.asarray(
        list_reader.get_data_path_list(label_list_path)[:50])

    annotation_dataset_tool = AnnotationDatasetTool(
        training_flag=True,
        data_list=data_list,
        label_list=label_list,
        category_class=voc2007_classes,
        one_hot_classes=num_classes,
        resize_flag=True,
        target_h=416,
        target_w=416,
        label_file_type="voc_xml",
        format="NCHW")
def test_get_data_path_list_depth_other_depth(self):
    """Depths outside the valid range (0, 3, -1) must yield empty results."""
    for bad_depth in (0, 3, -1):
        result = pre_process_tool.get_data_path_list(self.input_path,
                                                     depth=bad_depth)
        self.assertFalse(result)
def test_get_data_path_list_no_data_root_path(self):
    """Passing None as the root path must raise TypeError."""
    with self.assertRaises(TypeError):
        pre_process_tool.get_data_path_list(None)