Code Example #1
import os

import cv2
import numpy as np
import torch

# Darknet, Evaluator, visualize_boxes, gpu, and pms (the params/config module)
# are imported from the surrounding project and are not shown in this snippet.


class Tester(object):
    def __init__(self,
                 cfg_path=None,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.img_size = img_size
        self.__num_class = pms.DATA["NUM"]
        self.__conf_threshold = pms.TEST["CONF_THRESH"]
        self.__nms_threshold = pms.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = pms.DATA["CLASSES"]

        self.__model = Darknet(cfg_path=cfg_path,
                               img_size=img_size).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)

    def __load_model_weights(self, weight_path):
        # Load a trained checkpoint onto the selected device.
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def test(self):
        # Visualization mode: run detection on every image in the given directory
        # and save the rendered results under <PROJECT_PATH>/data/.
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                print("test images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(pms.PROJECT_PATH, "data/{}".format(v))

                    cv2.imwrite(path, img)
                    print("saved images : {}".format(path))

        # Evaluation mode: compute VOC-style AP per class and average into mAP.
        if self.__eval:
            mAP = 0
            print('*' * 20 + "Validate" + '*' * 20)

            with torch.no_grad():
                result = self.__evalter.APs_voc()
                for i in result:
                    print(i, result[i])
                    mAP += result[i]
                mAP = mAP / self.__num_class
                print('mAP:%g' % (mAP))
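
A minimal usage sketch for the Tester class above. The config path, weight path, and image directory below are placeholders rather than files from the original repository:

tester = Tester(cfg_path="cfg/yolov3.cfg",        # placeholder config path
                weight_path="weights/best.pt",    # placeholder checkpoint path
                gpu_id=0,
                img_size=544,
                visiual="./data/samples",         # directory of images to visualize
                eval=True)                        # also run VOC evaluation
tester.test()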
Code Example #2
File: test.py  Project: AkitsukiM/YOLOv3x-pytorch
    def test(self):
        # self.mode is either a directory of images to visualize, "+voc", or "+coco".
        if self.mode not in ["+voc", "+coco", None]:
            print("===== " * 4 + "visualizing " + self.mode + " =====" * 4)

            # Create a fresh "visual" output directory.
            visual_result_path = os.path.join(cfg.PROJECT_PATH, "data",
                                              "visual")
            if os.path.exists(visual_result_path):
                shutil.rmtree(visual_result_path)
            os.mkdir(visual_result_path)

            imgs = os.listdir(self.mode)
            for v in tqdm(imgs):
                path = os.path.join(self.mode, v)
                # print("test images: {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                evaluator_imgs = Evaluator(self.__model,
                                           self.__classes,
                                           self.__num_classes,
                                           visual=False)
                bboxes_prd = evaluator_imgs.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(visual_result_path, v)
                    cv2.imwrite(path, img)
                    # print("saved images: {}".format(path))

        elif self.mode == "+voc":
            print("===== " * 4 + "evaluating " + self.mode + " =====" * 4)

            evaluator_voc = Evaluator(self.__model,
                                      self.__classes,
                                      self.__num_classes,
                                      visual=True)
            APs = evaluator_voc.APs_voc(
                multi_scale_test=self.__multi_scale_test,
                flip_test=self.__flip_test)

            APsum = 0
            for i in APs:
                print("{:16}: {}".format(i, APs[i]))
                APsum += APs[i]
            mAP = APsum / self.__num_classes
            print("mAP: {}".format(mAP))

        else:
            # Reached for "+coco"; self.mode is assumed not to be None here.
            print("===== " * 4 + "evaluating " + self.mode + " =====" * 4)

            evaluator_coco = Evaluator(self.__model,
                                       self.__classes,
                                       self.__num_classes,
                                       visual=True)
            APs = evaluator_coco.APs_coco()
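
The three branches above are selected purely by the string stored in self.mode. A small, self-contained sketch of that dispatch; the helper name resolve_test_mode is illustrative and not part of the project:

def resolve_test_mode(mode):
    # Mirror the branching of test(): anything other than "+voc"/"+coco"/None
    # is treated as a directory of images to visualize.
    if mode not in ["+voc", "+coco", None]:
        return "visualize directory: {}".format(mode)
    elif mode == "+voc":
        return "evaluate with VOC metrics (APs_voc)"
    else:
        return "evaluate with COCO metrics (APs_coco)"

print(resolve_test_mode("./data/samples"))  # visualize directory: ./data/samples
print(resolve_test_mode("+voc"))            # evaluate with VOC metrics (APs_voc)
print(resolve_test_mode("+coco"))           # evaluate with COCO metrics (APs_coco)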