Example 1
# Imports assumed by these snippets; project-specific names (cfg, gpu,
# Build_Model, Evaluator, visualize_boxes, ...) come from each example's
# surrounding repository.
import os
import cv2
import numpy as np
import torch
from timeit import default_timer as timer
class Detection(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 video_path=None,
                 output_dir=None,
                 ):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def Video_detection(self):
        import cv2
        vid = cv2.VideoCapture(self.__video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        isOutput = bool(self.__output_dir)  # handles both None and ""
        if isOutput:
            print("!!! TYPE:", type(self.__output_dir), type(video_FourCC), type(video_fps), type(video_size))
            out = cv2.VideoWriter(self.__output_dir, video_FourCC, video_fps, video_size)
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        while True:
            return_value, frame = vid.read()
            if not return_value:  # end of stream or failed read
                break
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)

            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(frame, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(255, 0, 0), thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", frame)
            if isOutput:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        vid.release()
        if isOutput:
            out.release()
        cv2.destroyAllWindows()
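
A minimal driver sketch for the class above; the checkpoint, video, and output paths are hypothetical placeholders, not paths taken from the original project:

if __name__ == "__main__":
    detector = Detection(gpu_id=0,
                         weight_path="weight/best.pt",    # hypothetical
                         video_path="video/test.mp4",     # hypothetical
                         output_dir="output/result.avi")  # hypothetical
    detector.Video_detection()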
Example 2
class Tester(object):
    def __init__(self,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Yolov3().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def test(self):
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                print("test images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "data/{}".format(v))

                    cv2.imwrite(path, img)
                    print("saved images : {}".format(path))

        if self.__eval:
            mAP = 0
            print('*' * 20 + "Validate" + '*' * 20)

            with torch.no_grad():
                APs = Evaluator(self.__model).APs_voc(self.__multi_scale_test,
                                                      self.__flip_test)

                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                print('mAP:%g' % (mAP))
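
Every snippet in this collection slices the prediction array the same way. A small self-contained sketch of the layout these examples assume, where each row of bboxes_prd is [x1, y1, x2, y2, score, class] (the numbers below are made up):

import numpy as np

bboxes_prd = np.array([[48., 24., 320., 280., 0.91, 11.],
                       [10., 60., 100., 200., 0.47, 6.]])
boxes = bboxes_prd[..., :4]                       # corner coordinates
scores = bboxes_prd[..., 4]                       # confidence scores
class_inds = bboxes_prd[..., 5].astype(np.int32)  # integer class ids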
Example 3
    def video_detection(self):

        vid = cv2.VideoCapture(self.video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (
            int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
        isOutput = bool(self.output_dir)  # handles both None and ""
        if isOutput:
            print(
                "!!! TYPE:",
                type(self.output_dir),
                type(video_FourCC),
                type(video_fps),
                type(video_size),
            )
            out = cv2.VideoWriter(self.output_dir, video_FourCC, video_fps,
                                  video_size)
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        # build the evaluator once, outside the per-frame loop
        evaluator_imgs = Evaluator(self.__model,
                                   self.__classes,
                                   self.__num_classes,
                                   visual=False)
        while True:
            return_value, frame = vid.read()
            if not return_value:  # end of stream or failed read
                break

            bboxes_prd = evaluator_imgs.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)

            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0

            cv2.putText(frame,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(255, 0, 0),
                        thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", frame)

            if isOutput:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        vid.release()
        if isOutput:
            out.release()
        cv2.destroyAllWindows()
Example 4
class Evaluation(object):
    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,
    ):
        # self.__num_class = cfg.VOC_DATA["NUM"]
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        # self.__classes = cfg.VOC_DATA["CLASSES"]
        self.__classes = cfg.Customer_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def val(self):
        global logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0
            with torch.no_grad():
                APs, inference_time = Evaluator(self.__model,
                                                showatt=False).APs_voc(
                                                    self.__multi_scale_val,
                                                    self.__flip_val)
                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info("mAP:{}".format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global logger
        if not self.__visiual:
            return

        logger.info("***********Start Detection****************")
        for root, _, filenames in os.walk(self.__visiual):
            for tp_f in filenames:
                path = os.path.join(root, tp_f)
                _, file_type = os.path.splitext(path)
                if file_type != '.jpg':  # skip non-image files, e.g. .json annotations
                    continue
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None
                v = os.path.basename(path)
                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(
                        image=img,
                        boxes=boxes,
                        labels=class_inds,
                        probs=scores,
                        class_labels=self.__classes,
                    )
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
Example 5
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__showatt = cfg.TRAIN["showatt"]
        self.__visiual = visiual
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        # self.__model.load_state_dict(chkpt["model"])
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def val(self):
        global logger
        logger.info("***********Start Evaluation****************")
        start = time.time()
        mAP = 0
        with torch.no_grad():
            APs, inference_time = Evaluator(self.__model,
                                            showatt=False).APs_voc()
            for i in APs:
                logger.info("{} --> mAP : {}".format(i, APs[i]))
                mAP += APs[i]
            mAP = mAP / self.__num_class
            logger.info("mAP:{}".format(mAP))
            logger.info("inference time: {:.2f} ms".format(inference_time))
        end = time.time()
        logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v, mode=self.__mode)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(
                        image=img,
                        boxes=boxes,
                        labels=class_inds,
                        probs=scores,
                        class_labels=self.__classes,
                    )
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
Example 6
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 heatmap=False
                 ):
        self.img_size = img_size
        self.__num_class = cfg.COCO_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.COCO_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)


    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def reset(self):
        # clear previous detection results and attention maps
        for sub in ("detection_result/", "ShowAtt/"):
            root = os.path.join(cfg.DETECTION_PATH, sub)
            for i in os.listdir(root):
                path_file = os.path.join(root, i)
                if os.path.isfile(path_file):
                    os.remove(path_file)
                else:
                    for f in os.listdir(path_file):
                        path_file2 = os.path.join(path_file, f)
                        if os.path.isfile(path_file2):
                            os.remove(path_file2)

    def study(self):
        # Parameter study over confidence thresholds
        y = []
        for i in [0.08, 0.07, 0.06]:
            t = time.time()
            evaluator = COCOAPIEvaluator(model_type='YOLOv3',
                                         data_dir=cfg.DATA_PATH,
                                         img_size=cfg.VAL["TEST_IMG_SIZE"],
                                         confthre=i,
                                         nmsthre=cfg.VAL["NMS_THRESH"])
            _, r = evaluator.evaluate(self.__model)
            y.append('{}  {}  {}'.format(i, r, time.time() - t))
            np.savetxt('study.txt', y, fmt='%s')  # y = np.loadtxt('study.txt')

    def val(self):
        global writer, logger
        logger.info("***********Start Evaluation****************")
        start = time.time()

        evaluator = COCOAPIEvaluator(model_type='YOLOv4',
                                     data_dir=cfg.DATA_PATH,
                                     img_size=cfg.VAL["TEST_IMG_SIZE"],
                                     confthre=cfg.VAL["CONF_THRESH"],
                                     nmsthre=cfg.VAL["NMS_THRESH"])
        ap50_95, ap50 = evaluator.evaluate(self.__model)
        logger.info('ap50_95:{}|ap50:{}'.format(ap50_95, ap50))
        end = time.time()
        logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def Inference(self):
        global writer, logger
        # clear cached results from previous runs
        self.reset()

        logger.info("***********Start Inference****************")
        imgs = os.listdir(self.__visiual)
        logger.info("images path: {}".format(self.__visiual))
        path = os.path.join(cfg.DETECTION_PATH, "detection_result")
        logger.info("saved images at: {}".format(path))
        inference_times = []
        for v in imgs:
            start_time = time.time()
            path = os.path.join(self.__visiual, v)
            img = cv2.imread(path)
            assert img is not None

            bboxes_prd = self.__evalter.get_bbox(img, v)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                path = os.path.join(cfg.DETECTION_PATH, "detection_result/{}".format(v))
                cv2.imwrite(path, img)
            end_time = time.time()
            inference_times.append(end_time - start_time)
        inference_time = sum(inference_times) / len(inference_times)
        fps = 1.0 / inference_time
        logger.info("Inference_Time: {:.5f} s/image, FPS: {}".format(inference_time, fps))
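
A hypothetical driver for this COCO-oriented variant; the paths are placeholders. Note that Inference() calls reset() itself, so stale results are cleared before new ones are written:

evaluation = Evaluation(gpu_id=0,
                        weight_path="weight/best.pt",  # hypothetical
                        visiual="data/test_imgs",      # hypothetical
                        heatmap=False)
evaluation.val()        # COCO AP50:95 and AP50 via COCOAPIEvaluator
evaluation.Inference()  # annotated images plus average s/image and FPS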
Example 7
class Evaluation(object):
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def val(self):
        global writer, logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0

            with torch.no_grad():
                _, _, APs = Evaluator(self.__model, self.epoch,
                                      showatt=False).APs_voc(
                                          self.__multi_scale_val,
                                          self.__flip_val)

                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
            writer.add_scalar('mAP', mAP, self.epoch)
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global writer, logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
            end = time.perf_counter()
            times = end - start
            FPS = len(imgs) / times
            logger.info('FPS:{}'.format(FPS))
            logger.info("  ===detection cost time:{:.4f}s".format(times))
Example 8
class Detection(object):
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # these should be set still
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in tqdm(self.__file_dict.keys()):
            # print(self.__file_dict[image_path])
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
                visualize_boxes_and_labels_on_image_array(
                    image=frame,
                    boxes=self.__file_dict[image_path][:, :4],
                    classes=self.__file_dict[image_path][:, 4],
                    scores=None,
                    line_thickness=10,
                    category_index=self.__classes)
            cv2.imwrite(
                os.path.join(self.__output_dir, os.path.basename(image_path)),
                frame)
        print("FPS: {:.04f}".format(1000 * len(self.__file_dict.keys()) /
                                    self.__evalter.inference_time))

    def get_first_10_imgs(self):
        image_paths = {}
        ####################################
        # on-demand plot of a fixed image subset
        # (alternative: randomly sample label lines, e.g. random.choices(lines, k=10))
        ####################################
        imgs = [
            "images/0021023.png", "images/0020485.png", "images/0021042.png",
            "images/0021630.png", "images/0021729.png", "images/0021781.png"
        ]
        with open(self.__label_path) as fh:
            for line in fh:
                line = line.rstrip().split()
                if line and line[0] in imgs:
                    image_paths[os.path.join("/data", line[0])] = np.array(
                        [list(map(int, i.split(","))) for i in line[1:]])
        self.__file_dict = image_paths
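
The label file parsed above appears to store one image path per line followed by comma-separated boxes. A self-contained sketch of that parsing on a made-up line:

import numpy as np

line = "images/0021023.png 48,24,320,280,11 10,60,100,200,6".rstrip().split()
boxes = np.array([list(map(int, i.split(","))) for i in line[1:]])
# boxes[:, :4] are the box corners, boxes[:, 4] the integer class label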
Example 9
class Detection(object):
    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data",label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in self.__file_list:
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
            # curr_time = timer()
            # accum_time +=  curr_time - prev_time
            cv2.imwrite(os.path.join(self.__output_dir, os.path.basename(image_path)), frame)
        print("FPS: {:.04f}".format(len(self.__file_list) / self.__evalter.inference_time))
        
    def get_first_10_imgs(self):
        image_paths = []
        with open(self.__label_path) as fh:
            for line in fh:
                line = line.rstrip().split()
                if len(image_paths) < 100:
                    if len(line) > 1:
                        image_paths.append(os.path.join("/data", line[0]))
                else:
                    break
        print(image_paths)
        self.__file_list = image_paths
Example 10
class Tester(object):
    def __init__(self, weight_path=None, gpu_id=0, visiual=None, eval=False):
        self.img_size = cfg.TEST["TEST_IMG_SIZE"]
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id, force_cpu=False)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]
        self.__classes = cfg.DATA["CLASSES"]

        self.__visiual = visiual
        self.__eval = eval
        net_model = NPMMRDet()
        if torch.cuda.device_count() > 1:  # use multiple GPUs when available
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            net_model = torch.nn.DataParallel(net_model)
        self.__model = net_model.to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))
        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt) #['model']
        #print("loading weight file is done")
        del chkpt

    def test(self):
        global logger
        logger.info("***********Start Evaluation****************")

        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                #print("test images : {}".format(path))
                img = cv2.imread(path)
                assert img is not None
                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]
                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "prediction/imgs_all/{}".format(v))
                    cv2.imwrite(path, img)
                    #print("saved images : {}".format(path))

        mAP = 0
        if self.__eval and cfg.TEST["EVAL_TYPE"] == 'VOC':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)

                for i in APs:
                    print("{} --> AP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                #print('mAP:%g' % (mAP))
                #print("inference time : {:.2f} ms".format(inference_time))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'COCO':
            with torch.no_grad():
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'BOTH':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)
                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
Example 11
class Tester(object):
    def __init__(self,
                 cfg_path=None,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False
                 ):
        self.img_size = img_size
        self.__num_class = pms.DATA["NUM"]
        self.__conf_threshold = pms.TEST["CONF_THRESH"]
        self.__nms_threshold = pms.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = pms.DATA["CLASSES"]

        self.__model = Darknet(cfg_path=cfg_path, img_size=img_size).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)


    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt['model'])
        print("loading weight file is done")
        del chkpt


    def test(self):
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                print("test images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(pms.PROJECT_PATH, "data/{}".format(v))

                    cv2.imwrite(path, img)
                    print("saved images : {}".format(path))


        if self.__eval:
            mAP = 0
            print('*' * 20 + "Validate" + '*' * 20)

            with torch.no_grad():
                result = self.__evalter.APs_voc()
                for i in result:
                    print(i, result[i])
                    mAP += result[i]
                mAP = mAP / self.__num_class
                print('mAP:%g' % (mAP))
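
A hypothetical driver; the Darknet cfg and weight paths are placeholders. Unlike the other examples, this variant loads chkpt['model'], so it expects a checkpoint dict that stores the weights under a 'model' key:

tester = Tester(cfg_path="cfg/yolov3.cfg",     # hypothetical
                weight_path="weight/best.pt",  # hypothetical
                gpu_id=0,
                visiual="./test_img/",
                eval=True)
tester.test()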
Example 12
    def test(self):
        if self.mode not in ["+voc", "+coco", None]:
            print("===== " * 4 + "visualizing " + self.mode + " =====" * 4)

            # create a fresh "visual" output directory
            visual_result_path = os.path.join(cfg.PROJECT_PATH, "data",
                                              "visual")
            if os.path.exists(visual_result_path):
                shutil.rmtree(visual_result_path)
            os.mkdir(visual_result_path)

            imgs = os.listdir(self.mode)
            # build the evaluator once, outside the per-image loop
            evaluator_imgs = Evaluator(self.__model,
                                       self.__classes,
                                       self.__num_classes,
                                       visual=False)
            for v in tqdm(imgs):
                path = os.path.join(self.mode, v)
                # print("test images: {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = evaluator_imgs.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(visual_result_path, v)
                    cv2.imwrite(path, img)
                    # print("saved images: {}".format(path))

        elif self.mode == "+voc":
            print("===== " * 4 + "evaluating " + self.mode + " =====" * 4)

            evaluator_voc = Evaluator(self.__model,
                                      self.__classes,
                                      self.__num_classes,
                                      visual=True)
            APs = evaluator_voc.APs_voc(
                multi_scale_test=self.__multi_scale_test,
                flip_test=self.__flip_test)

            APsum = 0
            for i in APs:
                print("{:16}: {}".format(i, APs[i]))
                APsum += APs[i]
            mAP = APsum / self.__num_classes
            print("mAP: {}".format(mAP))

        else:
            print("===== " * 4 + "evaluating " + str(self.mode) + " =====" * 4)

            evaluator_coco = Evaluator(self.__model,
                                       self.__classes,
                                       self.__num_classes,
                                       visual=True)
            APs = evaluator_coco.APs_coco()
Example 13
class Evaluation(object):
    def __init__(
            self,
            gpu_id=0,
            model1_path=None,
            model2_path=None,
            data_dir=None,
            # result_dir=None,
            mnist=False,
    ):
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__data_dir = data_dir
        print(self.__data_dir)
        self.__classes = cfg.Customer_DATA["CLASSES"]
        self.__mnist = mnist
        self.__model1 = Build_Model().to(self.__device)
        if mnist:
            self.__model2 = torch.load(model2_path).double().cuda()
        else:
            self.__model2 = torch.load(model2_path).cuda()

        self.__load_model_weights(model1_path)

        self.__evalter = Evaluator(self.__model1, showatt=False)
        # self.__result_dir = result_dir

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model1.load_state_dict(chkpt)
        # print("loading weight file is done")
        del chkpt

    def detection(self):
        if os.path.isdir(self.__data_dir):
            imgs = os.listdir(self.__data_dir)
            print("***********Start Detection****************")
            for v in imgs:
                path = self.__data_dir + "/" + v
                print("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v)
                points, max_l = get_points(img, bboxes_prd=bboxes_prd, model=self.__model2, mnist=self.__mnist)
                if points is None:
                    continue  # no usable points in this image; skip to the next one
                # print(points)
                matrix_calculator = calculator(self.__classes)
                matrix_calculator.get_from_points(points, max_l)
                print(matrix_calculator())
        else:
            raise NotADirectoryError("The data directory does not exist: {}".format(self.__data_dir))
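
A hypothetical driver for this two-model pipeline; both weight paths and the data directory are placeholders:

evaluation = Evaluation(gpu_id=0,
                        model1_path="weight/detector.pt",    # hypothetical
                        model2_path="weight/classifier.pt",  # hypothetical
                        data_dir="data/val_imgs",            # hypothetical
                        mnist=False)
evaluation.detection()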