Example #1
    def __init__(
            self,
            gpu_id=0,
            model1_path=None,
            model2_path=None,
            data_dir=None,
            # result_dir=None,
            mnist=False,
    ):
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__data_dir = data_dir
        print(self.__data_dir)
        self.__classes = cfg.Customer_DATA["CLASSES"]
        self.__mnist = mnist
        self.__model1 = Build_Model().to(self.__device)
        if mnist:
            self.__model2 = torch.load(model2_path).double().cuda()
        else:
            self.__model2 = torch.load(model2_path).cuda()

        self.__load_model_weights(model1_path)

        self.__evalter = Evaluator(self.__model1, showatt=False)
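
A minimal usage sketch for the constructor above, assuming the enclosing class is named Evaluation (the excerpt does not show the class line); the paths are placeholders, and model2_path must point to a fully pickled model object since it is loaded with torch.load():

evaluation = Evaluation(
    gpu_id=0,
    model1_path="weight/model1.pt",   # placeholder state-dict checkpoint for Build_Model
    model2_path="weight/model2.pt",   # placeholder torch.save()'d model object
    data_dir="data/val",              # placeholder data directory
    mnist=False,
)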
Example #2
File: test.py Project: Shank2358/NPMMR-Det
    def __init__(self, weight_path=None, gpu_id=0, visiual=None, eval=False):
        self.img_size = cfg.TEST["TEST_IMG_SIZE"]
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id, force_cpu=False)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]
        self.__classes = cfg.DATA["CLASSES"]

        self.__visiual = visiual
        self.__eval = eval
        self.__model = NPMMRDet().to(self.__device)  # Single GPU

        net_model = NPMMRDet()
        if torch.cuda.device_count() > 1:  # Multi GPUs
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            net_model = torch.nn.DataParallel(net_model)  # Multi GPUs
            self.__model = net_model.to(self.__device)
        elif torch.cuda.device_count() == 1:
            self.__model = net_model.to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #3
File: dleval.py Project: Nikonz/dleval
    def __init__(self, config_path, log_level='info'):
        """
        :param str config_path: path to config file
        """
        self.__logger = logging.getLogger()
        self.__logger.setLevel(LOG_LEVELS[log_level])
        ch = logging.StreamHandler()
        formatter = logging.Formatter(
                '%(asctime)s [%(levelname)s]: %(message)s ' \
                '(%(module)s:%(funcName)s:%(lineno)d)')
        ch.setFormatter(formatter)
        self.__logger.addHandler(ch)

        with open(config_path, 'r') as ymlfile:
            self.__cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

        self.__client = Client(
            self.__cfg['moodle'].get('data_path', DEFAULT_MOODLE_DATA_PATH),
            self.__logger,
            self.__cfg['moodle'].get('timeout', DEFAULT_TIMEOUT),
            self.__cfg['moodle'].get('max_retries', DEFAULT_MAX_RETRIES))

        eval_data_path = DEFAULT_EVAL_DATA_PATH
        if 'eval' in self.__cfg and 'data_path' in self.__cfg['eval']:
            eval_data_path = self.__cfg['eval']['data_path']
        self.__evaluator = Evaluator(eval_data_path, self.__logger)
Example #4
    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,
    ):
        # self.__num_class = cfg.VOC_DATA["NUM"]
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        # self.__classes = cfg.VOC_DATA["CLASSES"]
        self.__classes = cfg.Customer_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #5
File: test.py Project: RouSage/YOLOV3
    def __init__(self,
                 model,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.model = model
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        if self.model == 's':
            self.__model = Yolov3_S().to(self.__device)
        else:
            self.__model = Yolov3().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #6
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch
Example #7
def evaluate(config, args):
    if args.onnx:
        model = onnx_model_for_eval(args.onnx, args.device == 'cuda')
    else:
        model = torch_model_for_eval(args.cfg, args.weight, device=args.device)
    eval_dataset = EvalDataset(config)
    evaluator = Evaluator(model, eval_dataset, config)
    AP = evaluator.evaluate()
    tools.print_metric(AP)
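
A hedged command-line wiring sketch for evaluate(config, args) above; the flag names mirror the attributes the function reads (onnx, cfg, weight, device), while load_config is a hypothetical config loader:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='yolov3.cfg')     # model definition for the torch path
parser.add_argument('--weight', default='best.pt')     # torch checkpoint
parser.add_argument('--onnx', default='')              # a non-empty path switches to the ONNX path
parser.add_argument('--device', default='cuda', choices=['cuda', 'cpu'])
args = parser.parse_args()

config = load_config(args.cfg)  # hypothetical helper that builds the eval config
evaluate(config, args)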
Example #8
File: pruner.py Project: zyc4me/PQDet
    def test(self):
        eval_dataset = EvalDataset(self.cfg)
        dataloader = torch.utils.data.DataLoader(
            eval_dataset, batch_size=None, shuffle=False,
            num_workers=self._num_workers, pin_memory=True,
            collate_fn=lambda x: x,
        )
        evaluator = Evaluator(self.new_model, dataloader, self.cfg)
        self.new_model.eval()
        AP = evaluator.evaluate()
        # print the metrics
        tools.print_metric(AP)
Example #9
    def __init__(self, weight_path, resume, exp_name, accumulate=None):
        # precision=16 for fp16

        super().__init__()
        self.model = Build_Model(weight_path=weight_path, resume=resume)
        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.evaluator = Evaluator(self.model,
                                   showatt=False,
                                   exp_name=exp_name)
        self.evaluator.clear_predict_file()
Example #10
File: test.py Project: smalldroid/YOLOV3-1
    def test(self):
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                print("test images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "data/{}".format(v))

                    cv2.imwrite(path, img)
                    print("saved images : {}".format(path))


        if self.__eval:
            mAP = 0
            print('*' * 20 + "Validate" + '*' * 20)

            with torch.no_grad():
                APs = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)

                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                print('mAP:%g' % (mAP))
Example #11
File: dleval.py Project: Nikonz/dleval
class DlEval:
    def __init__(self, config_path, log_level='info'):
        """
        :param str config_path: path to config file
        """
        self.__logger = logging.getLogger()
        self.__logger.setLevel(LOG_LEVELS[log_level])
        ch = logging.StreamHandler()
        formatter = logging.Formatter(
                '%(asctime)s [%(levelname)s]: %(message)s ' \
                '(%(module)s:%(funcName)s:%(lineno)d)')
        ch.setFormatter(formatter)
        self.__logger.addHandler(ch)

        with open(config_path, 'r') as ymlfile:
            self.__cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

        self.__client = Client(
            self.__cfg['moodle'].get('data_path', DEFAULT_MOODLE_DATA_PATH),
            self.__logger,
            self.__cfg['moodle'].get('timeout', DEFAULT_TIMEOUT),
            self.__cfg['moodle'].get('max_retries', DEFAULT_MAX_RETRIES))

        eval_data_path = DEFAULT_EVAL_DATA_PATH
        if 'eval' in self.__cfg and 'data_path' in self.__cfg['eval']:
            eval_data_path = self.__cfg['eval']['data_path']
        self.__evaluator = Evaluator(eval_data_path, self.__logger)

    def run(self):
        while True:
            try:
                ok = self.__client.login(self.__cfg['moodle']['username'],
                                         self.__cfg['moodle']['password'])
                if not ok:
                    self.__logger.critical('login failed')
                else:
                    allowed_assignments = \
                            self.__evaluator.get_allowed_assignments()
                    course_data = self.__client.download_new_course_data(
                        self.__cfg['moodle']['course_id'], allowed_assignments)
                    self.__evaluator.evaluate(course_data)
                    self.__client.send_feedback(course_data)
            except Exception:  # avoid swallowing SystemExit/KeyboardInterrupt
                tback = ''.join(traceback.format_exception(*sys.exc_info()))
                self.__logger.critical('an exception occurred!\n' + tback)
            sleep(self.__cfg.get('interval', DEFAULT_INTERVAL))
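
A minimal launch sketch for DlEval above; the YAML path is a placeholder, and the file must provide the moodle section (username, password, course_id) that run() reads:

if __name__ == '__main__':
    dleval = DlEval('config.yml', log_level='debug')  # placeholder config path
    dleval.run()  # loops forever, sleeping cfg['interval'] seconds between polls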
Example #12
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__showatt = cfg.TRAIN["showatt"]
        self.__visiual = visiual
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)
Example #13
    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data",label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #14
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 heatmap=False):
        self.__num_class = cfg.COCO_DATA.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL.MULTI_SCALE_VAL
        self.__flip_val = cfg.VAL.FLIP_VAL

        self.__visiual = visiual
        self.__eval = eval  # NOTE: no 'eval' parameter here, so this binds the builtin eval
        self.__classes = cfg.COCO_DATA.CLASSES

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)
Example #15
    def __init__(self,
                 cfg_path=None,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.img_size = img_size
        self.__num_class = pms.DATA["NUM"]
        self.__conf_threshold = pms.TEST["CONF_THRESH"]
        self.__nms_threshold = pms.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = pms.DATA["CLASSES"]

        self.__model = Darknet(cfg_path=cfg_path,
                               img_size=img_size).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #16
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # these should be set still
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #17
    def val(self):
        global logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0
            with torch.no_grad():
                APs, inference_time = Evaluator(self.__model, showatt=False).APs_voc(self.__multi_scale_val, self.__flip_val)
                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))
Example #18
    def val(self):
        global writer, logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0

            with torch.no_grad():
                _, _, APs = Evaluator(self.__model, self.epoch,
                                      showatt=False).APs_voc(
                                          self.__multi_scale_val,
                                          self.__flip_val)

                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
            writer.add_scalar('mAP', mAP, self.epoch)
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))
Example #19
class Tester(object):
    def __init__(self,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Yolov3().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def test(self):
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                print("test images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "data/{}".format(v))

                    cv2.imwrite(path, img)
                    print("saved images : {}".format(path))

        if self.__eval:
            mAP = 0
            print('*' * 20 + "Validate" + '*' * 20)

            with torch.no_grad():
                APs = Evaluator(self.__model).APs_voc(self.__multi_scale_test,
                                                      self.__flip_test)

                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                print('mAP:%g' % (mAP))
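
A hedged usage sketch for the Tester class above; the weight file and image directory are placeholders:

tester = Tester(weight_path="weight/best.pt",    # placeholder checkpoint
                gpu_id=0,
                img_size=544,
                visiual="data/test_imgs",        # directory of images to visualize
                eval=True)                       # also compute the VOC mAP
tester.test()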
Example #20
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 heatmap=False):
        self.__num_class = cfg.COCO_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval  # NOTE: no 'eval' parameter here, so this binds the builtin eval
        self.__classes = cfg.COCO_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def reset(self):
        path1 = os.path.join(cfg.DETECTION_PATH, "detection_result/")
        path2 = os.path.join(cfg.DETECTION_PATH, "ShowAtt/")
        for i in os.listdir(path1):
            path_file = os.path.join(path1, i)
            if os.path.isfile(path_file):
                os.remove(path_file)
            else:
                for f in os.listdir(path_file):
                    path_file2 = os.path.join(path_file, f)
                    if os.path.isfile(path_file2):
                        os.remove(path_file2)
        for i in os.listdir(path2):
            path_file = os.path.join(path2, i)
            if os.path.isfile(path_file):
                os.remove(path_file)
            else:
                for f in os.listdir(path_file):
                    path_file2 = os.path.join(path_file, f)
                    if os.path.isfile(path_file2):
                        os.remove(path_file2)

    def study(self):
        # Parameter study
        y = []
        for i in [0.08, 0.07, 0.06]:
            t = time.time()
            evaluator = COCOAPIEvaluator(model_type='YOLOv3',
                                         data_dir=cfg.DATA_PATH,
                                         img_size=cfg.VAL["TEST_IMG_SIZE"],
                                         confthre=i,
                                         nmsthre=cfg.VAL["NMS_THRESH"])
            _, r = evaluator.evaluate(self.__model)
            y.append(
                str(i) + '  ' + str(r) + '  ' + str(time.time() - t))
            np.savetxt('study.txt', y, fmt='%s')  # y = np.loadtxt('study.txt')

    def val(self):
        global logger
        logger.info("***********Start Evaluation****************")
        start = time.time()

        evaluator = COCOAPIEvaluator(model_type='YOLOv4',
                                     data_dir=cfg.DATA_PATH,
                                     img_size=cfg.VAL["TEST_IMG_SIZE"],
                                     confthre=cfg.VAL["CONF_THRESH"],
                                     nmsthre=cfg.VAL["NMS_THRESH"])
        ap50_95, ap50 = evaluator.evaluate(self.__model)
        logger.info('ap50_95:{}|ap50:{}'.format(ap50_95, ap50))
        end = time.time()
        logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def Inference(self):
        global logger
        # clear cache
        self.reset()

        logger.info("***********Start Inference****************")
        imgs = os.listdir(self.__visiual)
        logger.info("images path: {}".format(self.__visiual))
        path = os.path.join(cfg.DETECTION_PATH, "detection_result")
        logger.info("saved images at: {}".format(path))
        inference_times = []
        for v in imgs:
            start_time = time.time()
            path = os.path.join(self.__visiual, v)
            img = cv2.imread(path)
            assert img is not None

            bboxes_prd = self.__evalter.get_bbox(img, v)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=img,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
                path = os.path.join(cfg.DETECTION_PATH,
                                    "detection_result/{}".format(v))
                cv2.imwrite(path, img)
            end_time = time.time()
            inference_times.append(end_time - start_time)
        inference_time = sum(inference_times) / len(inference_times)
        fps = 1.0 / inference_time
        logger.info("Inference_Time: {:.5f} s/image, FPS: {}".format(
            inference_time, fps))
Example #21
class lightenYOLOv4(pl.LightningModule):
    def __init__(self, weight_path, resume, exp_name, accumulate=None):
        # precision=16 for fp16

        super().__init__()
        self.model = Build_Model(weight_path=weight_path, resume=resume)
        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.evaluator = Evaluator(self.model,
                                   showatt=False,
                                   exp_name=exp_name)
        self.evaluator.clear_predict_file()

    # how you want your model to do inference/predictions
    def forward(self, img):
        p, p_d = self.model(img)
        return p, p_d

    """
    def training_epoch_end(self,outputs):
        #  the function is called after every epoch is completed
        # calculating average loss
        avg_loss = 0
        avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
        # creating log dictionary
        result = pl.TrainResult()
        result.log('val/loss_epoch', avg_loss)
        return result
    """

    # the train loop INDEPENDENT of forward.
    def training_step(self, batch, batch_idx):
        img, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes, _ = batch

        p, p_d = self(img)
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(
            p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes,
            lbboxes)

        result = pl.TrainResult(minimize=loss)
        #result = pl.TrainResult(loss)
        #result.log('train_loss_ciou', loss_ciou)
        #result.log('train_loss_conf', loss_conf)
        #result.log('train_loss_cls', loss_cls)
        result.log('train_loss', loss, on_step=True, on_epoch=True)
        return result
        '''
        #https://www.learnopencv.com/tensorboard-with-pytorch-lightning/
        logs={"train_loss": loss,
            "train_loss_ciou":loss_ciou,
            "train_loss_conf":loss_conf,
            "train_loss_cls":loss_cls,}
        batch_dictionary={
            #REQUIRED: It ie required for us to return "loss"
            "loss": loss,
            #optional for batch logging purposes
            "log": logs,
        }
        return batch_dictionary

        '''

    def validation_epoch_end(self, outputs):
        APs = self.evaluator.calc_APs()
        self.evaluator.clear_predict_file()
        mAP = 0
        for i in APs:
            mAP += APs[i]
        mAP = mAP / self.model.getNC()
        result = pl.EvalResult()
        result.log('val/mAP_epoch', torch.Tensor([mAP]).cuda())
        #trainer.logger_connector.logged_metrics
        return result

    def validation_step(self, batch, batch_idx):
        img_batch, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes, img_name = batch

        for idx, img in tqdm(zip(img_name, img_batch)):
            # CHW -> HWC
            img = img.cpu().numpy().transpose(1, 2, 0)
            bboxes_prd = self.evaluator.get_bbox(img,
                                                 multi_test=False,
                                                 flip_test=False)
            self.evaluator.store_bbox(idx, bboxes_prd)
        '''
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(p, p_d, label_sbbox, label_mbbox,
                                                  label_lbbox, sbboxes, mbboxes, lbboxes)

        self.log('val_loss_ciou', loss_ciou)
        self.log('val_loss_conf', loss_conf)
        self.log('val_loss_cls', loss_cls)
        self.log('val_loss', loss)
        '''
        return 1

    def test_step(self, batch, batch_idx):
        img, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = batch

        p, p_d = self(img)
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(
            p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes,
            lbboxes)
        #, loss_ciou, loss_conf, loss_cls
        return loss

    def configure_optimizers(self):
        optimizer = optim.SGD(self.model.parameters(),
                              lr=cfg.TRAIN["LR_INIT"],
                              momentum=cfg.TRAIN["MOMENTUM"],
                              weight_decay=cfg.TRAIN["WEIGHT_DECAY"])
        return optimizer
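
A hypothetical training launch for lightenYOLOv4 above; the dataloaders and paths are placeholders, and the Trainer flags follow the legacy pytorch-lightning (~1.0) API implied by pl.TrainResult/pl.EvalResult:

model = lightenYOLOv4(weight_path="weight/yolov4.weights",  # placeholder pretrained weights
                      resume=False,
                      exp_name="exp0")
trainer = pl.Trainer(gpus=1,          # legacy single-GPU flag
                     max_epochs=50,   # placeholder epoch budget
                     precision=16)    # fp16, as the constructor comment suggests
trainer.fit(model, train_dataloader, val_dataloader)  # hypothetical DataLoader objects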
Example #22
File: test.py Project: Shank2358/NPMMR-Det
    def test(self):
        global logger
        logger.info("***********Start Evaluation****************")

        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                #print("test images : {}".format(path))
                img = cv2.imread(path)
                assert img is not None
                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]
                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "prediction/imgs_all/{}".format(v))
                    cv2.imwrite(path, img)
                    #print("saved images : {}".format(path))

        mAP = 0
        if self.__eval and cfg.TEST["EVAL_TYPE"] == 'VOC':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)

                for i in APs:
                    print("{} --> AP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                #print('mAP:%g' % (mAP))
                #print("inference time : {:.2f} ms".format(inference_time))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'COCO':
            with torch.no_grad():
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'BOTH':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)
                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
Example #23
    def video_detection(self):

        vid = cv2.VideoCapture(self.video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (
            int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
        isOutput = bool(self.output_dir)  # treat None and "" as "no output file"
        if isOutput:
            print(
                "!!! TYPE:",
                type(self.output_dir),
                type(video_FourCC),
                type(video_fps),
                type(video_size),
            )
            out = cv2.VideoWriter(self.output_dir, video_FourCC, video_fps,
                                  video_size)
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        while True:
            return_value, frame = vid.read()
            if not return_value:  # end of stream
                break

            # NOTE: the Evaluator is re-created on every frame in this excerpt
            evaluator_imgs = Evaluator(self.__model,
                                       self.__classes,
                                       self.__num_classes,
                                       visual=False)
            bboxes_prd = evaluator_imgs.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)

            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0

            cv2.putText(frame,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(255, 0, 0),
                        thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", frame)

            if isOutput:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
Example #24
class Detection(object):
    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data",label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in self.__file_list:
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
            # curr_time = timer()
            # accum_time +=  curr_time - prev_time
            cv2.imwrite(os.path.join(self.__output_dir,os.path.basename(image_path)),frame)
        print("FPS: {:.04f}".format(len(self.__file_list)/self.__evalter.inference_time))
        
    def get_first_10_imgs(self):
        fh = open(self.__label_path)
        image_paths = []
        for line in fh.readlines():
            line = line.rstrip().split()
            if len(image_paths) < 100:  # NOTE: caps at 100 paths despite the method name
                if len(line) > 1:
                    image_paths.append(os.path.join("/data", line[0]))
            else:
                break
        fh.close()
        print(image_paths)
        self.__file_list = image_paths
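
A minimal usage sketch for the Detection class above; the label file, weights, and output directory are placeholders, with label_path resolved under /data as the constructor expects:

detector = Detection(label_path="val_annotation.txt",   # resolved to /data/val_annotation.txt
                     weight_path="weight/best.pt",      # placeholder checkpoint
                     output_dir="output")               # annotated images are written here
detector.detect_demo()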
Example #25
class Detection(object):
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # these should be set still
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in tqdm(self.__file_dict.keys()):
            # print(self.__file_dict[image_path])
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
                visualize_boxes_and_labels_on_image_array(
                    image=frame,
                    boxes=self.__file_dict[image_path][:, :4],
                    classes=self.__file_dict[image_path][:, 4],
                    scores=None,
                    line_thickness=10,
                    category_index=self.__classes)
            cv2.imwrite(
                os.path.join(self.__output_dir, os.path.basename(image_path)),
                frame)
        print("FPS: {:.04f}".format(1000 * len(self.__file_dict.keys()) /
                                    self.__evalter.inference_time))

    def get_first_10_imgs(self):
        fh = open(self.__label_path)
        image_paths = {}
        ####################################
        # for random select
        ####################################
        # random.seed(1)
        # lines = random.choices(fh.readlines(),k=10)

        # for line in lines:
        #     line = line.rstrip().split()
        #     if len(line)>1:
        #         image_paths[os.path.join("/data",line[0])] = np.array([list(map(int,i.split(","))) for i in line[1:]])
        #     else:
        #         break
        ####################################
        # for on demand plot
        ####################################
        lines = fh.readlines()
        imgs = [
            "images/0021023.png", "images/0020485.png", "images/0021042.png",
            "images/0021630.png", "images/0021729.png", "images/0021781.png"
        ]
        for line in lines:
            line = line.rstrip().split()
            if line[0] in imgs:
                image_paths[os.path.join("/data", line[0])] = np.array(
                    [list(map(int, i.split(","))) for i in line[1:]])
        #####################################
        fh.close()
        self.__file_dict = image_paths
Example #26
File: test.py Project: Shank2358/NPMMR-Det
class Tester(object):
    def __init__(self, weight_path=None, gpu_id=0, visiual=None, eval=False):
        self.img_size = cfg.TEST["TEST_IMG_SIZE"]
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id, force_cpu=False)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]
        self.__classes = cfg.DATA["CLASSES"]

        self.__visiual = visiual
        self.__eval = eval
        self.__model = NPMMRDet().to(self.__device)  # Single GPU

        net_model = NPMMRDet()
        if torch.cuda.device_count() > 1:  # Multi GPUs
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            net_model = torch.nn.DataParallel(net_model)  # Multi GPUs
            self.__model = net_model.to(self.__device)
        elif torch.cuda.device_count() == 1:
            self.__model = net_model.to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))
        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt) #['model']
        #print("loading weight file is done")
        del chkpt

    def test(self):
        global logger
        logger.info("***********Start Evaluation****************")

        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                #print("test images : {}".format(path))
                img = cv2.imread(path)
                assert img is not None
                bboxes_prd = self.__evalter.get_bbox(img)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]
                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH, "prediction/imgs_all/{}".format(v))
                    cv2.imwrite(path, img)
                    #print("saved images : {}".format(path))

        mAP = 0
        if self.__eval and cfg.TEST["EVAL_TYPE"] == 'VOC':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)

                for i in APs:
                    print("{} --> AP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                #print('mAP:%g' % (mAP))
                #print("inference time : {:.2f} ms".format(inference_time))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'COCO':
            with torch.no_grad():
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))

        elif self.__eval and cfg.TEST["EVAL_TYPE"] == 'BOTH':
            with torch.no_grad():
                start = time.time()
                APs, inference_time = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)
                for i in APs:
                    print("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/VOCmAP', mAP)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
                start = time.time()
                evaluator = COCOEvaluator(data_dir=cfg.DATA_PATH,
                                          img_size=cfg.TEST["TEST_IMG_SIZE"],
                                          confthre=cfg.TEST["CONF_THRESH"],
                                          nmsthre=cfg.TEST["NMS_THRESH"])
                ap50_95, ap50, inference_time = evaluator.evaluate(self.__model)
                logger.info('ap50_95:{} | ap50:{}'.format(ap50_95, ap50))
                logger.info("inference time: {:.2f} ms".format(inference_time))
                writer.add_scalar('test/COCOAP50', ap50)
                writer.add_scalar('test/COCOAP50_95', ap50_95)
                end = time.time()
                logger.info("Test cost time:{:.4f}s".format(end - start))
Example #27
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 showatt=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__showatt = showatt
        self.__visiual = visiual
        self.__eval = eval
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt["model"])
        print("loading weight file is done")
        del chkpt

    def val(self):
        global logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0
            with torch.no_grad():
                APs, inference_time = Evaluator(self.__model,
                                                showatt=False).APs_voc(
                                                    self.__multi_scale_val,
                                                    self.__flip_val)
                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info("mAP:{}".format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v, mode=self.__mode)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(
                        image=img,
                        boxes=boxes,
                        labels=class_inds,
                        probs=scores,
                        class_labels=self.__classes,
                    )
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
Example #28
class Detection(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 video_path=None,
                 output_dir=None,
                 ):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def Video_detection(self):
        import cv2
        vid = cv2.VideoCapture(self.__video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        isOutput = bool(self.__output_dir)  # treat None and "" as "no output file"
        if isOutput:
            print("!!! TYPE:", type(self.__output_dir), type(video_FourCC), type(video_fps), type(video_size))
            out = cv2.VideoWriter(self.__output_dir, video_FourCC, video_fps, video_size)
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        while True:
            return_value, frame = vid.read()
            if not return_value:  # end of stream
                break
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)

            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(frame, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(255, 0, 0), thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", frame)
            if isOutput:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
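
A hedged usage sketch for the video detector above; the video, weight, and output paths are placeholders, and a falsy output_dir skips writing a file:

detector = Detection(gpu_id=0,
                     weight_path="weight/best.pt",    # placeholder checkpoint
                     video_path="data/demo.mp4",      # placeholder input video
                     output_dir="output/demo.avi")    # falsy value skips the VideoWriter
detector.Video_detection()  # press 'q' in the preview window to stop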
Example #29
    def train(self):
        global writer
        logger.info(
            "Training start: img size is {:d}, batch size is {:d}, worker number is {:d}"
            .format(
                cfg.TRAIN["TRAIN_IMG_SIZE"],
                cfg.TRAIN["BATCH_SIZE"],
                cfg.TRAIN["NUMBER_WORKERS"],
            ))
        logger.info(self.yolov4)
        logger.info("Train datasets number is : {}".format(
            len(self.train_dataset)))

        logger.info("        =======  start  training   ======     ")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.yolov4.train()

            mloss = torch.zeros(4)
            # logger.info("===Epoch:[{}/{}]===".format(epoch, self.epochs))
            for i, (
                    imgs,
                    label_sbbox,
                    label_mbbox,
                    label_lbbox,
                    sbboxes,
                    mbboxes,
                    lbboxes,
            ) in enumerate(self.train_dataloader):
                self.scheduler.step(
                    len(self.train_dataloader) /
                    (cfg.TRAIN["BATCH_SIZE"]) * epoch + i)

                imgs = imgs.to(self.device)
                label_sbbox = label_sbbox.to(self.device)
                label_mbbox = label_mbbox.to(self.device)
                label_lbbox = label_lbbox.to(self.device)
                sbboxes = sbboxes.to(self.device)
                mbboxes = mbboxes.to(self.device)
                lbboxes = lbboxes.to(self.device)

                p, p_d = self.yolov4(imgs)

                loss, loss_ciou, loss_conf, loss_cls = self.criterion(
                    p,
                    p_d,
                    label_sbbox,
                    label_mbbox,
                    label_lbbox,
                    sbboxes,
                    mbboxes,
                    lbboxes,
                )

                loss.backward()
                # Accumulate gradient for x batches before optimizing
                if i % self.accumulate == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()

                # Update running mean of tracked metrics
                loss_items = torch.tensor(
                    [loss_ciou, loss_conf, loss_cls, loss])
                mloss = (mloss * i + loss_items) / (i + 1)

                # Print batch results
                if i % 10 == 0:

                    logger.info(
                        "  === Epoch:[{:3}/{}],step:[{:3}/{}],img_size:[{:3}],total_loss:{:.4f}|loss_ciou:{:.4f}|loss_conf:{:.4f}|loss_cls:{:.4f}|lr:{:.4e}"
                        .format(
                            epoch + 1,
                            self.epochs,
                            i,
                            len(self.train_dataloader) - 1,
                            self.train_dataset.img_size,
                            mloss[3],
                            mloss[0],
                            mloss[1],
                            mloss[2],
                            self.optimizer.param_groups[0]["lr"],
                        ))
                    writer.add_scalar(
                        "loss_ciou",
                        mloss[0],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "loss_conf",
                        mloss[1],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "loss_cls",
                        mloss[2],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "train_loss",
                        mloss[3],
                        len(self.train_dataloader) * epoch + i,
                    )
                # multi-scale training (320-608 pixels) every 10 batches
                if self.multi_scale_train and (i + 1) % 10 == 0:
                    self.train_dataset.img_size = (
                        random.choice(range(10, 20)) * 32)

            if cfg.TRAIN["DATA_TYPE"] == "Customer":
                mAP = 0.0
                if epoch >= 0:
                    logger.info("===== Validate =====".format(
                        epoch, self.epochs))
                    logger.info("val img size is {}".format(
                        cfg.VAL["TEST_IMG_SIZE"]))
                    with torch.no_grad():
                        APs, inference_time = Evaluator(
                            self.yolov4, showatt=False).APs_voc()
                        for i in APs:
                            logger.info("{} --> mAP : {}".format(i, APs[i]))
                            mAP += APs[i]
                        mAP = mAP / self.train_dataset.num_classes
                        logger.info("mAP : {}".format(mAP))
                        logger.info(
                            "inference time: {:.2f} ms".format(inference_time))
                        writer.add_scalar("mAP", mAP, epoch)
                        self.__save_model_weights(epoch, mAP)
                        logger.info("save weights done")
                    logger.info("  ===test mAP:{:.6f}".format(mAP))
            end = time.time()
            logger.info("  ===cost time:{:.4f}s".format(end - start))
        logger.info(
            "=====Training Finished.   best_test_mAP:{:.3f}%====".format(
                self.best_mAP))
Example #30
class Evaluation(object):
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def val(self):
        global writer, logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0

            with torch.no_grad():
                _, _, APs = Evaluator(self.__model, self.epoch,
                                      showatt=False).APs_voc(
                                          self.__multi_scale_val,
                                          self.__flip_val)

                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
            writer.add_scalar('mAP', mAP, self.epoch)
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global writer, logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
            end = time.perf_counter()
            times = end - start
            FPS = len(imgs) / times
            logger.info('FPS:{}'.format(FPS))
            logger.info("  ===detection cost time:{:.4f}s".format(times))