Example #1
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 heatmap=False):
        self.img_size = img_size
        self.__num_class = cfg.COCO_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval  # NOTE: there is no 'eval' parameter here, so this binds Python's built-in eval
        self.__classes = cfg.COCO_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)
Example #2
    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,
    ):
        self.__num_class = cfg.VOC_DATA.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL.MULTI_SCALE_VAL
        self.__flip_val = cfg.VAL.FLIP_VAL

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.VOC_DATA.CLASSES

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
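Note the two config access styles across these snippets: some variants expose the yolov4_config settings as plain dicts (cfg.VAL["CONF_THRESH"]), others as attribute-style nodes (cfg.VAL.CONF_THRESH, e.g. via yacs/EasyDict). A minimal dict-style sketch, with placeholder values, for orientation only:

# config/yolov4_config.py -- minimal dict-style sketch; the values below are
# illustrative placeholders, not the repository's actual settings.
VOC_DATA = {"NUM": 20, "CLASSES": ["aeroplane", "bicycle", "bird"]}  # truncated
VAL = {
    "CONF_THRESH": 0.005,
    "NMS_THRESH": 0.45,
    "MULTI_SCALE_VAL": False,
    "FLIP_VAL": False,
    "TEST_IMG_SIZE": 544,
}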
Example #3
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # TODO: these thresholds still need to be set properly
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #4
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 showatt=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__showatt = showatt
        self.__visiual = visiual
        self.__eval = eval
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)
Example #5
class Tester(object):
    def __init__(self, log_dir):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.__load_best_weights()

    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt)
        del chkpt

    def test(self):
        logger.info(self.yolov4)
        evaluator = COCOAPIEvaluator(cfg=cfg,
                                     img_size=cfg.VAL.TEST_IMG_SIZE,
                                     confthre=cfg.VAL.CONF_THRESH,
                                     nmsthre=cfg.VAL.NMS_THRESH)
        logger.info("\n===============  start  testing   ===============")
        start = time.time()
        coco_stat = evaluator.evaluate(self.yolov4)
        end = time.time()

        stat_names = [
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]",
            "Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ]",
            "Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ]",
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]",
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]",
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]",
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]",
        ]
        # Log the twelve standard COCO statistics in pycocotools order
        for name, value in zip(stat_names, coco_stat):
            logger.info("{} = {:.04f}".format(name, value))
        logger.info("cost time:{:.4f}s".format(end - start))
        logger.info("FPS:{:.4f}".format(
            len(evaluator.dataloader) / (end - start)))
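A minimal driver for this Tester, assuming logger and cfg are configured at module level as the class expects (the log directory is a placeholder):

if __name__ == "__main__":
    tester = Tester(log_dir="runs/exp0")  # hypothetical log directory
    tester.test()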
Example #6
            if m.inplace:
                continue
        out = m(input_)[0]
        if len(out):
            for j in range(len(out)):
                out_sizes.append(np.array(out[j].size()))
            break

    total_nums = 0
    for i in range(len(out_sizes)):
        s = out_sizes[i]
        nums = np.prod(np.array(s))
        total_nums += nums

    print(
        "Model {} : intermediate variables: {:.3f} M (without backward)".format(
            model._get_name(), total_nums * type_size / 1000 / 1000))
    print("Model {} : intermediate variables: {:.3f} M (with backward)".format(
        model._get_name(), total_nums * type_size * 2 / 1000 / 1000))


if __name__ == "__main__":
    from model.build_model import Build_Model
    import torch

    net = Build_Model()
    print(net)

    in_img = torch.randn(1, 3, 320, 320)
    modelsize(net, in_img)
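The estimate printed above is simply element count times element size. A quick check of the arithmetic, assuming 32-bit floats (type_size = 4) and an illustrative 1x255x40x40 output tensor (the shape is made up, not from the source):

import numpy as np

out = np.zeros((1, 255, 40, 40), dtype=np.float32)  # assumed shape, for illustration
total_nums = np.prod(out.shape)                     # 408,000 elements
type_size = 4                                       # bytes per float32
print("{:.3f} M".format(total_nums * type_size / 1000 / 1000))  # -> 1.632 M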
Example #7
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 heatmap=False):
        self.__num_class = cfg.COCO_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval  # NOTE: there is no 'eval' parameter here, so this binds Python's built-in eval
        self.__classes = cfg.COCO_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def reset(self):
        path1 = os.path.join(cfg.DETECTION_PATH, "detection_result/")
        path2 = os.path.join(cfg.DETECTION_PATH, "ShowAtt/")
        for i in os.listdir(path1):
            path_file = os.path.join(path1, i)
            if os.path.isfile(path_file):
                os.remove(path_file)
            else:
                for f in os.listdir(path_file):
                    path_file2 = os.path.join(path_file, f)
                    if os.path.isfile(path_file2):
                        os.remove(path_file2)
        for i in os.listdir(path2):
            path_file = os.path.join(path2, i)
            if os.path.isfile(path_file):
                os.remove(path_file)
            else:
                for f in os.listdir(path_file):
                    path_file2 = os.path.join(path_file, f)
                    if os.path.isfile(path_file2):
                        os.remove(path_file2)

    def study(self):
        # Parameter study
        y = []
        for i in [0.08, 0.07, 0.06]:
            t = time.time()
            evaluator = COCOAPIEvaluator(model_type='YOLOv3',
                                         data_dir=cfg.DATA_PATH,
                                         img_size=cfg.VAL["TEST_IMG_SIZE"],
                                         confthre=i,
                                         nmsthre=cfg.VAL["NMS_THRESH"])
            _, r = evaluator.evaluate(self.__model)
            y.append('{}  {}  {}'.format(i, r, time.time() - t))
            np.savetxt('study.txt', y, fmt='%s')  # y = np.loadtxt('study.txt')

    def val(self):
        global logger
        logger.info("***********Start Evaluation****************")
        start = time.time()

        evaluator = COCOAPIEvaluator(model_type='YOLOv4',
                                     data_dir=cfg.DATA_PATH,
                                     img_size=cfg.VAL["TEST_IMG_SIZE"],
                                     confthre=cfg.VAL["CONF_THRESH"],
                                     nmsthre=cfg.VAL["NMS_THRESH"])
        ap50_95, ap50 = evaluator.evaluate(self.__model)
        logger.info('ap50_95:{}|ap50:{}'.format(ap50_95, ap50))
        end = time.time()
        logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def Inference(self):
        global logger
        # clear cache
        self.reset()

        logger.info("***********Start Inference****************")
        imgs = os.listdir(self.__visiual)
        logger.info("images path: {}".format(self.__visiual))
        path = os.path.join(cfg.DETECTION_PATH, "detection_result")
        logger.info("saved images at: {}".format(path))
        inference_times = []
        for v in imgs:
            start_time = time.time()
            path = os.path.join(self.__visiual, v)
            img = cv2.imread(path)
            assert img is not None

            bboxes_prd = self.__evalter.get_bbox(img, v)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=img,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
                path = os.path.join(cfg.DETECTION_PATH,
                                    "detection_result/{}".format(v))
                cv2.imwrite(path, img)
            end_time = time.time()
            inference_times.append(end_time - start_time)
        inference_time = sum(inference_times) / len(inference_times)
        fps = 1.0 / inference_time
        logging.info("Inference_Time: {:.5f} s/image, FPS: {}".format(
            inference_time, fps))
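A minimal driver for this Evaluation class might look as follows; the weight path and image directory are placeholders, and logger is assumed to be set up at module level as the methods above expect:

if __name__ == "__main__":
    evaluation = Evaluation(gpu_id=0,
                            weight_path="weight/best.pt",   # placeholder
                            visiual="data/test_images",     # placeholder
                            heatmap=False)
    evaluation.val()        # COCO AP via COCOAPIEvaluator
    evaluation.Inference()  # draw and save detections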
Example #8
weight_path = 'weight/yolov4.weights'

train_anno_path = './data/train_annotation.txt'

train_dataset = BuildDataset(train_anno_path)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=0,
    shuffle=True,
    pin_memory=True,
)

# model
yolov4 = Build_Model(weight_path=weight_path).to(device)

optimizer = optim.SGD(
    yolov4.parameters(),
    lr=cfg.TRAIN["LR_INIT"],
    momentum=cfg.TRAIN["MOMENTUM"],
    weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
)

criterion = YoloV4Loss(
    anchors=cfg.MODEL["ANCHORS"],
    strides=cfg.MODEL["STRIDES"],
    iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
)

# The snippet is truncated here; the call is completed below from the
# CosineDecayLR pattern used in the Trainer examples. `epochs` is assumed
# to be defined alongside `batch_size` and `device` earlier in the script.
scheduler = cosine_lr_scheduler.CosineDecayLR(
    optimizer,
    T_max=epochs * len(train_dataloader),
    lr_init=cfg.TRAIN["LR_INIT"],
    lr_min=cfg.TRAIN["LR_END"],
    warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(train_dataloader),
)
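The fragment stops before the training loop; below is a minimal epoch sketch consistent with the Trainer examples later in this listing. The seven-tuple batch layout is taken from those examples, and epochs/device are assumed to be defined earlier in the script.

# Minimal training-loop sketch following the Trainer examples.
for epoch in range(epochs):
    for i, (imgs, label_sbbox, label_mbbox, label_lbbox,
            sbboxes, mbboxes, lbboxes) in enumerate(train_dataloader):
        scheduler.step(len(train_dataloader) * epoch + i)
        p, p_d = yolov4(imgs.to(device))
        loss, loss_giou, loss_conf, loss_cls = criterion(
            p, p_d, label_sbbox.to(device), label_mbbox.to(device),
            label_lbbox.to(device), sbboxes.to(device),
            mbboxes.to(device), lbboxes.to(device))
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()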
Example #9
class Trainer(object):
    def __init__(self, weight_path, resume, gpu_id, accumulate):
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = accumulate
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        print('train img size is {}'.format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True)
        self.yolov4 = Build_Model().to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(),
                                   lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"],
                                   weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader))

    def __load_model_weights(self, weight_path, resume):
        if resume:
            last_weight = os.path.join(
                os.path.split(weight_path)[0], "last.pt")
            chkpt = torch.load(last_weight, map_location=self.device)
            self.yolov4.load_state_dict(chkpt['model'])

            self.start_epoch = chkpt['epoch'] + 1
            if chkpt['optimizer'] is not None:
                self.optimizer.load_state_dict(chkpt['optimizer'])
                self.best_mAP = chkpt['best_mAP']
            del chkpt
        else:
            self.yolov4.load_darknet_weights(weight_path)

    # def __save_model_weights(self, epoch, mAP):
    #     if mAP > self.best_mAP:
    #         self.best_mAP = mAP
    #     best_weight = os.path.join(os.path.split(self.weight_path)[0], "best.pt")
    #     last_weight = os.path.join(os.path.split(self.weight_path)[0], "last.pt")
    #     chkpt = {'epoch': epoch,
    #              'best_mAP': self.best_mAP,
    #              'model': self.yolov3.state_dict(),
    #              'optimizer': self.optimizer.state_dict()}
    #     torch.save(chkpt, last_weight)
    #
    #     if self.best_mAP == mAP:
    #         torch.save(chkpt['model'], best_weight)
    #
    #     if epoch > 0 and epoch % 10 == 0:
    #         torch.save(chkpt, os.path.join(os.path.split(self.weight_path)[0], 'backup_epoch%g.pt'%epoch))
    #     del chkpt

    def __save_model_weights(self, epoch, loss):
        weight = os.path.join(
            os.path.split(self.weight_path)[0],
            "epoch{}, loss{}.pt".format(epoch, loss))
        chkpt = {
            'epoch': epoch,
            'model': self.yolov4.state_dict(),
            'optimizer': self.optimizer.state_dict()
        }
        torch.save(chkpt['model'], weight)

    def train(self):
        global writer  # a SummaryWriter instance created at module level
        logger.info(
            "Training start, img size is: {:d}, batch size is: {:d}, worker number is {:d}"
            .format(cfg.TRAIN["TRAIN_IMG_SIZE"], cfg.TRAIN["BATCH_SIZE"],
                    cfg.TRAIN["NUMBER_WORKERS"]))
        logger.info(self.yolov4)
        logger.info("Train datasets number is : {}".format(
            len(self.train_dataset)))

        self.yolov4, self.optimizer = amp.initialize(self.yolov4,
                                                     self.optimizer,
                                                     opt_level='O1',
                                                     verbosity=0)
        logger.info("        =======  start  training   ======     ")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.yolov4.train()

            mloss = torch.zeros(4)
            logger.info("===Epoch:[{}/{}]===".format(epoch, self.epochs))
            for i, (imgs, label_sbbox, label_mbbox, label_lbbox, sbboxes,
                    mbboxes, lbboxes) in enumerate(self.train_dataloader):

                self.scheduler.step(len(self.train_dataloader) * epoch + i)

                imgs = imgs.to(self.device)
                label_sbbox = label_sbbox.to(self.device)
                label_mbbox = label_mbbox.to(self.device)
                label_lbbox = label_lbbox.to(self.device)
                sbboxes = sbboxes.to(self.device)
                mbboxes = mbboxes.to(self.device)
                lbboxes = lbboxes.to(self.device)

                p, p_d = self.yolov4(imgs)

                loss, loss_giou, loss_conf, loss_cls = self.criterion(
                    p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes,
                    mbboxes, lbboxes)

                with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
                # Accumulate gradient for x batches before optimizing
                if i % self.accumulate == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()

                # Update running mean of tracked metrics
                loss_items = torch.tensor(
                    [loss_giou, loss_conf, loss_cls, loss])
                mloss = (mloss * i + loss_items) / (i + 1)

                # Print batch results
                if i % 10 == 0:

                    logger.info(
                        "  === Epoch:[{:3}/{}],step:[{:3}/{}],img_size:[{:3}],total_loss:{:.4f}|loss_giou:{:.4f}|loss_conf:{:.4f}|loss_cls:{:.4f}|lr:{:.4f}"
                        .format(epoch, self.epochs, i,
                                len(self.train_dataloader) - 1,
                                self.train_dataset.img_size, mloss[3],
                                mloss[0], mloss[1], mloss[2],
                                self.optimizer.param_groups[0]['lr']))
                    writer.add_scalar('train_loss', mloss[3],
                                      len(self.train_dataloader) * epoch + i)
                # multi-scale training (320-608 pixels) every 10 batches
                if self.multi_scale_train and (i + 1) % 10 == 0:
                    # self.train_dataset.img_size = random.choice(range(5, 15)) * 32 # for imgsize 320
                    # self.train_dataset.img_size = random.choice(range(12, 22)) * 32  # for imgsize 544
                    self.train_dataset.img_size = random.choice(range(10,
                                                                      20)) * 32

            if epoch >= 0 and cfg.TRAIN["DATA_TYPE"] == 'VOC':
                self.__save_model_weights(epoch, mloss[3])
                print('save weights done')
                # mAP = 0
                # if epoch >= 0:
                #     logger.info("===== Validate =====".format(epoch, self.epochs))
                #     with torch.no_grad():
                #         APs = Evaluator(self.yolov3,epoch,showatt=False).APs_voc()
                #         for i in APs:
                #             print("{} --> mAP : {}".format(i, APs[i]))
                #             mAP += APs[i]
                #         mAP = mAP / self.train_dataset.num_classes
                #         writer.add_scalar('mAP', mAP, epoch)
                #     logger.info("  ===test mAP:{:.3f}".format(mAP))
                # writer.add_scalar('mAP', mAP, epoch)
            elif epoch >= 0 and cfg.TRAIN["DATA_TYPE"] == 'COCO':
                evaluator = COCOAPIEvaluator(model_type='YOLOv4',
                                             data_dir=cfg.DATA_PATH,
                                             img_size=cfg.VAL["TEST_IMG_SIZE"],
                                             confthre=0.08,
                                             nmsthre=cfg.VAL["NMS_THRESH"])
                ap50_95, ap50 = evaluator.evaluate(self.yolov4)
                # logger.info('ap50_95:{}|ap50:{}'.format(ap50_95, ap50))
                writer.add_scalar('val/COCOAP50', ap50, epoch)
                writer.add_scalar('val/COCOAP50_95', ap50_95, epoch)
                self.__save_model_weights(epoch, ap50)
            else:
                raise ValueError("dataset must be VOC or COCO")
            end = time.time()
            logger.info("  ===cost time:{:.4f}s".format(end - start))
        logger.info(
            "=====Training Finished.   best_test_mAP:{:.3f}%====".format(
                self.best_mAP))
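A minimal driver for this Trainer; the darknet weight path is a placeholder, and logger and writer are assumed to be configured at module level as train() expects:

if __name__ == "__main__":
    trainer = Trainer(weight_path="weight/yolov4.weights",  # placeholder
                      resume=False,
                      gpu_id=0,
                      accumulate=2)
    trainer.train()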
Example #10
class Evaluation(object):
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def val(self):
        global writer, logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0

            with torch.no_grad():
                _, _, APs = Evaluator(self.__model, self.epoch,
                                      showatt=False).APs_voc(
                                          self.__multi_scale_val,
                                          self.__flip_val)

                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
            writer.add_scalar('mAP', mAP, self.epoch)
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global writer, logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img,
                                    boxes=boxes,
                                    labels=class_inds,
                                    probs=scores,
                                    class_labels=self.__classes)
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
            end = time.perf_counter()
            times = end - start
            FPS = len(imgs) / times
            logger.info('FPS:{}'.format(FPS))
            logger.info("  ===detection cost time:{:.4f}s".format(times))
Example #11
class Detection(object):
    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data",label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in self.__file_list:
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
            # curr_time = timer()
            # accum_time += curr_time - prev_time
            cv2.imwrite(
                os.path.join(self.__output_dir, os.path.basename(image_path)),
                frame)
        print("FPS: {:.04f}".format(
            len(self.__file_list) / self.__evalter.inference_time))
        
    def get_first_10_imgs(self):
        # NOTE: despite the name, this collects up to 100 image paths
        image_paths = []
        with open(self.__label_path) as fh:
            for line in fh.readlines():
                line = line.rstrip().split()
                if len(image_paths) < 100:
                    if len(line) > 1:
                        image_paths.append(os.path.join("/data", line[0]))
                else:
                    break
        print(image_paths)
        self.__file_list = image_paths
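A minimal driver for this Detection class; the paths are placeholders, and the label file is expected under /data as the constructor prepends:

if __name__ == "__main__":
    detection = Detection(label_path="train_annotation.txt",  # placeholder
                          weight_path="weight/best.pt",       # placeholder
                          output_dir="detection_result")      # placeholder
    detection.detect_demo()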
Example #12
    def __init__(self,
                 weight_path=None,
                 resume: bool = False,
                 gpu_id: int = 0,
                 accumulate: bool = True,
                 fp_16: bool = False):

        # PYTHON HASH SEED
        init_seeds(0)

        # device
        self.fp_16: bool = fp_16
        self.device: torch.device = gpu.select_device(gpu_id)
        self.start_epoch: int = 0
        self.best_mAP: float = 0.0  # not sure why this is necessary...
        self.accumulate: bool = accumulate
        self.weight_path: Path = weight_path
        self.multi_scale_train: bool = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        # Show attention modification?
        self.showatt = cfg.TRAIN["showatt"]

        # Multi-scale training status
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print(f"train img size is {cfg.TRAIN['TRAIN_IMG_SIZE']}")

        # Build Dataset using helper function.
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)
Example #13
def transform_to_onnx(weight_file, batch_size, n_classes, IN_IMAGE_H,
                      IN_IMAGE_W):
    device = gpu.select_device(id=0)

    model = Build_Model().to(device)
    #model = Build_Model(weight_path=weight_file, resume=False).to(device)

    pretrained_dict = torch.load(weight_file,
                                 map_location=device)  #torch.device('cuda')
    model.load_state_dict(pretrained_dict)

    evaluator = Evaluator(model, showatt=False)

    input_names = ["input"]
    output_names = ['boxes', 'confs']

    dynamic = False
    if batch_size <= 0:
        dynamic = True

    if dynamic:
        x = torch.randn((1, 3, IN_IMAGE_H, IN_IMAGE_W), requires_grad=True)
        x = x.to(device)  # keep the dummy input on the same device as the model
        onnx_file_name = "yolov4_-1_3_{}_{}_dynamic.onnx".format(
            IN_IMAGE_H, IN_IMAGE_W)
        dynamic_axes = {
            "input": {
                0: "batch_size"
            },
            "boxes": {
                0: "batch_size"
            },
            "confs": {
                0: "batch_size"
            }
        }
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        print('Onnx model exporting done')
        return onnx_file_name, evaluator  # match the static branch's return shape
    else:
        x = torch.randn((batch_size, 3, IN_IMAGE_H, IN_IMAGE_W),
                        requires_grad=True)
        x = x.to(device)
        onnx_file_name = "yolov4_{}_3_{}_{}_static.onnx".format(
            batch_size, IN_IMAGE_H, IN_IMAGE_W)
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=None)

        print('Onnx model exporting done')
        return onnx_file_name, evaluator
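A quick sanity check of the exported file, assuming the onnxruntime package is installed; the weight path and the 608x608 input size are placeholders:

import numpy as np
import onnxruntime as ort

# Hypothetical verification sketch; weight path and input size are placeholders.
onnx_file, _ = transform_to_onnx("weight/best.pt", batch_size=1,
                                 n_classes=80, IN_IMAGE_H=608, IN_IMAGE_W=608)
session = ort.InferenceSession(onnx_file, providers=["CPUExecutionProvider"])
dummy = np.random.randn(1, 3, 608, 608).astype(np.float32)
boxes, confs = session.run(["boxes", "confs"], {"input": dummy})
print(boxes.shape, confs.shape)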
Example #14
import os

import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt

from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import config.yolov4_config as cfg
from utils.visualize import *
from utils.torch_utils import *

# GPU device
# Check GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print("Device: ", device)

model = Build_Model().to(device)

w_path = 'model.pt'
# chkpt = torch.load(os.path.join(w_path))
chkpt = torch.load(w_path, map_location=torch.device('cpu'))

model.load_state_dict(chkpt['model'])

classes = ['1', '2', '3', '4', '5', '6']

test_path = './data/test/'
pred_path = './data/pred/'
imgs_path = os.listdir(test_path)
n = len(imgs_path)
ratio = 0.1
os.makedirs(pred_path, exist_ok=True)
evaluator = Evaluator(model, showatt=False)
for i, img_path in enumerate(imgs_path):
    # The source snippet is truncated here; this is an assumed minimal
    # continuation: run the detector on each test image and save the result.
    img = cv2.imread(os.path.join(test_path, img_path))
    bboxes_prd = evaluator.get_bbox(img, img_path)
    if bboxes_prd.shape[0] != 0:
        visualize_boxes(image=img,
                        boxes=bboxes_prd[..., :4],
                        labels=bboxes_prd[..., 5].astype(np.int32),
                        probs=bboxes_prd[..., 4],
                        class_labels=classes)
    cv2.imwrite(os.path.join(pred_path, img_path), img)
Example #15
class LAMR_Tester(object):
    id_map = ["pedestrian"]

    def __init__(self, log_dir, test_images):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.dataset = Naive_Test_Dataset(test_images)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=cfg.VAL.BATCH_SIZE,
            shuffle=False,
            pin_memory=True,
            num_workers=cfg.VAL.NUMBER_WORKERS)
        self.__load_best_weights()

    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt)
        del chkpt

    def test(self):
        logger.info(self.yolov4)
        self.yolov4.eval()
        Tensor = (torch.cuda.FloatTensor
                  if torch.cuda.is_available() else torch.FloatTensor)
        results_path = os.path.join("/data", "mock_detections",
                                    os.path.basename(self.log_dir), "day",
                                    "val")
        if not os.path.exists(results_path):
            os.makedirs(results_path)
        for i, (img_path, img, info_img) in tqdm(enumerate(self.dataloader),
                                                 desc="Test to ECP... ",
                                                 unit="imgs",
                                                 total=len(self.dataloader)):
            info_img = [float(info) for info in info_img]
            data_dict = {
                "tags": [],
                "imageheight": int(info_img[0]),
                "imagewidth": int(info_img[1]),
                "children": [],
                "identity": "frame"
            }
            city_name = os.path.basename(os.path.dirname(img_path[0]))
            os.makedirs(os.path.join(results_path, city_name), exist_ok=True)
            result_json_path = os.path.join(
                results_path,
                os.path.basename(img_path[0]).replace("png", "json"))

            with torch.no_grad():
                img = Variable(img.type(Tensor))
                _, outputs = self.yolov4(img)
                outputs = outputs.unsqueeze(0)
                outputs = postprocess(outputs, len(cfg.DATASET.CLASSES), 0.01,
                                      cfg.VAL.NMS_THRESH)
                if outputs[0] is None:
                    with open(result_json_path, "w") as json_fh:
                        json.dump(data_dict, json_fh, indent=4)
                    continue
                outputs = outputs[0].cpu().data

            for output in outputs:
                x1 = float(output[0])
                y1 = float(output[1])
                x2 = float(output[2])
                y2 = float(output[3])
                box = yolobox2label((y1, x1, y2, x2), info_img)
                data_dict["children"].append({
                    "tags": [],
                    "children": [],
                    "identity":
                    self.id_map[int(output[6])],
                    "score":
                    float(output[4]) * float(output[5]),
                    "y0":
                    float(box[0]),
                    "x0":
                    float(box[1]),
                    "y1":
                    float(box[2]),
                    "x1":
                    float(box[3])
                })  # ECP Formats
            with open(result_json_path, "w") as json_fh:
                json.dump(data_dict, json_fh, indent=4)
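For reference, each per-frame JSON written above has roughly the following shape (all values are illustrative, not real results):

{
    "tags": [],
    "imageheight": 1024,
    "imagewidth": 1920,
    "children": [
        {
            "tags": [],
            "children": [],
            "identity": "pedestrian",
            "score": 0.87,
            "y0": 312.0,
            "x0": 540.5,
            "y1": 498.2,
            "x1": 601.0
        }
    ],
    "identity": "frame"
}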
Example #16
class Trainer(object):
    def __init__(self,
                 weight_path=None,
                 resume=False,
                 gpu_id=0,
                 accumulate=1,
                 fp_16=False):
        init_seeds(0)
        self.fp_16 = fp_16
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.0
        self.accumulate = accumulate
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.showatt = cfg.TRAIN["showatt"]
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print("train img size is {}".format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)

    def __load_resume_weights(self, weight_path):

        last_weight = os.path.join(os.path.split(weight_path)[0], "last.pt")
        chkpt = torch.load(last_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt["model"])

        self.start_epoch = chkpt["epoch"] + 1
        if chkpt["optimizer"] is not None:
            self.optimizer.load_state_dict(chkpt["optimizer"])
            self.best_mAP = chkpt["best_mAP"]
        del chkpt

    def __save_model_weights(self, epoch, mAP):
        if mAP > self.best_mAP:
            self.best_mAP = mAP
        best_weight = os.path.join(
            os.path.split(self.weight_path)[0], "best.pt")
        last_weight = os.path.join(
            os.path.split(self.weight_path)[0], "last.pt")
        chkpt = {
            "epoch": epoch,
            "best_mAP": self.best_mAP,
            "model": self.yolov4.state_dict(),
            "optimizer": self.optimizer.state_dict(),
        }
        torch.save(chkpt, last_weight)

        if self.best_mAP == mAP:
            torch.save(chkpt["model"], best_weight)

        if epoch > 0 and epoch % 10 == 0:
            torch.save(
                chkpt,
                os.path.join(
                    os.path.split(self.weight_path)[0],
                    "backup_epoch%g.pt" % epoch,
                ),
            )
        del chkpt

    def train(self):
        global writer
        logger.info(
            "Training start, img size is: {:d}, batch size is: {:d}, worker number is {:d}"
            .format(
                cfg.TRAIN["TRAIN_IMG_SIZE"],
                cfg.TRAIN["BATCH_SIZE"],
                cfg.TRAIN["NUMBER_WORKERS"],
            ))
        logger.info(self.yolov4)
        logger.info("Train datasets number is : {}".format(
            len(self.train_dataset)))

        def is_valid_number(x):
            return not (math.isnan(x) or math.isinf(x) or x > 1e4)

        if self.fp_16:
            self.yolov4, self.optimizer = amp.initialize(self.yolov4,
                                                         self.optimizer,
                                                         opt_level="O1",
                                                         verbosity=0)
        logger.info("        =======  start  training   ======     ")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.yolov4.train()

            mloss = torch.zeros(4)
            logger.info("===Epoch:[{}/{}]===".format(epoch, self.epochs))
            for i, (
                    imgs,
                    label_sbbox,
                    label_mbbox,
                    label_lbbox,
                    sbboxes,
                    mbboxes,
                    lbboxes,
            ) in enumerate(self.train_dataloader):
                self.scheduler.step(
                    len(self.train_dataloader) /
                    (cfg.TRAIN["BATCH_SIZE"]) * epoch + i)

                imgs = imgs.to(self.device)
                label_sbbox = label_sbbox.to(self.device)
                label_mbbox = label_mbbox.to(self.device)
                label_lbbox = label_lbbox.to(self.device)
                sbboxes = sbboxes.to(self.device)
                mbboxes = mbboxes.to(self.device)
                lbboxes = lbboxes.to(self.device)

                p, p_d = self.yolov4(imgs)

                loss, loss_ciou, loss_conf, loss_cls = self.criterion(
                    p,
                    p_d,
                    label_sbbox,
                    label_mbbox,
                    label_lbbox,
                    sbboxes,
                    mbboxes,
                    lbboxes,
                )
                if is_valid_number(loss.item()):
                    if self.fp_16:
                        with amp.scale_loss(loss,
                                            self.optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                # Accumulate gradient for x batches before optimizing
                if i % self.accumulate == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()

                # Update running mean of tracked metrics
                loss_items = torch.tensor(
                    [loss_ciou, loss_conf, loss_cls, loss])
                mloss = (mloss * i + loss_items) / (i + 1)

                # Print batch results
                if i % 10 == 0:

                    logger.info(
                        "  === Epoch:[{:3}/{}],step:[{:3}/{}],img_size:[{:3}],total_loss:{:.4f}|loss_ciou:{:.4f}|loss_conf:{:.4f}|loss_cls:{:.4f}|lr:{:.4f}"
                        .format(
                            epoch,
                            self.epochs,
                            i,
                            len(self.train_dataloader) - 1,
                            self.train_dataset.img_size,
                            mloss[3],
                            mloss[0],
                            mloss[1],
                            mloss[2],
                            self.optimizer.param_groups[0]["lr"],
                        ))
                    writer.add_scalar(
                        "loss_ciou",
                        mloss[0],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "loss_conf",
                        mloss[1],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "loss_cls",
                        mloss[2],
                        len(self.train_dataloader) * epoch + i,
                    )
                    writer.add_scalar(
                        "train_loss",
                        mloss[3],
                        len(self.train_dataloader) * epoch + i,
                    )
                # multi-scale training (320-608 pixels) every 10 batches
                if self.multi_scale_train and (i + 1) % 10 == 0:
                    self.train_dataset.img_size = (
                        random.choice(range(10, 20)) * 32)

            if (cfg.TRAIN["DATA_TYPE"] == "VOC"
                    or cfg.TRAIN["DATA_TYPE"] == "Customer"):
                mAP = 0.0
                if epoch >= self.eval_epoch:
                    logger.info("===== Validate =====".format(
                        epoch, self.epochs))
                    logger.info("val img size is {}".format(
                        cfg.VAL["TEST_IMG_SIZE"]))
                    with torch.no_grad():
                        APs, inference_time = Evaluator(
                            self.yolov4, showatt=self.showatt).APs_voc()
                        for i in APs:
                            logger.info("{} --> mAP : {}".format(i, APs[i]))
                            mAP += APs[i]
                        mAP = mAP / self.train_dataset.num_classes
                        logger.info("mAP : {}".format(mAP))
                        logger.info(
                            "inference time: {:.2f} ms".format(inference_time))
                        writer.add_scalar("mAP", mAP, epoch)
                        self.__save_model_weights(epoch, mAP)
                        logger.info("save weights done")
                    logger.info("  ===test mAP:{:.3f}".format(mAP))
            elif epoch >= 0 and cfg.TRAIN["DATA_TYPE"] == "COCO":
                evaluator = COCOAPIEvaluator(
                    model_type="YOLOv4",
                    data_dir=cfg.DATA_PATH,
                    img_size=cfg.VAL["TEST_IMG_SIZE"],
                    confthre=0.08,
                    nmsthre=cfg.VAL["NMS_THRESH"],
                )
                ap50_95, ap50 = evaluator.evaluate(self.yolov4)
                logger.info("ap50_95:{}|ap50:{}".format(ap50_95, ap50))
                writer.add_scalar("val/COCOAP50", ap50, epoch)
                writer.add_scalar("val/COCOAP50_95", ap50_95, epoch)
                self.__save_model_weights(epoch, ap50)
                print("save weights done")
            end = time.time()
            logger.info("  ===cost time:{:.4f}s".format(end - start))
        logger.info(
            "=====Training Finished.   best_test_mAP:{:.3f}%====".format(
                self.best_mAP))
Example #17
class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 showatt=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__showatt = showatt
        self.__visiual = visiual
        self.__eval = eval
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt["model"])
        print("loading weight file is done")
        del chkpt

    def val(self):
        global logger
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0
            with torch.no_grad():
                APs, inference_time = Evaluator(self.__model,
                                                showatt=False).APs_voc(
                                                    self.__multi_scale_val,
                                                    self.__flip_val)
                for i in APs:
                    logger.info("{} --> mAP : {}".format(i, APs[i]))
                    mAP += APs[i]
                mAP = mAP / self.__num_class
                logger.info("mAP:{}".format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
            end = time.time()
            logger.info("  ===val cost time:{:.4f}s".format(end - start))

    def detection(self):
        global logger
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")
            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v, mode=self.__mode)
                if bboxes_prd.shape[0] != 0:
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(
                        image=img,
                        boxes=boxes,
                        labels=class_inds,
                        probs=scores,
                        class_labels=self.__classes,
                    )
                    path = os.path.join(cfg.PROJECT_PATH,
                                        "detection_result/{}".format(v))

                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
Example #18
class Detection(object):
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # TODO: these thresholds still need to be set properly
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def detect_demo(self):
        import cv2

        accum_time = 0
        for image_path in tqdm(self.__file_dict.keys()):
            # print(self.__file_dict[image_path])
            frame = cv2.imread(image_path)
            # prev_time = timer()
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame,
                                boxes=boxes,
                                labels=class_inds,
                                probs=scores,
                                class_labels=self.__classes)
                visualize_boxes_and_labels_on_image_array(
                    image=frame,
                    boxes=self.__file_dict[image_path][:, :4],
                    classes=self.__file_dict[image_path][:, 4],
                    scores=None,
                    line_thickness=10,
                    category_index=self.__classes)
            cv2.imwrite(
                os.path.join(self.__output_dir, os.path.basename(image_path)),
                frame)
        print("FPS: {:.04f}".format(1000 * len(self.__file_dict.keys()) /
                                    self.__evalter.inference_time))

    def get_first_10_imgs(self):
        image_paths = {}
        ####################################
        # for random select
        ####################################
        # random.seed(1)
        # lines = random.choices(fh.readlines(), k=10)

        # for line in lines:
        #     line = line.rstrip().split()
        #     if len(line) > 1:
        #         image_paths[os.path.join("/data", line[0])] = np.array(
        #             [list(map(int, i.split(","))) for i in line[1:]])
        #     else:
        #         break
        ####################################
        # for on demand plot
        ####################################
        with open(self.__label_path) as fh:
            lines = fh.readlines()
        imgs = [
            "images/0021023.png", "images/0020485.png", "images/0021042.png",
            "images/0021630.png", "images/0021729.png", "images/0021781.png"
        ]
        for line in lines:
            line = line.rstrip().split()
            if line[0] in imgs:
                image_paths[os.path.join("/data", line[0])] = np.array(
                    [list(map(int, i.split(","))) for i in line[1:]])
        #####################################
        self.__file_dict = image_paths
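The label file parsed above is plain text with one image per line: a relative path followed by whitespace-separated boxes, each box five comma-separated integers (the first four are box coordinates, presumably x1,y1,x2,y2, and the fifth is the class index). An illustrative line with made-up values:

images/0021023.png 128,64,311,402,0 415,88,530,390,0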
Example #19
class Trainer(object):
    _resume = False
    _fine_tune = False
    def __init__(self, log_dir, resume=False, fine_tune=False):
        init_seeds(0)
        self._fine_tune = fine_tune
        self._resume = resume
        if self._fine_tune:
            self.__prepare_fine_tune()
        self.device = gpu.select_device()
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = cfg.TRAIN.ACCUMULATE
        self.log_dir = log_dir
        self.weight_path = "yolov4.weights"
        self.multi_scale_train = cfg.TRAIN.MULTI_SCALE_TRAIN
        if self.multi_scale_train:
            print('Using multi scales training')
        else:
            print('train img size is {}'.format(cfg.TRAIN.TRAIN_IMG_SIZE))
        self.train_dataset = data.Build_Train_Dataset(anno_file=cfg.TRAIN.ANNO_FILE, anno_file_type="train", img_size=cfg.TRAIN.TRAIN_IMG_SIZE)

        self.epochs = cfg.TRAIN.YOLO_EPOCHS if cfg.MODEL.MODEL_TYPE == 'YOLOv4' else cfg.TRAIN.Mobilenet_YOLO_EPOCHS
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.ACCUMULATE,
                                           num_workers=cfg.TRAIN.NUMBER_WORKERS,
                                           shuffle=True, pin_memory=True)
        self.yolov4 = Build_Model(weight_path="yolov4.weights", resume=resume)

        self.yolov4 = self.yolov4.to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(), lr=cfg.TRAIN.LR_INIT,
                                   momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WEIGHT_DECAY)

        self.criterion = YoloV4Loss(anchors=cfg.MODEL.ANCHORS, strides=cfg.MODEL.STRIDES,
                                    iou_threshold_loss=cfg.TRAIN.IOU_THRESHOLD_LOSS)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                          T_max=self.epochs*len(self.train_dataloader),
                                                          lr_init=cfg.TRAIN.LR_INIT,
                                                          lr_min=cfg.TRAIN.LR_END,
                                                          warmup=cfg.TRAIN.WARMUP_EPOCHS*len(self.train_dataloader))
        if resume:
            self.__load_resume_weights()
        if self._fine_tune:
            self.__load_best_weights()
        
    def __prepare_fine_tune(self):
        cfg.defrost()
        cfg.TRAIN.ANNO_FILE = cfg.FINE_TUNE.ANNO_FILE
        cfg.TRAIN.YOLO_EPOCHS = cfg.FINE_TUNE.YOLO_EPOCHS
        cfg.TRAIN.LR_INIT = cfg.FINE_TUNE.LR_INIT
        cfg.TRAIN.LR_END = cfg.FINE_TUNE.LR_END
        cfg.TRAIN.WARMUP_EPOCHS = cfg.FINE_TUNE.WARMUP_EPOCHS
        cfg.freeze()
        
    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        # Back up the pre-fine-tune checkpoints before they get overwritten.
        shutil.copy2(best_weight,
                     best_weight.replace("best.pt", "best_before_fine_tune.pt"))
        shutil.copy2(last_weight,
                     last_weight.replace("last.pt", "last_before_fine_tune.pt"))
        best_chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(best_chkpt)
        self.best_mAP = 0  # reset so fine-tuning tracks its own best mAP
        del best_chkpt

        
    def __load_resume_weights(self):
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        chkpt = torch.load(last_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt['model'])

        self.start_epoch = chkpt['epoch'] + 1
        if chkpt['optimizer'] is not None:
            self.optimizer.load_state_dict(chkpt['optimizer'])
            self.best_mAP = chkpt['best_mAP']
        del chkpt

    def __save_model_weights(self, epoch, mAP):
        if mAP > self.best_mAP:
            self.best_mAP = mAP
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        chkpt = {'epoch': epoch,
                 'best_mAP': self.best_mAP,
                 'model': self.yolov4.module.state_dict() if torch.cuda.device_count()>1 else self.yolov4.state_dict(),
                 'optimizer': self.optimizer.state_dict()}
        torch.save(chkpt, last_weight)

        if self.best_mAP == mAP:
            torch.save(chkpt['model'], best_weight)

        if self._fine_tune and epoch % 5 == 0:
            torch.save(chkpt['model'],
                       os.path.join(self.log_dir, "checkpoints",
                                    'backup_fine_tune_epoch_{:02d}.pt'.format(epoch)))
        del chkpt

    def train(self):
        global writer
        logger.info("Training start,img size is: {:d},batchsize is: {:d}, subdivision: {:d}, worker number is {:d}".format(cfg.TRAIN.TRAIN_IMG_SIZE, cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.ACCUMULATE, cfg.TRAIN.NUMBER_WORKERS))
        logger.info(self.yolov4)
        n_train = len(self.train_dataset)
        n_step = n_train // (cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.ACCUMULATE) + 1
        logger.info("Train datasets number is : {}".format(n_train))
        evaluator = COCOAPIEvaluator(cfg=cfg,
                    img_size=cfg.VAL.TEST_IMG_SIZE,
                    confthre=cfg.VAL.CONF_THRESH,
                    nmsthre=cfg.VAL.NMS_THRESH)

        if torch.cuda.device_count() > 1:
            self.yolov4 = torch.nn.DataParallel(self.yolov4)
        logger.info("\n=============== start training ===============")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.yolov4.train()
            with tqdm(total=n_train, unit="imgs", desc=f'Epoch {epoch}/{self.epochs}', ncols=30) as pbar:
                for i, (imgs, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes) in enumerate(self.train_dataloader):

                    imgs = imgs.to(self.device)
                    label_sbbox = label_sbbox.to(self.device)
                    label_mbbox = label_mbbox.to(self.device)
                    label_lbbox = label_lbbox.to(self.device)
                    sbboxes = sbboxes.to(self.device)
                    mbboxes = mbboxes.to(self.device)
                    lbboxes = lbboxes.to(self.device)

                    p, p_d = self.yolov4(imgs)

                    loss, loss_ciou, loss_conf, loss_cls = self.criterion(p, p_d, label_sbbox, label_mbbox,
                                                    label_lbbox, sbboxes, mbboxes, lbboxes)

                    loss.backward()
                    # Accumulate gradient for x batches before optimizing
                    if i % self.accumulate == 0:
                        self.scheduler.step(n_step*epoch + i)
                        self.optimizer.step()
                        self.optimizer.zero_grad()

                    # Print batch results
                    if i % (5*self.accumulate) == 0:
                        logger.info("{:3}: total_loss:{:.4f} | loss_ciou:{:.4f} | loss_conf:{:.4f} | loss_cls:{:.4f} | lr:{:.6f}".format(
                            self.train_dataset.img_size, loss, loss_ciou, loss_conf, loss_cls, self.optimizer.param_groups[0]['lr']
                        ))
                        writer.add_scalar('train/loss_ciou', loss_ciou, n_step * epoch + i)
                        writer.add_scalar('train/loss_conf', loss_conf, n_step * epoch + i)
                        writer.add_scalar('train/loss_cls', loss_cls, n_step * epoch + i)
                        writer.add_scalar('train/train_loss', loss, n_step * epoch + i)
                        writer.add_scalar('train/lr', self.optimizer.param_groups[0]['lr'], n_step * epoch + i)
                    # multi-scale training (320-608 pixels): re-sample the
                    # image size every 5*accumulate batches
                    if self.multi_scale_train and (i+1) % (5*self.accumulate) == 0:
                        self.train_dataset.img_size = random.choice(range(10, 20)) * 32
                    pbar.update(imgs.shape[0])
                
            mAP = 0.
            coco_stat = evaluator.evaluate(self.yolov4)
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[0]))
            logger.info("Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[1]))            
            logger.info("Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[2]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}".format(coco_stat[3]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}".format(coco_stat[4]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}".format(coco_stat[5]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = {:.04f}".format(coco_stat[6]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = {:.04f}".format(coco_stat[7]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[8]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}".format(coco_stat[9]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}".format(coco_stat[10])) 
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}".format(coco_stat[11]))             
            writer.add_scalar('val/mAP_50_95',  coco_stat[0], epoch)
            writer.add_scalar('val/mAP_50',     coco_stat[1], epoch)
            writer.add_scalar('val/mAP_75',     coco_stat[2], epoch)
            writer.add_scalar('val/mAP_small',  coco_stat[3], epoch)
            writer.add_scalar('val/mAP_medium', coco_stat[4], epoch)
            writer.add_scalar('val/mAP_large',  coco_stat[5], epoch)
            writer.add_scalar('val/mAR_max_1',  coco_stat[6], epoch)
            writer.add_scalar('val/mAR_max_10', coco_stat[7], epoch)
            writer.add_scalar('val/mAR_max_100',coco_stat[8], epoch)
            writer.add_scalar('val/mAR_small',  coco_stat[9], epoch)
            writer.add_scalar('val/mAR_medium', coco_stat[10], epoch)
            writer.add_scalar('val/mAR_large',  coco_stat[11], epoch)

            self.__save_model_weights(epoch, coco_stat[0])
            logger.info('save weights done')
        
            end = time.time()
            logger.info("cost time:{:.4f}s".format(end - start))
        logger.info("=====Training Finished.   best_test_mAP:{:.3f}%====".format(self.best_mAP))
Example #20
class Detection(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 video_path=None,
                 output_dir=None,
                 ):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt

    def Video_detection(self):
        import cv2
        vid = cv2.VideoCapture(self.__video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        isOutput = bool(self.__output_dir)  # treat both None and "" as "no output"
        if isOutput:
            out = cv2.VideoWriter(self.__output_dir, video_FourCC, video_fps, video_size)
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        while True:
            return_value, frame = vid.read()
            if not return_value:  # end of stream or read failure
                break
            bboxes_prd = self.__evalter.get_bbox(frame)
            if bboxes_prd.shape[0] != 0:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]
                visualize_boxes(image=frame, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)

            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(frame, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(255, 0, 0), thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", frame)
            if isOutput:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        vid.release()
        if isOutput:
            out.release()
        cv2.destroyAllWindows()
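A usage sketch for the Detection class above; every path below is a hypothetical placeholder, and despite its name `output_dir` is treated as the output video file path by cv2.VideoWriter.

# Hypothetical usage; press 'q' in the "result" window to stop.
detector = Detection(
    gpu_id=0,
    weight_path="weights/best.pt",
    video_path="demo.mp4",
    output_dir="out.mp4",
)
detector.Video_detection()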
Example #21
class lightenYOLOv4(pl.LightningModule):
    def __init__(self, weight_path, resume, exp_name, accumulate=None):
        # precision=16 for fp16

        super().__init__()
        self.model = Build_Model(weight_path=weight_path, resume=resume)
        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.evaluator = Evaluator(self.model,
                                   showatt=False,
                                   exp_name=exp_name)
        self.evaluator.clear_predict_file()

    # how you want your model to do inference/predictions
    def forward(self, img):
        p, p_d = self.model(img)
        return p, p_d

    """
    def training_epoch_end(self,outputs):
        #  the function is called after every epoch is completed
        # calculating average loss
        avg_loss = 0
        avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
        # creating log dictionary
        result = pl.TrainResult()
        result.log('val/loss_epoch', avg_loss)
        return result
    """

    # the train loop INDEPENDENT of forward.
    def training_step(self, batch, batch_idx):
        img, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes, _ = batch

        p, p_d = self(img)
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(
            p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes,
            lbboxes)

        result = pl.TrainResult(minimize=loss)
        #result = pl.TrainResult(loss)
        #result.log('train_loss_ciou', loss_ciou)
        #result.log('train_loss_conf', loss_conf)
        #result.log('train_loss_cls', loss_cls)
        result.log('train_loss', loss, on_step=True, on_epoch=True)
        return result
        '''
        #https://www.learnopencv.com/tensorboard-with-pytorch-lightning/
        logs={"train_loss": loss,
            "train_loss_ciou":loss_ciou,
            "train_loss_conf":loss_conf,
            "train_loss_cls":loss_cls,}
        batch_dictionary={
            #REQUIRED: It ie required for us to return "loss"
            "loss": loss,
            #optional for batch logging purposes
            "log": logs,
        }
        return batch_dictionary

        '''

    def validation_epoch_end(self, outputs):
        APs = self.evaluator.calc_APs()
        self.evaluator.clear_predict_file()
        mAP = 0
        for i in APs:
            mAP += APs[i]
        mAP = mAP / self.model.getNC()
        result = pl.EvalResult()
        result.log('val/mAP_epoch', torch.Tensor([mAP]).cuda())
        #trainer.logger_connector.logged_metrics
        return result

    def validation_step(self, batch, batch_idx):
        img_batch, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes, img_name = batch

        for idx, img in tqdm(zip(img_name, img_batch)):
            # CHW -> HWC
            img = img.cpu().numpy().transpose(1, 2, 0)
            bboxes_prd = self.evaluator.get_bbox(img,
                                                 multi_test=False,
                                                 flip_test=False)
            self.evaluator.store_bbox(idx, bboxes_prd)
        '''
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(p, p_d, label_sbbox, label_mbbox,
                                                  label_lbbox, sbboxes, mbboxes, lbboxes)

        self.log('val_loss_ciou', loss_ciou)
        self.log('val_loss_conf', loss_conf)
        self.log('val_loss_cls', loss_cls)
        self.log('val_loss', loss)
        '''
        return 1

    def test_step(self, batch, batch_idx):
        img, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = batch

        p, p_d = self(img)
        loss, loss_ciou, loss_conf, loss_cls = self.criterion(
            p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes,
            lbboxes)
        return loss

    def configure_optimizers(self):
        optimizer = optim.SGD(self.model.parameters(),
                              lr=cfg.TRAIN["LR_INIT"],
                              momentum=cfg.TRAIN["MOMENTUM"],
                              weight_decay=cfg.TRAIN["WEIGHT_DECAY"])
        return optimizer
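The pl.TrainResult/pl.EvalResult API pins this example to an old PyTorch Lightning release (around 0.9); a minimal fit() sketch under that assumption, with hypothetical dataloaders:

# Hypothetical driver; assumes PyTorch Lightning ~0.9 and dataloaders
# (train_loader, val_loader) yielding the batch layout used above.
import pytorch_lightning as pl

model = lightenYOLOv4(weight_path="yolov4.weights", resume=False, exp_name="exp0")
trainer = pl.Trainer(gpus=1,
                     max_epochs=cfg.TRAIN["YOLO_EPOCHS"],
                     precision=16)  # fp16, as the constructor comment notes
trainer.fit(model, train_dataloader=train_loader, val_dataloaders=val_loader)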
Example #22
class Evaluation(object):
    def __init__(
            self,
            gpu_id=0,
            model1_path=None,
            model2_path=None,
            data_dir=None,
            # result_dir=None,
            mnist=False,
    ):
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__data_dir = data_dir
        print(self.__data_dir)
        self.__classes = cfg.Customer_DATA["CLASSES"]
        self.__mnist = mnist
        self.__model1 = Build_Model().to(self.__device)
        if mnist:
            self.__model2 = torch.load(model2_path).double().cuda()
        else:
            self.__model2 = torch.load(model2_path).cuda()

        self.__load_model_weights(model1_path)

        self.__evalter = Evaluator(self.__model1, showatt=False)
        # self.__result_dir = result_dir

    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model1.load_state_dict(chkpt)
        # print("loading weight file is done")
        del chkpt

    def detection(self):
        if os.path.isdir(self.__data_dir):
            imgs = os.listdir(self.__data_dir)
            print("***********Start Detection****************")
            for v in imgs:
                path = os.path.join(self.__data_dir, v)
                print("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None

                bboxes_prd = self.__evalter.get_bbox(img, v)
                points, max_l = get_points(img, bboxes_prd=bboxes_prd, model=self.__model2, mnist=self.__mnist)
                if points is None:
                    return
                # print(points)
                matrix_calculator = calculator(self.__classes)
                matrix_calculator.get_from_points(points, max_l)
                print(matrix_calculator())
        else:
            raise NotADirectoryError("The data directory does not exist: {}".format(self.__data_dir))
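A usage sketch for the Evaluation class above; all paths are hypothetical placeholders. Note that model2_path must point at a fully pickled model (it is passed straight to torch.load and used directly), while model1_path is a state_dict checkpoint for the YOLO detector.

# Hypothetical usage under the assumptions above.
evaluation = Evaluation(
    gpu_id=0,
    model1_path="weights/detector.pt",    # state_dict for Build_Model
    model2_path="weights/classifier.pt",  # full pickled secondary model
    data_dir="/data/val_images",
    mnist=False,
)
evaluation.detection()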