Example 1
class Tester(object):
    def __init__(self, log_dir):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.__load_best_weights()

    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt)
        del chkpt

    def test(self):
        logger.info(self.yolov4)
        evaluator = COCOAPIEvaluator(cfg=cfg,
                                     img_size=cfg.VAL.TEST_IMG_SIZE,
                                     confthre=cfg.VAL.CONF_THRESH,
                                     nmsthre=cfg.VAL.NMS_THRESH)
        logger.info("\n===============  start  testing   ===============")
        start = time.time()
        coco_stat = evaluator.evaluate(self.yolov4)
        end = time.time()

        logger.info(
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}"
            .format(coco_stat[0]))
        logger.info(
            "Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = {:.04f}"
            .format(coco_stat[1]))
        logger.info(
            "Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = {:.04f}"
            .format(coco_stat[2]))
        logger.info(
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}"
            .format(coco_stat[3]))
        logger.info(
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}"
            .format(coco_stat[4]))
        logger.info(
            "Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}"
            .format(coco_stat[5]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = {:.04f}"
            .format(coco_stat[6]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = {:.04f}"
            .format(coco_stat[7]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}"
            .format(coco_stat[8]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}"
            .format(coco_stat[9]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}"
            .format(coco_stat[10]))
        logger.info(
            "Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}"
            .format(coco_stat[11]))
        logger.info("cost time:{:.4f}s".format(end - start))
        logger.info("FPS:{:.4f}".format(
            len(evaluator.dataloader) / (end - start)))
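
A minimal driver sketch for this example. It assumes `cfg`, `logger`, and the project imports used above (`Build_Model`, `COCOAPIEvaluator`, `gpu`, `init_seeds`) come from the surrounding module; the log directory is hypothetical and must contain checkpoints/best.pt from a prior training run.

if __name__ == "__main__":
    # "runs/yolov4_exp0" is an illustrative path, not one from the source.
    tester = Tester(log_dir="runs/yolov4_exp0")
    tester.test()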
Example 2
class Trainer(object):
    _resume = False
    _fine_tune = False
    def __init__(self, log_dir, resume=False, fine_tune=False):
        init_seeds(0)
        self._fine_tune = fine_tune
        self._resume = resume
        if self._fine_tune:
            self.__prepare_fine_tune()
        self.device = gpu.select_device()
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = cfg.TRAIN.ACCUMULATE
        self.log_dir = log_dir
        self.weight_path = "yolov4.weights"
        self.multi_scale_train = cfg.TRAIN.MULTI_SCALE_TRAIN
        if self.multi_scale_train:
            print('Using multi-scale training')
        else:
            print('train img size is {}'.format(cfg.TRAIN.TRAIN_IMG_SIZE))
        self.train_dataset = data.Build_Train_Dataset(anno_file=cfg.TRAIN.ANNO_FILE,
                                                      anno_file_type="train",
                                                      img_size=cfg.TRAIN.TRAIN_IMG_SIZE)

        self.epochs = cfg.TRAIN.YOLO_EPOCHS if cfg.MODEL.MODEL_TYPE == 'YOLOv4' else cfg.TRAIN.Mobilenet_YOLO_EPOCHS
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.ACCUMULATE,
                                           num_workers=cfg.TRAIN.NUMBER_WORKERS,
                                           shuffle=True, pin_memory=True)
        self.yolov4 = Build_Model(weight_path="yolov4.weights", resume=resume)

        self.yolov4 = self.yolov4.to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(), lr=cfg.TRAIN.LR_INIT,
                                   momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WEIGHT_DECAY)

        self.criterion = YoloV4Loss(anchors=cfg.MODEL.ANCHORS, strides=cfg.MODEL.STRIDES,
                                    iou_threshold_loss=cfg.TRAIN.IOU_THRESHOLD_LOSS)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                           T_max=self.epochs * len(self.train_dataloader),
                                                           lr_init=cfg.TRAIN.LR_INIT,
                                                           lr_min=cfg.TRAIN.LR_END,
                                                           warmup=cfg.TRAIN.WARMUP_EPOCHS * len(self.train_dataloader))
        if resume: self.__load_resume_weights()
        if self._fine_tune: self.__load_best_weights()
        
    def __prepare_fine_tune(self):
        cfg.defrost()
        cfg.TRAIN.ANNO_FILE = cfg.FINE_TUNE.ANNO_FILE
        cfg.TRAIN.YOLO_EPOCHS = cfg.FINE_TUNE.YOLO_EPOCHS
        cfg.TRAIN.LR_INIT = cfg.FINE_TUNE.LR_INIT
        cfg.TRAIN.LR_END = cfg.FINE_TUNE.LR_END
        cfg.TRAIN.WARMUP_EPOCHS = cfg.FINE_TUNE.WARMUP_EPOCHS
        cfg.freeze()
        
    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        # Back up both checkpoints before fine-tuning overwrites them.
        shutil.copy2(best_weight,
                     best_weight.replace("best.pt", "best_before_fine_tune.pt"))
        shutil.copy2(last_weight,
                     last_weight.replace("last.pt", "last_before_fine_tune.pt"))
        best_chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(best_chkpt)
        # Reset so fine-tuning tracks its own best checkpoint.
        self.best_mAP = 0.
        del best_chkpt

        
    def __load_resume_weights(self):
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        chkpt = torch.load(last_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt['model'])

        self.start_epoch = chkpt['epoch'] + 1
        if chkpt['optimizer'] is not None:
            self.optimizer.load_state_dict(chkpt['optimizer'])
            self.best_mAP = chkpt['best_mAP']
        del chkpt

    def __save_model_weights(self, epoch, mAP):
        if mAP > self.best_mAP:
            self.best_mAP = mAP
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        last_weight = os.path.join(self.log_dir, "checkpoints", "last.pt")
        chkpt = {'epoch': epoch,
                 'best_mAP': self.best_mAP,
                 'model': self.yolov4.module.state_dict() if torch.cuda.device_count()>1 else self.yolov4.state_dict(),
                 'optimizer': self.optimizer.state_dict()}
        torch.save(chkpt, last_weight)

        if self.best_mAP == mAP:
            torch.save(chkpt['model'], best_weight)

        if self._fine_tune and epoch % 5 == 0:
            torch.save(chkpt['model'],
                       os.path.join(self.log_dir, "checkpoints",
                                    'backup_fine_tune_epoch_{:02d}.pt'.format(epoch)))
        del chkpt

    def train(self):
        global writer
        logger.info("Training start,img size is: {:d},batchsize is: {:d}, subdivision: {:d}, worker number is {:d}".format(cfg.TRAIN.TRAIN_IMG_SIZE, cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.ACCUMULATE, cfg.TRAIN.NUMBER_WORKERS))
        logger.info(self.yolov4)
        n_train = len(self.train_dataset)
        n_step = n_train // (cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.ACCUMULATE) + 1
        logger.info("Train datasets number is : {}".format(n_train))
        evaluator = COCOAPIEvaluator(cfg=cfg,
                                     img_size=cfg.VAL.TEST_IMG_SIZE,
                                     confthre=cfg.VAL.CONF_THRESH,
                                     nmsthre=cfg.VAL.NMS_THRESH)

        if torch.cuda.device_count() > 1: self.yolov4 = torch.nn.DataParallel(self.yolov4)
        logger.info("\n===============  start  training   ===============")
        for epoch in range(self.start_epoch, self.epochs):
            start = time.time()
            self.yolov4.train()
            with tqdm(total=n_train, unit="imgs", desc=f'Epoch {epoch}/{self.epochs}', ncols=30) as pbar:
                for i, (imgs, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes) in enumerate(self.train_dataloader):

                    imgs = imgs.to(self.device)
                    label_sbbox = label_sbbox.to(self.device)
                    label_mbbox = label_mbbox.to(self.device)
                    label_lbbox = label_lbbox.to(self.device)
                    sbboxes = sbboxes.to(self.device)
                    mbboxes = mbboxes.to(self.device)
                    lbboxes = lbboxes.to(self.device)

                    p, p_d = self.yolov4(imgs)

                    loss, loss_ciou, loss_conf, loss_cls = self.criterion(p, p_d, label_sbbox, label_mbbox,
                                                    label_lbbox, sbboxes, mbboxes, lbboxes)

                    loss.backward()
                    # Step the optimizer once every `accumulate` batches so
                    # gradients accumulate to an effective batch size of
                    # cfg.TRAIN.BATCH_SIZE.
                    if (i + 1) % self.accumulate == 0:
                        self.scheduler.step(n_step * epoch + i)
                        self.optimizer.step()
                        self.optimizer.zero_grad()

                    # Print batch results
                    if i % (5*self.accumulate) == 0:
                        logger.info("{:3}: total_loss:{:.4f} | loss_ciou:{:.4f} | loss_conf:{:.4f} | loss_cls:{:.4f} | lr:{:.6f}".format(
                            self.train_dataset.img_size, loss, loss_ciou, loss_conf, loss_cls, self.optimizer.param_groups[0]['lr']
                        ))
                        writer.add_scalar('train/loss_ciou', loss_ciou, n_step * epoch + i)
                        writer.add_scalar('train/loss_conf', loss_conf, n_step * epoch + i)
                        writer.add_scalar('train/loss_cls', loss_cls, n_step * epoch + i)
                        writer.add_scalar('train/train_loss', loss, n_step * epoch + i)
                        writer.add_scalar('train/lr', self.optimizer.param_groups[0]['lr'], n_step * epoch + i)
                    # Multi-scale training: pick a new size in 320-608 px
                    # (multiples of 32) every 5*accumulate batches.
                    if self.multi_scale_train and (i + 1) % (5 * self.accumulate) == 0:
                        self.train_dataset.img_size = random.choice(range(10, 20)) * 32
                    pbar.update(imgs.shape[0])
                
            mAP = 0.
            coco_stat = evaluator.evaluate(self.yolov4)
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[0]))
            logger.info("Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[1]))            
            logger.info("Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[2]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}".format(coco_stat[3]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}".format(coco_stat[4]))            
            logger.info("Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}".format(coco_stat[5]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = {:.04f}".format(coco_stat[6]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = {:.04f}".format(coco_stat[7]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:.04f}".format(coco_stat[8]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:.04f}".format(coco_stat[9]))            
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:.04f}".format(coco_stat[10])) 
            logger.info("Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:.04f}".format(coco_stat[11]))             
            writer.add_scalar('val/mAP_50_95',  coco_stat[0], epoch)
            writer.add_scalar('val/mAP_50',     coco_stat[1], epoch)
            writer.add_scalar('val/mAP_75',     coco_stat[2], epoch)
            writer.add_scalar('val/mAP_small',  coco_stat[3], epoch)
            writer.add_scalar('val/mAP_medium', coco_stat[4], epoch)
            writer.add_scalar('val/mAP_large',  coco_stat[5], epoch)
            writer.add_scalar('val/mAR_max_1',  coco_stat[6], epoch)
            writer.add_scalar('val/mAR_max_10', coco_stat[7], epoch)
            writer.add_scalar('val/mAR_max_100',coco_stat[8], epoch)
            writer.add_scalar('val/mAR_small',  coco_stat[9], epoch)
            writer.add_scalar('val/mAR_medium', coco_stat[10], epoch)
            writer.add_scalar('val/mAR_large',  coco_stat[11], epoch)

            self.__save_model_weights(epoch, coco_stat[0])
            logger.info('save weights done')
        
            end = time.time()
            logger.info("cost time:{:.4f}s".format(end - start))
        logger.info("=====Training Finished.   best_test_mAP:{:.3f}%====".format(self.best_mAP))
Example 3
class LAMR_Tester(object):
    id_map = ["pedestrian"]

    def __init__(self, log_dir, test_images):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.dataset = Naive_Test_Dataset(test_images)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=cfg.VAL.BATCH_SIZE,
            shuffle=False,
            pin_memory=True,
            num_workers=cfg.VAL.NUMBER_WORKERS)
        self.__load_best_weights()

    def __load_best_weights(self):
        best_weight = os.path.join(self.log_dir, "checkpoints", "best.pt")
        chkpt = torch.load(best_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt)
        del chkpt

    def test(self):
        logger.info(self.yolov4)
        self.yolov4.eval()
        Tensor = (torch.cuda.FloatTensor
                  if torch.cuda.is_available() else torch.FloatTensor)
        results_path = os.path.join("/data", "mock_detections",
                                    os.path.basename(self.log_dir), "day",
                                    "val")
        if not os.path.exists(results_path):
            os.makedirs(results_path)
        for i, (img_path, img, info_img) in tqdm(enumerate(self.dataloader),
                                                 desc="Test to ECP... ",
                                                 unit="imgs",
                                                 total=len(self.dataloader)):
            info_img = [float(info) for info in info_img]
            data_dict = {
                "tags": [],
                "imageheight": int(info_img[0]),
                "imagewidth": int(info_img[1]),
                "children": [],
                "identity": "frame"
            }
            city_name = os.path.basename(os.path.dirname(img_path[0]))
            os.makedirs(os.path.join(results_path, city_name), exist_ok=True)
            # The ECP layout groups frames by city, so write the JSON into the
            # per-city directory created above.
            result_json_path = os.path.join(
                results_path, city_name,
                os.path.basename(img_path[0]).replace("png", "json"))

            with torch.no_grad():
                img = img.type(Tensor)
                _, outputs = self.yolov4(img)
                outputs = outputs.unsqueeze(0)
                outputs = postprocess(outputs, len(cfg.DATASET.CLASSES), 0.01,
                                      cfg.VAL.NMS_THRESH)
                if outputs[0] is None:
                    with open(result_json_path, "w") as json_fh:
                        json.dump(data_dict, json_fh, indent=4)
                    continue
                outputs = outputs[0].cpu().data

            for output in outputs:
                x1 = float(output[0])
                y1 = float(output[1])
                x2 = float(output[2])
                y2 = float(output[3])
                box = yolobox2label((y1, x1, y2, x2), info_img)
                data_dict["children"].append({
                    "tags": [],
                    "children": [],
                    "identity":
                    self.id_map[int(output[6])],
                    "score":
                    float(output[4]) * float(output[5]),
                    "y0":
                    float(box[0]),
                    "x0":
                    float(box[1]),
                    "y1":
                    float(box[2]),
                    "x1":
                    float(box[3])
                })  # ECP Formats
            with open(result_json_path, "w") as json_fh:
                json.dump(data_dict, json_fh, indent=4)
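
A hedged usage sketch. The exact form of `test_images` depends on what Naive_Test_Dataset accepts; a sorted list of per-city PNG paths is assumed here, and the ECP-style directory layout is illustrative only.

import glob

# One subdirectory per city, as the city_name extraction above expects.
test_images = sorted(glob.glob("/data/ECP/day/img/val/*/*.png"))
tester = LAMR_Tester(log_dir="runs/yolov4_exp0", test_images=test_images)
tester.test()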