    def __init__(
        self,
        cfg,
        use_weight_path,
        img_dir,
        gpu_id=0,
        img_size=488,
        annotation_dir='',
    ):
        self.cfg = cfg
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__visual_threshold = cfg.TEST["VISUAL_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]

        self.__img_dir = img_dir
        self.__annotation_dir = annotation_dir

        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Yolov3(cfg).to(self.__device)

        self.__load_model_weights(use_weight_path)

        self.__evalter = YoloEvaluator(self.__model,
                                       cfg,
                                       img_dir,
                                       mode="test",
                                       anno_dir=annotation_dir)

        self.__visual_imgs = 0

    def __init__(
            self,
            gpu_id=0,
            model1_path=None,
            model2_path=None,
            data_dir=None,
            # result_dir=None,
            mnist=False,
    ):
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__data_dir = data_dir
        print(self.__data_dir)
        self.__classes = cfg.Customer_DATA["CLASSES"]
        self.__mnist = mnist
        self.__model1 = Build_Model().to(self.__device)
        if mnist:
            self.__model2 = torch.load(model2_path).double().cuda()
        else:
            self.__model2 = torch.load(model2_path).cuda()

        self.__load_model_weights(model1_path)

        self.__evalter = Evaluator(self.__model1, showatt=False)
Example #3
    def __init__(self,
                 model,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.model = model
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        if self.model == 's':
            self.__model = Yolov3_S().to(self.__device)
        else:
            self.__model = Yolov3().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #4
    def __init__(self, log_dir):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.__load_best_weights()
Example #5
    def __init__(self, weight_path,
                 gpu_id,
                 visiual=False,
                 eval=False):
        self.device = gpu.select_device(gpu_id)
        self.weight_path = weight_path

        self.visiual = visiual
        self.eval = eval

        self.test_set = pascal.VOCSegmentation(base_size=cfg.TEST["BASE_SIZE"],
                                              crop_size=cfg.TEST["CROP_SIZE"],
                                              base_dir=cfg.DATA["TEST_DIR"],
                                              split='test')
        self.num_class = self.test_set.NUM_CLASSES
        self.test_loader = DataLoader(self.test_set,
                                     batch_size=cfg.TEST["BATCH_SIZE"],
                                     shuffle=False,
                                     num_workers=cfg.TEST["NUMBER_WORKERS"],
                                     pin_memory=False,
                                     drop_last=False)

        self.model = DeepLab(num_classes=self.num_class,
                             backbone="resnet",
                             output_stride=16,
                             sync_bn=False,
                             freeze_bn=False).to(self.device)

        self.evaluator = Evaluator(self.num_class)

        self.__load_model_weights(weight_path)
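
Here Evaluator(self.num_class) is a confusion-matrix metric accumulator for segmentation rather than a detection evaluator. A minimal sketch of the test loop these attributes imply, assuming the common DeepLab-style interface with reset()/add_batch()/Mean_Intersection_over_Union() and sample dicts keyed by "image"/"label" (those names are assumptions, not taken from the snippet above):

    def test(self):
        # Hedged sketch: accumulate a confusion matrix over the test set and
        # report mIoU; evaluator method names and batch keys are assumed.
        self.model.eval()
        self.evaluator.reset()
        with torch.no_grad():
            for sample in self.test_loader:
                image = sample["image"].to(self.device)
                target = sample["label"].cpu().numpy()
                pred = self.model(image).argmax(dim=1).cpu().numpy()
                self.evaluator.add_batch(target, pred)
        print("test mIoU: {:.4f}".format(self.evaluator.Mean_Intersection_over_Union()))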
Example #6
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch
Example #7
    def __init__(self, weight_path=None, gpu_id=0, visiual=None, eval=False):
        self.img_size = cfg.TEST["TEST_IMG_SIZE"]
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id, force_cpu=False)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]
        self.__classes = cfg.DATA["CLASSES"]

        self.__visiual = visiual
        self.__eval = eval
        self.__model = NPMMRDet().to(self.__device)  # Single GPU

        net_model = NPMMRDet()
        if torch.cuda.device_count() > 1:  # multi-GPU
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            net_model = torch.nn.DataParallel(net_model)  # multi-GPU
            self.__model = net_model.to(self.__device)
        elif torch.cuda.device_count() == 1:
            self.__model = net_model.to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #8
    def __init__(self, cfg_path,
                 weight_path,
                 resume,
                 gpu_id):
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.epochs = TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = TRAIN["MULTI_SCALE_TRAIN"]
        self.train_dataset = data.VocDataset(anno_file_type="train", img_size=TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=TRAIN["BATCH_SIZE"],
                                           num_workers=TRAIN["NUMBER_WORKERS"], shuffle=True)
        self.yolov3 = Darknet(cfg_path=cfg_path, img_size=TRAIN["TRAIN_IMG_SIZE"]).to(self.device)
        self.yolov3.apply(tools.weights_init_normal)

        self.optimizer = optim.SGD(self.yolov3.parameters(), lr=TRAIN["LR_INIT"], momentum=TRAIN["MOMENTUM"],
                                   weight_decay=TRAIN["WEIGHT_DECAY"])
        # self.optimizer = optim.Adam(self.yolov3.parameters(), lr = lr_init, weight_decay=0.9995)

        self.criterion = YoloV3Loss(anchors=MODEL["ANCHORS"], strides=MODEL["STRIDES"],
                                    iou_threshold_loss=TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[25, 40], gamma=0.1,
                                                  last_epoch=self.start_epoch - 1)

    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,
    ):
        # self.__num_class = cfg.VOC_DATA["NUM"]
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        # self.__classes = cfg.VOC_DATA["CLASSES"]
        self.__classes = cfg.Customer_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
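
All of the evaluator-style constructors above delegate checkpoint loading to a private __load_model_weights helper that is not shown. A minimal sketch of such a helper, assuming the checkpoint is either a bare state_dict or a dict wrapping one under a "model" key (that key name is an assumption):

    def __load_model_weights(self, weight_path):
        # Hedged sketch: load the checkpoint onto the selected device and copy
        # the weights into the already-built model; adjust to your checkpoint layout.
        print("loading weight file from : {}".format(weight_path))
        chkpt = torch.load(weight_path, map_location=self.__device)
        state_dict = chkpt["model"] if isinstance(chkpt, dict) and "model" in chkpt else chkpt
        self.__model.load_state_dict(state_dict)
        del chkpt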
Example #10
    def __init__(self, weight_path, resume, gpu_id, accumulate, fp_16):
        init_seeds(0)
        self.fp_16 = fp_16
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = accumulate
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.train_dataset = data.Build_Dataset(anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        print('train img size is {}'.format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN["BATCH_SIZE"],
                                           num_workers=cfg.TRAIN["NUMBER_WORKERS"],
                                           shuffle=True, pin_memory=True
                                           )
        self.yolov4 = Build_Model().to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(), lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"], weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = YoloV4Loss(anchors=cfg.MODEL["ANCHORS"], strides=cfg.MODEL["STRIDES"],
                                    iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                          T_max=self.epochs*len(self.train_dataloader),
                                                          lr_init=cfg.TRAIN["LR_INIT"],
                                                          lr_min=cfg.TRAIN["LR_END"],
                                                          warmup=cfg.TRAIN["WARMUP_EPOCHS"]*len(self.train_dataloader))
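
Note that T_max and warmup are passed in iterations, not epochs, so the matching training loop would step the scheduler once per batch with a global iteration index. A hedged sketch of that loop (the CosineDecayLR.step(iteration) signature and the elided loss computation are assumptions, not shown in the snippet):

    def train(self):
        # Hedged sketch: per-iteration LR stepping to match
        # T_max = epochs * len(train_dataloader); details are assumed.
        n_iter_per_epoch = len(self.train_dataloader)
        for epoch in range(self.start_epoch, self.epochs):
            self.yolov4.train()
            for i, batch in enumerate(self.train_dataloader):
                self.scheduler.step(epoch * n_iter_per_epoch + i)  # assumed step(iteration) signature
                # ... forward pass, self.criterion(...), loss.backward() ...
                self.optimizer.step()
                self.optimizer.zero_grad()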
Example #11
    def __init__(self,
                 weight_path=None,
                 resume=False,
                 gpu_id=0,
                 accumulate=1,
                 fp_16=False):
        init_seeds(0)
        self.fp_16 = fp_16
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.0
        self.accumulate = accumulate
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.showatt = cfg.TRAIN["showatt"]
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print("train img size is {}".format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)
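
When resume is set, __load_resume_weights restores the optimizer state and epoch counters as well as the model weights. A plausible sketch, assuming a "last.pt" checkpoint stored next to weight_path with "epoch", "best_mAP", "model", and "optimizer" keys, and an os import at module level (all of these are assumptions):

    def __load_resume_weights(self, weight_path):
        # Hedged sketch: resume from a "last" checkpoint; key names are assumed.
        last_weight = os.path.join(os.path.split(weight_path)[0], "last.pt")
        chkpt = torch.load(last_weight, map_location=self.device)
        self.yolov4.load_state_dict(chkpt["model"])
        self.start_epoch = chkpt["epoch"] + 1
        if chkpt.get("optimizer") is not None:
            self.optimizer.load_state_dict(chkpt["optimizer"])
            self.best_mAP = chkpt["best_mAP"]
        del chkpt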
Example #12
    def __init__(self, weight_path, resume, gpu_id, vis, mode=None):
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.epochs = cfg.epoch = 100
        self.weight_path = weight_path
        self.resume = resume
        self.mode = mode
        self.multi_scale_train = cfg.MULTI_SCALE_TRAIN
        print('Loading Datasets...')
        self.train_dataset = PASCALVOC(img_size=cfg.img_size,
                                       root=cfg.root,
                                       image_sets=cfg.train_sets,
                                       phase='trainval',
                                       mean=cfg.means,
                                       std=cfg.std)
        self.val_dataset = PASCALVOC(img_size=cfg.img_size,
                                     root=cfg.root,
                                     image_sets=cfg.test_sets,
                                     phase='test',
                                     mean=cfg.means,
                                     std=cfg.std)
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.batch_size,
                                           num_workers=cfg.workers,
                                           collate_fn=detection_collate,
                                           shuffle=True)
        if vis:
            ViewDatasets(self.train_dataloader)
        self.SSD = SSD(num_classes=cfg.num_classes,
                       num_blocks=cfg.mbox,
                       top_k=cfg.top_k,
                       conf_thresh=cfg.conf_thresh,
                       nms_thresh=cfg.nms_thresh,
                       variance=cfg.variance).to(self.device)
        self.optimizer = optim.SGD(self.SSD.parameters(),
                                   lr=cfg.init_lr,
                                   momentum=cfg.momentum,
                                   weight_decay=cfg.weight_decay)

        self.criterion = SSD_loss(num_classes=cfg.num_classes,
                                  variances=cfg.variance,
                                  device=self.device)

        self.__load_model_weights(self.weight_path, self.resume, self.mode)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.init_lr,
            lr_min=cfg.end_lr,
            warmup=cfg.warmup_epoch * len(self.train_dataloader))
Example #13
    def __init__(self, log_dir, resume=False, fine_tune=False):
        init_seeds(0)
        if fine_tune:
            self.__prepare_fine_tune()
        self.fp_16 = cfg.FP16
        self.device = gpu.select_device()
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = cfg.TRAIN.ACCUMULATE
        self.log_dir = log_dir
        self.weight_path = "/content/drive/MyDrive/YOLO/weights/yolov4.weights"
        self.multi_scale_train = cfg.TRAIN.MULTI_SCALE_TRAIN
        if self.multi_scale_train:
            print('Using multi scales training')
        else:
            print('train img size is {}'.format(cfg.TRAIN.TRAIN_IMG_SIZE))
        self.train_dataset = data.Build_Train_Dataset(
            anno_file=cfg.TRAIN.ANNO_FILE,
            anno_file_type="train",
            img_size=cfg.TRAIN.TRAIN_IMG_SIZE)

        self.epochs = cfg.TRAIN.YOLO_EPOCHS if cfg.MODEL.MODEL_TYPE == 'YOLOv4' else cfg.TRAIN.Mobilenet_YOLO_EPOCHS
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN.BATCH_SIZE // cfg.TRAIN.ACCUMULATE,
            num_workers=cfg.TRAIN.NUMBER_WORKERS,
            shuffle=True,
            pin_memory=True)
        self.yolov4 = Build_Model(
            weight_path="/content/drive/MyDrive/YOLO/weights/yolov4.weights",
            resume=resume)

        self.yolov4 = self.yolov4.to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(),
                                   lr=cfg.TRAIN.LR_INIT,
                                   momentum=cfg.TRAIN.MOMENTUM,
                                   weight_decay=cfg.TRAIN.WEIGHT_DECAY)

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL.ANCHORS,
            strides=cfg.MODEL.STRIDES,
            iou_threshold_loss=cfg.TRAIN.IOU_THRESHOLD_LOSS)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN.LR_INIT,
            lr_min=cfg.TRAIN.LR_END,
            warmup=cfg.TRAIN.WARMUP_EPOCHS * len(self.train_dataloader))
        if resume:
            self.__load_resume_weights()
        if fine_tune:
            self.__load_best_weights()

    def __init__(self, log_dir, test_images):
        init_seeds(0)
        self.device = gpu.select_device()
        self.log_dir = log_dir
        self.yolov4 = Build_Model(weight_path=None, resume=False)
        self.yolov4 = self.yolov4.to(self.device)
        self.dataset = Naive_Test_Dataset(test_images)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=cfg.VAL.BATCH_SIZE,
            shuffle=False,
            pin_memory=True,
            num_workers=cfg.VAL.NUMBER_WORKERS)
        self.__load_best_weights()
Example #15
    def __init__(self, cfg, train_dir, valid_dir, weight_path, resume, gpu_id):
        init_seeds(0)
        self.cfg = cfg
        self.train_dir = train_dir
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.today = datetime.datetime.today().strftime('%y%m%d')
        num_weights = len(glob.glob(f'./weight/{self.today}_*'))
        self.save_weight_dir = f'./weight/{self.today}_{num_weights+1}'
        os.mkdir(self.save_weight_dir)
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        # self.train_dataset = VocDataset(cfg=cfg, anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataset = YoloDataset(cfg=cfg, data_dir=self.train_dir, img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN["BATCH_SIZE"],
                                           num_workers=cfg.TRAIN["NUMBER_WORKERS"],
                                           shuffle=True)
        self.yolov3 = Yolov3(cfg=cfg).to(self.device)

        self.optimizer = optim.SGD(self.yolov3.parameters(), lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"], weight_decay=cfg.TRAIN["WEIGHT_DECAY"])
        #self.optimizer = optim.Adam(self.yolov3.parameters(), lr = lr_init, weight_decay=0.9995)

        self.criterion = YoloV3Loss(anchors=cfg.MODEL["ANCHORS"], strides=cfg.MODEL["STRIDES"],
                                    iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                          T_max=self.epochs*len(self.train_dataloader),
                                                          lr_init=cfg.TRAIN["LR_INIT"],
                                                          lr_min=cfg.TRAIN["LR_END"],
                                                          warmup=cfg.TRAIN["WARMUP_EPOCHS"]*len(self.train_dataloader))

        self.valid_dir = valid_dir
        self.img_valid_dir = os.path.join(self.valid_dir, 'images')
        self.anno_valid_dir = os.path.join(self.valid_dir, 'labels')

        self.valid_dataset = YoloDataset(cfg=cfg, data_dir=self.valid_dir, img_size=cfg.EVAL["TEST_IMG_SIZE"])
        self.valid_dataloader = DataLoader(self.valid_dataset,
                                           batch_size=cfg.EVAL["BATCH_SIZE"],
                                           num_workers=cfg.EVAL["NUMBER_WORKERS"],
                                           shuffle=False)

        self.evaluator = YoloEvaluator(self.yolov3, self.cfg, self.img_valid_dir, "eval", anno_dir=self.anno_valid_dir)
Example #16
    def __init__(self, weight_path, resume, gpu_id):
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        print(self.device)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        if self.multi_scale_train:
            print('Using multi scales training')
        else:
            print('train img size is {}'.format(cfg.TRAIN["TRAIN_IMG_SIZE"]))

        self.train_dataset = data.Construct_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True)

        net_model = NPMMRDet()
        if torch.cuda.device_count() > 1:  ## multi GPUs
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            net_model = torch.nn.DataParallel(net_model)
            self.model = net_model.to(self.device)
        elif torch.cuda.device_count() == 1:
            self.model = net_model.to(self.device)  ## Single GPU

        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"],
                                   weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)
        #self.__save_model_weights_best(160)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader))
Example #17
    def __init__(self, model, weight_path, resume, gpu_id):
        init_seeds(0)
        self.model = model
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.train_dataset = data.VocDataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True)
        if self.model == 's':
            self.yolov3 = Yolov3_S().to(self.device)
            self.model_postfix = '_s'
        elif self.model == 'l':
            self.yolov3 = Yolov3_L().to(self.device)
            self.model_postfix = '_l'
        else:
            self.yolov3 = Yolov3().to(self.device)
            self.model_postfix = '_m'
        # self.yolov3.apply(tools.weights_init_normal)

        self.optimizer = optim.SGD(self.yolov3.parameters(),
                                   lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"],
                                   weight_decay=cfg.TRAIN["WEIGHT_DECAY"])
        #self.optimizer = optim.Adam(self.yolov3.parameters(), lr=cfg.TRAIN["LR_INIT"], weight_decay=0.9995)

        self.criterion = YoloV3Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader))
Example #18
    def __init__(self, weight_path, resume, gpu_id):
        init_seeds(1)
        init_dirs("result")

        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mIoU = 0.
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path

        self.train_loader, self.val_loader, _, self.num_class = make_data_loader(
        )

        self.model = DeepLab(num_classes=self.num_class,
                             backbone="resnet",
                             output_stride=16,
                             sync_bn=False,
                             freeze_bn=False).to(self.device)

        train_params = [{
            'params': self.model.get_1x_lr_params(),
            'lr': cfg.TRAIN["LR_INIT"]
        }, {
            'params': self.model.get_10x_lr_params(),
            'lr': cfg.TRAIN["LR_INIT"] * 10
        }]

        self.optimizer = optim.SGD(train_params,
                                   momentum=cfg.TRAIN["MOMENTUM"],
                                   weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = SegmentationLosses().build_loss(
            mode=cfg.TRAIN["LOSS_TYPE"])

        self.scheduler = LR_Scheduler(mode=cfg.TRAIN["LR_SCHEDULER"],
                                      base_lr=cfg.TRAIN["LR_INIT"],
                                      num_epochs=self.epochs,
                                      iters_per_epoch=len(self.train_loader))
        self.evaluator = Evaluator(self.num_class)
        self.saver = Saver()
        self.summary = TensorboardSummary(os.path.join("result", "run"))

        if resume:
            self.__resume_model_weights()
Example #19
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__showatt = cfg.TRAIN["showatt"]
        self.__visiual = visiual
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)

    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data",label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #21
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 heatmap=False):
        self.__num_class = cfg.COCO_DATA.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL.MULTI_SCALE_VAL
        self.__flip_val = cfg.VAL.FLIP_VAL

        self.__visiual = visiual
        self.__eval = eval  # NOTE: there is no eval parameter in this variant, so this binds the Python builtin eval
        self.__classes = cfg.COCO_DATA.CLASSES

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)
Example #22
    def __init__(self,
                 cfg_path=None,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False):
        self.img_size = img_size
        self.__num_class = pms.DATA["NUM"]
        self.__conf_threshold = pms.TEST["CONF_THRESH"]
        self.__nms_threshold = pms.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = pms.DATA["CLASSES"]

        self.__model = Darknet(cfg_path=cfg_path,
                               img_size=img_size).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, visiual=False)
Example #23
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # these should be set still
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #24
    def __init__(self,
                 weight_path=None,
                 resume: bool = False,
                 gpu_id: int = 0,
                 accumulate: bool = True,
                 fp_16: bool = False):

        # PYTHON HASH SEED
        init_seeds(0)

        # device
        self.fp_16: bool = fp_16
        self.device: torch.device = gpu.select_device(gpu_id)
        self.start_epoch: int = 0
        self.best_mAP: float = 0.0  # not sure why this is necessary...
        self.accumulate: bool = accumulate
        self.weight_path: Path = weight_path
        self.multi_scale_train: bool = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        # Show attention modification?
        self.showatt = cfg.TRAIN["showatt"]

        # Multi-scale training status
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print(f"train img size is {cfg.TRAIN['TRAIN_IMG_SIZE']}")

        # Build Dataset using helper function.
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)
def transform_to_onnx(weight_file, batch_size, n_classes, IN_IMAGE_H,
                      IN_IMAGE_W):
    device = gpu.select_device(id=0)

    model = Build_Model().to(device)
    #model = Build_Model(weight_path=weight_file, resume=False).to(device)

    pretrained_dict = torch.load(weight_file,
                                 map_location=device)  #torch.device('cuda')
    model.load_state_dict(pretrained_dict)

    evaluator = Evaluator(model, showatt=False)

    input_names = ["input"]
    output_names = ['boxes', 'confs']

    dynamic = False
    if batch_size <= 0:
        dynamic = True

    if dynamic:
        x = torch.randn((1, 3, IN_IMAGE_H, IN_IMAGE_W), requires_grad=True)
        x = x.to(device)  # keep the dummy input on the same device as the model (matches the static branch)
        onnx_file_name = "yolov4_-1_3_{}_{}_dynamic.onnx".format(
            IN_IMAGE_H, IN_IMAGE_W)
        dynamic_axes = {
            "input": {
                0: "batch_size"
            },
            "boxes": {
                0: "batch_size"
            },
            "confs": {
                0: "batch_size"
            }
        }
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        print('Onnx model exporting done')
        return onnx_file_name
    else:
        x = torch.randn((batch_size, 3, IN_IMAGE_H, IN_IMAGE_W),
                        requires_grad=True)
        x = x.to(device)
        onnx_file_name = "yolov4_{}_3_{}_{}_static.onnx".format(
            batch_size, IN_IMAGE_H, IN_IMAGE_W)
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=None)

        print('Onnx model exporting done')
        return onnx_file_name, evaluator
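
A hedged usage sketch for the exporter above: the weight-file path and input sizes are placeholders, and the onnx/onnxruntime verification step is an optional extra that is not part of the original snippet (it assumes the exported graph really exposes the two outputs named "boxes" and "confs"):

if __name__ == "__main__":
    import numpy as np
    import onnx
    import onnxruntime as ort

    # Placeholder checkpoint path and resolution; adjust to your setup.
    onnx_path, evaluator = transform_to_onnx("weights/best.pt", batch_size=1,
                                             n_classes=80, IN_IMAGE_H=416, IN_IMAGE_W=416)

    # Optional sanity check of the exported graph.
    onnx.checker.check_model(onnx.load(onnx_path))
    sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    dummy = np.random.randn(1, 3, 416, 416).astype(np.float32)
    boxes, confs = sess.run(None, {"input": dummy})  # assumes exactly two outputs
    print("boxes:", boxes.shape, "confs:", confs.shape)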