Example #1
    def __init__(self, weight_path, resume, gpu_id, accumulate, fp_16):
        init_seeds(0)
        self.fp_16 = fp_16
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = accumulate
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.train_dataset = data.Build_Dataset(anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        print('train img size is {}'.format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN["BATCH_SIZE"],
                                           num_workers=cfg.TRAIN["NUMBER_WORKERS"],
                                           shuffle=True, pin_memory=True
                                           )
        self.yolov4 = Build_Model().to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(), lr=cfg.TRAIN["LR_INIT"],
                                   momentum=cfg.TRAIN["MOMENTUM"], weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = YoloV4Loss(anchors=cfg.MODEL["ANCHORS"], strides=cfg.MODEL["STRIDES"],
                                    iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.__load_model_weights(weight_path, resume)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                          T_max=self.epochs*len(self.train_dataloader),
                                                          lr_init=cfg.TRAIN["LR_INIT"],
                                                          lr_min=cfg.TRAIN["LR_END"],
                                                          warmup=cfg.TRAIN["WARMUP_EPOCHS"]*len(self.train_dataloader))
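
The CosineDecayLR call above warms up for WARMUP_EPOCHS * len(train_dataloader) steps and then decays the learning rate from LR_INIT toward LR_END over T_max steps. As a self-contained reference for that warmup-plus-cosine shape (this is not the repository's CosineDecayLR, just a sketch with a toy model and hypothetical step counts, built on PyTorch's LambdaLR):

import math
import torch
from torch import optim

# Toy model and step counts, only to illustrate the schedule shape.
model = torch.nn.Linear(10, 10)
lr_init, lr_min = 1e-3, 1e-6
total_steps, warmup_steps = 1000, 100
optimizer = optim.SGD(model.parameters(), lr=lr_init, momentum=0.9)

def lr_factor(step):
    # Linear warmup, then cosine decay from lr_init down to lr_min.
    if step < warmup_steps:
        return (step + 1) / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    return (lr_min + (lr_init - lr_min) * cosine) / lr_init

scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_factor)

for step in range(total_steps):
    optimizer.step()   # forward/backward would normally come first
    scheduler.step()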
Example #2
 def __init__(self, log_dir):
     init_seeds(0)
     self.device = gpu.select_device()
     self.log_dir = log_dir
     self.yolov4 = Build_Model(weight_path=None, resume=False)
     self.yolov4 = self.yolov4.to(self.device)
     self.__load_best_weights()
Example #3
    def __init__(
        self,
        gpu_id=0,
        weight_path=None,
        visiual=None,
        eval=False,
    ):
        # self.__num_class = cfg.VOC_DATA["NUM"]
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        # self.__classes = cfg.VOC_DATA["CLASSES"]
        self.__classes = cfg.Customer_DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #4
    def __init__(self,
                 gpu_id='0',
                 weight_path=None,
                 img_size=544,
                 visiual=None,
                 eval=False,
                 epoch=None):
        self.img_size = img_size
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        # self.__device = select_device('0', batch_size=cfg.VAL["BATCH_SIZE"])
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, epoch, showatt=False)

        self.epoch = epoch
Example #5
    def __init__(
            self,
            gpu_id=0,
            model1_path=None,
            model2_path=None,
            data_dir=None,
            # result_dir=None,
            mnist=False,
    ):
        self.__num_class = cfg.Customer_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
        self.__flip_val = cfg.VAL["FLIP_VAL"]

        self.__data_dir = data_dir
        print(self.__data_dir)
        self.__classes = cfg.Customer_DATA["CLASSES"]
        self.__mnist = mnist
        self.__model1 = Build_Model().to(self.__device)
        if mnist:
            self.__model2 = torch.load(model2_path).double().cuda()
        else:
            self.__model2 = torch.load(model2_path).cuda()

        self.__load_model_weights(model1_path)

        self.__evalter = Evaluator(self.__model1, showatt=False)
Example #6
    def __init__(self,
                 weight_path=None,
                 resume=False,
                 gpu_id=0,
                 accumulate=1,
                 fp_16=False):
        init_seeds(0)
        self.fp_16 = fp_16
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.0
        self.accumulate = accumulate
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        self.showatt = cfg.TRAIN["showatt"]
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print("train img size is {}".format(cfg.TRAIN["TRAIN_IMG_SIZE"]))
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)
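
Several of these trainers take an accumulate argument. A minimal, self-contained sketch of the gradient-accumulation pattern that argument usually controls (toy model and random data; this is not the repository's training loop):

import torch
from torch import nn, optim

model = nn.Linear(8, 1)
optimizer = optim.SGD(model.parameters(), lr=1e-2)
criterion = nn.MSELoss()
accumulate = 4  # number of mini-batches folded into one optimizer step

optimizer.zero_grad()
for step in range(16):
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    loss = criterion(model(x), y) / accumulate  # scale so gradients average over the window
    loss.backward()
    if (step + 1) % accumulate == 0:
        optimizer.step()
        optimizer.zero_grad()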
Example #7
    def __init__(self, log_dir, resume=False, fine_tune=False):
        init_seeds(0)
        if fine_tune:
            self.__prepare_fine_tune()
        self.fp_16 = cfg.FP16
        self.device = gpu.select_device()
        self.start_epoch = 0
        self.best_mAP = 0.
        self.accumulate = cfg.TRAIN.ACCUMULATE
        self.log_dir = log_dir
        self.weight_path = "/content/drive/MyDrive/YOLO/weights/yolov4.weights"
        self.multi_scale_train = cfg.TRAIN.MULTI_SCALE_TRAIN
        if self.multi_scale_train:
            print('Using multi scales training')
        else:
            print('train img size is {}'.format(cfg.TRAIN.TRAIN_IMG_SIZE))
        self.train_dataset = data.Build_Train_Dataset(
            anno_file=cfg.TRAIN.ANNO_FILE,
            anno_file_type="train",
            img_size=cfg.TRAIN.TRAIN_IMG_SIZE)

        self.epochs = cfg.TRAIN.YOLO_EPOCHS if cfg.MODEL.MODEL_TYPE == 'YOLOv4' else cfg.TRAIN.Mobilenet_YOLO_EPOCHS
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN.BATCH_SIZE // cfg.TRAIN.ACCUMULATE,
            num_workers=cfg.TRAIN.NUMBER_WORKERS,
            shuffle=True,
            pin_memory=True)
        self.yolov4 = Build_Model(
            weight_path="/content/drive/MyDrive/YOLO/weights/yolov4.weights",
            resume=resume)

        self.yolov4 = self.yolov4.to(self.device)

        self.optimizer = optim.SGD(self.yolov4.parameters(),
                                   lr=cfg.TRAIN.LR_INIT,
                                   momentum=cfg.TRAIN.MOMENTUM,
                                   weight_decay=cfg.TRAIN.WEIGHT_DECAY)

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL.ANCHORS,
            strides=cfg.MODEL.STRIDES,
            iou_threshold_loss=cfg.TRAIN.IOU_THRESHOLD_LOSS)

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN.LR_INIT,
            lr_min=cfg.TRAIN.LR_END,
            warmup=cfg.TRAIN.WARMUP_EPOCHS * len(self.train_dataloader))
        if resume: self.__load_resume_weights()
        if fine_tune: self.__load_best_weights()
Example #8
 def __init__(self, log_dir, test_images):
     init_seeds(0)
     self.device = gpu.select_device()
     self.log_dir = log_dir
     self.yolov4 = Build_Model(weight_path=None, resume=False)
     self.yolov4 = self.yolov4.to(self.device)
     self.dataset = Naive_Test_Dataset(test_images)
     self.dataloader = torch.utils.data.DataLoader(
         self.dataset,
         batch_size=cfg.VAL.BATCH_SIZE,
         shuffle=False,
         pin_memory=True,
         num_workers=cfg.VAL.NUMBER_WORKERS)
     self.__load_best_weights()
Example #9
    def __init__(self, weight_path, resume, exp_name, accumulate=None):
        # precision=16 for fp16

        super().__init__()
        self.model = Build_Model(weight_path=weight_path, resume=resume)
        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"])

        self.evaluator = Evaluator(self.model,
                                   showatt=False,
                                   exp_name=exp_name)
        self.evaluator.clear_predict_file()
Example #10
def transform_to_onnx(weight_file, batch_size, IN_IMAGE_H, IN_IMAGE_W):
    model = Build_Model()
    pretrained_dict = torch.load(weight_file, map_location=torch.device('cpu'))
    model.load_state_dict(pretrained_dict)

    input_names = ["input"]
    output_names = ['boxes', 'confs']

    dynamic = False
    if batch_size <= 0:
        dynamic = True

    if dynamic:
        x = torch.randn((1, 3, IN_IMAGE_H, IN_IMAGE_W), requires_grad=True)
        onnx_file_name = "yolov4_-1_3_{}_{}_dynamic.onnx".format(IN_IMAGE_H, IN_IMAGE_W)
        dynamic_axes = {"input": {0: "batch_size"}, "boxes": {0: "batch_size"}, "confs": {0: "batch_size"}}
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names, output_names=output_names,
                          dynamic_axes=dynamic_axes)

        print('Onnx model exporting done')
        return onnx_file_name

    else:
        x = torch.randn((batch_size, 3, IN_IMAGE_H, IN_IMAGE_W), requires_grad=True)
        onnx_file_name = "yolov4_{}_3_{}_{}_static.onnx".format(batch_size, IN_IMAGE_H, IN_IMAGE_W)
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          do_constant_folding=True,
                          input_names=input_names, output_names=output_names,
                          )

        print('Onnx model exporting done')
        return onnx_file_name
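
A hedged usage sketch for the export above; the checkpoint path, the input resolution, and the onnxruntime sanity check are assumptions, not part of the original example:

import numpy as np
import onnxruntime as ort

onnx_path = transform_to_onnx("yolov4.pth", batch_size=1, IN_IMAGE_H=416, IN_IMAGE_W=416)

# Run the exported graph once on random data to confirm it loads and executes.
session = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
dummy = np.random.randn(1, 3, 416, 416).astype(np.float32)
outputs = session.run(None, {"input": dummy})
print([o.shape for o in outputs])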
Example #11
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 mode=None):
        self.__num_class = cfg.VOC_DATA["NUM"]
        self.__conf_threshold = cfg.VAL["CONF_THRESH"]
        self.__nms_threshold = cfg.VAL["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__showatt = cfg.TRAIN["showatt"]
        self.__visiual = visiual
        self.__mode = mode
        self.__classes = cfg.VOC_DATA["CLASSES"]

        self.__model = Build_Model(showatt=self.__showatt).to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=self.__showatt)
Example #12
    def __init__(self,
                 label_path,
                 weight_path=None,
                 output_dir=None,
                 ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES

        # self.__video_path = video_path
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
Example #13
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 heatmap=False):
        self.__num_class = cfg.COCO_DATA.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL.MULTI_SCALE_VAL
        self.__flip_val = cfg.VAL.FLIP_VAL

        self.__visiual = visiual
        self.__eval = eval  # no `eval` argument in this signature, so this binds Python's built-in eval
        self.__classes = cfg.COCO_DATA.CLASSES

        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=heatmap)
Example #14
    def __init__(
        self,
        label_path,
        weight_path=None,
        output_dir=None,
    ):
        self.__label_path = os.path.join("/data", label_path)
        self.get_first_10_imgs()
        self.__num_class = cfg.DATASET.NUM

        # these should be set still
        self.__conf_threshold = 0.25
        self.__nms_threshold = 0.5
        #######################################

        self.__device = gpu.select_device()
        self.__classes = cfg.DATASET.CLASSES
        self.__output_dir = output_dir
        self.__model = Build_Model().to(self.__device)

        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
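
Example #14 hard-codes CONF_THRESH = 0.25 and NMS_THRESH = 0.5. For reference, a stand-alone sketch of what those two thresholds do, using torchvision's NMS on toy boxes (this is not the repository's post-processing code):

import torch
from torchvision.ops import nms

conf_threshold, nms_threshold = 0.25, 0.5
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],     # overlaps the next box
                      [1.0, 1.0, 11.0, 11.0],
                      [50.0, 50.0, 60.0, 60.0]])  # low-confidence detection
scores = torch.tensor([0.9, 0.8, 0.2])

keep = scores >= conf_threshold            # 1) drop low-confidence boxes
boxes, scores = boxes[keep], scores[keep]
kept = nms(boxes, scores, nms_threshold)   # 2) suppress overlapping duplicates by IoU
print(boxes[kept], scores[kept])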
Example #15
import os

import torch
import matplotlib.pyplot as plt
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import config.yolov4_config as cfg
from utils.visualize import *
from utils.torch_utils import *

# GPU device
# Check GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print("Device: ", device)

model = Build_Model().to(device)

w_path = 'model.pt'
# chkpt = torch.load(os.path.join(w_path))
chkpt = torch.load(w_path, map_location=torch.device('cpu'))

model.load_state_dict(chkpt['model'])

classes = ['1', '2', '3', '4', '5', '6']

test_path = './data/test/'
pred_path = './data/pred/'
imgs_path = os.listdir(test_path)
n = len(imgs_path)
ratio = 0.1
for i, img_path in enumerate(imgs_path):
    # per-image inference/visualization loop (body not included in the snippet)
    pass
Example #16
def transform_to_onnx(weight_file, batch_size, n_classes, IN_IMAGE_H,
                      IN_IMAGE_W):
    device = gpu.select_device(id=0)

    model = Build_Model().to(device)
    #model = Build_Model(weight_path=weight_file, resume=False).to(device)

    pretrained_dict = torch.load(weight_file,
                                 map_location=device)  #torch.device('cuda')
    model.load_state_dict(pretrained_dict)

    evaluator = Evaluator(model, showatt=False)

    input_names = ["input"]
    output_names = ['boxes', 'confs']

    dynamic = False
    if batch_size <= 0:
        dynamic = True

    if dynamic:
        x = torch.randn((1, 3, IN_IMAGE_H, IN_IMAGE_W), requires_grad=True)
        x = x.to(device)  # keep the dummy input on the same device as the model, as in the static branch
        onnx_file_name = "yolov4_-1_3_{}_{}_dynamic.onnx".format(
            IN_IMAGE_H, IN_IMAGE_W)
        dynamic_axes = {
            "input": {0: "batch_size"},
            "boxes": {0: "batch_size"},
            "confs": {0: "batch_size"},
        }
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        print('Onnx model exporting done')
        return onnx_file_name
    else:
        x = torch.randn((batch_size, 3, IN_IMAGE_H, IN_IMAGE_W),
                        requires_grad=True)
        x = x.to(device)
        onnx_file_name = "yolov4_{}_3_{}_{}_static.onnx".format(
            batch_size, IN_IMAGE_H, IN_IMAGE_W)
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=None)

        print('Onnx model exporting done')
        return onnx_file_name, evaluator
Example #17
            if m.inplace:
                continue
        out = m(input_)[0]
        if len(out):
            for j in range(len(out)):
                out_sizes.append(np.array(out[j].size()))
            break

    total_nums = 0
    for i in range(len(out_sizes)):
        s = out_sizes[i]
        nums = np.prod(np.array(s))
        total_nums += nums

    print(
        "Model {} : intermediate variables: {:.3f} M (without backward)".format(
            model._get_name(), total_nums * type_size / 1000 / 1000))
    print("Model {} : intermediate variables: {:.3f} M (with backward)".format(
        model._get_name(), total_nums * type_size * 2 / 1000 / 1000))


if __name__ == "__main__":
    from model.build_model import Build_Model
    import torch

    net = Build_Model()
    print(net)

    in_img = torch.randn(1, 3, 320, 320)
    modelsize(net, in_img)
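
The modelsize helper above estimates intermediate-activation memory. A smaller self-contained companion for parameter count and parameter memory (toy network; float32, i.e. 4 bytes per element, assumed) might look like:

from torch import nn

def param_size_mb(model: nn.Module, bytes_per_param: int = 4) -> float:
    # Total number of parameters times bytes per element, reported in MB.
    n_params = sum(p.numel() for p in model.parameters())
    return n_params * bytes_per_param / 1e6

net = nn.Sequential(nn.Conv2d(3, 32, 3), nn.ReLU(), nn.Conv2d(32, 64, 3))
print("{:.3f} MB of parameters".format(param_size_mb(net)))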
Example #18
    def __init__(self,
                 weight_path=None,
                 resume: bool = False,
                 gpu_id: int = 0,
                 accumulate: bool = True,
                 fp_16: bool = False):

        # PYTHON HASH SEED
        init_seeds(0)

        # device
        self.fp_16: bool = fp_16
        self.device: torch.device = gpu.select_device(gpu_id)
        self.start_epoch: int = 0
        self.best_mAP: float = 0.0  # not sure why this is necessary...
        self.accumulate: bool = accumulate
        self.weight_path: Path = weight_path
        self.multi_scale_train: bool = cfg.TRAIN["MULTI_SCALE_TRAIN"]
        # Show attention modification?
        self.showatt = cfg.TRAIN["showatt"]

        # Multi-scale training status
        if self.multi_scale_train:
            print("Using multi scales training")
        else:
            print(f"train img size is {cfg.TRAIN['TRAIN_IMG_SIZE']}")

        # Build Dataset using helper function.
        self.train_dataset = data.Build_Dataset(
            anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.epochs = (cfg.TRAIN["YOLO_EPOCHS"] if cfg.MODEL_TYPE["TYPE"]
                       == "YOLOv4" else cfg.TRAIN["Mobilenet_YOLO_EPOCHS"])
        self.eval_epoch = (30 if cfg.MODEL_TYPE["TYPE"] == "YOLOv4" else 50)
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=cfg.TRAIN["BATCH_SIZE"],
            num_workers=cfg.TRAIN["NUMBER_WORKERS"],
            shuffle=True,
            pin_memory=True,
        )

        self.yolov4 = Build_Model(weight_path=weight_path,
                                  resume=resume,
                                  showatt=self.showatt).to(self.device)

        self.optimizer = optim.SGD(
            self.yolov4.parameters(),
            lr=cfg.TRAIN["LR_INIT"],
            momentum=cfg.TRAIN["MOMENTUM"],
            weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
        )

        self.criterion = YoloV4Loss(
            anchors=cfg.MODEL["ANCHORS"],
            strides=cfg.MODEL["STRIDES"],
            iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
        )

        self.scheduler = cosine_lr_scheduler.CosineDecayLR(
            self.optimizer,
            T_max=self.epochs * len(self.train_dataloader),
            lr_init=cfg.TRAIN["LR_INIT"],
            lr_min=cfg.TRAIN["LR_END"],
            warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(self.train_dataloader),
        )
        if resume:
            self.__load_resume_weights(weight_path)
Example #19
weight_path = 'weight/yolov4.weights'

train_anno_path = './data/train_annotation.txt'

train_dataset = BuildDataset(train_anno_path)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=0,
    shuffle=True,
    pin_memory=True,
)

# model
yolov4 = Build_Model(weight_path=weight_path).to(device)

optimizer = optim.SGD(
    yolov4.parameters(),
    lr=cfg.TRAIN["LR_INIT"],
    momentum=cfg.TRAIN["MOMENTUM"],
    weight_decay=cfg.TRAIN["WEIGHT_DECAY"],
)

criterion = YoloV4Loss(
    anchors=cfg.MODEL["ANCHORS"],
    strides=cfg.MODEL["STRIDES"],
    iou_threshold_loss=cfg.TRAIN["IOU_THRESHOLD_LOSS"],
)

# same warmup + cosine schedule as in the trainer examples above
scheduler = cosine_lr_scheduler.CosineDecayLR(
    optimizer,
    T_max=cfg.TRAIN["EPOCHS"] * len(train_dataloader),
    lr_init=cfg.TRAIN["LR_INIT"],
    lr_min=cfg.TRAIN["LR_END"],
    warmup=cfg.TRAIN["WARMUP_EPOCHS"] * len(train_dataloader),
)