Example #1
0
    # NOTE(review): truncated scrape fragment — the enclosing `def`, the
    # construction of `parser`, and the tail of the DataLoader call below
    # are missing, so this snippet does not run on its own.
    parser.add_argument('--resize', type=int, default=600, help='image_size')
    parser.add_argument('--num_classes', type=int, default=80)

    test_opts = parser.parse_args()
    print(test_opts)

    # 2. device (selection left commented out in the original scrape)
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 3. visdom
    vis = None  # visdom disabled for this test script

    # 4. data set — the dataset choice also fixes num_classes (VOC: 20, COCO: 80)
    if test_opts.data_type == 'voc':
        test_set = VOC_Dataset(root=test_opts.data_root,
                               split='test',
                               resize=600)
        test_opts.num_classes = 20

    if test_opts.data_type == 'coco':
        test_set = COCO_Dataset(root=test_opts.data_root,
                                set_name='val2017',
                                split='test',
                                resize=600)
        test_opts.num_classes = 80

    # 5. data loader (batch_size=1: per-image evaluation)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              collate_fn=test_set.collate_fn,
                                              shuffle=False,
def main():
    """Train YOLO-VGG-16 on Pascal VOC.

    Parses CLI options, builds the train/test datasets and loaders, the
    model, loss and SGD optimizer, optionally resumes from
    ``<save_path>/<save_file_name>.<start_epoch-1>.pth.tar``, then
    alternates ``train`` and ``test`` once per epoch.
    """
    # 1. argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=150)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--save_file_name', type=str, default='yolo_v2_vgg_16')
    parser.add_argument('--conf_thres', type=float, default=0.01)
    parser.add_argument('--save_path', type=str, default='./saves')
    parser.add_argument('--start_epoch', type=int, default=0)  # to resume

    opts = parser.parse_args()
    print(opts)

    # 2. device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 3. visdom
    vis = visdom.Visdom()

    # 4. dataset — raw strings: the plain literals contained the invalid
    # escape sequences "\D"/"\V" (SyntaxWarning on modern Python); the
    # string values are unchanged.
    train_set = VOC_Dataset(root=r"D:\Data\VOC_ROOT", split='TRAIN')
    test_set = VOC_Dataset(root=r"D:\Data\VOC_ROOT", split='TEST')

    # 5. dataloader
    train_loader = DataLoader(dataset=train_set,
                              batch_size=opts.batch_size,
                              collate_fn=train_set.collate_fn,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=opts.num_workers)

    test_loader = DataLoader(dataset=test_set,
                             batch_size=1,
                             collate_fn=test_set.collate_fn,
                             shuffle=False)

    # 6. model
    model = YOLO_VGG_16().to(device)

    # 7. criterion (VOC has 20 object classes)
    criterion = Yolo_Loss(num_classes=20)

    # 8. optimizer
    optimizer = optim.SGD(params=model.parameters(),
                          lr=opts.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    # 9. scheduler — deliberately disabled. The original constructed a
    # StepLR and immediately overwrote it with None; the dead
    # construction is removed.
    scheduler = None

    # 10. resume — checkpoints are saved per epoch, so load start_epoch-1.
    if opts.start_epoch != 0:
        # map_location keeps CPU-only machines from failing on
        # CUDA-saved tensors.
        checkpoint = torch.load(
            os.path.join(opts.save_path, opts.save_file_name) +
            '.{}.pth.tar'.format(opts.start_epoch - 1),
            map_location=device)  # train
        model.load_state_dict(
            checkpoint['model_state_dict'])  # load model state dict
        optimizer.load_state_dict(
            checkpoint['optimizer_state_dict'])  # load optim state dict
        if scheduler is not None:
            scheduler.load_state_dict(
                checkpoint['scheduler_state_dict'])  # load sched state dict
        print('\nLoaded checkpoint from epoch %d.\n' %
              (int(opts.start_epoch) - 1))

    else:

        print('\nNo check point to resume.. train from scratch.\n')

    # 11. train / 12. test, once per epoch
    for epoch in range(opts.start_epoch, opts.epochs):

        train(epoch=epoch,
              device=device,
              vis=vis,
              train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              scheduler=scheduler,
              save_path=opts.save_path,
              save_file_name=opts.save_file_name)

        if scheduler is not None:
            scheduler.step()

        # 12. test
        test(epoch=epoch,
             device=device,
             vis=vis,
             test_loader=test_loader,
             model=model,
             criterion=criterion,
             save_path=opts.save_path,
             save_file_name=opts.save_file_name,
             conf_thres=opts.conf_thres,
             eval=True)
def main():
    """Train YOLOv2 with a Darknet19 backbone — scraped fragment.

    NOTE(review): this scraped example is corrupted. After the training
    loop, a second, unrelated test-script fragment was spliced in at the
    same indentation, and its final ``test(...)`` call is cut off
    mid-call, so this function does not parse as-is.
    """
    # ================================
    # Argument parsing
    # ================================

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--lr', type=float, default=1e-6)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--num_classes', type=int, default=20)
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True — e.g. "--model_pretrain False" still yields True. Consider
    # action='store_true' instead.
    parser.add_argument('--backbone_pretrain', type=bool, default=True)
    parser.add_argument('--model_pretrain', type=bool, default=False)

    parser.add_argument('--save_file_name',
                        type=str,
                        default='yolo_v2_training')
    parser.add_argument('--conf_thres', type=float, default=0.01)
    parser.add_argument('--save_path', type=str, default='./saves')
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--use_visdom', type=bool, default=True)  # same type=bool caveat as above

    parser.add_argument('--dataset_type',
                        type=str,
                        default='voc',
                        help='which dataset you want to use VOC or COCO')

    opts = parser.parse_args()
    print(opts)

    print("Use Visdom : ", opts.use_visdom)

    # NOTE(review): if use_visdom is falsy, `vis` is never bound but is
    # still passed to train() below — NameError at that point.
    if opts.use_visdom:
        vis = visdom.Visdom()

    # ================================
    # Data Loader (root type)
    # [VOC]
    #  Train : /VOCDevkit/TRAIN/VOC2007/JPEGImages & /VOCDevkit/TRAIN/VOC2007/Annotations
    #  Test  : /VOCDevkit/TEST/VOC2007/JPEGImages & /VOCDevkit/TEST/VOC2007/Annotations

    # [COCO]
    #  Train : /COCO/images/train2017 & /COCO/annotations/instances_train2017.json
    #  Test  : /COCO/images/val2017 & /COCO/annotations/instances_val2017.json
    # ================================

    # Hard-coded dataset root; also used for COCO below, which looks wrong
    # for a path named VOCdevkit — verify against the full project.
    data_root = '/mnt/mydisk/hdisk/dataset/VOCdevkit'

    if opts.dataset_type == 'voc':
        train_set = VOC_Dataset(root=data_root, mode='TRAIN')
        test_set = VOC_Dataset(root=data_root, mode='TEST')

    elif opts.dataset_type == 'coco':
        train_set = COCO_Dataset(root=data_root, mode='TRAIN')
        test_set = COCO_Dataset(root=data_root, mode='TEST')

    train_loader = DataLoader(dataset=train_set,
                              batch_size=opts.batch_size,
                              collate_fn=train_set.collate_fn,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=opts.num_workers)

    darknet = darknet19.DarkNet19(num_classes=opts.num_classes)
    model = YOLOV2(darknet=darknet).cuda()

    # Optionally load pre-trained weights (backbone only, or whole model).
    if opts.backbone_pretrain:
        backbone_weights_file_root = 'yolo-voc.weights'
        print(" Darknet19 (used pre-train weights) : ",
              backbone_weights_file_root)
        weights_loader = WeightLoader()
        weights_loader.load(darknet, backbone_weights_file_root, backbone=True)

    if opts.model_pretrain:
        # load-weights (include backbone) / .weights file
        model_weights_file_root = 'yolo-voc.weights'
        print(" YOLOv2 (used pre-train weights) : ", model_weights_file_root)
        weights_loader = WeightLoader()
        weights_loader.load(model, model_weights_file_root, backbone=False)

    # ================================
    # Loss
    # ================================
    criterion = Yolo_Loss(num_classes=opts.num_classes)

    # ================================
    # Optimizer
    # ================================
    optimizer = optim.SGD(params=model.parameters(),
                          lr=opts.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    # ================================
    # Scheduler
    # ================================
    scheduler = StepLR(optimizer=optimizer, step_size=100, gamma=0.1)

    # ================================
    # Resume from checkpoint (per-epoch files: <name>.<epoch-1>.pth.tar)
    # ================================

    if opts.start_epoch != 0:
        checkpoint = torch.load(
            os.path.join(opts.save_path, opts.save_file_name) +
            '.{}.pth.tar'.format(opts.start_epoch - 1))  # train
        model.load_state_dict(
            checkpoint['model_state_dict'])  # load model state dict
        optimizer.load_state_dict(
            checkpoint['optimizer_state_dict'])  # load optim state dict
        if scheduler is not None:
            scheduler.load_state_dict(
                checkpoint['scheduler_state_dict'])  # load sched state dict
        print('\nLoaded checkpoint from epoch %d.\n' %
              (int(opts.start_epoch) - 1))

    else:
        print('\nNo check point to resume.. train from scratch.\n')

    # ================================
    # Training
    # ================================

    for epoch in range(opts.start_epoch, opts.epochs):

        train(epoch=epoch,
              vis=vis,
              train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              scheduler=scheduler,
              save_path=opts.save_path,
              save_file_name=opts.save_file_name)

        if scheduler is not None:
            scheduler.step()
    # NOTE(review): from here on, the scrape spliced in a different
    # example's test script at the same indentation; the duplicated
    # arguments and the truncated test(...) call below are not part of
    # the training function above.
    parser.add_argument('--save_path', type=str, default='./saves')
    parser.add_argument('--save_file_name', type=str, default='yolo_v2_vgg_16')
    parser.add_argument('--conf_thres', type=float, default=0.01)
    test_opts = parser.parse_args()
    print(test_opts)

    epoch = test_opts.test_epoch

    # 2. device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 3. visdom
    vis = None

    # 4. data set
    test_set = VOC_Dataset(root="D:\Data\VOC_ROOT", split='TEST')
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              collate_fn=test_set.collate_fn,
                                              shuffle=False)
    # 6. network
    model = YOLO_VGG_16().to(device)

    # 7. loss
    criterion = Yolo_Loss(num_classes=20)

    test(epoch=epoch,
         device=device,
         vis=vis,
         test_loader=test_loader,
         model=model,
Example #5
0
def main():
    """Train and evaluate RetinaNet.

    Parses options via the project's ``parse`` helper, builds the
    dataset selected by ``opts.data_type`` ('voc' or 'coco'), then runs
    the train/test loop under a MultiStepLR schedule, resuming from
    ``<save_path>/<save_file_name>.<start_epoch-1>.pth.tar`` when
    ``--start_epoch`` is non-zero.

    NOTE(review): ``device`` and ``device_ids`` are not defined in this
    function — presumably module-level names (e.g. imported from a
    config module); verify against the full file.
    """
    # 1. argparser
    opts = parse(sys.argv[1:])
    print(opts)

    # 3. visdom
    vis = visdom.Visdom(port=opts.port)

    # 4. data set — the choice also fixes num_classes (VOC: 20, COCO: 80)
    train_set = None
    test_set = None

    if opts.data_type == 'voc':
        train_set = VOC_Dataset(root=opts.data_root, split='train', resize=opts.resize)
        test_set = VOC_Dataset(root=opts.data_root, split='test', resize=opts.resize)
        opts.num_classes = 20

    elif opts.data_type == 'coco':
        train_set = COCO_Dataset(root=opts.data_root, set_name='train2017', split='train', resize=opts.resize)
        test_set = COCO_Dataset(root=opts.data_root, set_name='val2017', split='test', resize=opts.resize)
        opts.num_classes = 80

    else:
        # Fail fast with a clear message instead of the opaque
        # AttributeError the original raised later on train_set=None.
        raise ValueError(
            "unsupported data_type: {!r} (expected 'voc' or 'coco')".format(opts.data_type))

    # 5. data loader
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=opts.batch_size,
                                               collate_fn=train_set.collate_fn,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              collate_fn=test_set.collate_fn,
                                              shuffle=False,
                                              num_workers=2,
                                              pin_memory=True)

    # 6. network
    model = RetinaNet(num_classes=opts.num_classes).to(device)
    model = torch.nn.DataParallel(module=model, device_ids=device_ids)
    coder = RETINA_Coder(opts=opts)  # there is center_anchor in coder.

    # 7. loss
    criterion = Focal_Loss(coder=coder)

    # 8. optimizer
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=opts.lr,
                                momentum=opts.momentum,
                                weight_decay=opts.weight_decay)

    # 9. scheduler — lr decays x0.1 at epochs 30 and 45
    scheduler = MultiStepLR(optimizer=optimizer, milestones=[30, 45], gamma=0.1)

    # 10. resume — load the checkpoint of the previous epoch to continue
    if opts.start_epoch != 0:

        checkpoint = torch.load(os.path.join(opts.save_path, opts.save_file_name) + '.{}.pth.tar'
                                .format(opts.start_epoch - 1), map_location=device)        # load previous epoch to resume training
        model.load_state_dict(checkpoint['model_state_dict'])                              # load model state dict
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])                      # load optim state dict
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])                      # load sched state dict
        print('\nLoaded checkpoint from epoch %d.\n' % (int(opts.start_epoch) - 1))

    else:

        print('\nNo check point to resume.. train from scratch.\n')

    # NOTE(review): attribute is `epoch` (singular) here while sibling
    # examples use `epochs` — confirm parse() defines it.
    for epoch in range(opts.start_epoch, opts.epoch):

        # 11. train
        train(epoch=epoch,
              vis=vis,
              train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              scheduler=scheduler,
              opts=opts)

        # 12. test
        test(epoch=epoch,
             vis=vis,
             test_loader=test_loader,
             model=model,
             criterion=criterion,
             coder=coder,
             opts=opts)

        scheduler.step()
Example #6
0
    # NOTE(review): truncated scrape fragment — the enclosing `def`, the
    # construction of `parser`, and the tail of the test(...) call below
    # are missing, so this snippet does not run on its own.
    parser.add_argument('--conf_thres', type=float, default=0.01)

    from config import device
    test_opts = parser.parse_args()
    print(test_opts)

    epoch = test_opts.test_epoch

    # 2. device
    device = device  # no-op; `device` was just imported from config above

    # 3. visdom
    vis = None  # visdom disabled for this test script

    # 4. data set (batch_size=1: per-image evaluation)
    test_set = VOC_Dataset(root=test_opts.data_path, split='TEST')
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              collate_fn=test_set.collate_fn,
                                              shuffle=False)
    # 6. network
    model = YOLO_VGG_16().to(device)

    # 7. loss (VOC has 20 object classes)
    criterion = Yolo_Loss(num_classes=20)

    test(epoch=epoch,
         device=device,
         vis=vis,
         test_loader=test_loader,
         model=model,