Example 1
0
def run():
    """Entry point for COCO-class CenterNet demo detection.

    Parses the CLI arguments, builds the CenterNet model, restores the
    trained checkpoint, and dispatches to camera / image / video detection
    according to ``args.mode``.
    """
    args = parse_args()

    # select compute device
    if args.cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # square input resolution (H == W)
    input_size = [args.input_size, args.input_size]

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=input_size,
                        num_classes=80,
                        conf_thresh=args.conf_thresh,
                        nms_thresh=args.nms_thresh,
                        use_nms=args.use_nms)
    else:
        # FIX: previously an unknown version fell through with `net`
        # unbound, producing a confusing NameError below.
        print('Unknown version !!!')
        exit(1)

    # FIX: map_location was hard-coded to 'cuda', which crashes when the
    # checkpoint is loaded on a CPU-only machine; map onto the chosen device.
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # run the requested detection mode
    if args.mode == 'camera':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode)
    elif args.mode == 'image':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode,
               path_to_img=args.path_to_img)
    elif args.mode == 'video':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode,
               path_to_vid=args.path_to_vid,
               path_to_save=args.path_to_saveVid)
Example 2
0
def test():
    """Evaluate a trained CenterNet checkpoint on the VOC2007 test split."""
    # pick the compute device; enable the cudnn autotuner on GPU
    device = torch.device("cpu")
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")

    # VOC2007 test set: no image transform here, annotations parsed eagerly
    testset = VOCDetection(args.voc_root, [('2007', 'test')], None,
                           VOCAnnotationTransform())
    num_classes = len(VOC_CLASSES)

    cfg = config.voc_cfg
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=cfg['min_dim'],
                        num_classes=num_classes)

    # restore the trained weights onto the selected device
    state = torch.load(args.trained_model, map_location=device)
    net.load_state_dict(state)
    net.to(device).eval()
    print('Finished loading model!')

    # run evaluation with ImageNet-style normalization
    transform = BaseTransform(net.input_size,
                              mean=(0.406, 0.456, 0.485),
                              std=(0.225, 0.224, 0.229))
    test_net(net, device, testset, transform,
             thresh=args.visual_threshold)
Example 3
0
def run():
    """Entry point for VOC/COCO CenterNet demo detection.

    Parses CLI arguments, selects the dataset style (VOC or COCO), builds
    the model, restores the checkpoint, and dispatches to camera / image /
    video detection according to ``args.mode``.
    """
    args = parse_args()

    # select compute device
    if args.cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # dataset style determines config and class count
    if args.setup == 'VOC':
        print('use VOC style')
        cfg = config.voc_cfg
        num_classes = 20
    elif args.setup == 'COCO':
        print('use COCO style')
        cfg = config.coco_cfg
        num_classes = 80
    else:
        print('Only support VOC and COCO !!!')
        # FIX: exit(0) reported success on this error path; use a nonzero
        # status so shells/scripts can detect the failure.
        exit(1)

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device, input_size=cfg['min_dim'],
                        num_classes=num_classes, use_nms=True)
    else:
        # FIX: previously an unknown version fell through with `net`
        # unbound, producing a confusing NameError below.
        print('Unknown version !!!')
        exit(1)

    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # build the shared transform once instead of duplicating it per branch
    transform = BaseTransform(net.input_size,
                              mean=(0.406, 0.456, 0.485),
                              std=(0.225, 0.224, 0.229))

    # run the requested detection mode
    if args.mode == 'camera':
        detect(net, device, transform, thresh=args.vis_thresh,
               mode=args.mode, setup=args.setup)
    elif args.mode == 'image':
        detect(net, device, transform, thresh=args.vis_thresh,
               mode=args.mode, path_to_img=args.path_to_img,
               setup=args.setup)
    elif args.mode == 'video':
        detect(net, device, transform, thresh=args.vis_thresh,
               mode=args.mode, path_to_vid=args.path_to_vid,
               path_to_save=args.path_to_saveVid, setup=args.setup)
Example 4
0
def test():
    """Evaluate a trained CenterNet checkpoint on COCO val2017 or VOC2007 test."""
    # get device; enable the cudnn autotuner on GPU
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # load dataset
    # NOTE(review): num_classes stays 80 even for VOC (20 classes) --
    # confirm this matches the trained checkpoint's head.
    num_classes = 80
    if args.dataset == 'COCO':
        cfg = config.coco_cfg
        testset = COCODataset(
                    data_dir=args.dataset_root,
                    json_file='instances_val2017.json',
                    name='val2017',
                    img_size=cfg['min_dim'][0],
                    debug=args.debug)
    elif args.dataset == 'VOC':
        cfg = config.voc_cfg
        testset = VOCDetection(VOC_ROOT, [('2007', 'test')], None, VOCAnnotationTransform())
    else:
        # FIX: previously an unknown dataset fell through with cfg/testset
        # unbound, producing a confusing NameError below.
        print('Only support VOC and COCO !!!')
        exit(1)

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device, input_size=cfg['min_dim'], num_classes=num_classes)
    else:
        print('Unknown version !!!')
        exit(1)

    # FIX: map_location was hard-coded to 'cuda', which crashes when the
    # checkpoint is loaded on a CPU-only machine; map onto the chosen device.
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # evaluation with ImageNet-style normalization
    test_net(net, device, testset,
             BaseTransform(net.input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
             thresh=args.visual_threshold)
Example 5
0
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # input size (square: H == W == args.input_size)
    input_size = [args.input_size, args.input_size]

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device, 
                        input_size=input_size, 
                        num_classes=num_classes, 
                        backbone=args.backbone,
                        use_nms=args.use_nms)

    # load trained weights
    # NOTE(review): map_location is hard-coded to 'cuda'; this fails on
    # CPU-only machines -- consider map_location=device as the other entry
    # points do.
    net.load_state_dict(torch.load(args.trained_model, map_location='cuda'))
    net.eval()
    print('Finished loading model!')
    net = net.to(device)
    
    # evaluation (inference only, so gradient tracking is disabled)
    with torch.no_grad():
        if args.dataset == 'voc':
            voc_test(net, device, input_size)
        elif args.dataset == 'coco-val':
            coco_test(net, device, input_size, test=False)
        elif args.dataset == 'coco-test':
            coco_test(net, device, input_size, test=True)
Example 6
0
                              name='val2017',
                              img_size=input_size[0])

    # one random RGB color per class, used when drawing detections
    class_colors = [(np.random.randint(255), np.random.randint(255),
                     np.random.randint(255)) for _ in range(num_classes)]

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=input_size,
                        num_classes=num_classes,
                        conf_thresh=args.conf_thresh,
                        nms_thresh=args.nms_thresh,
                        use_nms=args.use_nms)

    # restore trained weights onto the selected device
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # evaluation over the chosen dataset
    test(net=net,
         device=device,
         testset=dataset,
         transform=BaseTransform(input_size),
         thresh=args.visual_threshold,
         class_colors=class_colors,
         class_names=class_names,
         class_indexs=class_indexs,
         dataset=args.dataset)
Example 7
0
def train():
    """Train CenterNet on VOC0712 with SGD, LR warm-up, and cos/step decay.

    Reads hyper-parameters from the CLI (``parse_args``), optionally logs
    losses to TensorBoard, and checkpoints the model every 10 epochs into
    ``args.save_folder/args.version``.
    """
    args = parse_args()

    # checkpoint directory: <save_folder>/<version>
    path_to_save = os.path.join(args.save_folder, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    cfg = voc_cfg

    # select compute device; cudnn autotuner helps with fixed input sizes
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # NOTE(review): cfg['min_dim'] is used directly as input_size; the
    # '%d' log format below assumes it is a scalar -- confirm its type.
    input_size = cfg['min_dim']
    dataset = VOCDetection(root=args.dataset_root,
                           transform=SSDAugmentation(cfg['min_dim'],
                                                     mean=(0.406, 0.456,
                                                           0.485),
                                                     std=(0.225, 0.224,
                                                          0.229)))

    # build model
    if args.version == 'centernet':
        from models.centernet import CenterNet

        net = CenterNet(device,
                        input_size=input_size,
                        num_classes=args.num_classes,
                        trainable=True)
        print('Let us train centernet on the VOC0712 dataset ......')

    else:
        print('Unknown version !!!')
        exit()

    # finetune from a COCO-trained model (strict=False tolerates head mismatch)
    if args.resume is not None:
        print('finetune COCO trained ')
        net.load_state_dict(torch.load(args.resume, map_location=device),
                            strict=False)

    # use tfboard: one run directory per launch, keyed by timestamp
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
        log_path = os.path.join('log/voc/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)

        writer = SummaryWriter(log_path)

    print(
        "----------------------------------------Object Detection--------------------------------------------"
    )
    model = net
    model.to(device)

    base_lr = args.lr
    tmp_lr = base_lr  # tracks the LR currently applied to the optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # loss counters
    print("----------------------------------------------------------")
    print("Let's train OD network !")
    print('Training on:', dataset.name)
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    epoch_size = len(dataset) // args.batch_size  # iterations per epoch
    max_epoch = cfg['max_epoch']

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    t0 = time.time()

    # start training
    for epoch in range(max_epoch):

        # cosine LR schedule between epoch 20 and (max_epoch - 20)
        if args.cos and epoch > 20 and epoch <= max_epoch - 20:
            # anneal from base_lr down to the 1e-5 floor
            tmp_lr = 0.00001 + 0.5 * (base_lr - 0.00001) * (
                1 + math.cos(math.pi * (epoch - 20) * 1. / (max_epoch - 20)))
            set_lr(optimizer, tmp_lr)

        elif args.cos and epoch > max_epoch - 20:
            # hold the floor LR for the final epochs
            tmp_lr = 0.00001
            set_lr(optimizer, tmp_lr)

        # step LR: decay by 10x at the configured milestone epochs
        else:
            if epoch in cfg['lr_epoch']:
                tmp_lr = tmp_lr * 0.1
                set_lr(optimizer, tmp_lr)

        for iter_i, (images, targets) in enumerate(data_loader):
            # WarmUp strategy for learning rate: quartic ramp up to base_lr
            if not args.no_warm_up:
                if epoch < args.wp_epoch:
                    tmp_lr = base_lr * pow((iter_i + epoch * epoch_size) * 1. /
                                           (args.wp_epoch * epoch_size), 4)
                    # tmp_lr = 1e-6 + (base_lr-1e-6) * (iter_i+epoch*epoch_size) / (epoch_size * (args.wp_epoch))
                    set_lr(optimizer, tmp_lr)

                elif epoch == args.wp_epoch and iter_i == 0:
                    # warm-up finished: restore the nominal base LR once
                    tmp_lr = base_lr
                    set_lr(optimizer, tmp_lr)

            targets = [label.tolist() for label in targets]
            # vis_data(images, targets, input_size)

            # make train label (ground-truth tensors for the CenterNet heads)
            targets = tools.gt_creator(input_size, net.stride,
                                       args.num_classes, targets)

            # vis_heatmap(targets)

            # to device
            images = images.to(device)
            targets = torch.tensor(targets).float().to(device)

            # forward and loss
            cls_loss, txty_loss, twth_loss, total_loss = model(images,
                                                               target=targets)

            # backprop and update (grads zeroed after step, before next backward)
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            # log every 10 iterations
            if iter_i % 10 == 0:
                if args.tfboard:
                    # viz loss
                    writer.add_scalar('class loss', cls_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('txty loss', txty_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('twth loss', twth_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('total loss', total_loss.item(),
                                      iter_i + epoch * epoch_size)

                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: cls %.2f || txty %.2f || twth %.2f ||total %.2f || size %d || time: %.2f]'
                    % (epoch + 1, max_epoch, iter_i, epoch_size, tmp_lr,
                       cls_loss.item(), txty_loss.item(), twth_loss.item(),
                       total_loss.item(), input_size, t1 - t0),
                    flush=True)

                t0 = time.time()

        # periodic checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                model.state_dict(),
                os.path.join(path_to_save,
                             args.version + '_' + repr(epoch + 1) + '.pth'))