Example #1
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    evaluate_detections(all_boxes, output_dir, dataset)


def evaluate_detections(box_list, output_dir, dataset):
    write_voc_results_file(box_list, dataset)
    do_python_eval(output_dir)


if __name__ == '__main__':
    # load net
    num_classes = len(labelmap) + 1  # +1 for background
    net = build_ssd('test', cfg, args.use_pred_module)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    dataset = VOCDetection(args.voc_root, [('2007', set_type)],
                           BaseTransform(cfg['min_dim'], dataset_mean),
                           VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder,
             net,
             args.cuda,
             dataset,
             # the call is truncated in the source; the remaining arguments
             # below follow the upstream ssd.pytorch eval script and are
             # assumptions, not the original code
             BaseTransform(cfg['min_dim'], dataset_mean),
             args.top_k,
             cfg['min_dim'],
             thresh=args.confidence_threshold)
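
# For reference: the `all_boxes` structure pickled at the top of this example
# follows the per-class, per-image convention of ssd.pytorch-style evaluators.
# A minimal sketch of that layout, an assumption about this fork shown only
# for clarity:
import numpy as np

num_images = len(dataset)
# all_boxes[cls][img] is an (N, 5) array of [x1, y1, x2, y2, score] for
# class `cls` in image `img`; index 0 is conventionally the background class
all_boxes = [[np.empty((0, 5), dtype=np.float32) for _ in range(num_images)]
             for _ in range(len(labelmap) + 1)]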
Example #2
args = parser.parse_args()

import cv2
import torch
from torch.autograd import Variable

from fssd512_resnext import build_ssd
from data import base_transform, cocod512, vocd512  # assumed to live in data/, as in upstream ssd.pytorch

if args.dataset == 'COCO':
    from data import COCO_CLASSES as labelmap
    num_classes = len(labelmap) + 1  # +1 for background
    cfg = cocod512
elif args.dataset == 'VOC':
    from data import VOC_CLASSES as labelmap
    num_classes = len(labelmap) + 1  # +1 for background
    cfg = vocd512

net = build_ssd('test', cfg, args.use_pred_module)
net.load_state_dict(torch.load(args.trained_model))

net.eval()

image = cv2.imread(args.input_file, cv2.IMREAD_COLOR)
image = image[:, :, (2, 1, 0)]  # reorder channels BGR -> RGB
x = base_transform(image, 512, (104.0, 117.0, 123.0))  # resize to 512x512, subtract channel means
x = torch.from_numpy(x).permute(2, 0, 1)  # HWC -> CHW

xx = Variable(x.unsqueeze(0))  # wrap tensor in Variable
if torch.cuda.is_available():
    xx = xx.cuda()
y = net(xx)

top_k = 10
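
# The raw output `y` still has to be thresholded and scaled back to pixel
# coordinates. A minimal decoding sketch, assuming the (batch, num_classes,
# top_k, 5) detection layout of ssd.pytorch-style test-phase heads, where
# each row is [score, x1, y1, x2, y2] in relative coordinates; the 0.5
# cutoff is a hypothetical choice, not taken from the original code:
detections = y.data  # shape (1, num_classes, top_k, 5)
h, w = image.shape[:2]
scale = torch.Tensor([w, h, w, h])

for cls in range(1, detections.size(1)):           # class 0 is background
    for k in range(min(top_k, detections.size(2))):
        score = detections[0, cls, k, 0]
        if score < 0.5:
            break                                  # rows are sorted by score
        box = (detections[0, cls, k, 1:].cpu() * scale).numpy()
        print(labelmap[cls - 1], float(score), box)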
Example #3
def train():
    cfg = ssd512
    dataset = DetectionDataset(os.path.join(args.dataset_root, 'train.tsv'),
                               transform=SSDAugmentation(cfg['min_dim'],
                                                         MEANS))

    ssd_net = build_ssd('train', cfg, args.use_pred_module)
    net = ssd_net
    print(net)

    if args.cuda:
        net = torch.nn.DataParallel(ssd_net)
        cudnn.benchmark = True

    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        ssd_net.load_weights(args.resume)
    else:
        resnext_weights = torch.load(args.save_folder + args.basenet)
        print('Loading base network...')
        ssd_net.resnext.load_state_dict(resnext_weights, strict=False)

    if args.cuda:
        net = net.cuda()

    if not args.resume:
        print('Initializing weights...')
        # initialize newly added layers' weights with xavier method
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # positional args extend the upstream ssd.pytorch MultiBoxLoss signature:
    # (num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining,
    #  neg_pos ratio, neg_overlap, encode_target), plus cfg, use_gpu and
    # loss_type in this fork
    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, cfg, args.cuda, loss_type=args.loss_type)

    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    print('Dataset loaded: {} samples'.format(len(dataset)))

    epoch_size = len(dataset) // args.batch_size
    print('Using the specified args:')
    print(args)

    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True,
                                  drop_last=True)

    total_samples = len(data_loader.sampler)
    batch_size = data_loader.batch_size
    steps_per_epoch = math.ceil(total_samples / batch_size)
    # print(total_samples, batch_size, steps_per_epoch)     # 16551 32 518
    # note: with drop_last=True the loader actually yields
    # total_samples // batch_size batches per epoch, one fewer than the
    # ceil above when the dataset size is not divisible by the batch size

    for epoch in range(args.start_epoch, cfg['max_epoch']):

        if epoch in cfg['lr_steps']:
            step_index += 1

        for iteration, (images, targets) in enumerate(data_loader):
            # adjust the learning rate every iteration so that the warm-up
            # (burn-in) phase can ramp smoothly within an epoch
            lr = adjust_learning_rate(optimizer, args.gamma, step_index,
                                      epoch, iteration, steps_per_epoch)
            
            if args.cuda:
                images = images.cuda()
                targets = [ann.cuda() for ann in targets]
            else:
                images = Variable(images)
                # volatile=True was removed in PyTorch 0.4 and would be wrong
                # during training anyway; plain Variables suffice here
                targets = [Variable(ann) for ann in targets]
                
            t0 = time.time()
            out = net(images)
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()
            t1 = time.time()

            if iteration % 10 == 0:
                print('iter {0:3d}/{1} || Loss: {2:6.4f} || lr: {3:.6f} || {4:.4f} sec'
                      .format(iteration, len(data_loader), loss.item(), lr, t1 - t0))
            
        print('Saving state, epoch:', epoch)
        torch.save(ssd_net.state_dict(),
                   args.save_folder + args.weight_prefix + repr(epoch) + '.pth')
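
# Neither `weights_init` nor `adjust_learning_rate` appears in this snippet.
# The sketches below are consistent with how they are called above, but they
# are assumptions, not the original implementations: `weights_init` mirrors
# the upstream ssd.pytorch Xavier initializer, and `burnin_epochs` is a
# hypothetical knob for the warm-up (burn-in) mentioned in the loop comment.
# Both rely on the module-level `args`, like the rest of the snippet.
import torch.nn as nn
import torch.nn.init as init


def weights_init(m):
    # Xavier-initialize the conv weights of the newly added loc/conf layers
    if isinstance(m, nn.Conv2d):
        init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()


def adjust_learning_rate(optimizer, gamma, step_index,
                         epoch, iteration, steps_per_epoch):
    burnin_epochs = 1
    if epoch < burnin_epochs:
        # linear warm-up from 0 to args.lr over the burn-in period
        progress = (epoch * steps_per_epoch + iteration) / (burnin_epochs * steps_per_epoch)
        lr = args.lr * progress
    else:
        # step decay: multiply by gamma at each configured lr step
        lr = args.lr * (gamma ** step_index)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr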