Example #1
def train():
    net.train()
    # running loss counters, accumulated over the current epoch
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    print('Loading Dataset...')

    dataset = VHRDetection(args.vhr_root, train_sets,
                           SSDAugmentation(stdn_dim, means),
                           AnnotationTransform_VHR())

    epoch_size = len(dataset) // args.batch_size
    print('Training SSD on', dataset.name)
    step_index = 0
    if args.visdom:
        # initialize visdom loss plot
        lot = viz.line(X=torch.zeros((1, )).cpu(),
                       Y=torch.zeros((1, 3)).cpu(),
                       opts=dict(xlabel='Iteration',
                                 ylabel='Loss',
                                 title='Current SSD Training Loss',
                                 legend=['Loc Loss', 'Conf Loss', 'Loss']))
        epoch_lot = viz.line(X=torch.zeros((1, )).cpu(),
                             Y=torch.zeros((1, 3)).cpu(),
                             opts=dict(
                                 xlabel='Epoch',
                                 ylabel='Loss',
                                 title='Epoch SSD Training Loss',
                                 legend=['Loc Loss', 'Conf Loss', 'Loss']))
    batch_iterator = None
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate_VHR,
                                  pin_memory=True)
    for iteration in range(args.start_iter, max_iter):
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(data_loader)
        if iteration in stepvalues:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)
            if args.visdom:
                viz.line(
                    X=torch.ones((1, 3)).cpu() * epoch,
                    Y=torch.Tensor([loc_loss, conf_loss, loc_loss + conf_loss
                                    ]).unsqueeze(0).cpu() / epoch_size,
                    win=epoch_lot,
                    update='append')
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        # load train data
        images, targets = next(batch_iterator)

        if args.cuda:
            images = images.cuda()
            # ground-truth boxes/labels need no gradients; the Variable/volatile
            # wrappers are deprecated since PyTorch 0.4, so plain tensors suffice
            targets = [anno.cuda() for anno in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        if iteration % 10 == 0:
            print('Timer: %.4f sec.' % (t1 - t0))
            print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % loss.item(),
                  end=' ')
            if args.visdom and args.send_images_to_visdom:
                random_batch_index = np.random.randint(images.size(0))
                viz.image(images.data[random_batch_index].cpu().numpy())
        if args.visdom:
            viz.line(X=torch.ones((1, 3)).cpu() * iteration,
                     Y=torch.Tensor([
                         loss_l.item(), loss_c.item(),
                         loss_l.item() + loss_c.item()
                     ]).unsqueeze(0).cpu(),
                     win=lot,
                     update='append')
            # hacky fencepost solution for 0th epoch plot
            if iteration == 0:
                viz.line(X=torch.zeros((1, 3)).cpu(),
                         Y=torch.Tensor(
                             [loc_loss, conf_loss,
                              loc_loss + conf_loss]).unsqueeze(0).cpu(),
                         win=epoch_lot,
                         update='replace')
        if iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            torch.save(msc_net.state_dict(),
                       'weights/msc512_vhr_' + repr(iteration) + '.pth')
    torch.save(msc_net.state_dict(),
               args.save_folder + args.version + '.pth')
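
The training loop above calls adjust_learning_rate(optimizer, args.gamma, step_index), which is defined elsewhere in the script. A minimal sketch, assuming the usual step-decay rule of ssd.pytorch-style trainers and an initial rate stored in args.lr (both assumptions, not shown in the snippet):

def adjust_learning_rate(optimizer, gamma, step):
    # sketch: decay the base rate by gamma each time a milestone in stepvalues
    # is reached, i.e. lr = args.lr * (gamma ** step); the original may differ
    lr = args.lr * (gamma ** step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

With args.gamma set to 0.1 this produces the familiar 10x learning-rate drops at each entry of stepvalues.
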
Example #2
                coords = (pt[0], pt[1], pt[2], pt[3])
                pred_num += 1
                with open(filename, mode='a') as f:
                    f.write(
                        str(pred_num) + ' label: ' + label_name + ' score: ' +
                        str(score) + ' ' + ' || '.join(str(c)
                                                       for c in coords) + '\n')
                j += 1


if __name__ == '__main__':
    # load net
    num_classes = len(VHR_CLASSES) + 1  # +1 background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VHRDetection(args.vhr_root, ['test2'], None,
                           AnnotationTransform_VHR())
    if args.cuda:
        net = net.cuda()
    cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder,
             net,
             args.cuda,
             testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold)
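
Both test scripts hand test_net a BaseTransform(net.size, (104, 117, 123)) for preprocessing. Its definition is not part of the snippet; a minimal sketch, assuming the common resize-plus-mean-subtraction transform used with SSD (the repository's own class may also reorder channels or rescale pixel values):

import cv2
import numpy as np


class BaseTransform:
    # sketch: resize to a square `size` and subtract the per-channel BGR mean
    def __init__(self, size, mean):
        self.size = size
        self.mean = np.array(mean, dtype=np.float32)

    def __call__(self, image, boxes=None, labels=None):
        x = cv2.resize(image, (self.size, self.size)).astype(np.float32)
        x -= self.mean
        return x, boxes, labels
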
Example #3
                if pred_num == 0:
                    with open(filename, mode='a') as f:
                        f.write('PREDICTIONS: ' + '\n')
                score = detections[0, i, j, 0]
                label_name = labelmap[i - 1]
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1], pt[2], pt[3])
                pred_num += 1
                with open(filename, mode='a') as f:
                    f.write(str(pred_num) + ' label: ' + label_name + ' score: ' +
                            str(score) + ' ' + ' || '.join(str(c) for c in coords) + '\n')
                j += 1


if __name__ == '__main__':
    # load net
    num_classes = len(VHR_CLASSES) + 1  # +1 background
    net = build_ssd('test', 512, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VHRDetection(args.vhr_root, ['test'], None, AnnotationTransform_VHR())
    if args.cuda:
        net = net.cuda()
    cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold)
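
The indented fragments in Examples #2 and #3 come from inside test_net, whose outer loops are not shown. A minimal sketch of the typical structure, assuming the standard per-image, per-class detection walk of SSD demo scripts (names such as img, scale and the output filename are illustrative assumptions, not taken from this repository):

import torch


def test_net(save_folder, net, cuda, testset, transform, thresh):
    # sketch: run the net on every test image, then walk its detections per class
    filename = save_folder + 'test_results.txt'
    for idx in range(len(testset)):
        img = testset.pull_image(idx)
        x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1).unsqueeze(0)
        if cuda:
            x = x.cuda()
        with torch.no_grad():
            detections = net(x).data  # shape: (1, num_classes, top_k, 5)
        # scale normalized box coordinates back to pixels
        scale = torch.Tensor([img.shape[1], img.shape[0],
                              img.shape[1], img.shape[0]])
        pred_num = 0
        for i in range(1, detections.size(1)):  # class 0 is background
            j = 0
            while j < detections.size(2) and detections[0, i, j, 0] >= thresh:
                # the per-detection file write shown in Examples #2 and #3
                # belongs here; it ends by advancing j
                j += 1
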