# Example no. 1
def main():
    """Train the face model end to end, optionally resuming from a checkpoint.

    Relies on module-level globals defined elsewhere in this file:
    ``args``, ``train_dir``, ``train_loader``, ``LOG_DIR``, ``FaceModel``,
    ``inceptionresnet_v1``, ``create_optimizer``, ``train`` and
    ``display_triplet_distance``.
    """
    # Views the training images and displays the distance on
    # anchor-negative and anchor-positive pairs after each epoch.
    test_display_triplet_distance = True

    # Print the experiment configuration.
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # Instantiate the model and initialize its weights from scratch.
    model = FaceModel(inceptionresnet_v1,
                      embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            # Fix: the checkpoint was previously deserialized from disk
            # twice; load it once and reuse it for both epoch and weights.
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    # Fix: torch.nn.parallel.data_parallel() is the functional forward-pass
    # API and requires `inputs`; calling it with only the module fails.
    # Wrap the model in DataParallel so `para_model` is a callable module
    # that train() can invoke.
    para_model = torch.nn.DataParallel(model)
    for epoch in range(start, end):
        train(train_loader, para_model, optimizer, epoch)
        # test(test_loader, model, epoch)
        # Checkpoint the underlying model's weights (not the wrapper's,
        # which would prefix every key with "module.").
        torch.save({
            'epoch': epoch + 1,
            'state_dict': model.state_dict()
        }, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
# Example no. 2
        if batch_idx % args.log_interval == 0:
            pbar.set_description(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t'
                'Train Prec@1 {:.2f} ({:.2f})'.format(
                    epoch, batch_idx * len(data_v), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.data[0],float(top1.val[0]), float(top1.avg[0])))



    logger.log_value('Train Prec@1 ',float(top1.avg[0]))

    # do checkpointing
    torch.save({'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'centers': model.centers},
               '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))


def test(test_loader, model, epoch):
    # switch to evaluate mode
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        data_a, data_p, label = Variable(data_a, volatile=True), \