Example #1
def main():
    # Visualize the training images and display the anchor-positive and anchor-negative distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(inceptionresnet_v1,
                      embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    # wrap the model for multi-GPU training
    para_model = torch.nn.DataParallel(model)
    for epoch in range(start, end):
        train(train_loader, para_model, optimizer, epoch)
        # test(test_loader, model, epoch)
        # do checkpointing
        torch.save({
            'epoch': epoch + 1,
            'state_dict': model.state_dict()
        }, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
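
Note: create_optimizer is called in Examples #1–#3 but its body is not shown on this page. Below is a minimal sketch of such a helper, assuming args carries optimizer, wd and lr_decay fields (these names are illustrative assumptions, not the original repository's definition):

import torch.optim as optim

def create_optimizer(model, new_lr):
    # Build an optimizer over all model parameters; the optimizer choice
    # and hyperparameters below are illustrative assumptions.
    if args.optimizer == 'sgd':
        return optim.SGD(model.parameters(), lr=new_lr,
                         momentum=0.9, weight_decay=args.wd)
    elif args.optimizer == 'adam':
        return optim.Adam(model.parameters(), lr=new_lr,
                          weight_decay=args.wd)
    else:
        return optim.Adagrad(model.parameters(), lr=new_lr,
                             lr_decay=args.lr_decay,
                             weight_decay=args.wd)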
Example #2
def main():
    # Visualize the training images and display the anchor-positive and anchor-negative distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
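
The train routine used by these examples is not part of the snippets. A minimal sketch of a triplet-loss training epoch, assuming the loader yields (anchor, positive, negative) image batches and that the margin value is an illustrative hyperparameter:

import torch.nn.functional as F

def train(train_loader, model, optimizer, epoch, margin=0.5):
    # One epoch of triplet training; the batch layout and margin are
    # assumptions for illustration, not the original implementation.
    model.train()
    for batch_idx, (anc, pos, neg) in enumerate(train_loader):
        if args.cuda:
            anc, pos, neg = anc.cuda(), pos.cuda(), neg.cuda()
        loss = F.triplet_margin_loss(model(anc), model(pos), model(neg),
                                     margin=margin)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('epoch {} batch {}: triplet loss {:.4f}'.format(
                epoch, batch_idx, loss.item()))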
Example #3
def main():
    test_display_triplet_distance = True

    # optionally resume from a checkpoint
    '''
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
        else:
            checkpoint = None
            print('=> no checkpoint found at {}'.format(args.resume))
    '''
    model = FaceModel(embedding_size=args.embedding_size, num_classes=len(train_dir.classes))

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
        test(test_loader, model, epoch)
        testaccuracy(testaccuracy_loader, model, epoch)
        if test_display_triplet_distance:
            display_triplet_distance_test(model, test_loader, LOG_DIR + "/test_{}".format(epoch))

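display_triplet_distance and display_triplet_distance_test are called in Examples #1 and #3 but never defined here. A rough diagnostic sketch, assuming the loader yields (anchor, positive, negative) triplets and that the path argument is used only as a label for the printout:

import torch
import torch.nn.functional as F

def display_triplet_distance_test(model, loader, log_prefix):
    # Report the mean anchor-positive and anchor-negative embedding
    # distances on a single batch; a sketch, not the original code.
    model.eval()
    with torch.no_grad():
        anc, pos, neg = next(iter(loader))
        if next(model.parameters()).is_cuda:
            anc, pos, neg = anc.cuda(), pos.cuda(), neg.cuda()
        d_ap = F.pairwise_distance(model(anc), model(pos)).mean().item()
        d_an = F.pairwise_distance(model(anc), model(neg)).mean().item()
    print('{}: mean d(a, p) = {:.4f}, mean d(a, n) = {:.4f}'.format(
        log_prefix, d_ap, d_an))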

Example #4
def main():
    args = parser.parse_args()

    cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed(args.seed)

    # 1. dataset
    root = args.root
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    test_transforms = transforms.Compose([
        transforms.Resize(96),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    test_dataset = ImageFolder(root, transform=test_transforms)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    val_iterator = validation_iterator(test_loader)

    # 2. model
    #train_dir = FaceDataset(dir='/media/lior/LinuxHDD/datasets/MSCeleb-cleaned',n_triplets=10)

    print('construct model')
    model = FaceModel(embedding_size=128, num_classes=3367, pretrained=False)

    if cuda:
        model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)

            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # extract feature
    print('extracting feature')
    embeds = []
    labels = []
    for data, target in val_iterator:
        if cuda:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
        # compute the embedding without tracking gradients
        with torch.no_grad():
            output = model(data)

        embeds.append(output.cpu().numpy())
        labels.append(target.cpu().numpy())

    embeds = np.vstack(embeds)
    labels = np.hstack(labels)

    print('embeds shape is ', embeds.shape)
    print('labels shape is ', labels.shape)

    # prepare dict for display
    namedict = dict()
    for i in range(10):
        namedict[i] = str(i)

    visual_feature_space(embeds, labels, len(test_dataset.classes), namedict)
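
Two helpers used above, validation_iterator and visual_feature_space, are not shown. A minimal sketch of validation_iterator, assuming it simply yields batches from the loader (the optional max_batches cap is an illustrative addition):

def validation_iterator(dataloader, max_batches=None):
    # Yield (data, target) batches, optionally stopping after max_batches.
    for i, batch in enumerate(dataloader):
        if max_batches is not None and i >= max_batches:
            break
        yield batch

And a sketch of visual_feature_space, assuming the visualisation is a 2-D t-SNE projection of the embeddings coloured by class (the projection method and output file are assumptions):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def visual_feature_space(embeds, labels, num_classes, namedict):
    # Project embeddings to 2-D and scatter-plot them per class.
    emb2d = TSNE(n_components=2).fit_transform(embeds)
    plt.figure(figsize=(8, 8))
    for c in range(num_classes):
        mask = labels == c
        if not np.any(mask):
            continue
        plt.scatter(emb2d[mask, 0], emb2d[mask, 1], s=8,
                    label=namedict.get(c, str(c)))
    plt.legend(markerscale=2, fontsize='small')
    plt.savefig('feature_space.png')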