Beispiel #1
0
def train(aug, config, savemodel=False, model="model"):
    """Train a SiameseNet with contrastive loss on LFW image pairs.

    Args:
        aug: if truthy, prepend the module-level `augmentation` transform
            to the preprocessing pipeline.
        config: object providing `batch_size`, `margin`, and `epochs`.
        savemodel: if True, save the final weights to path `model`.
        model: base filename for periodic checkpoints and the final save.

    Side effects: writes `<model>_epoch_<k>.w` every 5 epochs, saves the
    loss curve to p1b.png via `show_plot`, and prints the running loss.
    """
    net = SiameseNet(p1b=True).cuda()

    # Build the pipeline once instead of duplicating it in both branches.
    # NOTE: transforms.Scale was a deprecated alias of Resize and was
    # removed in torchvision >= 0.9.
    steps = [transforms.Resize((128, 128)), transforms.ToTensor()]
    if aug:
        steps.insert(0, augmentation)
    trainset = LFWDataset(train=True, transform=transforms.Compose(steps))
    trainloader = DataLoader(trainset,
                             batch_size=config.batch_size,
                             shuffle=True,
                             num_workers=0)

    loss_fcn = ContrastiveLoss(margin=config.margin)
    learning_rate = 5e-6
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)

    counter = []        # x-axis (iteration count) for the loss plot
    loss_history = []   # logged loss values, one every 10 batches
    iteration_number = 0

    for epoch in range(config.epochs):
        for i, (img0, img1, label) in enumerate(trainloader):
            # Variable is deprecated since torch 0.4; tensors track
            # gradients directly.
            img0, img1 = img0.cuda(), img1.cuda()
            # (N,) labels -> (N, 1) float, as ContrastiveLoss expects here.
            label = label.cuda().unsqueeze(1).float()
            output1, output2 = net(img0, img1)

            optimizer.zero_grad()
            loss = loss_fcn(output1, output2, label)
            loss.backward()
            optimizer.step()
            if i % 10 == 0:
                # loss.data[0] raises IndexError on 0-dim tensors in
                # torch >= 0.5; .item() is the supported scalar accessor.
                print("Epoch number {}\n Current loss {}\n".format(
                    epoch, loss.item()))
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss.item())

        # Periodic checkpoint (includes epoch 0).
        if epoch % 5 == 0:
            torch.save(net.state_dict(), model + "_epoch_" + str(epoch) + '.w')

    # Plot/save the loss curve.
    show_plot(counter, loss_history, filename='p1b.png', save=True)

    if savemodel:
        torch.save(net.state_dict(), model)
        print("Model saved as: " + model)
Beispiel #2
0
# Preprocessing: scale the short side to 96 px, convert to tensor, and
# normalize each RGB channel to [-1, 1] (mean/std 0.5).
# NOTE(review): `Scale` is the pre-0.9 torchvision name for Resize.
transform = transforms.Compose([
    Scale(96),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

# Training data: anchor/positive/negative face triplets sampled from the
# dataset rooted at args.dataroot.
train_dir = TripletFaceDataset(dir=args.dataroot,
                               n_triplets=args.n_triplets,
                               transform=transform)
train_loader = torch.utils.data.DataLoader(train_dir,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           **kwargs)

# Evaluation data: LFW same/different verification pairs.
test_dir = LFWDataset(dir=args.lfw_dir,
                      pairs_path=args.lfw_pairs_path,
                      transform=transform)
test_loader = torch.utils.data.DataLoader(test_dir,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          **kwargs)
# Record split sizes for later reporting.
data_size = dict()
data_size['train'] = len(train_dir)
data_size['test'] = len(test_dir)

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main():
    # Views the training images and displays the distance on anchor-negative and anchor-positive
    test_display_triplet_distance = True
Beispiel #3
0
# Preprocessing: scale the short side to 96 px, convert to tensor, and
# normalize each RGB channel to [-1, 1] (mean/std 0.5).
# NOTE(review): `Scale` is the pre-0.9 torchvision name for Resize.
transform = transforms.Compose([
    Scale(96),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

# Training data: anchor/positive/negative face triplets sampled from the
# dataset rooted at args.dataroot.
train_dir = TripletFaceDataset(dir=args.dataroot,
                               n_triplets=args.n_triplets,
                               transform=transform)
train_loader = torch.utils.data.DataLoader(train_dir,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           **kwargs)

# Evaluation data: LFW same/different verification pairs.
test_loader = torch.utils.data.DataLoader(LFWDataset(
    dir=args.lfw_dir, pairs_path=args.lfw_pairs_path, transform=transform),
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          **kwargs)


def main():
    # Views the training images and displays the distance on anchor-negative and anchor-positive
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
Beispiel #4
0
def main():
    """Parse options, build the contrastive-convolution models, and run
    the LFW evaluation / checkpointing loop.

    Wires together the base CNN, the kernel generator (GenModel) and two
    regressors, optionally loads pretrained LightCNN-4 weights and/or a
    training checkpoint, then per iteration: adjusts the learning rate,
    resamples the CASIA pair dataset, evaluates on LFW, appends the
    accuracy to LFW_performance.txt, and checkpoints every 10000 iters.
    """
    # ---------------- training settings ----------------
    parser = argparse.ArgumentParser(description='PyTorch Contrastive Convolution for FR')
    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--epochs', type=int, default=80, metavar='N',
                        help='number of epochs to train (default: 80)')
    parser.add_argument('--iters', type=int, default=200000, metavar='N',
                        help='number of iterations to train (default: 200000)')
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--pretrained', default=False, type=bool,
                        metavar='N', help='use pretrained lightcnn model: True / False (no pretrained model)')
    parser.add_argument('--basemodel', default='ContrastiveCNN-4', type=str, metavar='BaseModel',
                        help='model type: ContrastiveCNN-4 LightCNN-4 LightCNN-9, LightCNN-29, LightCNN-29v2')
    parser.add_argument('--save_path', default='', type=str, metavar='PATH',
                        help='path to save checkpoint (default: none)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')

    # ---------------- LFW testing settings ----------------
    # Path of the (aligned) LFW dataset.
    parser.add_argument('--lfw-dir', type=str, default='/data2/Saurav/DB/lfw_mtcnnpy_256/',
                        help='path to dataset')
    parser.add_argument('--lfw_pairs_path', type=str, default='lfw_pairs.txt',
                        help='path to pairs file')
    parser.add_argument('--test_batch_size', type=int, default=128, metavar='BST',
                        help='input batch size for testing (default: 128)')
    parser.add_argument('--compute_contrastive', default=True, type=bool,
                        metavar='N', help='use contrastive features or base model features: True / False')
    # NOTE(review): this shares dest `log_interval` with --log-interval
    # above, so this default (10) silently wins; kept for CLI
    # compatibility -- confirm which one callers actually use.
    parser.add_argument('--log_interval', type=int, default=10,
                        help='how many batches to wait before logging training status')

    # ---------------- CASIA training settings ----------------
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    # Path of the CASIA dataset.
    parser.add_argument('--root_path', default='/data/Saurav/DB/CASIAaligned/', type=str, metavar='PATH',
                        help='path to root path of images (default: none)')
    parser.add_argument('--num_classes', default=10574, type=int,
                        metavar='N', help='number of classes (default: 10574)')

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    #torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # LFW evaluation preprocessing: single-channel 128 px tensors.
    test_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize(128),
        transforms.ToTensor()])

    test_loader = torch.utils.data.DataLoader(
        LFWDataset(dir=args.lfw_dir, pairs_path=args.lfw_pairs_path,
                   transform=test_transform),
        batch_size=args.test_batch_size, shuffle=False, **kwargs)

    # Training augmentation; Resize added only for vggface2 as images are
    # of size 256x256.
    transform = transforms.Compose([
        transforms.Resize(128),
        #transforms.CenterCrop(128),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()])

    if args.basemodel == 'ContrastiveCNN-4':
        basemodel = Contrastive_4Layers(num_classes=args.num_classes)
        print('4 layer model')
    else:
        # Only the 4-layer contrastive model is wired up here; fail loudly
        # (assert would be stripped under `python -O`).
        raise ValueError('Model not found so exiting.')

    if args.pretrained is True:
        print('Loading pretrained model')

        # Load LightCNN-4 weights on CPU regardless of where they were saved.
        pre_trained_dict = torch.load('./LightenedCNN_4_torch.pth', map_location=lambda storage, loc: storage)

        model_dict = basemodel.state_dict()
        basemodel = basemodel.to(device)  # lightcnn model
        # Remap the sequential LightCNN-4 layer names onto this model's
        # named modules (only valid for the 4-layer variant).
        pre_trained_dict['features.0.filter.weight'] = pre_trained_dict.pop('0.weight')
        pre_trained_dict['features.0.filter.bias'] = pre_trained_dict.pop('0.bias')
        pre_trained_dict['features.2.filter.weight'] = pre_trained_dict.pop('2.weight')
        pre_trained_dict['features.2.filter.bias'] = pre_trained_dict.pop('2.bias')
        pre_trained_dict['features.4.filter.weight'] = pre_trained_dict.pop('4.weight')
        pre_trained_dict['features.4.filter.bias'] = pre_trained_dict.pop('4.bias')
        pre_trained_dict['features.6.filter.weight'] = pre_trained_dict.pop('6.weight')
        pre_trained_dict['features.6.filter.bias'] = pre_trained_dict.pop('6.bias')
        pre_trained_dict['fc1.filter.weight'] = pre_trained_dict.pop('9.1.weight')
        pre_trained_dict['fc1.filter.bias'] = pre_trained_dict.pop('9.1.bias')
        pre_trained_dict['fc2.weight'] = pre_trained_dict.pop('12.1.weight')
        pre_trained_dict['fc2.bias'] = pre_trained_dict.pop('12.1.bias')
        # Drop the classifier head (fc2) so class counts may differ.
        my_dict = {k: v for k, v in pre_trained_dict.items() if ("fc2" not in k)}
        model_dict.update(my_dict)

        basemodel.load_state_dict(model_dict, strict=False)

    basemodel = basemodel.to(device)

    genmodel = GenModel(512).to(device)  # kernel generator

    reg_model = Regressor(686).to(device)  # contrastive convolution output for binary regression

    idreg_model = Identity_Regressor(14 * 512 * 3 * 3, args.num_classes).to(device)  # kernel output for identity recognition
    params = (list(basemodel.parameters()) + list(genmodel.parameters())
              + list(reg_model.parameters()) + list(idreg_model.parameters()))

    optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum)

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['iterno']
            genmodel.load_state_dict(checkpoint['state_dict1'])
            basemodel.load_state_dict(checkpoint['state_dict2'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('Test acc at checkpoint was:', checkpoint['testacc'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    criterion2 = nn.CrossEntropyLoss().to(device)  # kernel loss: identification loss
    criterion1 = nn.BCELoss().to(device)  # similarity loss

    print('Device being used is :' + str(device))

    for iterno in range(args.start_epoch + 1, args.iters + 1):

        adjust_learning_rate(optimizer, iterno)

        # Resample CASIA pairs each iteration.
        traindataset = CasiaFaceDataset(noofpairs=args.batch_size, transform=transform, is_train=True)

        train_loader = torch.utils.data.DataLoader(traindataset, batch_size=args.batch_size, shuffle=True, **kwargs)

        #train(args, basemodel, idreg_model, genmodel, reg_model, device, train_loader, optimizer, criterion2, criterion1,iterno)
        #if iterno > 0 and iterno%1000==0:
        testacc = test(test_loader, basemodel, genmodel, reg_model, iterno, device, args)
        # Append accuracy so progress survives crashes/restarts; `with`
        # guarantees the handle is closed even if the write raises.
        with open('LFW_performance.txt', 'a') as f:
            f.write('\n' + str(iterno) + ': ' + str(testacc * 100))
        print('Test accuracy: {:.4f}'.format(testacc * 100))

        if iterno > 0 and iterno % 10000 == 0:
            save_name = args.save_path + 'base_gen_model' + str(iterno) + '_checkpoint.pth.tar'
            save_checkpoint({'iterno': iterno,
                             'state_dict1': genmodel.state_dict(),
                             'state_dict2': basemodel.state_dict(),
                             'optimizer': optimizer.state_dict(),
                             'testacc': testacc}, save_name)
# DataLoader options: extra workers + pinned memory only when CUDA is used.
kwargs = {'num_workers': 2, 'pin_memory': True} if cuda else {}
# Euclidean (p=2) distance between embedding pairs.
l2_dist = PairwiseDistance(2)

# Preprocessing: scale to 96 px, random horizontal flip for augmentation,
# then normalize each channel to [-1, 1].
# NOTE(review): transforms.Scale is the pre-0.9 torchvision name for Resize.
transform = transforms.Compose([
                         transforms.Scale(96),
                         transforms.RandomHorizontalFlip(),
                         transforms.ToTensor(),
                         transforms.Normalize(mean = [ 0.5, 0.5, 0.5 ],
                                               std = [ 0.5, 0.5, 0.5 ])
                     ])

# Training data: one class-labelled folder per identity under `dataroot`.
train_dir = ImageFolder(dataroot,transform=transform)
train_loader = torch.utils.data.DataLoader(train_dir,
    batch_size=batch_size, shuffle=True, **kwargs)
# Evaluation data: LFW same/different verification pairs (order preserved).
test_loader = torch.utils.data.DataLoader(
    LFWDataset(dir=lfw_dir,pairs_path=lfw_pairs_path,
                     transform=transform),
    batch_size=batch_size, shuffle=False, **kwargs)
def main_loop():
    """Entry point stub for the training experiment.

    Currently only sets the flag that controls whether training images are
    shown with their anchor-positive / anchor-negative distances; printing
    the configuration and instantiating the model are still to be done.
    """
    # View training triplets with their pairwise embedding distances.
    test_display_triplet_distance = True
    # TODO: print parsed options / class count and build the model here.

Beispiel #6
0
def test(
    loadmodel=False,
    model="model",
):
    """Report binary-verification accuracy of a SiameseNet on LFW.

    Args:
        loadmodel: if True, load weights from path `model` before evaluating.
        model: path of the saved state dict to load.

    Side effects: prints the accuracy on the train and test splits.
    """
    net = SiameseNet(p1a=True).cuda()

    # Shared preprocessing for both splits.
    # NOTE: transforms.Scale was renamed Resize (removed in torchvision >= 0.9).
    transform = transforms.Compose([
        transforms.Resize((128, 128)),
        transforms.ToTensor()
    ])
    trainloader = DataLoader(LFWDataset(train=True, transform=transform),
                             batch_size=Config.train_batch_size,
                             shuffle=True,
                             num_workers=2)
    testloader = DataLoader(LFWDataset(test=True, transform=transform),
                            batch_size=Config.train_batch_size,
                            shuffle=True,
                            num_workers=2)

    if loadmodel:
        net.load_state_dict(torch.load(model))
        print("Loaded model: " + model)

    # Accuracy on Train Set
    trainacc = _binary_accuracy(net, trainloader)
    print("Accuracy on train set: {:.2f}".format(trainacc))

    # Accuracy on Test Set
    testacc = _binary_accuracy(net, testloader)
    print("Accuracy on test set: {:.2f}".format(testacc))


def _binary_accuracy(net, loader):
    """Fraction of pairs where thresholding `net`'s output at 0.5 matches
    the 0/1 label.  Extracted from the two identical loops in `test`."""
    right = wrong = 0.
    for img0, img1, label in loader:
        # Variable is deprecated since torch 0.4; move tensors to GPU directly.
        img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()

        output = net(img0, img1)
        for score, target in zip(output, label):
            # .data[0] fails on 0-dim tensors in torch >= 0.5; use .item().
            predicted = 1 if score.item() > 0.5 else 0
            if predicted == int(target.item()):
                right += 1
            else:
                wrong += 1
    return right / (right + wrong)
Beispiel #7
0
    plt.show()


if __name__ == "__main__":
    # Set the file path
    lfw_lab_dir = "./"
    lfw_image_dir = os.path.join(lfw_lab_dir, "lfw/")
    lfw_test_path = os.path.join(lfw_lab_dir, "LFW_annotation_test.txt")
    # Split the training annotations 80/20 into train and validation files.
    lfw_train_path, lfw_valid_path = split_txt(os.path.join(
        lfw_lab_dir, "LFW_annotation_train.txt"),
                                               ratio=0.8)
    lfw_lab_results_dir = "./results/"  # for the results

    # Define Train Dataset/loader; n_augmented presumably adds 3 augmented
    # copies per sample -- confirm against LFWDataset.
    lfw_train_dataset = LFWDataset(lfw_image_dir,
                                   lfw_train_path,
                                   n_augmented=3)  # n_augmented=3
    lfw_train_loader = DataLoader(lfw_train_dataset,
                                  batch_size=32,
                                  shuffle=True,
                                  num_workers=6)
    print('Total lfw training items: ', len(lfw_train_dataset))
    print('Total lfw Training Batches size in one epoch: ',
          len(lfw_train_loader))

    # Define Valid Dataset/loader (no augmentation on the validation split).
    lfw_valid_dataset = LFWDataset(lfw_image_dir, lfw_valid_path)
    lfw_valid_loader = DataLoader(lfw_valid_dataset,
                                  batch_size=32,
                                  shuffle=True,
                                  num_workers=6)