Example #1
def main():
    print(os.environ['PATH'])
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
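    # reusable single-image batch: one grayscale 128x128 image, overwritten on every iteration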
    input = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        # print(img.shape)
        img = cv2.resize(img, (128, 128))

        # img2 = np.resize(img, (128, 128))
        # imgtmp = Image.fromarray(img2)
        # imgtmp.save("hey_{}.jpg".format(count))

        # cv2.imwrite("resized_{}.jpg".format(count), img)

        # matplotlib.image.imsave("name_{}".format(count),img)

        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
        input_var = torch.autograd.Variable(input, volatile=True)
        _, features = model(input_var)
        # print("\n")
        # print(type(features.data.cpu().numpy()[0][0]))
        # print("\n")
        end = time.time() - start
        print("{}({}/{}). Time: {}".format(
            os.path.join(args.root_path, img_name), count, len(img_list), end))
        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
Example #2
def excute():
    global args
    args = parser.parse_args()

    # print(args)
    # exit()
    if args.root_path == '':
        #args.root_path = '/media/zli33/DATA/study/AdvCompVision/Project/Implementation/mtcnn-pytorch-master/NIR-VIS-2.0'
        args.root_path = '/brazos/kakadiaris/Datasets/CASIA-NIR-VIS-2-0/NIR-VIS-2.0'
    if args.resume == '':
        args.resume = 'LightCNN_9Layers_checkpoint.pth.tar'
    if args.protocols == '':
        args.protocols = 'protocols'

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
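        # override num_classes to match the 80013-identity LightCNN-29v2 checkpoint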
        args.num_classes = 80013
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # print('OK')

    gallery_file_list = 'vis_gallery_*.txt'
    probe_file_list = 'nir_probe_*.txt'
    import glob2

    gallery_file_list = glob2.glob(args.root_path + '/' + args.protocols +
                                   '/' + gallery_file_list)
    probe_file_list = glob2.glob(args.root_path + '/' + args.protocols + '/' +
                                 probe_file_list)
    # remove *_dev.txt file in both list
    gallery_file_list = sorted(gallery_file_list)[0:-1]
    probe_file_list = sorted(probe_file_list)[0:-1]

    avg_r_a, std_r_a, avg_v_a, std_v_a = load(model, args.root_path,
                                              gallery_file_list,
                                              probe_file_list)
    return avg_r_a, std_r_a, avg_v_a, std_v_a
Example #3
def create_model(end2end=True):
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes, end2end=end2end)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes,
                                     end2end=end2end)
    else:
        print('Error model type\n')
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()
    # print(model)
    return model
Example #4
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (128, 128))
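        # add an explicit channel axis so ToTensor yields a 1x128x128 tensor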
        img = np.reshape(img, (128, 128, 1))
        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
        input_var = torch.autograd.Variable(input, volatile=True)
        outArray, features = model(input_var)
        print(outArray.shape)
        print("Max value:", np.max(outArray.data.cpu().numpy()[0]))
        print("Max index:", np.argmax(outArray.data.cpu().numpy()[0]))
        end = time.time() - start
        print("{}({}/{}). Time: {}".format(
            os.path.join(args.root_path, img_name), count, len(img_list), end))
        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
        np.save(args.save_path + img_name, features.data.cpu().numpy()[0])
        np.save(args.save_path + img_name + ".fc",
                outArray.data.cpu().numpy()[0])
Example #5
def main():
    global args
    args = parser.parse_args()
    model = LightCNN_9Layers(num_classes=args.num_classes)

    if args.cuda:
        model = model.cuda()

    print(model)

    #optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #load image
    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.train_list, 
            transform=transforms.Compose([ 
                transforms.RandomCrop((202, 162)),
                transforms.RandomHorizontalFlip(), 
                transforms.ToTensor(),
            ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.val_list, 
            transform=transforms.Compose([ 
                transforms.CenterCrop((202, 162)),
                transforms.ToTensor(),
            ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)   

    # define loss function and optimizer
    #criterion = nn.CrossEntropyLoss()
    #criterion = nn.L1Loss()
    #criterion = nn.MSELoss()
    criterion = CombineLoss()
    if args.cuda:
        criterion.cuda()

    maxloss = 0
    validloss = validate(val_loader, model, criterion)
    maxloss = validloss
    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        validloss = validate(val_loader, model, criterion)

        # only save a checkpoint when the validation loss improves
        if validloss < maxloss:
            maxloss = validloss
            save_name = args.save_path + 'lightCNN_' + str(epoch+1) + '_checkpoint.pth.tar'
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
            }, save_name)
Example #6
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-4':
        model = LightCNN_4Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            if args.model == 'LightCNN-4':
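                # remap the flat numeric keys of the converted checkpoint (e.g. '0.weight')
                # to the LightCNN-4 module names (e.g. 'features.0.filter.weight')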
                pre_trained_dict = torch.load(
                    './LightenedCNN_4_torch.pth',
                    map_location='cpu')  # lambda storage, loc: storage)

                model_dict = model.state_dict()
                #model = model.to(device)  #lightcnn model
                pre_trained_dict[
                    'features.0.filter.weight'] = pre_trained_dict.pop(
                        '0.weight')
                pre_trained_dict[
                    'features.0.filter.bias'] = pre_trained_dict.pop('0.bias')
                pre_trained_dict[
                    'features.2.filter.weight'] = pre_trained_dict.pop(
                        '2.weight')
                pre_trained_dict[
                    'features.2.filter.bias'] = pre_trained_dict.pop('2.bias')
                pre_trained_dict[
                    'features.4.filter.weight'] = pre_trained_dict.pop(
                        '4.weight')
                pre_trained_dict[
                    'features.4.filter.bias'] = pre_trained_dict.pop('4.bias')
                pre_trained_dict[
                    'features.6.filter.weight'] = pre_trained_dict.pop(
                        '6.weight')

                my_dict = {
                    k: v
                    for k, v in pre_trained_dict.items() if ("fc2" not in k)
                }  #by DG

                model_dict.update(my_dict)
                model.load_state_dict(model_dict, strict=False)
            else:
                print("=> loading checkpoint '{}'".format(args.resume))
                #checkpoint = torch.load(args.resume, map_location='cpu')['state_dict']
                state_dict = torch.load(
                    args.resume, map_location='cpu'
                )['state_dict']  #torch.load(directory, map_location=lambda storage, loc: storage)
                #state_dict = torch.load(args.resume, map_location=lambda storage, loc: storage)['state_dict']
                new_state_dict = OrderedDict()

                for k, v in state_dict.items():
                    if k[:7] == 'module.':
                        name = k[7:]  # remove `module.`
                    else:
                        name = k
                    new_state_dict[name] = v
                model.load_state_dict(new_state_dict, strict=True)
            #model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    #print(len(img_list))
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)

    featuresmatrix = np.empty((0, 256))

    for img_name in img_list[:]:
        img_name = img_name[0]
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        #print(os.path.join(args.root_path, img_name))
        #img   = cv2.imread(os.path.join(args.root_path, 'Cropped_'+img_name), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (128, 128))
        img = np.reshape(img, (128, 128, 1))
        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        '''
        if args.cuda:
            input = input.cuda()
        '''
        with torch.no_grad():
            input_var = input  #torch.tensor(input)#, volatile=True)
            _, features = model(input_var)
            #print(features.size())
            featuresmatrix = np.append(featuresmatrix,
                                       features.data.cpu().numpy(),
                                       axis=0)
            #print(features)

        end = time.time() - start
        #print("{}({}/{}). Time: {}".format(os.path.join(args.root_path, img_name), count, len(img_list), end))
        #save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
    #print(featuresmatrix.shape)
    similarity_matrix = cosine_similarity(featuresmatrix, featuresmatrix)
    #np.savetxt("similarity_score_validationset.txt",similarity_matrix,fmt ="%4.2f", delimiter=" ")
    np.savetxt("similarity_score_testset2019_lightcnn29_71.txt",
               similarity_matrix,
               fmt="%5.4f",
               delimiter=" ")
Example #7
def main():
    global args
    args = GetArgs()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #load image
    train_loader = torch.utils.data.DataLoader(ImageList(
        root=args.train_path,
        fileList=args.train_list,
        transform=transforms.Compose([
            transforms.RandomCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImageList(
        root=args.test_path,
        fileList=args.val_list,
        transform=transforms.Compose([
            transforms.CenterCrop(128),
            transforms.ToTensor(),
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    validate(val_loader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        pth_name = 'lightCNN_' + str(epoch + 1) + '_checkpoint.pth.tar'
        save_name = os.path.join(args.save_path, pth_name)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'prec1': prec1,
            }, save_name)
Example #8
def main():
    global args
    args = parser.parse_args()

    os.makedirs(args.save_path, exist_ok=True)
    input_dims = (args.image_height, args.image_width)
    log_path = os.path.join(args.save_path, 'log.txt')
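    # dump all command-line arguments to log.txt for later reference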
    with open(log_path, 'w+') as f:
        f.write(
            '\n'.join(['%s: %s' % (k, v)
                       for k, v in args.__dict__.items()]) + '\n')

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes,
                                 input_dims=input_dims)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes,
                                  input_dims=input_dims)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes,
                                     input_dims=input_dims)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #load image
    train_loader = torch.utils.data.DataLoader(get_dataset(
        args.dataset,
        args.root_path,
        args.train_list,
        transform=get_transforms(dataset=args.dataset, phase='train')),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(get_dataset(
        args.dataset,
        args.root_path,
        args.val_list,
        transform=get_transforms(dataset=args.dataset, phase='train')),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    # validate(val_loader, model, criterion)
    with trange(args.start_epoch, args.epochs) as epochs:
        for epoch in epochs:
            epochs.set_description('Epoch %d' % epoch)

            adjust_learning_rate(optimizer, epoch)

            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, log_path)
            if epoch % args.val_freq == 0:
                # evaluate on validation set
                prec1 = validate(val_loader, model, criterion, log_path)

            save_name = args.save_path + 'lightCNN_' + str(
                epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'prec1': prec1,
                }, save_name)
Example #9
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # img_list  = read_list(args.img_list)
    # transform = transforms.Compose([transforms.ToTensor()])
    # count     = 0
    # input     = torch.zeros(1, 1, 128, 128)
    # for img_name in img_list:
    #     count = count + 1
    #     img   = cv2.imread(os.path.join(args.root_path, img_name), cv2.IMREAD_GRAYSCALE)
    #     img   = np.reshape(img, (128, 128, 1))
    #     img   = transform(img)
    #     input[0,:,:,:] = img
    #
    #     start = time.time()
    #     if args.cuda:
    #         input = input.cuda()
    #     input_var   = torch.autograd.Variable(input, volatile=True)
    #     _, features = model(input_var)
    #     end         = time.time() - start
    #     print("{}({}/{}). Time: {}".format(os.path.join(args.root_path, img_name), count, len(img_list), end))
    #     save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])

    dir_list = glob.glob(args.root_path + '\\' + '*')
    dir_names = [x.split('\\')[-1] for x in dir_list]
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    train_path = "E://2021WIN//SI681//LightCNN//CACD_feature_train"
    train_dir_list = glob.glob(train_path + '\\' + '*')
    train_dir_name = [x.split('\\')[-1] for x in train_dir_list]
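    # only identities that already have a feature folder under train_path are processed below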

    for dir in dir_names:
        if dir not in train_dir_name:
            continue
        img_path = glob.glob(args.root_path + '\\' + dir + '\\' + '*.jpg')
        img_list = [x.split('\\')[-1] for x in img_path]
        # print(img_list)
        for img_name in img_list:
            count = count + 1
            img = cv2.imread(os.path.join(args.root_path, dir, img_name),
                             cv2.IMREAD_GRAYSCALE)
            # cv2.imshow('image', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            # print(img.shape)
            # print(img)
            img = np.reshape(img, (128, 128, 1))
            img = transform(img)
            input[0, :, :, :] = img

            start = time.time()
            if args.cuda:
                input = input.cuda()
            input_var = torch.autograd.Variable(input, requires_grad=True)
            _, features = model(input_var)
            end = time.time() - start
            print("{}({}/{}). Time: {}".format(
                os.path.join(args.root_path, dir, img_name), count,
                len(img_list), end))
            save_feature(args.save_path, dir, img_name,
                         features.data.cpu().numpy()[0])
Example #10
def main():
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    all_transform = transforms.Compose([
        transforms.Grayscale(1),
        transforms.ToTensor(),
    ])
    # define trainloader and testloader
    trainset = CASIA_NIR_VIS(root=CASIA_DATA_DIR, transform=all_transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=8,
                                              drop_last=False)

    testdataset = CASIA_NIR_VIS(root=CASIA_DATA_DIR, transform=all_transform)
    testloader = torch.utils.data.DataLoader(testdataset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=8,
                                             drop_last=False)

    # define loss function
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    validate(testloader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(trainloader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(testloader, model, criterion)

        save_name = args.save_path + 'lightCNN_' + str(
            epoch + 1) + '_checkpoint.pth.tar'
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'prec1': prec1,
            }, save_name)
Example #11
def main():
    
    mypath = "test_feat"
    for root, dirs, files in os.walk(mypath):
        for file in files:
            os.remove(os.path.join(root, file))
    
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))


    script(args.root_path)

    img_list  = read_list(args.img_list)
    #print(args.img_list)
    #print("_____")
    #print(img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count     = 0
    input     = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        #print(img_name)
        count = count + 1
        img   = cv2.imread(os.path.join(args.root_path, img_name), cv2.IMREAD_GRAYSCALE)
        #img   = np.reshape(img, (128, 128, 1))
        img = cv2.resize(img,(128,128))
        img   = transform(img)
        input[0,:,:,:] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
        input_var   = torch.autograd.Variable(input, volatile=True)
        _, features = model(input_var)
        end         = time.time() - start
        print("{}({}/{}). Time: {}".format(os.path.join(args.root_path, img_name), count, len(img_list), end))
        
        
        
        
        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
        cos_sim_cal(img_name)
Example #12
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    use_cuda = args.cuda and torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    print('Device being used is :' + str(device))

    #model = torch.nn.DataParallel(model).to(device)
    model = model.to(device)
    DFWmodel = DFW().to(device)

    if args.pretrained:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            #checkpoint = torch.load(args.resume, map_location='cpu')['state_dict']
            if device == 'cpu':
                state_dict = torch.load(
                    args.resume, map_location='cpu'
                )['state_dict']  #torch.load(directory, map_location=lambda storage, loc: storage)
            else:
                state_dict = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage)['state_dict']

            new_state_dict = OrderedDict()
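            # checkpoints saved from a DataParallel model prefix every key with 'module.';
            # strip it so the state dict loads into the unwrapped model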

            for k, v in state_dict.items():
                if k[:7] == 'module.':
                    name = k[7:]  # remove `module.`
                else:
                    name = k
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict, strict=True)
            #model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    #load image
    train_loader = torch.utils.data.DataLoader(
        ImageList(
            root=args.root_path,
            fileList=args.train_list,
            transform=transforms.Compose([
                transforms.Resize((128, 128)),
                #transforms.Resize((144,144)),
                #transforms.FiveCrop((128,128)),
                #transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
            ])),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImageList(
        root=args.root_path,
        fileList=args.val_list,
        transform=transforms.Compose([
            transforms.Resize((128, 128)),
            transforms.ToTensor(),
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    '''
    for param in list(model.named_parameters()):
        print(param)
    '''
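    # train only the fc embedding layer of LightCNN; freeze everything else, including fc2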
    for name, param in model.named_parameters():
        if 'fc' in name and 'fc2' not in name:
            param.requires_grad = True
        else:
            param.requires_grad = False
    '''
    for name,param in model.named_parameters():
        print(name, param.requires_grad)
    '''

    params = list(model.fc.parameters()) + list(DFWmodel.parameters(
    ))  #learnable parameters are fc layer of lightcnn and DFWModel parameters

    optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum)
    #optimizer = optim.Adam(params , lr=args.lr)

    #criterion   = ContrastiveLoss(margin = 1.0 ).to(device)
    criterion = nn.BCELoss()  #ContrastiveLoss(margin = 1.0 ).to(device)

    for epoch in range(args.start_epoch, args.epochs):

        #adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, DFWmodel, criterion, optimizer, epoch,
              device)

        # evaluate on validation set
        acc = validate(val_loader, model, DFWmodel, criterion, epoch, device)
        if epoch % 10 == 0:
            save_name = args.save_path + 'lightCNN_' + str(
                epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'acc': acc,
                    'optimizer': optimizer.state_dict(),
                }, save_name)
Example #13
def main():

    use_cuda = True
    device = torch.device("cuda" if use_cuda else "cpu")


    parser = argparse.ArgumentParser(description='PyTorch AIFR')
    parser.add_argument('--batch_size', type=int, default = 64 , metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--epochs', type=int, default = 2, metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--iters', type=int, default = 2000, metavar='N',
                        help='number of iterations to train (default: 2000)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')

    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    #parser.add_argument('--no-cuda', action='store_true', default=False,
    #                    help='disables CUDA training')

    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--pretrained', default = True, type = bool,
                    metavar='N', help='use a pretrained LightCNN model (True/False)')

    parser.add_argument('--basemodel', default='LightCNN-4', type=str, metavar='BaseModel',
                    help='model type: ContrastiveCNN-4, LightCNN-4, LightCNN-9, LightCNN-29, LightCNN-29v2')

    parser.add_argument('--feature_size', default = 128, type=int, metavar='N',
                    help='feature size is 128 for the LightCNN model')

    parser.add_argument('--save_path', default='', type=str, metavar='PATH',
                    help='path to save checkpoint (default: none)')

    parser.add_argument('--resume', default=False, type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')

    parser.add_argument('--start-epoch', default = 0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
    #Training dataset on Morph

    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
    parser.add_argument('--root_path', default='/home/titanx/DB/Morph_preprocess/Morph_aligned/Morph/', type=str, metavar='PATH',
                    help='path to root path of images (default: none)')

    parser.add_argument('--num_classes', default=1000, type=int,
                    metavar='N', help='number of classes (default: 1000)')


    args = parser.parse_args()

    
    if args.basemodel == 'LightCNN-4':
        basemodel = LightCNN_4Layers(num_classes=args.num_classes)
        print('4 layer Lightcnn model')
    elif args.basemodel == 'LightCNN-9':
        basemodel = LightCNN_9Layers(num_classes=args.num_classes)
        print('9 layer Lightcnn model')
    else:
        print('Model not found so exiting.')
        assert(False)
    


    basemodel = nn.DataParallel(basemodel).to(device)
 
    params1 = []

    for name, param in basemodel.named_parameters():

        if 'fc2' not in name:
            param.requires_grad = False
        else:
            params1.append(param)


    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    if args.pretrained is True:
        #pre_trained_dict = torch.load('./LightCNN_9Layers_checkpoint.pth.tar', map_location = lambda storage, loc: storage) #by DG
        pre_trained_dict = torch.load('./LightenedCNN_4_torch.pth', map_location = lambda storage, loc: storage) #by DG

        #pre_trained_dict = pre_trained_dict['state_dict'] #THIS LINE IS USED EXCEPT FOR LIGHTCNN 4 MODEL
        model_dict = basemodel.state_dict()
        
        # THIS ONE IS FOR CHANGING THE NAME IN THE MODEL:
        # IF WE ARE USING CUDA THEN WE NEED TO MENTION ( module ) LIKE HOW WE HAVE DONE BELOW         
        '''THIS FOLLOWING LINES ARE USED ONLY FOR LIGHTCNN 4 MODEL'''

        pre_trained_dict['module.features.0.filter.weight'] = pre_trained_dict.pop('0.weight')
        pre_trained_dict['module.features.0.filter.bias'] = pre_trained_dict.pop('0.bias')
        pre_trained_dict['module.features.2.filter.weight'] = pre_trained_dict.pop('2.weight')
        pre_trained_dict['module.features.2.filter.bias'] = pre_trained_dict.pop('2.bias')
        pre_trained_dict['module.features.4.filter.weight'] = pre_trained_dict.pop('4.weight')
        pre_trained_dict['module.features.4.filter.bias'] = pre_trained_dict.pop('4.bias')
        pre_trained_dict['module.features.6.filter.weight'] = pre_trained_dict.pop('6.weight')
        pre_trained_dict['module.features.6.filter.bias'] = pre_trained_dict.pop('6.bias')
        pre_trained_dict['module.fc1.filter.weight'] = pre_trained_dict.pop('9.1.weight')
        pre_trained_dict['module.fc1.filter.bias'] = pre_trained_dict.pop('9.1.bias')
        pre_trained_dict['module.fc2.weight'] = pre_trained_dict.pop('12.1.weight')
        pre_trained_dict['module.fc2.bias'] = pre_trained_dict.pop('12.1.bias')
       
       # 1. filter out unnecessary keys  
        pre_trained_dict = {k: v for k, v in pre_trained_dict.items() if ("fc2" not in k)}
       # 2. overwrite entries in the existing state dict
        model_dict.update(pre_trained_dict)
       # 3. load the new state dict  
        basemodel.load_state_dict(model_dict, strict = False)
    


    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    train_transform = transforms.Compose([transforms.Resize(144), transforms.RandomCrop(128), transforms.ToTensor()])#,    
                                           #transforms.Normalize(mean = [0.5224], std = [0.1989])])

    valid_transform = transforms.Compose([transforms.Resize(128), transforms.ToTensor()])#,

    #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode = 'max', factor = 0.1, patience = 5) 

    train_loader = DataLoader(AgeFaceDataset(transform = train_transform, istrain = True, isvalid = False),
                    batch_size = args.batch_size, shuffle = True,
                    num_workers =args.workers, pin_memory = False)


    valid_loader = DataLoader(AgeFaceDataset(transform = valid_transform, istrain = False, isvalid = True),
                    batch_size = args.batch_size, shuffle = False,
                    num_workers =args.workers, pin_memory = False)

    test_loader = DataLoader(AgeFaceDataset(transform = valid_transform,  istrain = False, isvalid = False),
                    batch_size = args.batch_size, shuffle = False,
                    num_workers = args.workers, pin_memory = False)

    trainprobe_loader = DataLoader(AgeFaceDataset(transform = valid_transform, istrain = True, isvalid = True),
                    batch_size = args.batch_size, shuffle = False,
                    num_workers = args.workers, pin_memory = False)   #for train accuracy

    basemodel = basemodel.to(device)

    aifrmodel = aifrNet(channels = 686).to(device)  #channels is embedding from lightcnn base model
    arcface = Arcface(embedding_size= 128, classnum=10000).to(device)

    params =    list(aifrmodel.parameters())  #+ list(arcface.parameters())

    #optimizer = optim.SGD(params , lr=args.lr, momentum=args.momentum)
    optimizer = optim.Adam(params , lr=args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['iterno']
            genmodel.load_state_dict(checkpoint['state_dict1'])
            basemodel.load_state_dict(checkpoint['state_dict2'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('Test acc at checkpoint was:',checkpoint['testacc'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    criterion1   = nn.CrossEntropyLoss().to(device)         #for  Identification loss
    criterion2   = nn.CrossEntropyLoss().to(device)         #for  Identification loss
    criterion3   = nn.CrossEntropyLoss().to(device)         #for  Identification loss
    #criterion1   = nn.MSELoss().to(device)         #for Age regression loss

    print('Device being used is :' + str(device))

    for iterno in range(args.start_epoch , 200):

        accuracy = test(test_loader, basemodel, aifrmodel,device)
        print('test accuracy is :', accuracy)
        #adjust_learning_rate(optimizer, iterno)

        #print('args iters',args.iters)

        train(args, basemodel, aifrmodel, arcface, device, train_loader, optimizer, criterion1, criterion2, criterion3,iterno)
      
        #TEST ACCURACY
        accuracy = test(test_loader, basemodel, aifrmodel,device)
        print('test accuracy is :', accuracy)
        f = open('Morph_performance_with_resentlike_lightcnn4_senet_acc_3resblocks_correct','a')
        f.write('\n' + str(iterno) + '\t' + str(accuracy))
        f.close()