Code Example #1
def main():
    print(os.environ['PATH'])
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        # print(img.shape)
        img = cv2.resize(img, (128, 128))

        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
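        # Variable(volatile=True) is the pre-0.4 PyTorch idiom for inference;
        # on current versions, wrap the forward pass in torch.no_grad() instead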
        input_var = torch.autograd.Variable(input, volatile=True)
        _, features = model(input_var)
        # print("\n")
        # print(type(features.data.cpu().numpy()[0][0]))
        # print("\n")
        end = time.time() - start
        print("{}({}/{}). Time: {}".format(
            os.path.join(args.root_path, img_name), count, len(img_list), end))
        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
Code Example #2
    def __init__(self):
        self.bridge = CvBridge()
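        # ROS wiring: publish extracted feature vectors and subscribe to
        # cropped face images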
        self.feature_pub = rospy.Publisher("extracted_features",
                                           featArr,
                                           queue_size=0)
        self.img_sub = rospy.Subscriber("cropped_face", faceArr, self.callback)
        self.path_to_saved_model = "/home/abhisek/Study/Robotics/face_data/LightCNN_29Layers_V2_checkpoint.pth.tar"
        self.model = LightCNN_29Layers_v2(num_classes=80013)
        self.cuda = True
        if self.cuda:
            self.model = torch.nn.DataParallel(self.model).cuda()
        if self.path_to_saved_model:
            print("There")
            if os.path.isfile(self.path_to_saved_model):
                self.checkpoint = torch.load(self.path_to_saved_model)
                self.model.load_state_dict(self.checkpoint['state_dict'])
            else:
                print("=> no checkpoint found at '{}'".format(
                    self.path_to_saved_model))

        self.transform = transforms.Compose([
            transforms.Resize([128, 128]),
            transforms.CenterCrop(128),
            transforms.ToTensor()
        ])
        self.input = torch.zeros(1, 1, 128, 128)
Code Example #3
def excute():
    global args
    args = parser.parse_args()

    # print(args)
    # exit()
    if args.root_path == '':
        #args.root_path = '/media/zli33/DATA/study/AdvCompVision/Project/Implementation/mtcnn-pytorch-master/NIR-VIS-2.0'
        args.root_path = '/brazos/kakadiaris/Datasets/CASIA-NIR-VIS-2-0/NIR-VIS-2.0'
    if args.resume == '':
        args.resume = 'LightCNN_9Layers_checkpoint.pth.tar'
    if args.protocols == '':
        args.protocols = 'protocols'

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        args.num_classes = 80013
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # print('OK')

    gallery_file_list = 'vis_gallery_*.txt'
    probe_file_list = 'nir_probe_*.txt'
    import glob2

    gallery_file_list = glob2.glob(args.root_path + '/' + args.protocols +
                                   '/' + gallery_file_list)
    probe_file_list = glob2.glob(args.root_path + '/' + args.protocols + '/' +
                                 probe_file_list)
    # remove *_dev.txt file in both list
    gallery_file_list = sorted(gallery_file_list)[0:-1]
    probe_file_list = sorted(probe_file_list)[0:-1]

    avg_r_a, std_r_a, avg_v_a, std_v_a = load(model, args.root_path,
                                              gallery_file_list,
                                              probe_file_list)
    return avg_r_a, std_r_a, avg_v_a, std_v_a
Code Example #4
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (128, 128))
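        # add an explicit channel axis so ToTensor() yields a 1x128x128 tensor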
        img = np.reshape(img, (128, 128, 1))
        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
        input_var = torch.autograd.Variable(input, volatile=True)
        outArray, features = model(input_var)
        print(outArray.shape)
        print("Max value:", np.max(outArray.data.cpu().numpy()[0]))
        print("Max index:", np.argmax(outArray.data.cpu().numpy()[0]))
        end = time.time() - start
        print("{}({}/{}). Time: {}".format(
            os.path.join(args.root_path, img_name), count, len(img_list), end))
        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
        np.save(args.save_path + img_name, features.data.cpu().numpy()[0])
        np.save(args.save_path + img_name + ".fc",
                outArray.data.cpu().numpy()[0])
Code Example #5
def create_model(end2end=True):
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes, end2end=end2end)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes,
                                     end2end=end2end)
    else:
        print('Error model type\n')
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()
    # print(model)
    return model
Code Example #6
File: model.py, Project: machinedesign/gan_gen
 def __init__(self, path="LightCNN_29Layers_V2_checkpoint.pth.tar", device="cpu"):
     super().__init__()
     model = LightCNN_29Layers_v2(num_classes=80013)
     checkpoint = torch.load(path, map_location="cpu")
     ck = checkpoint['state_dict']
     ck_ = {}
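     # a checkpoint saved from a DataParallel-wrapped model prefixes every key
     # with "module."; strip the prefix so the keys match this bare model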
     for k, v in ck.items():
         ck_[k.replace("module.", "")] = v
     model.load_state_dict(ck_)
     model.to(device)
     self.net = model
     self.latent_size = 256
Code Example #7
def main():
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model = model.cuda()

    print(model)

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # large lr for last fc parameters
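    # (fc2 weights: 10x base lr; fc2 biases: 20x, no weight decay;
    #  other biases: 2x, no weight decay; all remaining weights: 1x)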
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    state_dict = torch.load(args.model_path)
    model.load_state_dict(state_dict)

    cudnn.benchmark = True
    # load image
    train_loader = torch.utils.data.DataLoader(ImgDataset(
        args.dataroot, False, args.crop, args.preload),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImgDataset(
        args.dataroot, True, args.crop, args.preload),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=0,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    criterion.cuda()

    validate(val_loader, model)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        validate(val_loader, model)

        if not os.path.exists(args.save_path):
            os.makedirs(args.save_path)

        if epoch % 5 == 0:
            save_checkpoint(
                model.state_dict(),
                join(args.save_path,
                     'lightCNN_' + str(epoch + 1) + '_checkpoint.pth'))
        save_checkpoint(model.state_dict(),
                        join(args.save_path, 'lightCNN_latest_checkpoint.pth'))
Code Example #8
def main():
    
    mypath = "test_feat"
    for root, dirs, files in os.walk(mypath):
        for file in files:
            os.remove(os.path.join(root, file))
    
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))


    script(args.root_path)

    img_list = read_list(args.img_list)
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    for img_name in img_list:
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (128, 128))
        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        if args.cuda:
            input = input.cuda()
        input_var = torch.autograd.Variable(input, volatile=True)
        _, features = model(input_var)
        end = time.time() - start
        print("{}({}/{}). Time: {}".format(
            os.path.join(args.root_path, img_name), count, len(img_list), end))

        save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
        cos_sim_cal(img_name)
Code Example #9
import torch
from torch.backends import cudnn
import torch.nn as nn
from light_cnn import LightCNN_29Layers_v2
from data import TrainDataset
import config
from torch.autograd import Variable
from my_utils import *
from utils import *

cudnn.enabled = True
img_list = open('img_list/051_all', 'r').read().split('\n')
img_list.pop()

feature_extract_model = LightCNN_29Layers_v2(num_classes=80013).cuda()
feature_extract_model = torch.nn.DataParallel(feature_extract_model).cuda()
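# replace the pretrained 80013-class fc2 head with a new 360-way linear layer
# and re-initialise its weights before fine-tuning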

feature_extract_model.module.fc2 = nn.Linear(in_features=256,
                                             out_features=360).cuda()
torch.nn.init.kaiming_uniform_(feature_extract_model.module.fc2.weight)

optim_LCNN = torch.optim.Adam(
    feature_extract_model.parameters(),
    lr=1e-4,
)
resume_model(feature_extract_model, 'model_save')
resume_optimizer(optim_LCNN, feature_extract_model, 'model_save')
# Train LightCNN on multipie

# input
trainloader = torch.utils.data.DataLoader(TrainDataset(img_list),
Code Example #10
    def talker(self):

        ##
        print("in callback")
        path_to_saved_model = "/home/abhisek/Study/Robotics/face_data/LightCNN_29Layers_V2_checkpoint.pth.tar"
        path_to_img = '/home/abhisek/Study/Robotics/face_data/output.txt'
        save_path = '/home/abhisek/Study/Robotics/face_data/face_feat_upaaa'
        imglist = self.read_list(path_to_img)
        model = LightCNN_29Layers_v2(num_classes=80013)
        path_to_save = "no need for this publish it into another topic"
        cuda = True

        centroid_info_dir = '/home/abhisek/Study/Robotics/face_data/centroid_info'
        file_name = 'centroid_info_8py.npy'
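        # centroid_info is expected to hold per-class feature centroids that
        # self.checkClass() compares against each extracted feature vector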
        try:
            centroid_info = np.load(os.path.join(centroid_info_dir, file_name))
        except Exception:
            print("Could not load the centroid_info array.")

        model.eval()
        if cuda:
            model = torch.nn.DataParallel(model).cuda()

        if path_to_saved_model:
            print("There")
            print(path_to_saved_model)
            if os.path.isfile(path_to_saved_model):
                print("here")
                print("=> loading checkpoint '{}'".format(path_to_saved_model))
                checkpoint = torch.load(path_to_saved_model)
                model.load_state_dict(checkpoint['state_dict'])
            else:
                print("=> no checkpoint found at '{}'".format(
                    path_to_saved_model))

        transform = transforms.Compose([
            transforms.Resize([128, 156]),
            transforms.CenterCrop(128),
            transforms.ToTensor()
        ])
        input = torch.zeros(1, 1, 128, 128)
        for img_name in imglist:

            imgdata = cv2.imread(os.path.join(img_name), cv2.IMREAD_GRAYSCALE)

            imgdata = np.expand_dims(imgdata, axis=2)
            imgdatapil = transforms.functional.to_pil_image(imgdata)
            imgdatatrans = transform(imgdatapil)
            print(type(imgdatatrans))

            print("THE shape now")
            print(imgdatatrans.shape)

            print("PROCESSED DATA!!!")
            #print(imgdatatrans)
            #time.sleep(10)
            try:
                input[0, :, :, :] = imgdatatrans
                print("the INPUT")

                start = time.time()
                if cuda:
                    input = input.cuda()
                input_var = torch.autograd.Variable(input, volatile=True)
                #print(input)
                _, features = model(input_var)
                end = time.time() - start
                feature = features.cpu().detach().numpy()
                #print("FEAT")
                #print(feature)
                features = features.data.cpu().numpy()[0]
                print(type(feature))
                a = np.array(features, dtype=np.float32)
                #self.feature_pub.publish(a)
                print("Published Features!!!")
                print(img_name)
                print(features)
                #self.save_feature(save_path,img_name,features)
                cl2, collector = self.checkClass(centroid_info, features)
                final_class = cl2 + 1
                #print("This is the final class")
                self.feature_pub.publish(str(final_class))
            except Exception as e:
                print(traceback.format_exc())
                print(sys.exc_info()[0])
Code Example #11
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-4':
        model = LightCNN_4Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            if args.model == 'LightCNN-4':
                pre_trained_dict = torch.load(
                    './LightenedCNN_4_torch.pth',
                    map_location='cpu')  # lambda storage, loc: storage)

                model_dict = model.state_dict()
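                # the LightenedCNN_4 Torch checkpoint stores weights under bare
                # layer indices ('0.weight', ...); rename them to this model's
                # 'features.N.filter.*' keys before loading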
                #model = model.to(device)  #lightcnn model
                pre_trained_dict[
                    'features.0.filter.weight'] = pre_trained_dict.pop(
                        '0.weight')
                pre_trained_dict[
                    'features.0.filter.bias'] = pre_trained_dict.pop('0.bias')
                pre_trained_dict[
                    'features.2.filter.weight'] = pre_trained_dict.pop(
                        '2.weight')
                pre_trained_dict[
                    'features.2.filter.bias'] = pre_trained_dict.pop('2.bias')
                pre_trained_dict[
                    'features.4.filter.weight'] = pre_trained_dict.pop(
                        '4.weight')
                pre_trained_dict[
                    'features.4.filter.bias'] = pre_trained_dict.pop('4.bias')
                pre_trained_dict[
                    'features.6.filter.weight'] = pre_trained_dict.pop(
                        '6.weight')

                my_dict = {
                    k: v
                    for k, v in pre_trained_dict.items() if ("fc2" not in k)
                }  #by DG

                model_dict.update(my_dict)
                model.load_state_dict(model_dict, strict=False)
            else:
                print("=> loading checkpoint '{}'".format(args.resume))
                #checkpoint = torch.load(args.resume, map_location='cpu')['state_dict']
                state_dict = torch.load(
                    args.resume, map_location='cpu'
                )['state_dict']  #torch.load(directory, map_location=lambda storage, loc: storage)
                #state_dict = torch.load(args.resume, map_location=lambda storage, loc: storage)['state_dict']
                new_state_dict = OrderedDict()

                for k, v in state_dict.items():
                    if k[:7] == 'module.':
                        name = k[7:]  # remove `module.`
                    else:
                        name = k
                    new_state_dict[name] = v
                model.load_state_dict(new_state_dict, strict=True)
            #model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    img_list = read_list(args.img_list)
    #print(len(img_list))
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)

    featuresmatrix = np.empty((0, 256))

    for img_name in img_list[:]:
        img_name = img_name[0]
        count = count + 1
        img = cv2.imread(os.path.join(args.root_path, img_name),
                         cv2.IMREAD_GRAYSCALE)
        #print(os.path.join(args.root_path, img_name))
        #img   = cv2.imread(os.path.join(args.root_path, 'Cropped_'+img_name), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (128, 128))
        img = np.reshape(img, (128, 128, 1))
        img = transform(img)
        input[0, :, :, :] = img

        start = time.time()
        '''
        if args.cuda:
            input = input.cuda()
        '''
        with torch.no_grad():
            input_var = input  #torch.tensor(input)#, volatile=True)
            _, features = model(input_var)
            #print(features.size())
            featuresmatrix = np.append(featuresmatrix,
                                       features.data.cpu().numpy(),
                                       axis=0)
            #print(features)

        end = time.time() - start
        #print("{}({}/{}). Time: {}".format(os.path.join(args.root_path, img_name), count, len(img_list), end))
        #save_feature(args.save_path, img_name, features.data.cpu().numpy()[0])
    #print(featuresmatrix.shape)
    similarity_matrix = cosine_similarity(featuresmatrix, featuresmatrix)
    #np.savetxt("similarity_score_validationset.txt",similarity_matrix,fmt ="%4.2f", delimiter=" ")
    np.savetxt("similarity_score_testset2019_lightcnn29_71.txt",
               similarity_matrix,
               fmt="%5.4f",
               delimiter=" ")
Code Example #12
File: train.py, Project: Armar12/face-re
def main():
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #load image
    train_loader = torch.utils.data.DataLoader(ImageList(
        root=args.root_path,
        fileList=args.train_list,
        transform=transforms.Compose([
            transforms.RandomCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImageList(
        root=args.root_path,
        fileList=args.val_list,
        transform=transforms.Compose([
            transforms.CenterCrop(128),
            transforms.ToTensor(),
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    validate(val_loader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        save_name = args.save_path + 'lightCNN_' + str(
            epoch + 1) + '_checkpoint.pth.tar'
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'prec1': prec1,
            }, save_name)
Code Example #13
def main():
    global args
    args = parser.parse_args()

    os.makedirs(args.save_path, exist_ok=True)
    input_dims = (args.image_height, args.image_width)
    log_path = os.path.join(args.save_path, 'log.txt')
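    # dump every parsed argument to log.txt, one "key: value" per line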
    with open(log_path, 'w+') as f:
        f.write(
            '\n'.join(['%s: %s' % (k, v)
                       for k, v in args.__dict__.items()]) + '\n')

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes,
                                 input_dims=input_dims)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes,
                                  input_dims=input_dims)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes,
                                     input_dims=input_dims)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #load image
    train_loader = torch.utils.data.DataLoader(get_dataset(
        args.dataset,
        args.root_path,
        args.train_list,
        transform=get_transforms(dataset=args.dataset, phase='train')),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(get_dataset(
        args.dataset,
        args.root_path,
        args.val_list,
        transform=get_transforms(dataset=args.dataset, phase='train')),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    # validate(val_loader, model, criterion)
    prec1 = 0  # ensure prec1 is defined even if the first epoch skips validation
    with trange(args.start_epoch, args.epochs) as epochs:
        for epoch in epochs:
            epochs.set_description('Epoch %d' % epoch)

            adjust_learning_rate(optimizer, epoch)

            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, log_path)
            if epoch % args.val_freq == 0:
                # evaluate on validation set
                prec1 = validate(val_loader, model, criterion, log_path)

            save_name = args.save_path + 'lightCNN_' + str(
                epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'prec1': prec1,
                }, save_name)
Code Example #14
File: extract_features.py, Project: zhengsyc/SI681
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    model.eval()
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    dir_list = glob.glob(args.root_path + '\\' + '*')
    dir_names = [x.split('\\')[-1] for x in dir_list]
    transform = transforms.Compose([transforms.ToTensor()])
    count = 0
    input = torch.zeros(1, 1, 128, 128)
    train_path = "E://2021WIN//SI681//LightCNN//CACD_feature_train"
    train_dir_list = glob.glob(train_path + '\\' + '*')
    train_dir_name = [x.split('\\')[-1] for x in train_dir_list]
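    # identities already present in the training-feature directory; the loop
    # below skips any identity not in this list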

    for dir in dir_names:
        if dir not in train_dir_name:
            continue
        img_path = glob.glob(args.root_path + '\\' + dir + '\\' + '*.jpg')
        img_list = [x.split('\\')[-1] for x in img_path]
        # print(img_list)
        for img_name in img_list:
            count = count + 1
            img = cv2.imread(os.path.join(args.root_path, dir, img_name),
                             cv2.IMREAD_GRAYSCALE)
            # cv2.imshow('image', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            # print(img.shape)
            # print(img)
            img = np.reshape(img, (128, 128, 1))
            img = transform(img)
            input[0, :, :, :] = img

            start = time.time()
            if args.cuda:
                input = input.cuda()
            input_var = torch.autograd.Variable(input, requires_grad=True)
            _, features = model(input_var)
            end = time.time() - start
            print("{}({}/{}). Time: {}".format(
                os.path.join(args.root_path, dir, img_name), count,
                len(img_list), end))
            save_feature(args.save_path, dir, img_name,
                         features.data.cpu().numpy()[0])
Code Example #15
File: train.py, Project: darshangera/LightCNNDFW
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    use_cuda = args.cuda and torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    print('Device being used is :' + str(device))

    #model = torch.nn.DataParallel(model).to(device)
    model = model.to(device)
    DFWmodel = DFW().to(device)

    if args.pretrained:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            #checkpoint = torch.load(args.resume, map_location='cpu')['state_dict']
            if device == 'cpu':
                state_dict = torch.load(
                    args.resume, map_location='cpu'
                )['state_dict']  #torch.load(directory, map_location=lambda storage, loc: storage)
            else:
                state_dict = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage)['state_dict']

            new_state_dict = OrderedDict()

            for k, v in state_dict.items():
                if k[:7] == 'module.':
                    name = k[7:]  # remove `module.`
                else:
                    name = k
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict, strict=True)
            #model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    #load image
    train_loader = torch.utils.data.DataLoader(
        ImageList(
            root=args.root_path,
            fileList=args.train_list,
            transform=transforms.Compose([
                transforms.Resize((128, 128)),
                #transforms.Resize((144,144)),
                #transforms.FiveCrop((128,128)),
                #transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
            ])),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImageList(
        root=args.root_path,
        fileList=args.val_list,
        transform=transforms.Compose([
            transforms.Resize((128, 128)),
            transforms.ToTensor(),
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    '''
    for param in list(model.named_parameters()):
        print(param)
    '''
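    # freeze every LightCNN parameter except the fc layer; fc2 stays frozen too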
    for name, param in model.named_parameters():
        if 'fc' in name and 'fc2' not in name:
            param.requires_grad = True
        else:
            param.requires_grad = False
    '''
    for name,param in model.named_parameters():
        print(name, param.requires_grad)
    '''

    params = list(model.fc.parameters()) + list(DFWmodel.parameters(
    ))  #learnable parameters are fc layer of lightcnn and DFWModel parameters

    optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum)
    #optimizer = optim.Adam(params , lr=args.lr)

    #criterion   = ContrastiveLoss(margin = 1.0 ).to(device)
    criterion = nn.BCELoss()  #ContrastiveLoss(margin = 1.0 ).to(device)

    for epoch in range(args.start_epoch, args.epochs):

        #adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, DFWmodel, criterion, optimizer, epoch,
              device)

        # evaluate on validation set
        acc = validate(val_loader, model, DFWmodel, criterion, epoch, device)
        if epoch % 10 == 0:
            save_name = args.save_path + 'lightCNN_' + str(
                epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'acc': acc,
                    'optimizer': optimizer.state_dict(),
                }, save_name)
Code Example #16
File: test.py, Project: tobyclh/HollowFakes
import torch
import numpy as np
import cv2

from light_cnn import LightCNN_9Layers, LightCNN_29Layers, LightCNN_29Layers_v2
from load_imglist import ImageList
from preprocessing import normalize_image
from skimage.color import rgb2gray
from glob import glob
import matplotlib.pyplot as plt
from skimage.io import imread

person_A = glob('/home/toby/Documents/HollowFakes/data/donald/*.*')
person_B = glob('/home/toby/Documents/HollowFakes/data/boris/*.*')
test = glob('/home/toby/Documents/HollowFakes/data/test_images/*.*')
# print(f'Personal A :{person_A}')
model = LightCNN_29Layers_v2(num_classes=80013)
model.eval()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load('data/LightCNN_29Layers_V2_checkpoint.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
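
# extract LightCNN features for each identity's reference images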

with torch.no_grad():
    donald_feature = []
    boris_feature = []
    for image in person_A:
        image = imread(image)
        img = normalize_image(image)
        if img is None:
            continue
        img = rgb2gray(img)
Code Example #17
def main():
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error model type\n')

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # large lr for last fc parameters
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{
                    'params': value,
                    'lr': 20 * args.lr,
                    'weight_decay': 0
                }]
            else:
                params += [{
                    'params': value,
                    'lr': 2 * args.lr,
                    'weight_decay': 0
                }]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    all_transform = transforms.Compose([
        transforms.Grayscale(1),
        transforms.ToTensor(),
    ])
    # define trainloader and testloader
    trainset = CASIA_NIR_VIS(root=CASIA_DATA_DIR, transform=all_transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=8,
                                              drop_last=False)

    testdataset = CASIA_NIR_VIS(root=CASIA_DATA_DIR, transform=all_transform)
    testloader = torch.utils.data.DataLoader(testdataset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=8,
                                             drop_last=False)

    # define loss function
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        criterion.cuda()

    validate(testloader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(trainloader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(testloader, model, criterion)

        save_name = args.save_path + 'lightCNN_' + str(
            epoch + 1) + '_checkpoint.pth.tar'
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'prec1': prec1,
            }, save_name)