Example #1
def build_model(model_name, num_classes, pretrained=True):
    # map a human-readable model name to the matching constructor
    if model_name == 'ResNet-50':
        net = resnet50(num_classes=num_classes, pretrained=pretrained)
    elif model_name == 'ResNet-101':
        net = resnet101(num_classes=num_classes, pretrained=pretrained)
    elif model_name == 'ResNet-152':
        net = resnet152(num_classes=num_classes, pretrained=pretrained)
    else:
        # fail loudly instead of returning an unbound local
        raise ValueError('unknown model name: {}'.format(model_name))
    return net
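
A hypothetical call, just to show the expected arguments (the class count here is an assumption, not taken from the snippet):

model = build_model('ResNet-50', num_classes=1000, pretrained=True)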
Example #2
def main():
    global args, best_prec1
    args = parser.parse_args()

    print('img_dir:', args.img_dir)
    print('end2end?:', args.end2end)

    # load data and prepare dataset
    train_list_file = '../../data/msceleb/train_list.txt'
    train_label_file = '../../data/msceleb/train_label.txt'
    caffe_crop = CaffeCrop('train')
    train_dataset = MsCelebDataset(
        args.img_dir, train_list_file, train_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    caffe_crop = CaffeCrop('test')
    val_list_file = '../../data/msceleb/test_list.txt'
    val_label_file = '../../data/msceleb/test_label.txt'
    val_dataset = MsCelebDataset(
        args.img_dir, val_list_file, val_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    assert (train_dataset.max_label == val_dataset.max_label)
    class_num = train_dataset.max_label + 1

    print('class_num: ', class_num)

    # prepare model
    model = None
    assert (args.arch in ['resnet18', 'resnet50', 'resnet101'])
    if args.arch == 'resnet18':
        model = resnet18(pretrained=False,
                         num_classes=class_num,
                         end2end=args.end2end)
    elif args.arch == 'resnet50':
        model = resnet50(pretrained=False,
                         num_classes=class_num,
                         end2end=args.end2end)
    elif args.arch == 'resnet101':
        model = resnet101(pretrained=False,
                          num_classes=class_num,
                          end2end=args.end2end)
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally initialize from pretrained weights
    if args.pretrained:
        checkpoint = torch.load(args.pretrained)
        pretrained_state_dict = checkpoint['state_dict']
        model_state_dict = model.state_dict()

        # copy only keys the current model actually has, so that
        # load_state_dict does not fail on unexpected entries
        for key in pretrained_state_dict:
            if key in model_state_dict:
                model_state_dict[key] = pretrained_state_dict[key]
        model.load_state_dict(model_state_dict)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
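
train, validate, adjust_learning_rate, and save_checkpoint are defined elsewhere in this script. A rough sketch of the two bookkeeping helpers, in the style of the classic PyTorch ImageNet example (the step-decay schedule and file names are assumptions, not taken from this code):

import shutil
import torch

def adjust_learning_rate(optimizer, epoch):
    # assumed schedule: decay the initial LR by 10x every 30 epochs
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # always write the latest state; keep a separate copy of the best one
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')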
Example #3
def extract_feat(arch, model_path, yaw_type):
    global args, best_prec1
    args = parser.parse_args()

    # end-to-end models are tagged in the architecture string,
    # e.g. 'resnet50_end2end'
    end2end = 'end2end' in arch

    arch = arch.split('_')[0]

    class_num = 87020
    #class_num = 13386

    model = None
    assert (arch in ['resnet18', 'resnet50', 'resnet101'])
    if arch == 'resnet18':
        model = resnet18(pretrained=False, num_classes=class_num,
                         extract_feature=True, end2end=end2end)
    elif arch == 'resnet50':
        model = resnet50(pretrained=False, num_classes=class_num,
                         extract_feature=True, end2end=end2end)
    elif arch == 'resnet101':
        model = resnet101(pretrained=False, num_classes=class_num,
                          extract_feature=True, end2end=end2end)

    model = torch.nn.DataParallel(model).cuda()
    model.eval()

    assert (os.path.isfile(model_path))
    checkpoint = torch.load(model_path)
    pretrained_state_dict = checkpoint['state_dict']
    model_state_dict = model.state_dict()
    for key in pretrained_state_dict:
        if key in model_state_dict:
            model_state_dict[key] = pretrained_state_dict[key]
    model.load_state_dict(model_state_dict)

    print('loaded trained model')

    caffe_crop = CaffeCrop('test')

    infos = [('../../data/IJBA/align_image_11', 'ijb_a_11_align_split',
              'frame'),
             ('../../data/IJBA/align_image_11', 'ijb_a_11_align_split', 'img'),
             ('../../data/IJBA/align_image_1N', 'split', 'gallery'),
             ('../../data/IJBA/align_image_1N', 'split', 'probe')]

    for root_dir, sub_dir, img_type in infos:

        for split in range(1, 11):
            split_dir = os.path.join(root_dir, sub_dir + str(split))
            img_dir = os.path.join(split_dir, img_type)
            img_list_file = os.path.join(
                split_dir, '{}_list_{}.txt'.format(img_type, yaw_type))

            img_dataset = CFPDataset(
                args.img_dir, img_list_file,
                transforms.Compose([caffe_crop,
                                    transforms.ToTensor()]))
            img_loader = torch.utils.data.DataLoader(
                img_dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)

            data_num = len(img_dataset)
            img_feat_file = os.path.join(
                split_dir, '{}_{}_feat.bin'.format(arch, img_type))
            feat_dim = 256
            with open(img_feat_file, 'wb') as bin_f:
                bin_f.write(st.pack('ii', data_num, feat_dim))
                for i, (input, yaw) in enumerate(img_loader):
                    # async=True became a SyntaxError in Python 3.7;
                    # non_blocking=True is its replacement
                    yaw = yaw.float().cuda(non_blocking=True)
                    # inference only: torch.no_grad() replaces the removed
                    # volatile=True Variable flag
                    with torch.no_grad():
                        output = model(input, yaw)
                    output_data = output.cpu().data.numpy()
                    feat_num = output.size(0)

                    for j in range(feat_num):
                        bin_f.write(
                            st.pack('f' * feat_dim, *tuple(output_data[j, :])))

            print('finished {} split {}'.format(img_type, split))
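
Each feature file written above starts with two native-endian int32 values (sample count and feature dimension) followed by data_num rows of feat_dim float32 values. A minimal sketch for reading one back, assuming the file is read on a machine with the same byte order it was written on:

import struct
import numpy as np

def read_feat_file(path):
    # header: two int32 values, then data_num * feat_dim float32 values
    with open(path, 'rb') as f:
        data_num, feat_dim = struct.unpack('ii', f.read(8))
        feats = np.fromfile(f, dtype=np.float32, count=data_num * feat_dim)
    return feats.reshape(data_num, feat_dim)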
Example #4
def extract_feat(arch, resume):
    global args, best_prec1
    args = parser.parse_args()

    end2end = 'end2end' in arch

    arch = arch.split('_')[0]
    dataset = '/home/u0060/Datasets/cfp-align/'

    # load data and prepare dataset
    frontal_list_file = 'cfp_protocol/protocol/frontal_list_nonli.txt'
    caffe_crop = CaffeCrop('test')
    frontal_dataset = CFPDataset(
        dataset, frontal_list_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    frontal_loader = torch.utils.data.DataLoader(frontal_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    caffe_crop = CaffeCrop('test')
    profile_list_file = 'cfp_protocol/profile_list_nonli.txt'
    profile_dataset = CFPDataset(
        dataset, profile_list_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    profile_loader = torch.utils.data.DataLoader(profile_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    class_num = 13386

    model = None
    assert (arch in ['resnet18', 'resnet50', 'resnet101'])
    if arch == 'resnet18':
        model = resnet18(pretrained=False,
                         num_classes=class_num,
                         extract_feature=True,
                         end2end=end2end)
    if arch == 'resnet50':
        model = resnet50(pretrained=False,
                         num_classes=class_num,
                         extract_feature=True,
                         end2end=end2end)
    if arch == 'resnet101':
        model = resnet101(pretrained=False,
                          num_classes=class_num,
                          extract_feature=True,
                          end2end=end2end)

    model = torch.nn.DataParallel(model).cuda()
    model.eval()

    assert (os.path.isfile(resume))
    checkpoint = torch.load(resume)
    model.load_state_dict(checkpoint['state_dict'])

    cudnn.benchmark = True

    data_num = len(frontal_dataset)
    frontal_feat_file = './frontal_feat.bin'
    feat_dim = 256
    with open(frontal_feat_file, 'wb') as bin_f:
        bin_f.write(st.pack('ii', data_num, feat_dim))
        for i, (input, yaw) in enumerate(frontal_loader):
            # non_blocking=True replaces the removed async=True argument
            yaw = yaw.float().cuda(non_blocking=True)
            # torch.no_grad() replaces the removed volatile=True Variables
            with torch.no_grad():
                output = model(input, yaw)
            output_data = output.cpu().data.numpy()
            feat_num = output.size(0)

            for j in range(feat_num):
                bin_f.write(st.pack('f' * feat_dim, *tuple(output_data[j, :])))

    data_num = len(profile_dataset)  # same convention as the frontal pass
    profile_feat_file = './profile_feat.bin'
    with open(profile_feat_file, 'wb') as bin_f:
        bin_f.write(st.pack('ii', data_num, feat_dim))
        for i, (input, yaw) in enumerate(profile_loader):
            yaw = yaw.float().cuda(non_blocking=True)
            with torch.no_grad():
                output = model(input, yaw)
            output_data = output.cpu().data.numpy()
            feat_num = output.size(0)

            for j in range(feat_num):
                bin_f.write(st.pack('f' * feat_dim, *tuple(output_data[j, :])))
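
A hypothetical invocation of this extractor; the checkpoint path is a placeholder, and the '_end2end' suffix is what flips the end2end flag parsed at the top of the function:

extract_feat('resnet50_end2end', 'checkpoint/model_best.pth.tar')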