Ejemplo n.º 1
0
def main(args):
    """Evaluate a PFE model on the standard LFW verification protocol.

    Scores image pairs twice: Euclidean/cosine on the mean embeddings, and
    mutual-likelihood score (MLS) on the fused (mu, sigma^2) features.
    """
    image_paths = Dataset(args.dataset_path)['abspath']
    print('%d images to load.' % len(image_paths))
    assert (len(image_paths) > 0)

    # Load the trained model plus its config, then preprocess all images.
    model = Network()
    model.load_model(args.model_dir)
    batch_images = preprocess(image_paths, model.config, False)

    # Forward pass: per-image mean embedding and uncertainty (sigma^2).
    mean_feat, var_feat = model.extract_feature(batch_images,
                                                args.batch_size,
                                                verbose=True)
    fused_feat = np.concatenate([mean_feat, var_feat], axis=1)
    # test
    print('mu:', mean_feat.shape)
    evaluator = LFWTest(image_paths)
    evaluator.init_standard_proto(args.protocol_path)

    # Deterministic-embedding comparison, then MLS comparison.
    acc, thr = evaluator.test_standard_proto(mean_feat, utils.pair_euc_score)
    print('Euclidean (cosine) accuracy: %.5f threshold: %.5f' % (acc, thr))
    acc, thr = evaluator.test_standard_proto(fused_feat, utils.pair_MLS_score)
    print('MLS accuracy: %.5f threshold: %.5f' % (acc, thr))
def main(args):
    """Extract PFE features for IJB-C images and save them as a .npy file.

    Image order follows `img_names` in the meta file; with --nosigma only the
    mean embeddings are stored, otherwise (mu, sigma^2) are concatenated.
    """
    #paths = Dataset(args.dataset_path)['abspath']

    # Resolve each meta-file image name to a file inside the input directory.
    ijbc_meta = np.load(args.meta_path)
    image_paths = []
    for img_name in ijbc_meta['img_names']:
        image_paths.append(
            os.path.join(args.input_path, img_name.split('/')[-1]))
    print('%d images to load.' % len(image_paths))
    assert (len(image_paths) > 0)

    # Load model files and config file
    model = Network()
    model.load_model(args.model_path)
    batch_images = preprocess(image_paths, model.config, False)
    print('N_images', batch_images.shape)

    # Forward pass with a fixed batch size of 128.
    mean_feat, var_feat = model.extract_feature(batch_images, 128, verbose=True)
    if args.nosigma:
        out_features = mean_feat
    else:
        out_features = np.concatenate([mean_feat, var_feat], axis=1)
    print('N_features', out_features.shape)
    np.save(args.output_path, out_features)
Ejemplo n.º 3
0
def validate(network, config, log_dir, step):
    """Generate sample outputs for a fixed test batch and save a manifold image.

    On first call, caches 64 randomly chosen photo images from the test set on
    the function object itself so subsequent calls reuse the same batch.

    Args:
        network: model exposing `generate_BA(images, scales, batch_size)`.
        config: config module with test-set paths and `batch_size`.
        log_dir: directory under which a `samples/` folder is created.
        step: training step used to name the output image `{step}.jpg`.
    """
    # Initialize testing (lazy, once per process).
    if not hasattr(validate, 'images'):
        testset = Dataset(config.test_dataset_path,
                          prefix=config.data_prefix,
                          isDebug=config.isDebug)
        random_indices = np.random.permutation(np.where(
            testset.is_photo)[0])[:64]
        # Fix: `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `object` is the exact equivalent dtype.
        validate.images = testset.images[random_indices].astype(object)
        validate.images = preprocess(validate.images,
                                     config,
                                     is_training=False)

    output_dir = os.path.join(log_dir, 'samples')
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # scales = np.indices((8,8), dtype=np.float32)[1] * 5
    # Uniform scale of 1.0 for each of the 8x8 grid cells.
    scales = np.ones((8, 8))
    scales = scales.flatten()
    test_results = network.generate_BA(validate.images, scales,
                                       config.batch_size)
    utils.save_manifold(test_results,
                        os.path.join(output_dir, '{}.jpg'.format(step)))
Ejemplo n.º 4
0
def eval(data_set,
         network,
         batch_size,
         nfolds=10,
         name='',
         result_dir='',
         re_extract_feature=True,
         filter_out_type='max'):
    """Run a verification test on a (data, issame) dataset tuple.

    Bug fix: `nfolds` is now forwarded to `eval_images` — previously it was
    silently overridden by a hard-coded 10. The pointless single-iteration
    `for i in range(1)` loop around the call was also removed.

    Args:
        data_set: tuple of (mxnet data list, issame flag list).
        network: model providing `.config` and feature extraction.
        batch_size: forward-pass batch size.
        nfolds: number of cross-validation folds.
        name, result_dir, re_extract_feature, filter_out_type:
            forwarded verbatim to `eval_images`.

    Returns:
        The result object produced by `eval_images`.
    """
    print('testing verification..')
    data_list = data_set[0]
    issame_list = data_set[1]
    data_list = data_list[0].asnumpy()
    images = preprocess(data_list, network.config, False)
    del data_set  # release the raw data before feature extraction
    ret = eval_images(images,
                      issame_list,
                      network,
                      batch_size,
                      nfolds=nfolds,
                      name=name,
                      result_dir=result_dir,
                      re_extract_feature=re_extract_feature,
                      filter_out_type=filter_out_type)
    print(ret)
    return ret
Ejemplo n.º 5
0
def eval(data_set, network, batch_size, nfolds=10, name=''):
  """Preprocess a (data, issame) dataset and score it with `eval_images`.

  Bug fix: forward `nfolds` to `eval_images` — it was previously ignored in
  favor of a hard-coded 10.
  """
  print('testing verification..')
  data_list = data_set[0]
  issame_list = data_set[1]
  data_list = data_list[0].asnumpy()
  images = preprocess(data_list, network.config, False)
  del data_set  # release the raw data before feature extraction
  return eval_images(images, issame_list, network, batch_size, nfolds=nfolds, name=name)
Ejemplo n.º 6
0
def main(args):
    """Open-set LFW evaluation: cosine on mu vs. MLS on fused (mu, sigma^2).

    Removed dead code from the original: an unused local `import cv2`, a
    `numTrials = 1` store that was immediately overwritten, a redundant
    lambda wrapper around `utils.nvm_MLS_score`, and commented-out
    experiments. Results are appended to `testing-log.txt` in the model dir.
    """
    paths = get_paths_all(os.path.expanduser(args.dataset_path))
    print('%d images to load.' % len(paths))
    assert (len(paths) > 0)

    # Load model files and config file
    network = Network()
    network.load_model(args.model_dir)
    images = preprocess(paths, network.config, False)
    print(images.shape)

    # Run forward pass to calculate embeddings
    mu, sigma_sq = network.extract_feature(images,
                                           args.batch_size,
                                           verbose=True)
    # Keep only the first uncertainty channel before fusing.
    sigma_sq = sigma_sq[..., :1]
    feat_pfe = np.concatenate([mu, sigma_sq], axis=1)

    # Per-image quality score: higher means lower predicted uncertainty.
    quality_score = -np.mean(np.log(sigma_sq), axis=1)
    print(
        'quality_score quality_score=-np.mean(np.log(sigma_sq),axis=1) percentile [0, 10, 30, 50, 70, 90, 100]'
    )
    print('quality_score ',
          np.percentile(quality_score.ravel(), [0, 10, 30, 50, 70, 90, 100]))
    print(
        'quality_score sigma_sq ',
        np.percentile(np.mean(sigma_sq, axis=1), [0, 10, 30, 50, 70, 90, 100]))

    lfwtest = LFWTest(paths)
    lfwtest.init_standard_proto(args.protocol_path)

    numTrials = 10
    info1 = openset_lfw(mu, utils.pair_cosin_score, numTrials)
    print(info1)

    info2 = openset_lfw(feat_pfe, utils.nvm_MLS_score, numTrials)
    print(info2)
    print('-----------')
    print(info1)
    print(info2)
    with open(os.path.join(args.model_dir, 'testing-log.txt'), 'a') as f:
        f.write(info1 + '\n')
        f.write(info2 + '\n')
def main(args):
    """Train a network on precomputed 'mu'/'conv_final' feature batches.

    Loads the python config module named on the command line, builds the
    network and training dataset, then runs the standard epoch/step loop,
    writing TF summaries periodically and a checkpoint after every epoch.
    """
    # I/O
    config_file = args.config_file
    # NOTE(review): `imp` has been deprecated since Python 3.4 (removed in
    # 3.12); importlib is the modern replacement — left unchanged here.
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    trainset = Dataset(config.train_dataset_path)

    network = Network()
    network.initialize(config, trainset.num_classes)

    # Initalization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    # Training-mode preprocessing is applied by the background batch queue.
    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_format, proc_func=proc_func)


    # Main Loop
    print('\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.format(
            config.name, config.num_epochs, config.epoch_size, config.batch_format['size']))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            batch = trainset.pop_batch_queue()

            # Batches carry precomputed 'mu' and 'conv_final' tensors; both
            # are flattened to (batch_size, -1) before being fed to train().
            wl, sm, global_step = network.train(batch['mu'].reshape(config.batch_format['size'], -1)
                                                , batch['conv_final'].reshape(config.batch_format['size'], -1)
                                                , batch['label']
                                                , learning_rate
                                                , config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                # `duration` covers the time since the previous summary point.
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Save the model
        network.save_model(log_dir, global_step)
Ejemplo n.º 8
0
def main(args):
    """IJB-A/IJB-C verification with average pooling, uncertainty pooling at
    several temperatures, and MLS comparison.

    Features are either re-extracted from the network or loaded from a cached
    .npy file, depending on the hard-coded `re_extract_feature` switch.
    Fixes vs. original: removed the unused local `sigma_sq_avg`; corrected the
    error-message typo ('Unkown' -> 'Unknown').
    """
    network = Network()
    network.load_model(args.model_dir)
    proc_func = lambda x: preprocess(x, network.config, False)

    testset = Dataset(args.dataset_path)
    if args.protocol == 'ijba':
        tester = IJBATest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    elif args.protocol == 'ijbc':
        tester = IJBCTest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    else:
        raise ValueError('Unknown protocol. Only accept "ijba" or "ijbc".')

    # Toggle to re-run the (slow) forward pass instead of loading the cache.
    re_extract_feature = False
    # NOTE(review): 'jiba' looks like a typo for 'ijba', but the name is used
    # consistently for both save and load, so it is left unchanged.
    save_name_feature = 'feature_jiba.npy'
    if re_extract_feature:
        mu, sigma_sq = network.extract_feature(tester.image_paths, args.batch_size, proc_func=proc_func, verbose=True)
        features = np.concatenate([mu, sigma_sq], axis=1)
        np.save(save_name_feature, features)
    else:
        features = np.load(save_name_feature)
        x = features
        # Split cached features back into (mu, sigma_sq). Normally the halves
        # are equal-sized; 257/513-dim features instead carry one uncertainty
        # scalar appended to a 256/512-dim embedding.
        D = int(x.shape[1] / 2)
        if x.shape[1] == 257 or x.shape[1] == 513:
            D = int(x.shape[1] - 1)
        mu, sigma_sq = x[:, :D], x[:, D:]
    print('sigma_sq', sigma_sq.mean(), sigma_sq.max(), sigma_sq.min())

    print('---- Average pooling')
    aggregate_templates(tester.verification_templates, features, 'mean')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))

    # Sweep the fusion temperature T for uncertainty pooling / MLS matching.
    for T in [1, 0.5, 2]:
        print('')
        print('T:', T)
        print('---- Uncertainty pooling')
        aggregate_templates(tester.verification_templates, features, 'PFE_fuse', T=T)
        TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
        for i in range(len(TARs)):
            print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))

        print('---- MLS comparison')
        aggregate_templates(tester.verification_templates, features, 'PFE_fuse_match')
        TARs, std, FARs = tester.test_verification(force_compare(utils.pair_MLS_score))
        for i in range(len(TARs)):
            print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))
Ejemplo n.º 9
0
def main(args):
    """Smoke-test pairwise scoring on a fixed list of hard-coded images.

    Consecutive entries form pairs (even index vs. the following odd index):
    an LFW same-person pair, an RGB-sensor same-person pair, and two
    cross-dataset different-person pairs. Removed from the original: an
    unused local `import cv2` and dead commented-out preprocessing code.
    """
    paths = [
        r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
        r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
        r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\a-000013.jpg',
        r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
        r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
        r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
        r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
        r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
    ]
    print('%d images to load.' % len(paths))
    assert (len(paths) > 0)

    # Load model files and config file
    network = Network()
    network.load_model(args.model_dir)
    images = preprocess(paths, network.config, False)
    print(images.shape)

    # Run forward pass to calculate embeddings
    mu, sigma_sq = network.extract_feature(images,
                                           args.batch_size,
                                           verbose=True)
    print(mu.shape, sigma_sq.shape)

    # Summary statistics of the predicted uncertainties.
    print('sigma_sq', np.max(sigma_sq), np.min(sigma_sq), np.mean(sigma_sq),
          np.exp(np.mean(np.log(sigma_sq))))
    log_sigma_sq = np.log(sigma_sq)
    print('log_sigma_sq', np.max(log_sigma_sq), np.min(log_sigma_sq),
          np.mean(log_sigma_sq))

    feat_pfe = np.concatenate([mu, sigma_sq], axis=1)

    # Score even-indexed images against the following odd-indexed ones.
    score = utils.pair_cosin_score(mu[::2], mu[1::2])
    print(score)

    score = utils.pair_MLS_score(feat_pfe[::2], feat_pfe[1::2])
    print(score)
Ejemplo n.º 10
0
def main(args):
    """Evaluate a PFE model on IJB-A or IJB-C verification using three
    template-aggregation strategies: average pooling, uncertainty pooling
    (PFE fusion), and mutual-likelihood-score comparison.

    Fixes vs. original: corrected the error-message typo
    ('Unkown' -> 'Unknown') and extracted the thrice-repeated ROC print
    loop into a local helper.
    """
    def _report(TARs, std, FARs):
        # One line per ROC operating point.
        for i in range(len(TARs)):
            print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i],
                                                          FARs[i]))

    network = Network()
    network.load_model(args.model_dir)
    proc_func = lambda x: preprocess(x, network.config, False)

    testset = Dataset(args.dataset_path)
    if args.protocol == 'ijba':
        tester = IJBATest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    elif args.protocol == 'ijbc':
        tester = IJBCTest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    else:
        raise ValueError('Unknown protocol. Only accept "ijba" or "ijbc".')

    mu, sigma_sq = network.extract_feature(tester.image_paths,
                                           args.batch_size,
                                           proc_func=proc_func,
                                           verbose=True)
    features = np.concatenate([mu, sigma_sq], axis=1)

    print('---- Average pooling')
    aggregate_templates(tester.verification_templates, features, 'mean')
    _report(*tester.test_verification(force_compare(utils.pair_euc_score)))

    print('---- Uncertainty pooling')
    aggregate_templates(tester.verification_templates, features, 'PFE_fuse')
    _report(*tester.test_verification(force_compare(utils.pair_euc_score)))

    print('---- MLS comparison')
    aggregate_templates(tester.verification_templates, features,
                        'PFE_fuse_match')
    _report(*tester.test_verification(force_compare(utils.pair_MLS_score)))
Ejemplo n.º 11
0
def main(args):
    """Train a PFE-style network with per-epoch 1:1 verification evaluation.

    Per epoch: save a checkpoint, optionally run LFW open-set tests and the
    configured 1:1 verification benchmarks, then train for
    `config.epoch_size` steps, appending results to `training-log.txt`.
    """
    print('start main')
    # First assignment is a dead store; only 'cfp_fp' is actually evaluated.
    test_1v1_target = 'cfp_fp,agedb_30'
    test_1v1_target = 'cfp_fp'
    test_lfw_openset_numTrials = 0  # 0 disables the LFW open-set tests below

    # I/O
    config_file = args.config_file
    # NOTE(review): `imp` is deprecated (removed in Python 3.12); importlib
    # is the modern replacement — left unchanged here.
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    t1 = time.time()
    # Either read the training image list from a cached text file or scan the
    # dataset directory (the slower path); currently the latter.
    read_imagelist_from_file = False
    imagelist_file_for_train = 'data/list_to_train_ms1m-retinaface-t1-img.txt'
    if read_imagelist_from_file:
        trainset = Dataset(imagelist_file_for_train)
        print('time', time.time() - t1)
    else:
        trainset = Dataset(config.train_dataset_path)
        print('time', time.time() - t1)
        # trainset.write_datalist_to_file(imagelist_file_for_train)
    trainset.set_base_seed(config.base_random_seed)

    network = Network()
    network.initialize(config, trainset.num_classes)

    # Initalization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        print(config.restore_model)
        network.restore_model(config.restore_model, config.restore_scopes,
                              config.exclude_restore_scopes)

    # Preload LFW images only when open-set testing is enabled.
    test_images_lfw = None
    if test_lfw_openset_numTrials > 0 and args.dataset_path:
        lfw_paths = get_paths_all(os.path.expanduser(args.dataset_path))
        test_images_lfw = preprocess(lfw_paths, config, False)

    # Load each requested 1:1 verification benchmark (.bin) that exists.
    ver_list = []
    ver_name_list = []
    for name in test_1v1_target.split(','):
        path = os.path.join(config.test_data_dir_mx, name + ".bin")
        if os.path.exists(path):
            image_size = [112, 112]
            data_list, issame_list = verification.load_bin(path, image_size)
            data_list = data_list[0].asnumpy()
            images = preprocess(data_list, network.config, False)
            data_set = (images, issame_list)
            ver_list.append(data_set)
            ver_name_list.append(name)
            print('ver', name)

    # Training-mode preprocessing runs inside the background batch queue.
    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_format, proc_func=proc_func)
    # batch = trainset.pop_batch_queue()

    # Main Loop
    print(
        '\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'
        .format(config.name, config.num_epochs, config.epoch_size,
                config.batch_format['size']))
    global_step = 0
    network.save_model(log_dir, global_step)
    start_time = time.time()
    # One extra iteration (num_epochs + 1) so the final model is saved and
    # evaluated before the loop breaks.
    for epoch in range(config.num_epochs + 1):

        # Save the model
        network.save_model(log_dir, global_step)

        if epoch > 0:
            # Evaluation pass: optional LFW open-set + 1:1 verification.
            info_w = ''
            if test_lfw_openset_numTrials > 0 and args.dataset_path:
                mu, sigma_sq = network.extract_feature(test_images_lfw,
                                                       64,
                                                       verbose=True)
                # Quality score: higher means lower predicted uncertainty.
                quality_score = -np.mean(np.log(sigma_sq), axis=1)
                print('sigma_sq percentile [0, 10, 30, 50, 70, 90, 100]')
                print(
                    'sigma_sq ',
                    np.percentile(quality_score.ravel(),
                                  [0, 10, 30, 50, 70, 90, 100]))
                feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
                info1 = openset_lfw(mu, utils.pair_cosin_score,
                                    test_lfw_openset_numTrials)
                info_w += info1 + '\n'
                print(info1)
                info2 = openset_lfw(feat_pfe, utils.nvm_MLS_score,
                                    test_lfw_openset_numTrials)
                print(info2)
                info_w += info2 + '\n'
                info3 = openset_lfw(feat_pfe, utils.nvm_MLS_score_attention,
                                    test_lfw_openset_numTrials)
                print(info3)
                info_w += info3 + '\n'
            info_ver = ''
            for i in range(len(ver_list)):
                print('---', ver_name_list[i], '---')
                # Batch size 128, 10 folds for each verification benchmark.
                info_ver_ = verification.eval_images(ver_list[i][0],
                                                     ver_list[i][1], network,
                                                     128, 10)
                print(info_ver_)
                info_ver += '---' + ver_name_list[i] + '\n'
                info_ver += info_ver_ + '\n'
            info_w += info_ver + '\n'
            with open(os.path.join(log_dir, 'training-log.txt'), 'a') as f:
                f.write(info_w)
        if epoch == config.num_epochs:
            break

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(
                global_step, config)
            batch = trainset.pop_batch_queue()
            # When a batch holds more images than labels (e.g. paired views),
            # duplicate the labels to match the image count.
            if len(batch['image']) > len(batch['label']):
                batch['label'] = np.concatenate(
                    [batch['label'], batch['label']], axis=0)

            wl, global_step = network.train(batch['image'], batch['label'],
                                            learning_rate, config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                # `duration` covers the time since the previous summary point.
                duration = time.time() - start_time
                start_time = time.time()
                with open(os.path.join(log_dir, 'training-log.txt'), 'a') as f:
                    s = utils.display_info(epoch, step, duration, wl)
                    print(s)
                    f.write(s + '\n')
Ejemplo n.º 12
0
# Script: generate obfuscation adversarial faces with a pretrained AdvFaces
# model and save the first result image.
from utils.dataset import Dataset
from utils import utils
from advfaces import AdvFaces

import os
import scipy.misc

# Load Adversarial Face Generator
network = AdvFaces()
network.load_model('pretrained/obfuscation')

## Load images
# Images can be loaded via
# 1. Image Filelist
# dataset = Dataset('image_list.txt')

# 2. Folder of images
dataset = Dataset('data')

# Load config and images
config = utils.import_file('config/default.py', 'config')
# NOTE(review): `preprocess` is used here but not imported above —
# presumably provided by the original project's namespace; verify.
images = preprocess(dataset.images, config, is_training=False)

# Generate Adversarial Images and Adversarial Masks
adversaries, adversaries_mask = network.generate_images(images)
# Rebuild each adversarial image as original + additive mask, overwriting
# the generator's direct output.
for i, adv in enumerate(adversaries):
    adversaries[i] = images[i] + adversaries_mask[i]

# Save adversarial image
# NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite
# is the usual replacement — confirm the pinned SciPy version.
scipy.misc.imsave('results/result.jpg', adversaries[0])
Ejemplo n.º 13
0
def preprocess_t(images):
    """Preprocess `images` in training mode using the resface64 config module."""
    # NOTE(review): 'configfig' looks like a typo for 'config' — confirm the
    # actual package name before changing this import path.
    import configfig.resface64_msarcface as config_g
    images = preprocess(images, config_g, True)
    return images
Ejemplo n.º 14
0
def main(args):
    """Train AdvFaces with per-epoch success-rate computation and sampling.

    Loads train/test datasets and the network, then alternates training
    epochs with attack success-rate evaluation, sample-image generation,
    and checkpoint saving.
    """
    config_file = args.config_file
    # I/O
    config = utils.import_file(config_file, "config")

    trainset = Dataset(config.train_dataset_path, config.mode)
    testset = Dataset(config.test_dataset_path, config.mode)

    network = AdvFaces()
    network.initialize(config, trainset.num_classes)

    # Initalization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)

    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)
    # Training-mode preprocessing runs inside the background batch queue.
    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_size,
                               batch_format=config.batch_format,
                               proc_func=proc_func)

    #
    # Main Loop
    #
    print("\nStart Training\n# epochs: %d\nepoch_size: %d\nbatch_size: %d\n" %
          (config.num_epochs, config.epoch_size, config.batch_size))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        # Test fixtures (originals/targets/test_images/target_feats) are
        # built once on the first epoch and reused by the per-epoch
        # success_rate/test calls below.
        if epoch == 0:
            print("Loading Test Set")
            originals = preprocess(testset.images, config, is_training=False)
            targets = preprocess(testset.targets, config, False)
            print('Done loading test set')
            # Keep only subjects with label < 5 for the sample grid.
            test_images = np.squeeze(
                originals[np.where(testset.labels < 5)[0]])
            target_feats = network.aux_matcher_extract_feature(targets)
            output_dir = os.path.join(log_dir, "samples")
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            utils.save_manifold(test_images,
                                os.path.join(output_dir, "original.jpg"))
            print("Computing initial success rates..")
            success_rate(network, config, originals, targets, target_feats,
                         log_dir, global_step)
            print("testing.")
            test(
                network,
                config,
                test_images,
                targets,
                log_dir,
                global_step,
            )

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(
                global_step, config)
            batch = trainset.pop_batch_queue()
            wl, sm, global_step = network.train(
                batch["images"],
                batch["targets"],
                batch["labels"],
                learning_rate,
                config.keep_prob,
                trainset.num_classes,
            )
            wl["lr"] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                # `duration` covers the time since the previous summary point.
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Computing success rate
        success_rate(network, config, originals, targets, target_feats,
                     log_dir, global_step)

        # Testing
        test(
            network,
            config,
            test_images,
            targets,
            log_dir,
            global_step,
        )

        # Save the model
        network.save_model(log_dir, global_step)