Example #1
import os
import sys

import numpy as np
import sklearn.preprocessing

flip_test = True  # assumed flag: also embed the horizontally flipped batch and add the two results


def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
    batch_size = args.batch_size
    print('#####', args.model, args.output_root)
    gpuid = 0
    import torch, torch.utils.data

    model_prefix, epoch = args.model.split(',')
    sys.path.insert(0, args.code)
    from config import conf
    conf.need_log = False
    from Learner import face_learner, FaceInfer
    learner = FaceInfer(conf, (gpuid, ))
    learner.load_state(resume_path=model_prefix, )
    learner.model.eval()

    # TestDataset is defined elsewhere in the original script
    loader = torch.utils.data.DataLoader(TestDataset(args),
                                         batch_size=batch_size,
                                         num_workers=12,
                                         shuffle=False,
                                         pin_memory=True,
                                         drop_last=False)
    bin_filename = os.path.join(
        args.images_list.split('/')[-2],
        args.images_list.split('/')[-1].split('.')[0] + '.bin')
    if args.use_torch:
        model_name = model_prefix.strip('/').split('/')[-2]
    else:
        model_name = os.path.basename(model_prefix)
    if args.model_name is not None:
        model_name = args.model_name
    dump_path = os.path.join(args.output_root, model_name, bin_filename)
    print('###### features will be dumped to: %s' % dump_path)
    dirname = os.path.dirname(dump_path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    dump = open(dump_path, 'wb')

    import lz
    dev = torch.device(f'cuda:{gpuid}')

    for batch in loader:
        # scale pixel values from [0, 255] to roughly [-1, 1]
        batch = (batch - 127.5) / 127.5
        with torch.no_grad():
            embs = learner.model(lz.to_torch(batch).to(dev)).cpu().numpy()
            if flip_test:
                embs += learner.model(
                    lz.to_torch(
                        batch[..., ::-1].copy()).to(dev)).cpu().numpy()
        embs = sklearn.preprocessing.normalize(embs)

        for k in range(embs.shape[0]):
            dump.write(embs[k].astype(np.float32))
            dump.flush()
    dump.flush()
    dump.close()
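
The dump file holds nothing but the raw float32 embedding rows written back to back. A minimal sketch of reading it back, assuming a 512-dimensional embedding (the usual fc1 size for these InsightFace-style models); the path below is a hypothetical example, not one from the script:

import numpy as np

emb_dim = 512  # assumption: the model's embedding size
path = 'features/model_a/probe.bin'  # hypothetical dump path
embs = np.fromfile(path, dtype=np.float32).reshape(-1, emb_dim)
print(embs.shape, np.linalg.norm(embs[0]))  # rows were L2-normalized before dumping
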
Example #2
import os
import sys

import mxnet as mx
import numpy as np
import sklearn.preprocessing
import tensorflow as tf
from easydict import EasyDict as edict
from tqdm import tqdm

flip_test = True  # assumed flag: also embed the horizontally flipped batch and add the two results
preprocess_img = False  # assumed off here; the torch branch normalizes inside the loop instead


def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
    batch_size = args.batch_size

    print('#####', args.model, args.output_root)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # dummy file list, only used to fix the placeholder dtype; the real list is
    # fed into the iterator further below
    for_test = np.array(['1231', '213213'], dtype='str')
    test_ims = tf.placeholder(for_test.dtype, [None])

    def input_parser2(filename):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_jpeg(image_string,
                                             dct_method="INTEGER_ACCURATE")
        image = tf.cast(image_decoded, tf.float32)
        image = tf.transpose(image, perm=[2, 0, 1])
        if preprocess_img:
            image = image - 127.5
            image = image / 128.
        return image

    test_data = tf.data.Dataset.from_tensor_slices((test_ims))
    test_data = test_data.map(input_parser2, num_parallel_calls=48)
    test_data = test_data.prefetch(batch_size * 100)
    test_data = test_data.batch(batch_size)
    iterator2 = test_data.make_initializable_iterator()
    next_element2 = iterator2.get_next()
    sess.run(iterator2.initializer, feed_dict={test_ims: for_test})
    if not args.use_torch:
        gpuid = 0
        ctx = mx.gpu(gpuid)
        net = edict()
        net.ctx = ctx
        model_prefix, epoch = args.model.split(',')
        epoch = int(epoch)
        print('loading %s %d' % (model_prefix, epoch))
        net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(
            model_prefix, epoch)
        net.sym = net.sym.get_internals()['fc1_output']
        net.model = mx.mod.Module(symbol=net.sym,
                                  context=net.ctx,
                                  label_names=None)
        net.model.bind(for_training=False,
                       data_shapes=[('data', (batch_size, 3, 112, 112))])
        net.model.set_params(net.arg_params, net.aux_params)
    else:
        model_prefix, epoch = args.model.split(',')
        sys.path.insert(0, os.environ['HOME'] + '/prj/InsightFace_Pytorch/')
        from config import conf
        gpuid = 0
        conf.ipabn = False
        conf.need_log = False
        from Learner import face_learner, FaceInfer
        learner = FaceInfer(conf, gpuid)
        learner.load_state(resume_path=model_prefix, )

    #     data = mx.nd.array(np.random.normal(size=(batch_size,3,112,112)))
    #     db = mx.io.DataBatch(data=(data,))
    #     net.model.forward(db, is_train=False)

    # read the image paths (first whitespace-separated token per line) and prepend the prefix
    with open(args.images_list) as f:
        spisok = [args.prefix + line.split(' ')[0] for line in f.read().split('\n')[:-1]]

    for_test = np.array(spisok, dtype='str')
    sess.run(iterator2.initializer, feed_dict={test_ims: for_test})
    bin_filename = os.path.join(
        args.images_list.split('/')[-2],
        args.images_list.split('/')[-1].split('.')[0] + '.bin')
    if args.use_torch:
        model_name = model_prefix.strip('/').split('/')[-2]
    else:
        model_name = os.path.basename(model_prefix)
    dump_path = os.path.join(args.output_root, model_name, bin_filename)
    print('###### features will be dumped to: %s' % dump_path)
    dirname = os.path.dirname(dump_path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    dump = open(dump_path, 'wb')

    for i in tqdm(range(int(np.ceil(len(spisok) / float(batch_size))))):
        batch = sess.run(next_element2)

        # im = np.transpose(batch[0], (1, 2, 0))
        # cv2.imshow('x', im.astype(np.uint8))
        # cv2.waitKey(0)

        if not args.use_torch:
            # wrap the numpy batch for the MXNet module
            data = mx.nd.array(batch)
            db = mx.io.DataBatch(data=(data, ))
            net.model.forward(db, is_train=False)
            embs = net.model.get_outputs()[0].asnumpy()

            if flip_test:
                batch = batch[:, :, :, ::-1]
                data = mx.nd.array(batch)
                db = mx.io.DataBatch(data=(data, ))
                net.model.forward(db, is_train=False)
                embs += net.model.get_outputs()[0].asnumpy()
        else:
            import lz, torch
            dev = torch.device(f'cuda:{gpuid}')
            # scale pixel values from [0, 255] to roughly [-1, 1]
            batch = (batch - 127.5) / 127.5
            with torch.no_grad():
                embs = learner.model(lz.to_torch(batch).to(dev)).cpu().numpy()
                if flip_test:
                    embs += learner.model(
                        lz.to_torch(
                            batch[..., ::-1].copy()).to(dev)).cpu().numpy()

        embs = sklearn.preprocessing.normalize(embs)

        for k in range(embs.shape[0]):
            dump.write(embs[k].astype(np.float32))

    dump.close()
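
Both main() variants read the same attributes off args. A minimal argparse driver sketch; the flag names match what the examples access, but the defaults and help strings are assumptions rather than values from the original scripts:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='dump face embeddings to a .bin file')
    parser.add_argument('--model', required=True, help='"<model_prefix>,<epoch>"')
    parser.add_argument('--images_list', required=True, help='text file with one image path per line')
    parser.add_argument('--prefix', default='', help='prepended to every path from images_list')
    parser.add_argument('--output_root', default='./features')
    parser.add_argument('--model_name', default=None)
    parser.add_argument('--code', default='.', help='directory that contains config.py and Learner.py')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--gpu_num', type=int, default=0)
    parser.add_argument('--use_torch', action='store_true')
    args = parser.parse_args()
    main(args)
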
Example #3
import logging
import os.path as osp

import torch

import lz
from config import conf  # as in the earlier examples (InsightFace_Pytorch config/Learner)
from Learner import FaceInfer

# loader, root_folder_name and save_mat are defined elsewhere in the surrounding script

conf.use_chkpnt = False
learner = FaceInfer(conf, )
learner.load_state(
    # resume_path='work_space/emore.r152.cont/save/',
    resume_path='work_space/asia.emore.r50.5/save/',
    latest=True,
)
learner.model.eval()
from sklearn.preprocessing import normalize

for ind, data in enumerate(loader):
    if (data['finish'] == 1).all().item():
        logging.info('finish')
        break
    # if ind % 10 == 0:
    print(f'proc batch {ind}')
    img = data['img'].cuda()
    img_flip = data['img_flip'].cuda()
    imgfn = data['imgfn']
    with torch.no_grad():
        # flip-test: sum the embeddings of the image and its horizontal mirror
        fea = learner.model(img)
        fea_mirror = learner.model(img_flip)
        fea += fea_mirror
        fea = fea.cpu().numpy()
    fea = normalize(fea, axis=1)
    for imgfn_, fea_ in zip(imgfn, fea):
        feafn_ = imgfn_.replace(root_folder_name + '_OPPOFaces',
                                root_folder_name + '_OPPOFeatures') + '_OPPO.bin'
        dst_folder = osp.dirname(feafn_)
        lz.mkdir_p(dst_folder, delete=False)
        save_mat(feafn_, fea_)
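
All three examples apply the same flip-test: sum the embedding of the image and of its horizontal mirror, then L2-normalize. Because normalization rescales every row to unit length, summing and averaging give identical results; a small self-contained check:

import numpy as np
from sklearn.preprocessing import normalize

fea = np.random.randn(4, 512).astype(np.float32)          # embeddings of the original crops
fea_mirror = np.random.randn(4, 512).astype(np.float32)   # embeddings of the mirrored crops

summed = normalize(fea + fea_mirror, axis=1)
averaged = normalize((fea + fea_mirror) / 2.0, axis=1)
assert np.allclose(summed, averaged, atol=1e-6)  # the 1/2 factor is absorbed by L2 normalization
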