Example #1
def embedding_generator(gpu_id, q_in, q_out):
    # Worker process: pulls lists of image paths from q_in, computes face
    # embeddings on the assigned GPU, and pushes (embeddings, image names)
    # to q_out until a None sentinel is received.
    gpuid = gpu_id
    mod = get_mod(gpu_id)
    while True:
        data = q_in.get(block=True)
        if data is None:
            break
        imlist = []
        imnamelist = []
        for impath in data:
            im = cv2.imread(impath).astype(np.float32)
            if preprocess_img:
                im = im - 127.5
                im = im / 128.
            imlist.append(im[:, :, ::-1])  # BGR -> RGB
            imnamelist.append(impath.replace(args.prefix, ''))
        batch = np.array(imlist).transpose((0, 3, 1, 2))  # NHWC -> NCHW

        if not use_torch:
            # MXNet path: forward the batch and collect the embedding output
            data = mx.nd.array(batch)
            db = mx.io.DataBatch(data=(data, ))
            mod.forward(db, is_train=False)
            embs = mod.get_outputs()[0].asnumpy()
            if flip_test:
                # add embeddings of the horizontally flipped batch
                batch = batch[:, :, :, ::-1]
                data = mx.nd.array(batch)
                db = mx.io.DataBatch(data=(data, ))
                mod.forward(db, is_train=False)
                embs += mod.get_outputs()[0].asnumpy()
        else:
            import lz, torch
            # PyTorch path: scale pixels to [-1, 1] and run the model on the GPU
            batch = batch - 127.5
            batch = batch / 127.5
            dev = torch.device(f'cuda:{gpuid}')
            with torch.no_grad():
                embs = mod.model(lz.to_torch(batch).to(dev)).cpu().numpy()
                if flip_test:
                    # add embeddings of the horizontally flipped batch
                    embs += mod.model(
                        lz.to_torch(
                            batch[..., ::-1].copy()).to(dev)).cpu().numpy()

        # L2-normalize each embedding
        import sklearn.preprocessing
        embs = sklearn.preprocessing.normalize(embs)

        q_out.put((embs, imnamelist))
Example #2
def embedding_generator(gpu_id, q_in, q_out):
    # Same worker pattern as above, but reads the backend flag from args and
    # pins the process to its GPU before building the model.
    if args.use_torch:
        import torch
        print('-->', torch.cuda.device_count(), gpu_id)
        torch.cuda.set_device(gpu_id)
    mod = get_mod(gpu_id)

    while True:
        data = q_in.get(block=True)
        if data is None:
            break
        imlist = []
        imnamelist = []
        for impath in data:
            assert os.path.exists(impath), impath
            im = cv2.imread(impath).astype(np.float32)
            if preprocess_img:
                im = im - 127.5
                im = im / 128.
            imlist.append(im[:, :, ::-1])  # BGR -> RGB
            imnamelist.append(impath.replace(args.prefix, ''))
        batch = np.array(imlist).transpose((0, 3, 1, 2))  # NHWC -> NCHW

        if not args.use_torch:
            import mxnet as mx
            from mxnet import ndarray as nd
            data = mx.nd.array(batch)
            db = mx.io.DataBatch(data=(data, ))
            mod.forward(db, is_train=False)
            embs = mod.get_outputs()[0].asnumpy()
            if flip_test:
                batch = batch[:, :, :, ::-1]
                data = mx.nd.array(batch)
                db = mx.io.DataBatch(data=(data, ))
                mod.forward(db, is_train=False)
                embs += mod.get_outputs()[0].asnumpy()
        else:
            import lz, torch
            # PyTorch path: scale pixels to [-1, 1] and run the model on the GPU
            batch = batch - 127.5
            batch = batch / 127.5
            dev = torch.device(f'cuda:{gpu_id}')
            with torch.no_grad():
                embs = mod.model(lz.to_torch(batch).to(dev)).cpu().numpy()
                if flip_test:
                    # add embeddings of the horizontally flipped batch
                    embs += mod.model(
                        lz.to_torch(
                            batch[..., ::-1].copy()).to(dev)).cpu().numpy()

        # L2-normalize each embedding
        import sklearn.preprocessing
        embs = sklearn.preprocessing.normalize(embs)
        q_out.put((embs, imnamelist))
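Neither example shows how these workers are launched. The sketch below is one plausible way to drive embedding_generator with multiprocessing queues; run_workers, the chunk size, and the GPU list are illustrative assumptions, not part of the original code.

import multiprocessing as mp

def run_workers(image_paths, gpu_ids, chunk=32):
    # hypothetical driver: one worker per GPU, fed through a shared queue
    q_in, q_out = mp.Queue(), mp.Queue()
    procs = [mp.Process(target=embedding_generator, args=(g, q_in, q_out))
             for g in gpu_ids]
    for p in procs:
        p.start()
    chunks = [image_paths[i:i + chunk]
              for i in range(0, len(image_paths), chunk)]
    for c in chunks:
        q_in.put(c)            # each list of paths becomes one batch
    for _ in procs:
        q_in.put(None)         # one sentinel per worker ends its loop
    results = [q_out.get() for _ in chunks]   # (embs, imnamelist) pairs
    for p in procs:
        p.join()
    return results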
Example #3
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
    batch_size = args.batch_size
    print('#####', args.model, args.output_root)
    gpuid = 0
    import torch, torch.utils.data

    model_prefix, epoch = args.model.split(',')
    sys.path.insert(0, args.code)
    from config import conf
    conf.need_log = False
    from Learner import face_learner, FaceInfer
    learner = FaceInfer(conf, (gpuid, ))
    learner.load_state(resume_path=model_prefix, )
    learner.model.eval()

    loader = torch.utils.data.DataLoader(TestDataset(args),
                                         batch_size=batch_size,
                                         num_workers=12,
                                         shuffle=False,
                                         pin_memory=True,
                                         drop_last=False)
    bin_filename = os.path.join(
        args.images_list.split('/')[-2],
        args.images_list.split('/')[-1].split('.')[0] + '.bin')
    if args.use_torch:
        model_name = model_prefix.strip('/').split('/')[-2]
    else:
        model_name = os.path.basename(model_prefix)
    if args.model_name is not None:
        model_name = args.model_name
    dump_path = os.path.join(args.output_root, model_name, bin_filename)
    print('###### features will be dumped to:%s' % dump_path)
    dirname = os.path.dirname(dump_path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    dump = open(dump_path, 'wb')

    for batch in loader:
        import lz, torch
        dev = torch.device(f'cuda:{gpuid}')
        # scale pixels to [-1, 1]
        batch = batch - 127.5
        batch = batch / 127.5
        with torch.no_grad():
            embs = learner.model(lz.to_torch(batch).to(dev)).cpu().numpy()
            if flip_test:
                embs += learner.model(
                    lz.to_torch(
                        batch[..., ::-1].copy()).to(dev)).cpu().numpy()
        # L2-normalize each embedding
        embs = sklearn.preprocessing.normalize(embs)

        # append the raw float32 embeddings to the .bin dump
        for k in range(embs.shape[0]):
            dump.write(embs[k].astype(np.float32))
            dump.flush()
    dump.flush()
    dump.close()
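Reading the resulting .bin file back is not shown in the example; since it is just the float32 embeddings written one after another, a minimal sketch would be the following (the embedding dimension of 512 is an assumption and must match the model):

import numpy as np

def load_bin_features(dump_path, emb_dim=512):
    # returns one L2-normalized row per image, in DataLoader order
    feats = np.fromfile(dump_path, dtype=np.float32)
    return feats.reshape(-1, emb_dim)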
Example #4
def __getitem__(self, item):
    img_index = item
    each_line = files[img_index]
    name_lmk_score = each_line.strip().split(' ')
    faceness_score = float(name_lmk_score[-1])
    if self.env is None:
        # read from disk (BGR) and convert to RGB
        img_name = os.path.join(img_path, name_lmk_score[0])
        img = cvb.read_img(img_name)
        img = cvb.bgr2rgb(img)  # now RGB
    else:
        # read the JPEG bytes from the LMDB environment
        img_name = name_lmk_score[0]  # keep img_name defined for the assert below
        with self.env.begin(write=False) as txn:
            imgbuf = txn.get(str(item).encode())
        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        f = Image.open(buf)
        img = np.asarray(f.convert('RGB'))
    assert img is not None, img_name
    # the remaining fields are 5 facial landmarks used for alignment
    lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
                   dtype=np.float32)
    lmk = lmk.reshape((5, 2))
    warp_img = preprocess(img, landmark=lmk)
    warp_img = to_image(warp_img)
    if not use_mxnet:
        img = self.test_transform(warp_img)
    else:
        img = np.array(np.transpose(warp_img, (2, 0, 1)))
        img = lz.to_torch(img).float()
    return img, faceness_score, item, name_lmk_score[0]
Example #5
def get_feature(imgs, nets):
    # Ensemble feature extraction with flip augmentation: each image and its
    # horizontal flip go through every net, the two embeddings are summed,
    # L2-normalized per net, and the per-net features are concatenated.
    count = len(imgs)
    if not use_mxnet:
        data = torch.zeros((count * 2, 3, imgs[0].shape[0], imgs[0].shape[1]))
        for idx, img in enumerate(imgs):
            img = img[:, :, ::-1].copy()  # to rgb
            img = test_transform(img).numpy()
            for flipid in [0, 1]:
                _img = np.copy(img)
                if flipid == 1:
                    _img = _img[:, :, ::-1].copy()
                _img = lz.to_torch(_img)
                data[count * flipid + idx] = _img
        F = []
        for net in nets:
            with torch.no_grad():
                x = net.model(data).cpu().numpy()
                embedding = x[0:count, :] + x[count:, :]
                embedding = sklearn.preprocessing.normalize(embedding)
                F.append(embedding)
        F = np.concatenate(F, axis=1)
        F = sklearn.preprocessing.normalize(F)
    else:
        data = mx.nd.zeros(shape=(count * 2, 3, imgs[0].shape[0],
                                  imgs[0].shape[1]))
        for idx, img in enumerate(imgs):
            img = img[:, :, ::-1]  # to rgb
            img = np.transpose(img, (2, 0, 1))
            for flipid in [0, 1]:
                _img = np.copy(img)
                if flipid == 1:
                    _img = _img[:, :, ::-1]
                _img = nd.array(_img)
                data[count * flipid + idx] = _img

        F = []
        for net in nets:
            db = mx.io.DataBatch(data=(data, ))
            net.model.forward(db, is_train=False)
            x = net.model.get_outputs()[0].asnumpy()
            embedding = x[0:count, :] + x[count:, :]
            embedding = sklearn.preprocessing.normalize(embedding)
            F.append(embedding)
        F = np.concatenate(F, axis=1)
        F = sklearn.preprocessing.normalize(F)
    return F
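A minimal usage sketch for get_feature, assuming `nets` already holds the loaded models (each exposing `.model` as above) and that the inputs are aligned BGR face crops; the file names are placeholders:

import cv2

crops = [cv2.imread(p) for p in ['face_0.jpg', 'face_1.jpg']]  # aligned BGR crops
feats = get_feature(crops, nets)   # shape: (len(crops), emb_dim * len(nets))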
Example #6
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
    batch_size = args.batch_size

    print('#####', args.model, args.output_root)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # dummy values, used only to give the string placeholder its dtype
    for_test = np.array(['1231', '213213'], dtype='str')
    test_ims = tf.placeholder(for_test.dtype, [None])

    def input_parser2(filename):
        # decode a JPEG to float32, HWC -> CHW, with optional normalization
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_jpeg(image_string,
                                             dct_method="INTEGER_ACCURATE")
        image = tf.cast(image_decoded, tf.float32)
        image = tf.transpose(image, perm=[2, 0, 1])
        if preprocess_img:
            image = image - 127.5
            image = image / 128.
        return image

    test_data = tf.data.Dataset.from_tensor_slices((test_ims))
    test_data = test_data.map(input_parser2, num_parallel_calls=48)
    test_data = test_data.prefetch(batch_size * 100)
    test_data = test_data.batch(batch_size)
    iterator2 = test_data.make_initializable_iterator()
    next_element2 = iterator2.get_next()
    sess.run(iterator2.initializer, feed_dict={test_ims: for_test})
    if not args.use_torch:
        gpuid = 0
        ctx = mx.gpu(gpuid)
        net = edict()
        net.ctx = ctx
        model_prefix, epoch = args.model.split(',')
        epoch = int(epoch)
        print('loading %s %d' % (model_prefix, epoch))
        net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(
            model_prefix, epoch)
        net.sym = net.sym.get_internals()['fc1_output']
        net.model = mx.mod.Module(symbol=net.sym,
                                  context=net.ctx,
                                  label_names=None)
        net.model.bind(for_training=False,
                       data_shapes=[('data', (batch_size, 3, 112, 112))])
        net.model.set_params(net.arg_params, net.aux_params)
    else:
        model_prefix, epoch = args.model.split(',')
        sys.path.insert(0, os.environ['HOME'] + '/prj/InsightFace_Pytorch/')
        from config import conf
        gpuid = 0
        conf.ipabn = False
        conf.need_log = False
        from Learner import face_learner, FaceInfer
        learner = FaceInfer(conf, gpuid)
        learner.load_state(resume_path=model_prefix, )

    #     data = mx.nd.array(np.random.normal(size=(batch_size,3,112,112)))
    #     db = mx.io.DataBatch(data=(data,))
    #     net.model.forward(db, is_train=False)

    # read the image list and prepend the path prefix to each entry
    spisok = open(args.images_list).read().split('\n')[:-1]
    for i in range(len(spisok)):
        spisok[i] = args.prefix + spisok[i].split(' ')[0]

    for_test = np.array(spisok, dtype='str')
    sess.run(iterator2.initializer, feed_dict={test_ims: for_test})
    bin_filename = os.path.join(
        args.images_list.split('/')[-2],
        args.images_list.split('/')[-1].split('.')[0] + '.bin')
    if args.use_torch:
        model_name = model_prefix.strip('/').split('/')[-2]
    else:
        model_name = os.path.basename(model_prefix)
    dump_path = os.path.join(args.output_root, model_name, bin_filename)
    print('###### features will be dumped to:%s' % dump_path)
    dirname = os.path.dirname(dump_path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    dump = open(dump_path, 'wb')

    for i in tqdm(range(int(np.ceil(len(spisok) / float(batch_size))))):
        batch = sess.run(next_element2)

        if not args.use_torch:
            # MXNet path: build the DataBatch only when it is actually needed
            data = mx.nd.array(batch)
            db = mx.io.DataBatch(data=(data, ))
            net.model.forward(db, is_train=False)
            embs = net.model.get_outputs()[0].asnumpy()

            if flip_test:
                batch = batch[:, :, :, ::-1]
                data = mx.nd.array(batch)
                db = mx.io.DataBatch(data=(data, ))
                net.model.forward(db, is_train=False)
                embs += net.model.get_outputs()[0].asnumpy()
        else:
            import lz, torch
            dev = torch.device(f'cuda:{gpuid}')
            # scale pixels to [-1, 1] and run the model on the GPU
            batch = batch - 127.5
            batch = batch / 127.5
            with torch.no_grad():
                embs = learner.model(lz.to_torch(batch).to(dev)).cpu().numpy()
                if flip_test:
                    # add embeddings of the horizontally flipped batch
                    embs += learner.model(
                        lz.to_torch(
                            batch[..., ::-1].copy()).to(dev)).cpu().numpy()

        embs = sklearn.preprocessing.normalize(embs)

        for k in range(embs.shape[0]):
            dump.write(embs[k].astype(np.float32))

    dump.close()