Example #1
def feature_patches(img_files, minu_files, output_path):
    """Crop a patch around every minutia in each image and save the patches as JPEGs."""
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)

    n = 0
    for minu_file, img_file in zip(minu_files, img_files):
        img = cv2.imread(img_file)
        #cv2.imshow("latent", latent_img)
        #cv2.imshow("rolled", rolled_img)

        # Minutiae format: x, y, angle (radians). Shift and negate the angle so it
        # matches the orientation convention expected by extract_patches.
        minutiae = np.loadtxt(minu_file, dtype='f', delimiter=',')
        minutiae[:, 2] = minutiae[:, 2] - math.pi / 2
        #show_features(latent_img, latent_minutiae, ROI=None, fname=None)
        minutiae[:, 2] = -minutiae[:, 2]

        patches = descriptor.extract_patches(minutiae,
                                             img,
                                             patchIndexV,
                                             patch_type=6)

        # Save each patch as a sequentially numbered, zero-padded JPEG.
        for j in range(len(patches)):
            fname = "%05d.jpeg" % n
            n += 1
            cv2.imwrite(output_path + fname, patches[j])
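A minimal usage sketch for feature_patches. The directory layout, file extensions, and the assumption that the sorted image and minutiae file lists pair up one-to-one are illustrative, not taken from the original code.

import glob

# Hypothetical input locations; each minutiae .txt is assumed to correspond to the
# .bmp image at the same sort position.
img_files = sorted(glob.glob('data/images/*.bmp'))
minu_files = sorted(glob.glob('data/minutiae/*.txt'))

# output_path is concatenated directly with the file name, so it must end with '/'.
feature_patches(img_files, minu_files, 'data/patches/')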
Example #2
def main_new_minutiae(args,
                      patch_types=None,
                      model_dirs=None,
                      rolled_range=None):
    minutiae_path = args.minutiae_path
    img_path = args.img_path
    new_template_path = args.new_template_path
    mask_path = args.mask_path
    template_path = args.template_path

    if not os.path.exists(new_template_path):
        os.makedirs(new_template_path)
    # for latents
    isLatent = (args.image_type == 'latent')
    if isLatent:
        minutiae_files = []
        for i in range(len(minutiae_path)):
            minutiae_files.append(glob.glob(minutiae_path[i] + '*.txt'))
            minutiae_files[-1].sort()
        img_files = glob.glob(img_path + '*.bmp')
        img_files.sort()
        mask_files = glob.glob(mask_path + '*.bmp')
        mask_files.sort()

        template_files = glob.glob(template_path + '*.dat')
        template_files.sort()

    else:
        # Rolled prints: template files are named by their index within rolled_range.
        template_files = [
            str(i + 1) + '.dat'
            for i in range(rolled_range[0], rolled_range[1])
        ]
    # NOTE: minutiae_files and img_files are only populated in the latent branch,
    # so this function effectively assumes args.image_type == 'latent'.
    assert (len(minutiae_files) > 0)

    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)

    assert (len(patch_types) == len(model_dirs))

    models = []
    for model_dir in model_dirs:
        models.append(ImportGraph(model_dir))

    #for template in template.minu_template:
    batch_size = args.batch_size
    nrof_imgs = len(img_files)
    nrof_minutiae_set = len(minutiae_files)
    for print_ind in range(nrof_imgs):
        # minutiae templates
        img_file = img_files[print_ind]
        img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
        img = img.astype(float)
        #mask = cv2.imread(mask_files[print_ind], cv2.IMREAD_GRAYSCALE)

        template_file = template_files[print_ind]
        template = Template.Bin2Template_Byte(template_file, isLatent=isLatent)

        for n in range(nrof_minutiae_set):
            minutiae = np.loadtxt(minutiae_files[n][print_ind])
            nrof_minutiae = len(minutiae)
            mask = template.minu_template[0].mask
            #show_minutiae_sets(img,[minutiae],ROI=None)
            #plt.imshow(patch, cmap='gray')
            #plt.show()
            #remove minutiae in the background
            h, w = mask.shape
            flag = np.ones((nrof_minutiae, ), dtype=bool)
            for i in range(nrof_minutiae):
                x = int(minutiae[i, 0])
                y = int(minutiae[i, 1])
                if y < 10 or x < 10 or x > w - 10 or y > h - 10:
                    flag[i] = False
                elif np.sum(mask[y - 1:y + 2, x - 1:x + 2]) == 0:
                    flag[i] = False
            minutiae = minutiae[flag, :]
            if len(minutiae) < 3:
                print(len(minutiae))
            #show_minutiae_sets(img,[minutiae], ROI=None, fname=None, block=True)
            template.minu_template[n].des = []
            template.minu_template[n].minutiae = minutiae
            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                # for i in range(len(patches)):
                #     patch = patches[i, :, :, 0]
                #     plt.imshow(patch, cmap='gray')
                #     plt.show()
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    #print(i)
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                template.minu_template[n].des.append(emb_array)
        # Texture templates: minutiae and descriptors are cleared, not recomputed.
        for n, t in enumerate(template.texture_template):
            template.texture_template[n].minutiae = []
            template.texture_template[n].des = []

        fname = new_template_path + os.path.basename(template_file)
        Template.Template2Bin_Byte_TF(fname, template, isLatent=isLatent)
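The inner loop above batches minutiae patches through each descriptor network and then L2-normalizes every embedding row. A standalone sketch of that pattern, assuming only that the model object exposes a run(batch) method returning one embedding per patch (as ImportGraph does above):

import math
import numpy as np

def embed_patches(patches, model, batch_size, embedding_size):
    # Push the patches through the network in fixed-size batches.
    nrof_patches = len(patches)
    emb_array = np.zeros((nrof_patches, embedding_size))
    nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
    for i in range(nrof_batches):
        start = i * batch_size
        end = min((i + 1) * batch_size, nrof_patches)
        emb_array[start:end, :] = model.run(patches[start:end])
    # L2-normalize each embedding; the epsilon guards against all-zero rows.
    norms = np.linalg.norm(emb_array, axis=1, keepdims=True) + 1e-7
    return emb_array / norms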
Example #3
def main(args, patch_types=None, model_dirs=None, rolled_range=None):
    template_path = args.template_path
    img_path = args.img_path
    new_template_path = args.new_template_path

    if not os.path.exists(new_template_path):
        os.makedirs(new_template_path)
    # for latents
    isLatent = (args.image_type == 'latent')
    if isLatent:
        template_files = os.listdir(template_path)
        template_files.sort()
    else:
        template_files = [
            str(i + 1) + '.dat'
            for i in range(rolled_range[0], rolled_range[1])
        ]
    assert (len(template_files) > 0)

    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)

    assert (len(patch_types) == len(model_dirs))

    models = []
    for model_dir in model_dirs:
        models.append(ImportGraph(model_dir))

    #for template in template.minu_template:
    batch_size = args.batch_size
    for print_ind, file in enumerate(template_files):
        print(print_ind)
        template = Template.Bin2Template_Byte(template_path + file,
                                              isLatent=isLatent)
        if template is None:
            continue
        # minutiae templates
        img_file = img_path + file.split('.')[0] + '.bmp'
        img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
        img = img.astype(float)
        for n, t in enumerate(template.minu_template):
            minutiae = t.minutiae
            template.minu_template[n].des = []
            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                # for i in range(len(patches)):
                #     patch = patches[i, :, :, 0]
                #     plt.imshow(patch, cmap='gray')
                #     plt.show()
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    #print(i)
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                template.minu_template[n].des.append(emb_array)
        # Texture templates: descriptor extraction is intentionally disabled; the
        # `continue` makes the block below unreachable.
        for n, t in enumerate(template.texture_template):
            template.texture_template[n].minutiae = []
            #minutiae = t.minutiae
            minutiae = None

            template.texture_template[n].des = []
            continue

            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    #print(i)
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                #template.texture_template[n].des[patch_type] = emb_array
                template.texture_template[n].des.append(emb_array)
        fname = new_template_path + file
        Template.Template2Bin_Byte_TF(fname, template, isLatent=isLatent)
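A hypothetical driver for main. The attribute names on args mirror the ones read above; the command-line flags, default paths, and patch_type values are assumptions for illustration only.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--template_path', default='data/templates/')
parser.add_argument('--img_path', default='data/images/')
parser.add_argument('--new_template_path', default='data/templates_new/')
parser.add_argument('--image_type', default='latent')
parser.add_argument('--batch_size', type=int, default=128)
args = parser.parse_args()

# patch_types and model_dirs must have the same length: one trained model per patch type.
main(args,
     patch_types=[1, 8, 11],
     model_dirs=['models/type_1/', 'models/type_8/', 'models/type_11/'],
     rolled_range=(0, 2000))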
Example #4
def main_single(args):
    template_path = args.template_path
    img_path = args.img_path
    new_template_path = args.new_template_path
    # Only latent input is handled here; template_files and isLatent are left
    # undefined for any other image_type.
    if args.image_type == 'latent':
        template_files = os.listdir(template_path)
        template_files.sort()
        isLatent = 1
    assert (len(template_files) > 0)

    batch_size = args.batch_size
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)

    #for template in template.minu_template:
    #    a = 1

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Get the paths for the corresponding images
            # paths, actual_issame = lfw.get_paths(os.path.expanduser(args.test_dir), pairs, args.lfw_file_ext)
            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "batch_join:0")
            # embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            # images_placeholder = tf.get_default_graph4().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("Add:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on testing images')

            for file in template_files:
                template = Template.Bin2Template_Byte(template_path + file,
                                                      isLatent=isLatent)
                # minutiae templates
                img_file = img_path + file.split('.')[0] + '.bmp'
                img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
                img = img.astype(float)
                for t in template.minu_template:
                    minutiae = t.minutiae
                    patches = descriptor.extract_patches(minutiae,
                                                         img,
                                                         patchIndexV,
                                                         patch_type=6)
                    nrof_patches = len(patches)
                    emb_array = np.zeros((nrof_patches, embedding_size))
                    nrof_batches = int(
                        math.ceil(1.0 * nrof_patches / batch_size))
                    for i in range(nrof_batches):
                        print(i)
                        start_index = i * batch_size
                        end_index = min((i + 1) * batch_size, nrof_patches)
                        patches_batch = patches[start_index:end_index, :, :]
                        feed_dict = {
                            images_placeholder: patches_batch,
                            phase_train_placeholder: False
                        }
                        emb_array[start_index:end_index, :] = sess.run(
                            embeddings, feed_dict=feed_dict)
                    # L2-normalize each embedding; note the result is not written
                    # back to the template or saved in this example.
                    for i in range(nrof_patches):
                        norm = np.linalg.norm(emb_array[i, :]) + 1e-7
                        emb_array[i, :] = emb_array[i, :] / norm
Example #5
def feature_extraction(img_files,
                       minu_files,
                       model_dir,
                       output_file,
                       patch_type=6):
    """Compute a descriptor embedding for every minutia in each image and save the stacked features to output_file."""
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Get the paths for the corresponding images
            # paths, actual_issame = lfw.get_paths(os.path.expanduser(args.test_dir), pairs, args.lfw_file_ext)
            # Load the model
            print('Model directory: %s' % model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "batch_join:0")
            # embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            # images_placeholder = tf.get_default_graph4().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("Add:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            features = np.array([],
                                dtype=np.float32).reshape(0, embedding_size)
            for minu_file, img_file in zip(minu_files, img_files):
                img = cv2.imread(img_file)
                #cv2.imshow("latent", latent_img)
                #cv2.imshow("rolled", rolled_img)

                # Minutiae format: x, y, angle (radians); shift the angle to the
                # orientation convention expected by extract_patches.
                minutiae = np.loadtxt(minu_file, dtype='f', delimiter=',')
                minutiae[:, 2] = minutiae[:, 2] - math.pi / 2
                #show_features(latent_img, latent_minutiae, ROI=None, fname=None)
                #minutiae[:, 2] = -minutiae[:, 2]

                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                # for i in range(len(patches)):
                #     patch = patches[i, :, :, 0]
                #     plt.imshow(patch, cmap='gray')
                #     plt.show()

                feed_dict = {
                    images_placeholder: patches,
                    phase_train_placeholder: False
                }
                latent_emb = sess.run(embeddings, feed_dict=feed_dict)

                features = np.vstack([features, latent_emb])

                #cv2.imshow("latent patch", latent_patches[0]/255)
                #cv2.imshow("rolled patch", rolled_patches[0]/255)

                #cv2.waitKey(0)
            np.save(output_file, features)
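A minimal usage sketch for feature_extraction. The paths, file extensions, and model directory are hypothetical; the function saves the stacked embeddings with np.save, so output_file should be an .npy path.

import glob

img_files = sorted(glob.glob('data/latents/*.bmp'))
minu_files = sorted(glob.glob('data/latent_minutiae/*.txt'))

feature_extraction(img_files,
                   minu_files,
                   model_dir='models/type_6/',
                   output_file='data/latent_features.npy',
                   patch_type=6)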