Example #1
def compare_AEM_with_MMM(model_path, img_path='/Data/Latent/NISTSD27/image/',
                         output_path='/AutomatedLatentRecognition/Results/minutiae/NISTSD27_latents_Contrast/',
                         minu_path='/Data/Latent/NISTSD27/ManMinu', processing=None, thr=0.01):
    minu_model = ImportGraph(model_path)

    img_files = glob.glob(img_path + '*.bmp')
    img_files.sort()

    manu_files = glob.glob(minu_path + '*.txt')
    manu_files.sort()
    for i, img_file in enumerate(img_files):
        img = misc.imread(img_file, mode='L')
        if processing == 'contrast':
            img = LP.local_constrast_enhancement(img)
        elif processing == 'STFT':
            img = LP.STFT(img)
        elif processing == 'texture':
            img = LP.FastCartoonTexture(img)
        elif processing == 'texture_STFT':
            img = LP.FastCartoonTexture(img)
            img = LP.STFT(img)
        mnt = minu_model.run_whole_image(img, minu_thr=thr)
        img_name = os.path.basename(img_file)
        fname = output_path + os.path.splitext(img_name)[0] + '_minu.jpeg'

        minutiae_set = []
        minutiae_set.append(mnt)

        input_minu = np.loadtxt(manu_files[i])
        input_minu[:, 2] = input_minu[:, 2] / 180.0 * np.pi
        minutiae_set.append(input_minu)
        print(i)
        show.show_minutiae_sets(img, minutiae_set, mask=None, block=False, fname=fname)
        print(fname)
Example #2
def get_maps_STFT(img, patch_size=64, block_size=16, preprocess=False):
    assert len(img.shape) == 2

    nrof_dirs = 16
    ovp_size = (patch_size - block_size) // 2
    if preprocess:
        img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)
    h0, w0 = img.shape
    img = np.lib.pad(img, (ovp_size, ovp_size), 'symmetric')

    h, w = img.shape
    blkH = (h - patch_size) // block_size + 1
    blkW = (w - patch_size) // block_size + 1
    local_info = np.empty((blkH, blkW), dtype=object)

    x, y = np.meshgrid(range(-patch_size / 2, patch_size / 2),
                       range(-patch_size / 2, patch_size / 2))
    x = x.astype(np.float32)
    y = y.astype(np.float32)
    r = np.sqrt(x * x + y * y) + 0.0001

    # -------------------------
    # Band-pass filter
    # -------------------------
    RMIN = 3   # minimum allowable ridge spacing
    RMAX = 18  # maximum allowable ridge spacing
    FLOW = patch_size / RMAX
    FHIGH = patch_size / RMIN
    dRLow = 1. / (1 + (r / FHIGH)**4)    # low-pass Butterworth filter
    dRHigh = 1. / (1 + (FLOW / r)**4)    # high-pass Butterworth filter
    dBPass = dRLow * dRHigh              # band-pass

    dir = np.arctan2(y, x)
    dir[dir < 0] = dir[dir < 0] + math.pi
    dir_ind = np.floor(dir / (math.pi / nrof_dirs))
    dir_ind = dir_ind.astype(np.int, copy=False)
    dir_ind[dir_ind == nrof_dirs] = 0

    dir_ind_list = []
    for i in range(nrof_dirs):
        tmp = np.argwhere(dir_ind == i)
        dir_ind_list.append(tmp)

    sigma = patch_size / 3
    weight = np.exp(-(x * x + y * y) / (sigma * sigma))

    for i in range(0, blkH):
        for j in range(0, blkW):
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size].copy()
            local_info[i, j] = local_STFT(patch, weight, dBPass)
            local_info[i, j].analysis(r, dir_ind_list)

    # get the ridge flow from the local information
    dir_map, fre_map = get_ridge_flow_top(local_info)
    dir_map = smooth_dir_map(dir_map)

    return dir_map, fre_map
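
# A minimal, self-contained sketch of the band-pass mask and direction bins
# that get_maps_STFT builds above. Only NumPy is assumed; patch_size, RMIN,
# RMAX and nrof_dirs mirror the defaults in the example, and the helper name
# is purely illustrative.
import numpy as np

def stft_bandpass_and_bins(patch_size=64, nrof_dirs=16, RMIN=3, RMAX=18):
    FLOW, FHIGH = float(patch_size) / RMAX, float(patch_size) / RMIN
    x, y = np.meshgrid(np.arange(-patch_size // 2, patch_size // 2),
                       np.arange(-patch_size // 2, patch_size // 2))
    r = np.sqrt(x.astype(np.float32)**2 + y.astype(np.float32)**2) + 0.0001

    # Butterworth-style low-pass and high-pass masks combined into a band-pass
    dBPass = (1. / (1 + (r / FHIGH)**4)) * (1. / (1 + (FLOW / r)**4))

    # orientation of each frequency sample, quantized into nrof_dirs sectors
    direction = np.arctan2(y, x)
    direction[direction < 0] += np.pi
    dir_ind = np.floor(direction / (np.pi / nrof_dirs)).astype(int)
    dir_ind[dir_ind == nrof_dirs] = 0
    dir_ind_list = [np.argwhere(dir_ind == i) for i in range(nrof_dirs)]
    return dBPass, dir_ind_list

dBPass, dir_ind_list = stft_bandpass_and_bins()
print(dBPass.shape)  # (64, 64); len(dir_ind_list) == 16
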
Example #3
    def feature_extraction_longitudinal(self, img_file):
        block_size = 16

        img = io.imread(img_file)
        #print img.shape
        img = preprocessing.adjust_image_size(img, block_size)
        h, w = img.shape
        texture_img = preprocessing.FastCartoonTexture(img,
                                                       sigma=2.5,
                                                       show=False)

        contrast_img_guassian = preprocessing.local_constrast_enhancement_gaussian(
            img)

        mask = get_maps.get_quality_map_intensity(img)
        #show.show_mask(mask, img, fname=None, block=True)
        quality_map, dir_map, fre_map = get_maps.get_quality_map_dict(
            texture_img,
            self.dict_all,
            self.dict_ori,
            self.dict_spacing,
            block_size=16,
            process=False)

        enh_constrast_img = filtering.gabor_filtering_pixel(
            contrast_img_guassian,
            dir_map + math.pi / 2,
            fre_map,
            mask=np.ones((h, w), np.int),
            block_size=16,
            angle_inc=3)

        mnt = self.minu_model.run(img, minu_thr=0.2)

        #show.show_minutiae(img,mnt)
        des = descriptor.minutiae_descriptor_extraction(img,
                                                        mnt,
                                                        self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV,
                                                        batch_size=128)

        blkH, blkW = dir_map.shape

        minu_template = template.MinuTemplate(h=h,
                                              w=w,
                                              blkH=blkH,
                                              blkW=blkW,
                                              minutiae=mnt,
                                              des=des,
                                              oimg=dir_map,
                                              mask=mask)

        rolled_template = template.Template()
        rolled_template.add_minu_template(minu_template)

        return rolled_template, texture_img, enh_constrast_img
Example #4
def get_quality_map_ori_dict(img, dict, spacing, dir_map=None, block_size=16):
    if img.dtype == 'uint8':
        img = img.astype(np.float)
    img = preprocessing.FastCartoonTexture(img)
    h, w = img.shape
    blkH, blkW = dir_map.shape

    quality_map = np.zeros((blkH, blkW), dtype=np.float)
    fre_map = np.zeros((blkH, blkW), dtype=np.float)
    ori_num = len(dict)
    #dir_map = math.pi/2 - dir_map
    dir_ind = dir_map * ori_num / math.pi
    dir_ind = dir_ind.astype(np.int)
    dir_ind = dir_ind % ori_num

    patch_size = np.sqrt(dict[0].shape[1])
    patch_size = patch_size.astype(np.int)
    pad_size = (patch_size - block_size) // 2
    img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')
    for i in range(0, blkH):
        for j in range(0, blkW):
            ind = dir_ind[i, j]
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size]

            patch = patch.reshape(patch_size * patch_size, )
            patch = patch - np.mean(patch)
            patch = patch / (np.linalg.norm(patch) + 0.0001)
            patch[patch > 0.05] = 0.05
            patch[patch < -0.05] = -0.05
            # if ind==0:
            #     simi = np.dot(np.concatenate((dict[-1],dict[ind],dict[ind+1]),axis=0),patch)
            # elif ind == len(dict)-1:
            #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[0]), axis=0), patch)
            # else:
            #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[ind + 1]), axis=0), patch)

            simi = np.dot(dict[ind], patch)
            similar_ind = np.argmax(abs(simi))
            quality_map[i, j] = np.max(abs(simi))
            fre_map[i, j] = 1. / spacing[ind][similar_ind]
            # print np.max(abs(simi))
            # print j
            # plt.subplot(121), plt.imshow(patch.reshape(patch_size, patch_size), cmap='gray')
            # plt.subplot(122), plt.imshow(dict[ind][similar_ind, :].reshape(patch_size, patch_size), cmap='gray')
            # plt.show(block=True)
            # plt.close()

    quality_map = gaussian(quality_map, sigma=2)
    # plt.imshow(quality_map,cmap='gray')
    # plt.show(block=True)
    return quality_map, fre_map
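
# Usage sketch of the per-block lookup in get_quality_map_ori_dict above:
# a block patch is mean-centred, L2-normalised, clipped, and matched against
# the dictionary of its orientation bin by absolute correlation. The random
# dictionary, spacing values and helper name below are illustrative
# placeholders, not the learned dictionary the real function expects.
import numpy as np

def demo_dict_lookup(patch_size=32, n_atoms=20, ori_num=8):
    rng = np.random.RandomState(0)
    dict_ori = [rng.randn(n_atoms, patch_size * patch_size) for _ in range(ori_num)]
    spacing = [rng.uniform(4, 15, n_atoms) for _ in range(ori_num)]

    patch = rng.rand(patch_size, patch_size)  # stand-in for one image block
    ind = 3                                   # orientation bin of this block

    p = patch.reshape(-1) - patch.mean()
    p = p / (np.linalg.norm(p) + 0.0001)
    p = np.clip(p, -0.05, 0.05)

    simi = np.dot(dict_ori[ind], p)
    best = np.argmax(np.abs(simi))
    quality = np.abs(simi[best])              # block quality score
    ridge_freq = 1.0 / spacing[ind][best]     # block ridge frequency
    return quality, ridge_freq

print(demo_dict_lookup())
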
Example #5
def demo_minutiae_extraction(img_path,minu_model_dir):

    img_files = glob.glob(img_path+'*.bmp')
    img_files.sort()
    minu_model = (minutiae_AEC.ImportGraph(minu_model_dir))
    block = True
    for i, img_file in enumerate(img_files):
        if i<11:
            continue
        img = io.imread(img_file)
        name = os.path.basename(img_file)
        h, w = img.shape
        mask = np.ones((h,w),dtype=np.uint8)

        minu_thr = 0.3

        texture_img = preprocessing.FastCartoonTexture(img)
        contrast_img = preprocessing.local_constrast_enhancement_gaussian(img)

        dir_map, fre_map = get_maps.get_maps_STFT(contrast_img, patch_size=64, block_size=16, preprocess=True)

        dict, spacing,_ = get_maps.construct_dictionary(ori_num=60)
        quality_map, fre_map = get_maps.get_quality_map_ori_dict(contrast_img, dict, spacing,
                                                                 dir_map=dir_map,
                                                                 block_size=16)
        enh_texture_img = filtering.gabor_filtering_pixel(contrast_img, dir_map + math.pi / 2, fre_map,
                                                          mask=mask,
                                                          block_size=16, angle_inc=3)

        mnt = minu_model.run(contrast_img, minu_thr=0.1)
        #mnt = minu_model.remove_spurious_minutiae(mnt, mask)
        #minutiae_sets.append(mnt)
        #fname = output_path + os.path.splitext(name)[0] + '_texture_img.jpeg'
        show.show_minutiae(contrast_img, mnt, block=block, fname=None)

        mnt = minu_model.run(texture_img, minu_thr=0.1)
        # mnt = minu_model.remove_spurious_minutiae(mnt, mask)
        # minutiae_sets.append(mnt)
        # fname = output_path + os.path.splitext(name)[0] + '_texture_img.jpeg'
        show.show_minutiae(texture_img, mnt, block=block, fname=None)

        print(i)
Example #6
def get_quality_map_ori_dict(img, dict, spacing, dir_map=None, block_size=16):
    if img.dtype == 'uint8':
        img = img.astype(np.float)
    img = preprocessing.FastCartoonTexture(img)
    h, w = img.shape
    blkH, blkW = dir_map.shape

    quality_map = np.zeros((blkH, blkW), dtype=np.float)
    fre_map = np.zeros((blkH, blkW), dtype=np.float)
    ori_num = len(dict)
    dir_ind = dir_map * ori_num / math.pi
    dir_ind = dir_ind.astype(np.int)
    dir_ind = dir_ind % ori_num

    patch_size = np.sqrt(dict[0].shape[1])
    patch_size = patch_size.astype(np.int)
    pad_size = (patch_size - block_size) // 2
    img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')
    for i in range(0, blkH):
        for j in range(0, blkW):
            ind = dir_ind[i, j]
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size]

            patch = patch.reshape(patch_size * patch_size, )
            patch = patch - np.mean(patch)
            patch = patch / (np.linalg.norm(patch) + 0.0001)
            patch[patch > 0.05] = 0.05
            patch[patch < -0.05] = -0.05

            simi = np.dot(dict[ind], patch)
            similar_ind = np.argmax(abs(simi))
            quality_map[i, j] = np.max(abs(simi))
            fre_map[i, j] = 1. / spacing[ind][similar_ind]
    quality_map = gaussian(quality_map, sigma=2)
    return quality_map, fre_map
Example #7
def minutiae_extraction_latent(model_path, sample_path, imgs, output_name='reconstruction/gen:0', block=True):
    imgs = glob.glob('/Data/Latent/DB/NIST27/image/' + '*.bmp')

    minu_files = glob.glob('/Data/Latent/DB/ManualInformation/NIST27/ManMinu/*.txt')
    minu_files.sort()
    imgs.sort()

    import os
    if not os.path.isdir(sample_path):
        os.makedirs(sample_path)

    weight = get_weights(opt.SHAPE, opt.SHAPE, 12)
    with tf.Graph().as_default():

        with TowerContext('', is_training=False):
            with tf.Session() as sess:
                is_training = get_current_tower_context().is_training
                load_model(model_path)
                images_placeholder = tf.get_default_graph().get_tensor_by_name('QueueInput/input_deque:0')  # sub:0
                minutiae_cylinder_placeholder = tf.get_default_graph().get_tensor_by_name(output_name)
                for k, file in enumerate(imgs):
                    print(file)
                    img = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
                    u, texture = LP.FastCartoonTexture(img)
                    img = texture / 128.0 - 1

                    h, w = img.shape
                    x = []
                    y = []
                    nrof_samples = len(range(0, h, opt.SHAPE // 2)) * len(range(0, w, opt.SHAPE // 2))
                    patches = np.zeros((nrof_samples, opt.SHAPE, opt.SHAPE, 1))
                    n = 0
                    for i in range(0, h - opt.SHAPE + 1, opt.SHAPE // 2):

                        for j in range(0, w - opt.SHAPE + 1, opt.SHAPE // 2):
                            print(j)
                            patch = img[i:i + opt.SHAPE, j:j + opt.SHAPE, np.newaxis]
                            x.append(j)
                            y.append(i)
                            patches[n, :, :, :] = patch
                            n = n + 1
                        # print x[-1]
                    feed_dict = {images_placeholder: patches}
                    minutiae_cylinder_array = sess.run(minutiae_cylinder_placeholder, feed_dict=feed_dict)

                    minutiae_cylinder = np.zeros((h, w, 12))
                    # zero out a 10-pixel border of each patch response
                    minutiae_cylinder_array[:, -10:, :, :] = 0
                    minutiae_cylinder_array[:, :10, :, :] = 0
                    minutiae_cylinder_array[:, :, -10:, :] = 0
                    minutiae_cylinder_array[:, :, :10, :] = 0
                    for i in range(n):
                        minutiae_cylinder[y[i]:y[i] + opt.SHAPE,
                                          x[i]:x[i] + opt.SHAPE, :] += minutiae_cylinder_array[i] * weight
                    minutiae = prepare_data.get_minutiae_from_cylinder(minutiae_cylinder, thr=0.05)

                    minutiae = prepare_data.refine_minutiae(minutiae, dist_thr=10, ori_dist=np.pi / 4)

                    minutiae_sets = []
                    minutiae_sets.append(minutiae)

                    manu_minutiae = np.loadtxt(minu_files[k])
                    manu_minutiae[:, 2] = manu_minutiae[:, 2] / 180 * np.pi
                    minutiae_sets.append(manu_minutiae)

                    fname = sample_path + os.path.basename(file)[:-4] + '.jpeg'
                    prepare_data.show_minutiae_sets(img, minutiae_sets, ROI=None, fname=fname, block=block)
                    fname = sample_path + os.path.basename(file)[:-4] + '.txt'
                    np.savetxt(fname, minutiae_sets[0])
                    print(n)
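
# Sketch of the overlapping-patch stitching used above: patches of size SHAPE
# are taken at half-patch stride, processed independently, and the per-patch
# outputs are blended back with a weight window (the example uses
# get_weights(opt.SHAPE, opt.SHAPE, 12)). The identity "model", the Hanning
# weight and the helper name are placeholders for illustration only.
import numpy as np

def stitch_patches(img, SHAPE=64):
    h, w = img.shape
    wx = np.hanning(SHAPE)
    weight = np.outer(wx, wx) + 1e-3          # placeholder blending window
    out = np.zeros((h, w))
    norm = np.zeros((h, w))
    for i in range(0, h - SHAPE + 1, SHAPE // 2):
        for j in range(0, w - SHAPE + 1, SHAPE // 2):
            patch = img[i:i + SHAPE, j:j + SHAPE]
            response = patch                  # stand-in for the network output
            out[i:i + SHAPE, j:j + SHAPE] += response * weight
            norm[i:i + SHAPE, j:j + SHAPE] += weight
    # the original accumulates without normalising; dividing by the summed
    # weights is an optional variant that keeps the output on the input scale
    return out / np.maximum(norm, 1e-6)

print(stitch_patches(np.random.rand(200, 180)).shape)
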
Example #8
def get_quality_map_dict(img,
                         dict,
                         ori,
                         spacing,
                         block_size=16,
                         process=False):
    if img.dtype == 'uint8':
        img = img.astype(np.float)
    if process:
        img = preprocessing.FastCartoonTexture(img)
    h, w = img.shape

    blkH, blkW = h // block_size, w // block_size
    quality_map = np.zeros((blkH, blkW), dtype=np.float)
    dir_map = np.zeros((blkH, blkW), dtype=np.float)
    fre_map = np.zeros((blkH, blkW), dtype=np.float)

    patch_size = np.sqrt(dict.shape[0])
    patch_size = patch_size.astype(np.int)
    pad_size = (patch_size - block_size) // 2
    img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')

    # pixel_list = []
    # for i in range(0,blkH):
    #     for j in range(0,blkW):
    #         pixel_list.append((i,j))
    #
    # def apply_filter(src_arr, dir_arr, fre_arr, quality_arr, idx_list):
    #     for p, i, j in idx_list:
    #         patch = src_arr[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size]
    #         patch = patch.reshape(patch_size * patch_size, )
    #         patch = patch - np.mean(patch)
    #         patch = patch / (np.linalg.norm(patch) + 0.0001)
    #         patch[patch > 0.05] = 0.05
    #         patch[patch < -0.05] = -0.05
    #         # if ind==0:
    #         #     simi = np.dot(np.concatenate((dict[-1],dict[ind],dict[ind+1]),axis=0),patch)
    #         # elif ind == len(dict)-1:
    #         #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[0]), axis=0), patch)
    #         # else:
    #         #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[ind + 1]), axis=0), patch)
    #
    #         simi = np.dot(dict, patch)
    #         similar_ind = np.argmax(abs(simi))
    #         quality_arr[p] = np.max(abs(simi))
    #         dir_arr[p] = ori[similar_ind]
    #         fre_arr[p] = spacing[similar_ind]
    #
    #     return
    #
    #     # from threading import Thread
    #
    # from multiprocessing import Process, Array
    #
    # threads = []
    # thread_num = 5
    # candi_num = blkH*blkW
    # pixels_per_thread = candi_num // thread_num
    # dir_arr = Array('f', candi_num)
    # fre_arr = Array('f', candi_num)
    # quality_arr = Array('f', candi_num)
    # for k in xrange(0, thread_num):
    #     idx_list = []
    #     for n in xrange(0, pixels_per_thread):
    #         p = k * pixels_per_thread + n
    #         if p >= candi_num:
    #             break
    #         idx_list.append((p, pixel_list[p][0], pixel_list[p][1]))
    #     t = Process(target=apply_filter, args=(img, dir_arr, fre_arr, quality_arr, idx_list))
    #     t.daemon = True
    #     t.start()
    #     threads.append(t)
    #
    # for t in threads:
    #     t.join()
    #
    # for k in xrange(candi_num):
    #     i = pixel_list[k][0]
    #     j = pixel_list[k][1]
    #     quality_map[i, j] =quality_arr[k]
    #     dir_map[i, j] = dir_arr[k]
    #     fre_map[i, j] = fre_arr[k]

    patches = []
    pixel_list = []

    for i in range(0, blkH - 0):
        for j in range(0, blkW - 0):
            pixel_list.append((i, j))
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size]

            patch = patch.reshape(patch_size * patch_size, )
            patch = patch - np.mean(patch)
            patch = patch / (np.linalg.norm(patch) + 0.0001)

            patches.append(patch)
            # if ind==0:
            #     simi = np.dot(np.concatenate((dict[-1],dict[ind],dict[ind+1]),axis=0),patch)
            # elif ind == len(dict)-1:
            #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[0]), axis=0), patch)
            # else:
            #     simi = np.dot(np.concatenate((dict[ind - 1], dict[ind], dict[ind + 1]), axis=0), patch)

            # simi = np.dot(dict, patch)
            # similar_ind = np.argmax(abs(simi))
            # quality_map[i,j] = np.max(abs(simi))
            # dir_map[i,j] = ori[similar_ind]
            # fre_map[i,j] = spacing[similar_ind]
            # print np.max(abs(simi))
            # print j
            # print ori[similar_ind]
            # plt.subplot(121), plt.imshow(patch.reshape(patch_size, patch_size), cmap='gray')
            # plt.subplot(122), plt.imshow(dict[similar_ind, :].reshape(patch_size, patch_size), cmap='gray')
            # R = 5
            # x1 = 16 - R * math.cos(ori[similar_ind])
            # x2 = 16 + R * math.cos(ori[similar_ind])
            # y1 = 16 - R * math.sin(ori[similar_ind])
            # y2 = 16 + R * math.sin(ori[similar_ind])
            # plt.plot([x1, x2], [y1, y2], 'r-', lw=2)
            # plt.show(block=True)
            # plt.close()

    patches = np.asarray(patches)
    patches[patches > 0.05] = 0.05
    patches[patches < -0.05] = -0.05
    simi = abs(np.dot(patches, dict))
    similar_ind = np.argmax(simi, axis=1)

    n = 0
    for i in range(0, blkH - 0):
        for j in range(0, blkW - 0):
            quality_map[i, j] = simi[n, similar_ind[n]]
            dir_map[i, j] = ori[similar_ind[n]]
            fre_map[i, j] = spacing[similar_ind[n]]
            n += 1
    # plt.imshow(quality_map,cmap='gray')
    # plt.show(block=True)

    quality_map = cv2.GaussianBlur(quality_map, (3, 3), 1)
    dir_map = smooth_dir_map(dir_map, sigma=1)
    fre_map = cv2.GaussianBlur(fre_map, (3, 3), 1)
    return quality_map, dir_map, fre_map
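
# Sketch of the vectorised lookup in get_quality_map_dict above: all block
# patches are stacked into one matrix so a single matrix product against the
# dictionary replaces a per-block loop, and an argmax per row selects the best
# atom. The dictionary, orientation and spacing arrays here are random
# placeholders, as is the helper name.
import numpy as np

def demo_vectorised_lookup(patch_size=32, n_atoms=50, n_blocks=30):
    rng = np.random.RandomState(1)
    dictionary = rng.randn(patch_size * patch_size, n_atoms)
    ori = rng.uniform(0, np.pi, n_atoms)
    spacing = rng.uniform(4, 15, n_atoms)

    patches = rng.rand(n_blocks, patch_size * patch_size)   # stand-in blocks
    patches = patches - patches.mean(axis=1, keepdims=True)
    patches = patches / (np.linalg.norm(patches, axis=1, keepdims=True) + 0.0001)
    patches = np.clip(patches, -0.05, 0.05)

    simi = np.abs(np.dot(patches, dictionary))               # (n_blocks, n_atoms)
    best = np.argmax(simi, axis=1)
    quality = simi[np.arange(n_blocks), best]                # per-block quality
    return quality, ori[best], spacing[best]                 # quality, dir, spacing

q, d, s = demo_vectorised_lookup()
print(q.shape)  # (30,); d and s have the same shape
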
Example #9
def get_quality_map_dict_coarse(img,
                                dict,
                                ori,
                                spacing,
                                block_size=16,
                                process=False,
                                R=500.0,
                                t=0.5):
    if img.dtype == 'uint8':
        img = img.astype(np.float)
    if process:
        img = preprocessing.FastCartoonTexture(img)
    h, w = img.shape

    blkH, blkW = h // block_size, w // block_size
    quality_map = np.zeros((blkH, blkW), dtype=np.float)
    dir_map = np.zeros((blkH, blkW), dtype=np.float)
    fre_map = np.zeros((blkH, blkW), dtype=np.float)

    patch_size = np.sqrt(dict.shape[0])
    patch_size = patch_size.astype(np.int)
    pad_size = (patch_size - block_size) // 2
    img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')

    patches = []
    pixel_list = []

    r = 1
    x, y = np.meshgrid(range(-patch_size / 2, patch_size / 2),
                       range(-patch_size / 2, patch_size / 2))
    x = x.astype(np.float32)
    y = y.astype(np.float32)
    weight = np.exp(-(x * x + y * y) / (patch_size * patch_size / 3.0))
    # t = 0.02
    for i in range(r, blkH - r):
        for j in range(r, blkW - r):
            pixel_list.append((i, j))
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size].copy()
            patch = patch - np.median(patch)
            patch = patch / (np.linalg.norm(patch) + R)
            patch[patch > t] = 0.0
            patch[patch < -t] = 0.

            patch = patch * weight
            patch = patch.reshape(patch_size * patch_size, )

            patches.append(patch)

    patches = np.asarray(patches)
    simi = abs(np.dot(patches, dict))
    similar_ind = np.argmax(simi, axis=1)

    n = 0
    # blks_in_patch = patch_size / block_size
    for i in range(r, blkH - r):
        for j in range(r, blkW - r):
            quality_map[i, j] = simi[n, similar_ind[n]]
            dir_map[i, j] = -math.atan2(ori[32 + 64, similar_ind[n]],
                                        ori[32, similar_ind[n]]) / 2.0
            fre_map[i, j] = spacing[32, similar_ind[n]]
            n += 1

    for i in range(r):
        # replicate the border blocks that the loop above skipped
        fre_map[i] = fre_map[r]
        dir_map[i] = dir_map[r]
        fre_map[-(r - i) - 1] = fre_map[-r - 1]
        dir_map[-(r - i) - 1] = dir_map[-r - 1]
        fre_map[:, i] = fre_map[:, r]
        dir_map[:, i] = dir_map[:, r]
        fre_map[:, -(r - i) - 1] = fre_map[:, -r - 1]
        dir_map[:, -(r - i) - 1] = dir_map[:, -r - 1]

    quality_map = cv2.GaussianBlur(quality_map, (5, 5), 2)
    dir_map = smooth_dir_map(dir_map, sigma=1.5)
    fre_map = cv2.GaussianBlur(fre_map, (3, 3), 1)
    return quality_map, dir_map, fre_map
Example #10
def get_quality_map_dict(img,
                         dict,
                         ori,
                         spacing,
                         block_size=16,
                         process=False,
                         R=500.0,
                         t=0.05):
    if img.dtype == 'uint8':
        img = img.astype(np.float)
    if process:
        img = preprocessing.FastCartoonTexture(img)
    h, w = img.shape

    blkH, blkW = h // block_size, w // block_size
    quality_map = np.zeros((blkH, blkW), dtype=np.float)
    dir_map = np.zeros((blkH, blkW), dtype=np.float)
    fre_map = np.zeros((blkH, blkW), dtype=np.float)

    patch_size = np.sqrt(dict.shape[0])
    patch_size = patch_size.astype(np.int)
    pad_size = (patch_size - block_size) // 2
    img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')

    patches = []
    pixel_list = []

    r = 1

    for i in range(r, blkH - r):
        for j in range(r, blkW - r):
            pixel_list.append((i, j))
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size].copy()

            patch = patch.reshape(patch_size * patch_size, )
            patch = patch - np.mean(patch)
            patch = patch / (np.linalg.norm(patch) + R)
            patch[patch > t] = 0.0
            patch[patch < -t] = -0.0
            patches.append(patch)

    patches = np.asarray(patches)
    simi = abs(np.dot(patches, dict))
    similar_ind = np.argmax(simi, axis=1)

    n = 0
    for i in range(r, blkH - r):
        for j in range(r, blkW - r):
            quality_map[i, j] = simi[n, similar_ind[n]]
            dir_map[i, j] = ori[similar_ind[n]]
            fre_map[i, j] = spacing[similar_ind[n]]
            n += 1

    for i in range(r):
        # replicate the border blocks that the loop above skipped
        fre_map[i] = fre_map[r]
        dir_map[i] = dir_map[r]
        fre_map[-(r - i) - 1] = fre_map[-r - 1]
        dir_map[-(r - i) - 1] = dir_map[-r - 1]
        fre_map[:, i] = fre_map[:, r]
        dir_map[:, i] = dir_map[:, r]
        fre_map[:, -(r - i) - 1] = fre_map[:, -r - 1]
        dir_map[:, -(r - i) - 1] = dir_map[:, -r - 1]

    quality_map = cv2.GaussianBlur(quality_map, (5, 5), 0)
    dir_map = smooth_dir_map(dir_map, sigma=1.5)
    fre_map = cv2.GaussianBlur(fre_map, (3, 3), 1)
    return quality_map, dir_map, fre_map
Example #11
def feature_extraction_single_latent(raw_img_file,
                                     AEC_img_file,
                                     mask_file,
                                     patch_types=None,
                                     des_models=None):
    ###
    #  input:
    #  raw_img: original latent image
    #  AEC_img: enhanced latent image produced by the autoencoder
    #  mask:    ROI
    #  main idea:
    #  1) use AEC_img to estimate ridge flow and ridge spacing
    #  2) use AEC_img and raw_img to extract two different minutiae sets
    ###
    raw_img = io.imread(raw_img_file)
    AEC_img = io.imread(AEC_img_file)
    mask = io.imread(mask_file)
    #mask = mask_dilation(mask, block_size=16)

    texture_img = preprocessing.FastCartoonTexture(raw_img,
                                                   sigma=2.5,
                                                   show=False)

    dir_map, fre_map, rec_img = get_maps.get_maps_STFT(AEC_img,
                                                       patch_size=64,
                                                       block_size=16,
                                                       preprocess=True)

    descriptor_img = filtering.gabor_filtering_pixel(texture_img,
                                                     dir_map + math.pi / 2,
                                                     fre_map,
                                                     mask=mask,
                                                     block_size=16,
                                                     angle_inc=3)

    bin_img = binarization.binarization(texture_img,
                                        dir_map,
                                        block_size=16,
                                        mask=mask)

    enhanced_img = filtering.gabor_filtering_block(bin_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)
    enhanced_img = filtering.gabor_filtering_block(enhanced_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)

    # plt.subplot(131), plt.imshow(raw_img, cmap='gray')
    # plt.title('Input image'), plt.xticks([]), plt.yticks([])
    # plt.subplot(132), plt.imshow(descriptor_img, cmap='gray')
    # plt.title('Feature image'), plt.xticks([]), plt.yticks([])
    #
    # plt.subplot(133), plt.imshow(enhanced_img, cmap='gray')
    # plt.title('Feature image'), plt.xticks([]), plt.yticks([])
    # plt.show(block=True)
    # plt.close()

    enhanced_AEC_img = filtering.gabor_filtering_block(AEC_img,
                                                       dir_map + math.pi / 2,
                                                       fre_map,
                                                       patch_size=64,
                                                       block_size=16)
    bin_img = binarization.binarization(enhanced_AEC_img,
                                        dir_map,
                                        block_size=16,
                                        mask=mask)
    # plt.imshow(AEC_img,cmap='gray')
    # plt.show()
    # plt.close()

    bin_img2 = 1 - bin_img
    thin_img = skeletonize(bin_img2)
    # thin_img2 = thin_img.astype(np.uint8)
    # thin_img2[thin_img2 > 0] = 255

    mnt, thin_img2 = crossnumber.extract_minutiae(1 - thin_img,
                                                  mask=mask,
                                                  R=10)
    crossnumber.show_minutiae(thin_img, mnt)

    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)
    if len(descriptor_img.shape) == 2:
        h, w = descriptor_img.shape
        ret = np.empty((h, w, 3), dtype=np.float)
        ret[:, :, :] = descriptor_img[:, :, np.newaxis]
        descriptor_img = ret

    if len(enhanced_AEC_img.shape) == 2:
        h, w = enhanced_AEC_img.shape
        ret = np.empty((h, w, 3), dtype=np.float)
        ret[:, :, :] = enhanced_AEC_img[:, :, np.newaxis]
        enhanced_AEC_img = ret

    des = descriptor.minutiae_descriptor_extraction(enhanced_AEC_img,
                                                    mnt,
                                                    patch_types,
                                                    des_models,
                                                    patchIndexV,
                                                    batch_size=128)

    h, w = mask.shape
    blkH, blkW = dir_map.shape
    minu_template = template.MinuTemplate(h=h,
                                          w=w,
                                          blkH=blkH,
                                          blkW=blkW,
                                          minutiae=mnt,
                                          des=des,
                                          oimg=dir_map,
                                          mask=mask)

    latent_template = template.Template()
    latent_template.add_minu_template(minu_template)

    print(des)
Example #12
def get_maps_STFT(img, patch_size=64, block_size=16, preprocess=False):
    assert len(img.shape) == 2

    nrof_dirs = 16
    ovp_size = (patch_size - block_size) // 2
    if preprocess:
        img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)
    h0, w0 = img.shape
    img = np.lib.pad(img, (ovp_size, ovp_size), 'symmetric')
    #img = cv2.copyMakeBorder(img, ovp_size, ovp_size, ovp_size, ovp_size, cv2.BORDER_CONSTANT, value=0)

    h, w = img.shape
    blkH = (h - patch_size) // block_size + 1
    blkW = (w - patch_size) // block_size + 1
    local_info = np.empty((blkH, blkW), dtype=object)

    x, y = np.meshgrid(range(-patch_size / 2, patch_size / 2),
                       range(-patch_size / 2, patch_size / 2))
    x = x.astype(np.float32)
    y = y.astype(np.float32)
    r = np.sqrt(x * x + y * y) + 0.0001

    # -------------------------
    # Band-pass filter
    # -------------------------
    RMIN = 3   # minimum allowable ridge spacing
    RMAX = 18  # maximum allowable ridge spacing
    FLOW = patch_size / RMAX
    FHIGH = patch_size / RMIN
    dRLow = 1. / (1 + (r / FHIGH)**4)    # low-pass Butterworth filter
    dRHigh = 1. / (1 + (FLOW / r)**4)    # high-pass Butterworth filter
    dBPass = dRLow * dRHigh              # band-pass

    dir = np.arctan2(y, x)
    dir[dir < 0] = dir[dir < 0] + math.pi
    dir_ind = np.floor(dir / (math.pi / nrof_dirs))
    dir_ind = dir_ind.astype(np.int, copy=False)
    dir_ind[dir_ind == nrof_dirs] = 0

    dir_ind_list = []
    for i in range(nrof_dirs):
        tmp = np.argwhere(dir_ind == i)
        dir_ind_list.append(tmp)

    sigma = patch_size / 3
    weight = np.exp(-(x * x + y * y) / (sigma * sigma))
    #plt.imshow(weight,cmap='gray')
    #plt.show()

    for i in range(0, blkH):
        for j in range(0, blkW):
            patch = img[i * block_size:i * block_size + patch_size,
                        j * block_size:j * block_size + patch_size].copy()
            local_info[i, j] = local_STFT(patch, weight, dBPass)
            local_info[i, j].analysis(r, dir_ind_list)

            # plt.subplot(221), plt.imshow(patch, cmap='gray')
            # plt.title('Input Image'), plt.xticks([]), plt.yticks([])
            # plt.subplot(222), plt.imshow(magnitude_spectrum, cmap='gray')
            # plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
            # plt.subplot(223), plt.imshow(rec_patch, cmap='gray')
            # plt.title('reconstructed patch'), plt.xticks([]), plt.yticks([])
            # plt.subplot(224), plt.imshow(dBPass, cmap='gray')
            # plt.title('reconstructed patch'), plt.xticks([]), plt.yticks([])
            # plt.show()

    # get the ridge flow from the local information
    dir_map, fre_map = get_ridge_flow_top(local_info)
    #dir_map = get_ridge_flow_optimal_N(local_info, N=2)
    dir_map = smooth_dir_map(dir_map)
    #show_orientation_field(img,dir_map)

    # # first around enhancement
    # rec_img = np.zeros((h, w))
    # for i in range(0, blkH):
    #     for j in range(0, blkW):
    #         if fre_map[i,j] < 0:
    #             continue
    #         rec_patch = local_info[i, j].gabor_filtering(dir_map[i,j],fre_map[i,j], weight=weight)
    #         rec_img[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size] += rec_patch
    #
    #
    # rec_img = rec_img[ovp_size:ovp_size + h0, ovp_size:ovp_size + w0]
    # rec_img = (rec_img - np.min(rec_img)) / (np.max(rec_img) - np.min(rec_img)) * 255

    return dir_map, fre_map  #, rec_img
Example #13
def extract_minutiae_cylinder(img_input,
                              minutiae_input,
                              ROI=None,
                              num_ori=12,
                              angle=None,
                              processing=None):
    # for the latent or the low quality rolled print
    minutiae = minutiae_input.copy()
    img = img_input.copy()
    if processing == 'STFT':
        img = LP.STFT(img)
    elif processing == 'contrast':
        img = LP.local_constrast_enhancement(img)
    elif processing == 'texture':
        img = LP.FastCartoonTexture(img)
    sigma = 5**2
    if ROI is not None:
        h, w = ROI.shape
        for i in range(h):
            for j in range(w):
                if ROI[i, j] == 0:
                    img[i, j] = 255

        h, w = ROI.shape
        col_sum = np.sum(ROI, axis=0)

        ind = [x for x in range(len(col_sum)) if col_sum[x] > 0]
        min_x = np.max([np.min(ind) - 32, 0])
        max_x = np.min([np.max(ind) + 32, w])

        row_sum = np.sum(ROI, axis=1)

        ind = [x for x in range(len(row_sum)) if row_sum[x] > 0]
        min_y = np.max([np.min(ind) - 32, 0])
        max_y = np.min([np.max(ind) + 32, h])

        ROI = ROI[min_y:max_y, min_x:max_x]
        img = img[min_y:max_y, min_x:max_x]
        minutiae[:, 0] = minutiae[:, 0] - min_x
        minutiae[:, 1] = minutiae[:, 1] - min_y
    else:
        h, w = img.shape[0:2]
        ROI = np.ones((h, w))

    # rotate the image and ROI, and also update minutiae points
    h0, w0 = ROI.shape
    if angle is not None:
        h02 = (h0 + 1) / 2
        w02 = (w0 + 1) / 2

        img = rotate(img, angle)
        ROI = rotate(ROI, angle)

        h, w = ROI.shape
        h2 = (h + 1) / 2
        w2 = (w + 1) / 2

        angle = -angle / 180.0 * math.pi
        cosTheta = math.cos(angle)
        sinTheta = math.sin(angle)
        xx = (minutiae[:, 0] - w02) * cosTheta - (minutiae[:, 1] -
                                                  h02) * sinTheta + w2

        yy = (minutiae[:, 0] - w02) * sinTheta + (minutiae[:, 1] -
                                                  h02) * cosTheta + h2
        ori = minutiae[:, 2] - angle
        #
        minutiae[:, 0] = xx
        minutiae[:, 1] = yy
        minutiae[:, 2] = ori
        show = 0
        if show:
            minu_num = minutiae.shape[0]
            fig, ax = plt.subplots(1)
            ax.set_aspect('equal')

            R = 10
            arrow_len = 15
            ax.imshow(img, cmap='gray')
            for i in range(0, minu_num):
                xx = minutiae[i, 0]
                yy = minutiae[i, 1]
                circ = Circle((xx, yy), R, color='r', fill=False)
                ax.add_patch(circ)

                ori = -minutiae[i, 2]
                dx = math.cos(ori) * arrow_len
                dy = math.sin(ori) * arrow_len
                ax.arrow(xx,
                         yy,
                         dx,
                         dy,
                         head_width=0.05,
                         head_length=0.1,
                         fc='r',
                         ec='r')
            plt.show()
    h, w = ROI.shape
    minutiae_cylinder = np.zeros((h, w, num_ori), dtype=float)
    cylinder_ori = np.asarray(range(num_ori)) * math.pi * 2 / num_ori

    Y, X = np.mgrid[0:h, 0:w]
    minu_num = minutiae.shape[0]
    for i in range(0, minu_num):
        xx = minutiae[i, 0]
        yy = minutiae[i, 1]
        if yy < 0 or xx < 0:
            # skip minutiae that fall outside the cropped image
            continue
        weight = np.exp(-((X - xx) * (X - xx) + (Y - yy) * (Y - yy)) / sigma)

        ori = minutiae[i, 2]
        if ori < 0:
            ori += np.pi * 2
        if ori > np.pi * 2:
            ori -= np.pi * 2

        for j in range(num_ori):

            ori_diff = np.fabs(ori - cylinder_ori[j])

            if ori_diff > np.pi * 2:
                ori_diff = ori_diff - np.pi * 2

            ori_diff = np.min([ori_diff, np.pi * 2 - ori_diff])
            minutiae_cylinder[:, :, j] += weight * np.exp(-ori_diff / np.pi * 6)
    show = 0
    if show:
        fig, ax = plt.subplots(1)
        ax.set_aspect('equal')

        R = 10
        arrow_len = 15
        ax.imshow(img, cmap='gray')
        for i in range(0, minu_num):
            xx = minutiae[i, 0]
            yy = minutiae[i, 1]
            circ = Circle((xx, yy), R, color='r', fill=False)
            ax.add_patch(circ)

            ori = -minutiae[i, 2]
            dx = math.cos(ori) * arrow_len
            dy = math.sin(ori) * arrow_len
            ax.arrow(xx,
                     yy,
                     dx,
                     dy,
                     head_width=0.05,
                     head_length=0.1,
                     fc='r',
                     ec='r')
        plt.show()

    return img, ROI, minutiae_cylinder
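
# Sketch of the orientation-cylinder encoding built at the end of
# extract_minutiae_cylinder above: each minutia adds a spatial Gaussian bump
# to the channels whose reference orientation is closest to its own, with the
# angular difference wrapped to [0, pi]. The image size, minutiae values and
# helper name below are made up for illustration.
import numpy as np

def demo_minutiae_cylinder(h=128, w=128, num_ori=12, sigma=5 ** 2):
    minutiae = np.array([[40.0, 60.0, 0.3],      # x, y, orientation (rad)
                         [90.0, 30.0, 2.1]])
    cylinder_ori = np.arange(num_ori) * 2 * np.pi / num_ori
    Y, X = np.mgrid[0:h, 0:w]
    cylinder = np.zeros((h, w, num_ori))
    for xx, yy, ori in minutiae:
        ori = ori % (2 * np.pi)
        weight = np.exp(-((X - xx) ** 2 + (Y - yy) ** 2) / sigma)
        for j in range(num_ori):
            d = abs(ori - cylinder_ori[j])
            d = min(d, 2 * np.pi - d)            # wrap the angular difference
            cylinder[:, :, j] += weight * np.exp(-d / np.pi * 6)
    return cylinder

print(demo_minutiae_cylinder().shape)            # (128, 128, 12)
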
Example #14
    #
    #     minutiae = prepare_data.refine_minutiae(minutiae, dist_thr=10, ori_dist=np.pi / 4)
    #
    #     return minutiae


if __name__ == '__main__':
    model_dir = '/media/kaicao/data2/AutomatedLatentRecognition/models/OF/facenet/20171229-120921/'
    OF_center_file = 'OriCenter.mat'

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    img_path = '../../../../../Data/Latent/NISTSD27/image/'

    mask_path = '../../../../../Data/Latent/NISTSD27/maskNIST27/'

    img_files = glob.glob(img_path + '*.bmp')
    img_files.sort()

    mask_files = glob.glob(mask_path + '*.bmp')
    mask_files.sort()

    for i in range(250, len(img_files)):
        img = io.imread(img_files[i])
        mask = io.imread(mask_files[i])
        img = preprocessing.FastCartoonTexture(img)
        img[mask == 0] = 0

        model = ImportGraph(model_dir, OF_center_file)
        dir_map = model.run(img, mask=mask)
Example #15
    def feature_extraction_single_latent_evaluation(self, img_file, mask_file, AEC_img_file, output_path=None):

        img = io.imread(img_file)
        name = os.path.basename(img_file)
        AEC_img = io.imread(AEC_img_file)
        mask = io.imread(mask_file)
        h,w = mask.shape
        #mask = mask_dilation(mask, block_size=16)
        latent_template = template.Template()
        block = False
        minu_thr = 0.3

        contrast_img = preprocessing.local_constrast_enhancement(img)
        # Two ways for orientation field estimation
        #  Use the AEC_img and STFT on texture image
        dir_map_sets = []
        texture_img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)
        dir_map, fre_map = get_maps.get_maps_STFT(texture_img, patch_size=64, block_size=16, preprocess=True)
        dir_map_sets.append(dir_map)

        blkH, blkW = dir_map.shape

        dir_map, fre_map = get_maps.get_maps_STFT(AEC_img, patch_size=64, block_size=16, preprocess=True)
        dir_map_sets.append(dir_map)

        #dir_map, fre_map = get_maps.get_maps_STFT(contrast_img, patch_size=64, block_size=16, preprocess=True)
        #dir_map_sets.append(dir_map)

        # based on the OF, we can use texture image and AEC image for frequency field estimation

        fre_map_sets = []
        quality_map, fre_map = get_maps.get_quality_map_ori_dict(AEC_img, self.dict, self.spacing, dir_map=dir_map_sets[0],
                                                                 block_size=16)
        fre_map_sets.append(fre_map)

        quality_map, fre_map = get_maps.get_quality_map_ori_dict(contrast_img, self.dict, self.spacing,
                                                                 dir_map=dir_map_sets[1],
                                                                 block_size=16)
        fre_map_sets.append(fre_map)

        descriptor_imgs = [texture_img]
        descriptor_imgs.append(contrast_img)
        enh_texture_img = filtering.gabor_filtering_pixel(texture_img, dir_map + math.pi / 2, fre_map_sets[0], mask=mask,
                                                         block_size=16, angle_inc=3)
        descriptor_imgs.append(enh_texture_img)
        enh_contrast_img = filtering.gabor_filtering_pixel(contrast_img, dir_map + math.pi / 2, fre_map_sets[1], mask=mask,
                                                           block_size=16, angle_inc=3)
        descriptor_imgs.append(enh_contrast_img)

        minutiae_sets = []
        mnt = self.minu_model.run(texture_img, minu_thr=0.1)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_texture_img.jpeg'
        show.show_minutiae(texture_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(contrast_img, minu_thr=0.1)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_contrast_img.jpeg'
        show.show_minutiae(contrast_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(enh_texture_img, minu_thr=minu_thr)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_enh_texture_img.jpeg'
        show.show_minutiae(enh_texture_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(enh_contrast_img, minu_thr=minu_thr)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_enh_contrast_img.jpeg'
        show.show_minutiae(enh_contrast_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(AEC_img, minu_thr=minu_thr)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)

        fname = output_path + os.path.splitext(name)[0] + '_AEC_img.jpeg'
        show.show_minutiae(AEC_img, mnt, block=block, fname=fname)



        for mnt in minutiae_sets:
            for des_img in descriptor_imgs:
                des = descriptor.minutiae_descriptor_extraction(des_img, mnt, self.patch_types, self.des_models,
                                                        self.patchIndexV,batch_size=128)
                minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=mnt,
                                                  des=des, oimg=dir_map_sets[1], mask=mask)
                latent_template.add_minu_template(minu_template)

        return latent_template
Example #16
    def feature_extraction_single_latent(self, img_file, output_path=None, show_processes=False):
        block = True  # blocking behaviour for the show_minutiae calls below
        img = io.imread(img_file)

        name = os.path.basename(img_file)
        mask_CNN,_ = self.ROI_model.run(img)
        h,w = mask_CNN.shape
        #mask = mask_dilation(mask, block_size=16)
        latent_template = template.Template()

        minu_thr = 0.3

        # template set 1: no ROI or enhancement is required
        # the texture image is used for coarse segmentation

        descriptor_imgs = []
        texture_img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)




        descriptor_imgs.append(texture_img)

        contrast_img_guassian = preprocessing.local_constrast_enhancement_gaussian(img)

        quality_map, _, _ = get_maps.get_quality_map_dict(texture_img, self.dict_all, self.dict_ori,
                                                                      self.dict_spacing, block_size=16, process=False)
        quality_map_pixel = cv2.resize(quality_map, (0, 0), fx=16, fy=16)
        #plt.imshow(quality_map_pixel,cmap='gray')
        #plt.show()
        mask_coarse = quality_map_pixel > 0.3
        mask_coarse = mask_coarse.astype(np.int)
        mask = mask_coarse * mask_CNN
        # show.show_mask(mask_CNN, img, fname='mask_RCNN.jpeg',block=block)
        # show.show_mask(mask_coarse,img,fname = 'mask_coarse.jpeg',block=block)
        # show.show_mask(mask, img, fname='mask.jpeg',block=block)




        #show.show_mask(mask, AEC_img, fname='mask_AEC.jpeg',block=block)
        # plt.imshow(AEC_img,cmap = 'gray')
        # plt.show(block=block)
        # plt.close()



        #show.show_mask(mask_CNN, img, fname='mask_RCNN.jpeg',block=block)

        # AEC_img[mask == 0] = 128
        # plt.imshow(AEC_img, cmap='gray')
        # plt.show(block=block)
        # plt.close()

        AEC_img = self.enhancement_model.run(texture_img)
        quality_map, dir_map, fre_map = get_maps.get_quality_map_dict(AEC_img, self.dict_all, self.dict_ori,self.dict_spacing, block_size=16, process=False)

        blkH, blkW = dir_map.shape

        if show_processes:
            show.show_orientation_field(img, dir_map,mask = mask,fname='OF.jpeg')




        # mnt = self.minu_model.run(contrast_img_mean, minu_thr=0.1)
        # mnt = self.remove_spurious_minutiae(mnt, mask)
        # minutiae_sets.append(mnt)
        #
        # fname = output_path + os.path.splitext(name)[0] + '_contrast_img_mean.jpeg'
        # show.show_minutiae(contrast_img_mean, mnt, block=block, fname=fname)


        enh_contrast_img = filtering.gabor_filtering_pixel(contrast_img_guassian, dir_map + math.pi / 2, fre_map,
                                                          mask=mask,
                                                          block_size=16, angle_inc=3)

        enh_texture_img = filtering.gabor_filtering_pixel(texture_img, dir_map + math.pi / 2, fre_map,
                                                          mask=mask,
                                                          block_size=16, angle_inc=3)

        if show_processes:
            show.show_image(texture_img, mask=mask, block=True, fname='cropped_texture_image.jpeg')
            show.show_image(AEC_img, mask=mask, block=True, fname='cropped_AEC_image.jpeg')
            show.show_image(enh_contrast_img, mask=mask, block=True, fname='cropped_enh_image.jpeg')

        #np.ones((h, w), np.int)
        descriptor_imgs.append(enh_contrast_img)


        quality_map2, _ , _ = get_maps.get_quality_map_dict(enh_contrast_img, self.dict_all,self.dict_ori,self.dict_spacing, block_size=16,
                                                                      process=False)
        quality_map_pixel2 = cv2.resize(quality_map2, (0, 0), fx=16, fy=16)

        mask2 = quality_map_pixel2 > 0.50

        #mask = mask*mask2

        minutiae_sets = []
        mnt = self.minu_model.run(contrast_img_guassian, minu_thr=0.05)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_texture_img.jpeg'
            show.show_minutiae(texture_img, mnt, mask=mask,block=block, fname=fname)

        mnt = self.minu_model.run(AEC_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_AEC_img.jpeg'
            show.show_minutiae(AEC_img, mnt, mask=mask, block=block, fname=fname)

        mnt = self.minu_model.run(enh_contrast_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_enh_contrast_img.jpeg'
            show.show_minutiae(enh_contrast_img, mnt, mask=mask,block=block, fname=fname)

        mnt = self.minu_model.run(enh_texture_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)

        # minutiae template 1
        des = descriptor.minutiae_descriptor_extraction(texture_img, minutiae_sets[0], self.patch_types, self.des_models,
                                                         self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[0],
                                                   des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 2
        des = descriptor.minutiae_descriptor_extraction(texture_img, minutiae_sets[1], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[1],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 3
        des = descriptor.minutiae_descriptor_extraction(enh_texture_img, minutiae_sets[2], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[2],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 4
        des = descriptor.minutiae_descriptor_extraction(enh_texture_img, minutiae_sets[3], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[3],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)



        return latent_template
Example #17
    def feature_extraction_single_latent_evaluation_AEM18T(self, img_file, mask_file, AEC_img_file, output_path=None):

        img = io.imread(img_file)
        name = os.path.basename(img_file)
        AEC_img = io.imread(AEC_img_file)
        mask_CNN = io.imread(mask_file)
        h,w = mask_CNN.shape
        #mask = mask_dilation(mask, block_size=16)
        latent_template = template.Template()
        block = False
        minu_thr = 0.3

        # template set 1: no ROI or enhancement is required
        # the texture image is used for coarse segmentation

        descriptor_imgs = []
        texture_img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)
        descriptor_imgs.append(texture_img)
        contrast_img_mean = preprocessing.local_constrast_enhancement(img)
        contrast_img_guassian = preprocessing.local_constrast_enhancement_gaussian(img)

        quality_map, dir_map, fre_map = get_maps.get_quality_map_dict(texture_img, self.dict_all, self.dict_ori,
                                                                      self.dict_spacing, block_size=16, process=False)
        quality_map_pixel = cv2.resize(quality_map, (0, 0), fx=16, fy=16)
        mask_coarse = quality_map_pixel > 0.3
        mask_coarse = mask_coarse.astype(np.int)
        quality_map, dir_map, fre_map = get_maps.get_quality_map_dict(AEC_img, self.dict_all, self.dict_ori,self.dict_spacing, block_size=16, process=False)


        minutiae_sets = []
        mnt = self.minu_model.run(texture_img, minu_thr=0.1)
        mnt = self.remove_spurious_minutiae(mnt, mask_coarse)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_texture_img.jpeg'
        show.show_minutiae(texture_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(contrast_img_mean, minu_thr=0.1)
        mnt = self.remove_spurious_minutiae(mnt, mask_coarse)
        minutiae_sets.append(mnt)

        fname = output_path + os.path.splitext(name)[0] + '_contrast_img_mean.jpeg'
        show.show_minutiae(contrast_img_mean, mnt, block=block, fname=fname)


        mnt = self.minu_model.run(contrast_img_guassian, minu_thr=0.1)
        mnt = self.remove_spurious_minutiae(mnt, mask_coarse)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_contrast_img_guassian.jpeg'
        show.show_minutiae(contrast_img_guassian, mnt, block=block, fname=fname)

        #show.show_orientation_field(AEC_img,dir_map)

        enh_texture_img = filtering.gabor_filtering_pixel(texture_img, dir_map + math.pi / 2, fre_map,
                                                          mask=np.ones((h, w), np.int),
                                                          block_size=16, angle_inc=3)

        descriptor_imgs.append(enh_texture_img)

        enh_constrast_img = filtering.gabor_filtering_pixel(contrast_img_guassian, dir_map + math.pi / 2, fre_map,
                                                          mask=np.ones((h,w),np.int),
                                                          block_size=16, angle_inc=3)

        descriptor_imgs.append(enh_constrast_img)

        quality_map2, _ , _ = get_maps.get_quality_map_dict(enh_texture_img, self.dict_all,self.dict_ori,self.dict_spacing, block_size=16,
                                                                      process=False)
        quality_map_pixel2 = cv2.resize(quality_map2, (0, 0), fx=16, fy=16)

        mask = quality_map_pixel2 > 0.55
        mask = mask.astype(np.int)
        mask = mask_coarse * mask
        mask = mask * mask_CNN

        mnt = self.minu_model.run(AEC_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_AEC_img.jpeg'
        show.show_minutiae(AEC_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(enh_texture_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)

        fname = output_path + os.path.splitext(name)[0] + '_enh_texture_img.jpeg'
        show.show_minutiae(enh_texture_img, mnt, block=block, fname=fname)

        mnt = self.minu_model.run(enh_constrast_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        fname = output_path + os.path.splitext(name)[0] + '_enh_constrast_img.jpeg'
        show.show_minutiae(enh_constrast_img, mnt, block=block, fname=fname)


        blkH, blkW = dir_map.shape
        for mnt in minutiae_sets:
            for des_img in descriptor_imgs:
                des = descriptor.minutiae_descriptor_extraction(des_img, mnt, self.patch_types, self.des_models,
                                                        self.patchIndexV, batch_size=128)
                minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=mnt,
                                                  des=des, oimg=dir_map, mask=mask)
                latent_template.add_minu_template(minu_template)

        return latent_template

    def feature_extraction_single_latent(self,
                                         img_file,
                                         output_dir=None,
                                         ppi=500,
                                         show_processes=False,
                                         show_minutiae=False,
                                         minu_file=None):
        block = False
        block_size = 16
        img0 = io.imread(img_file, mode='L')  # / 255.0

        img = img0.copy()

        if ppi != 500:
            img = cv2.resize(img, (0, 0), fx=500.0 / ppi, fy=500.0 / ppi)
        img = preprocessing.adjust_image_size(img, block_size)
        name = os.path.basename(img_file)
        start = timer()
        h, w = img.shape

        if h > 1000 and w > 1000:
            return None, None

        # cropping using the two-dictionary-based approach
        if minu_file is not None:
            manu_minu = np.loadtxt(minu_file)
            # remove low-quality minutiae points
            input_minu = np.array(manu_minu)
            input_minu[:, 2] = input_minu[:, 2] / 180.0 * np.pi
        else:
            input_minu = []

        descriptor_imgs = []
        texture_img = preprocessing.FastCartoonTexture(img,
                                                       sigma=2.5,
                                                       show=False)
        STFT_texture_img = preprocessing.STFT(texture_img)

        contrast_img_guassian = preprocessing.local_constrast_enhancement_gaussian(
            img)
        STFT_img = preprocessing.STFT(img)
        constrast_STFT_img = preprocessing.STFT(contrast_img_guassian)

        # step 1: enhance the latent based on our autoencoder
        AEC_img = self.enhancement_model.run_whole_image(STFT_texture_img)
        quality_map_AEC, dir_map_AEC, fre_map_AEC = get_maps.get_quality_map_dict(
            AEC_img, self.dict_all, self.dict_ori, self.dict_spacing, R=500.0)
        blkmask_AEC = quality_map_AEC > 0.45
        blkmask_AEC = binary_closing(blkmask_AEC, np.ones(
            (3, 3))).astype(np.int)
        blkmask_AEC = binary_opening(blkmask_AEC, np.ones(
            (3, 3))).astype(np.int)
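        # SSIM agreement between the STFT-enhanced image and the autoencoder output,
        # fused with the autoencoder quality mask: keep only blocks both masks accept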
        blkmask_SSIM = get_maps.SSIM(STFT_texture_img, AEC_img, thr=0.2)
        blkmask = blkmask_SSIM * blkmask_AEC
        blkH, blkW = blkmask.shape
        mask = cv2.resize(blkmask.astype(float),
                          (block_size * blkW, block_size * blkH),
                          interpolation=cv2.INTER_LINEAR)
        mask[mask > 0] = 1

        minutiae_sets = []

        mnt_STFT = self.minu_model[0].run_whole_image(STFT_img, minu_thr=0.05)
        minutiae_sets.append(mnt_STFT)
        if show_minutiae:
            fname = output_dir + os.path.splitext(name)[0] + '_STFT_img.jpeg'
            show.show_minutiae_sets(STFT_img, [input_minu, mnt_STFT],
                                    mask=None,
                                    block=block,
                                    fname=fname)

        mnt_STFT = self.minu_model[0].run_whole_image(constrast_STFT_img,
                                                      minu_thr=0.1)
        minutiae_sets.append(mnt_STFT)

        mnt_AEC = self.minu_model[1].run_whole_image(AEC_img, minu_thr=0.25)
        mnt_AEC = self.remove_spurious_minutiae(mnt_AEC, mask)
        minutiae_sets.append(mnt_AEC)
        if show_minutiae:
            fname = output_dir + os.path.splitext(name)[0] + '_AEC_img.jpeg'
            show.show_minutiae_sets(AEC_img, [input_minu, mnt_AEC],
                                    mask=mask,
                                    block=block,
                                    fname=fname)

        enh_contrast_img = filtering.gabor_filtering_pixel2(
            contrast_img_guassian,
            dir_map_AEC + math.pi / 2,
            fre_map_AEC,
            mask=np.ones((h, w)),
            block_size=16,
            angle_inc=3)
        mnt_contrast = self.minu_model[1].run_whole_image(enh_contrast_img,
                                                          minu_thr=0.25)
        mnt_contrast = self.remove_spurious_minutiae(mnt_contrast, mask)
        minutiae_sets.append(mnt_contrast)

        enh_texture_img = filtering.gabor_filtering_pixel2(
            texture_img,
            dir_map_AEC + math.pi / 2,
            fre_map_AEC,
            mask=np.ones((h, w)),
            block_size=16,
            angle_inc=3)

        mnt_texture = self.minu_model[1].run_whole_image(enh_texture_img,
                                                         minu_thr=0.25)
        mnt_texture = self.remove_spurious_minutiae(mnt_texture, mask)
        minutiae_sets.append(mnt_texture)

        h, w = img.shape
        latent_template = template.Template()

        # template set 1: no ROI or enhancement is required
        # the texture image is used for coarse segmentation
        descriptor_imgs = []

        descriptor_imgs.append(STFT_img)
        descriptor_imgs.append(texture_img)
        descriptor_imgs.append(enh_texture_img)
        descriptor_imgs.append(enh_contrast_img)

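        # consensus minutiae across the sets above; thr is presumably the minimum
        # number of sets in which a minutia must appear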
        mnt2 = self.get_common_minutiae(minutiae_sets, thr=2)

        mnt3 = self.get_common_minutiae(minutiae_sets, thr=3)

        minutiae_sets.append(mnt3)
        minutiae_sets.append(mnt2)
        if show_minutiae:
            fname = output_dir + os.path.splitext(name)[0] + '_common_2.jpeg'
            show.show_minutiae_sets(img, [input_minu, mnt2],
                                    mask=mask,
                                    block=block,
                                    fname=fname)
        end = timer()
        print('Time for minutiae extraction: %f' % (end - start))

        start = timer()
        for mnt in minutiae_sets:
            for des_img in descriptor_imgs:
                des = descriptor.minutiae_descriptor_extraction(
                    des_img,
                    mnt,
                    self.patch_types,
                    self.des_models,
                    self.patchIndexV,
                    batch_size=128)
                minu_template = template.MinuTemplate(h=h,
                                                      w=w,
                                                      blkH=blkH,
                                                      blkW=blkW,
                                                      minutiae=mnt,
                                                      des=des,
                                                      oimg=dir_map_AEC,
                                                      mask=mask)
                latent_template.add_minu_template(minu_template)
        end = timer()
        print('Time for minutiae descriptor generation: %f' % (end - start))

        start = timer()
        # texture templates
        stride = 16
        x = np.arange(24, w - 24, stride)
        y = np.arange(24, h - 24, stride)

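        # sample "virtual" minutiae on a 16-pixel grid well inside the mask; each grid
        # point contributes the block orientation and its opposite (ori and ori + pi)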
        virtual_minutiae = []
        distFromBg = scipy.ndimage.morphology.distance_transform_edt(mask)
        for y_i in y:
            for x_i in x:
                if (distFromBg[y_i][x_i] <= 16):
                    continue
                ofY = int(y_i / 16)
                ofX = int(x_i / 16)

                ori = -dir_map_AEC[ofY][ofX]
                virtual_minutiae.append([x_i, y_i, ori])
                virtual_minutiae.append([x_i, y_i, math.pi + ori])
        virtual_minutiae = np.asarray(virtual_minutiae)

        texture_template = []
        if len(virtual_minutiae) > 3:
            virtual_des = descriptor.minutiae_descriptor_extraction(
                enh_contrast_img,
                virtual_minutiae,
                self.patch_types,
                self.des_models,
                self.patchIndexV,
                batch_size=128,
                patch_size=96)

            texture_template = template.TextureTemplate(
                h=h,
                w=w,
                minutiae=virtual_minutiae,
                des=virtual_des,
                mask=None)
            latent_template.add_texture_template(texture_template)

        end = timer()

        print('Time for texture template generation: %f' % (end - start))
        return latent_template, texture_template
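A minimal driver for feature_extraction_single_latent above might look as follows. This is only a sketch: the wrapper class name LatentExtractor and all paths are hypothetical placeholders; the method name, its keyword arguments, and the (None, None) return for images larger than 1000 x 1000 pixels are taken from the listing.

# hypothetical driver -- LatentExtractor and all paths are placeholders
import glob

extractor = LatentExtractor()   # assumed wrapper holding the CNN models and dictionaries
output_dir = '/tmp/latent_templates/'
for img_file in sorted(glob.glob('/path/to/latents/*.bmp')):
    latent_template, texture_template = extractor.feature_extraction_single_latent(
        img_file,
        output_dir=output_dir,
        ppi=500,
        show_processes=False,
        show_minutiae=False,
        minu_file=None)
    if latent_template is None:   # images larger than 1000 x 1000 pixels are skipped
        continue
    # latent_template / texture_template would then be serialized for the matching stage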
Example #19
0
def feature_extraction_single_latent(raw_img_file,
                                     AEC_img_file,
                                     mask_file,
                                     patch_types=None,
                                     des_models=None):

    raw_img = io.imread(raw_img_file)
    AEC_img = io.imread(AEC_img_file)
    mask = io.imread(mask_file)

    texture_img = preprocessing.FastCartoonTexture(raw_img,
                                                   sigma=2.5,
                                                   show=False)

    dir_map, fre_map, rec_img = get_maps.get_maps_STFT(AEC_img,
                                                       patch_size=64,
                                                       block_size=16,
                                                       preprocess=True)

    descriptor_img = filtering.gabor_filtering_pixel(texture_img,
                                                     dir_map + math.pi / 2,
                                                     fre_map,
                                                     mask=mask,
                                                     block_size=16,
                                                     angle_inc=3)

    bin_img = binarization.binarization(texture_img,
                                        dir_map,
                                        block_size=16,
                                        mask=mask)

    enhanced_img = filtering.gabor_filtering_block(bin_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)
    enhanced_img = filtering.gabor_filtering_block(enhanced_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)

    enhanced_AEC_img = filtering.gabor_filtering_block(AEC_img,
                                                       dir_map + math.pi / 2,
                                                       fre_map,
                                                       patch_size=64,
                                                       block_size=16)
    bin_img = binarization.binarization(enhanced_AEC_img,
                                        dir_map,
                                        block_size=16,
                                        mask=mask)

    bin_img2 = 1 - bin_img
    thin_img = skeletonize(bin_img2)

    mnt, thin_img2 = crossnumber.extract_minutiae(1 - thin_img,
                                                  mask=mask,
                                                  R=10)
    crossnumber.show_minutiae(thin_img, mnt)

    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize,
                                             patchSize,
                                             oriNum,
                                             isMinu=1)
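    # replicate single-channel images to three channels; the descriptor models
    # appear to expect 3-channel input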
    if len(descriptor_img.shape) == 2:
        h, w = descriptor_img.shape
        ret = np.empty((h, w, 3), dtype=np.float)
        ret[:, :, :] = descriptor_img[:, :, np.newaxis]
        descriptor_img = ret

    if len(enhanced_AEC_img.shape) == 2:
        h, w = enhanced_AEC_img.shape
        ret = np.empty((h, w, 3), dtype=np.float)
        ret[:, :, :] = enhanced_AEC_img[:, :, np.newaxis]
        enhanced_AEC_img = ret

    des = descriptor.minutiae_descriptor_extraction(enhanced_AEC_img,
                                                    mnt,
                                                    patch_types,
                                                    des_models,
                                                    patchIndexV,
                                                    batch_size=128)

    h, w = mask.shape
    blkH, blkW = dir_map.shape
    minu_template = template.MinuTemplate(h=h,
                                          w=w,
                                          blkH=blkH,
                                          blkW=blkW,
                                          minutiae=mnt,
                                          des=des,
                                          oimg=dir_map,
                                          mask=mask)

    latent_template = template.Template()
    latent_template.add_minu_template(minu_template)

    print des
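For context, crossnumber.extract_minutiae above operates on the ridge skeleton produced by skeletonize. The classical crossing-number rule that the module name refers to can be sketched in a few lines; this is a self-contained illustration of the technique, not the library's implementation (the function name and return format are my own):

def crossing_number_minutiae(skel):
    # skel: binary skeleton image with ridge pixels == 1
    # returns a list of (x, y, type) where type is 'ending' (CN == 1)
    # or 'bifurcation' (CN == 3)
    minutiae = []
    h, w = skel.shape
    # 8 neighbours in circular order so that consecutive entries are adjacent
    ring = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
            (1, 1), (1, 0), (1, -1), (0, -1)]
    for y in range(1, h - 1):
        for x in range(1, w - 1):
            if skel[y, x] != 1:
                continue
            p = [int(skel[y + dy, x + dx]) for dy, dx in ring]
            # crossing number = half the number of 0/1 transitions around the ring
            cn = sum(abs(p[k] - p[(k + 1) % 8]) for k in range(8)) // 2
            if cn == 1:
                minutiae.append((x, y, 'ending'))
            elif cn == 3:
                minutiae.append((x, y, 'bifurcation'))
    return minutiae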
Example #20
0
    def feature_extraction_single_rolled(self,
                                         img_file,
                                         output_dir=None,
                                         ppi=500):
        block_size = 16

        if not os.path.exists(img_file):
            return None
        img = io.imread(img_file, as_grey=True)
        if ppi != 500:
            img = cv2.resize(img, (0, 0), fx=500.0 / ppi, fy=500.0 / ppi)

        img = preprocessing.adjust_image_size(img, block_size)
        if len(img.shape) > 2:
            img = rgb2gray(img)
        h, w = img.shape
        start = timeit.default_timer()
        mask = get_maps.get_quality_map_intensity(img)
        stop = timeit.default_timer()
        print('time for cropping : %f' % (stop - start))
        start = timeit.default_timer()
        contrast_img = preprocessing.local_constrast_enhancement(img)
        texture_img = preprocessing.FastCartoonTexture(img,
                                                       sigma=2.5,
                                                       show=False)
        mnt = self.minu_model.run_whole_image(texture_img, minu_thr=0.15)
        stop = timeit.default_timer()
        print('time for minutiae : %f' % (stop - start))

        start = timeit.default_timer()
        des = descriptor.minutiae_descriptor_extraction(
            img,
            mnt,
            self.patch_types,
            self.des_models,
            self.patchIndexV,
            batch_size=256,
            patch_size=self.patch_size)
        stop = timeit.default_timer()
        print('time for descriptor : %f' % (stop - start))

        dir_map, _ = get_maps.get_maps_STFT(img,
                                            patch_size=64,
                                            block_size=block_size,
                                            preprocess=True)

        blkH = h // block_size
        blkW = w // block_size

        minu_template = template.MinuTemplate(h=h,
                                              w=w,
                                              blkH=blkH,
                                              blkW=blkW,
                                              minutiae=mnt,
                                              des=des,
                                              oimg=dir_map,
                                              mask=mask)

        rolled_template = template.Template()
        rolled_template.add_minu_template(minu_template)

        start = timeit.default_timer()
        # texture templates
        stride = 16

        x = np.arange(24, w - 24, stride)
        y = np.arange(24, h - 24, stride)

        virtual_minutiae = []
        distFromBg = scipy.ndimage.morphology.distance_transform_edt(mask)
        for y_i in y:
            for x_i in x:
                if (distFromBg[y_i][x_i] <= 24):
                    continue
                ofY = int(y_i / 16)
                ofX = int(x_i / 16)

                ori = -dir_map[ofY][ofX]
                virtual_minutiae.append([x_i, y_i, ori])
        virtual_minutiae = np.asarray(virtual_minutiae)

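        # cap the texture template at 1000 virtual minutiae, presumably to bound
        # descriptor-extraction cost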
        if len(virtual_minutiae) > 1000:
            virtual_minutiae = virtual_minutiae[:1000]
        print len(virtual_minutiae)
        if len(virtual_minutiae) > 3:
            virtual_des = descriptor.minutiae_descriptor_extraction(
                contrast_img,
                virtual_minutiae,
                self.patch_types,
                self.des_models,
                self.patchIndexV,
                batch_size=128)
            texture_template = template.TextureTemplate(
                h=h,
                w=w,
                minutiae=virtual_minutiae,
                des=virtual_des,
                mask=mask)
            rolled_template.add_texture_template(texture_template)
        stop = timeit.default_timer()
        print('time for texture : %f' % (stop - start))
        return rolled_template
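The virtual-minutiae sampling loop appears in two of these listings with slightly different parameters: the latent version (Example #17) keeps grid points more than 16 pixels from the background and stores each orientation twice (ori and ori + pi), while the rolled version above uses a 24-pixel distance and a single orientation. A small helper capturing both variants, as a sketch under those observations (the function name and signature are my own, not the library's):

import math

import numpy as np
import scipy.ndimage

def sample_virtual_minutiae(dir_map, mask, stride=16, margin=24,
                            min_dist=24, both_orientations=False):
    # dir_map: block-wise orientation map (one entry per 16 x 16 block)
    # mask: pixel-wise foreground mask of the fingerprint area
    h, w = mask.shape
    dist_from_bg = scipy.ndimage.distance_transform_edt(mask)
    virtual_minutiae = []
    for y in np.arange(margin, h - margin, stride):
        for x in np.arange(margin, w - margin, stride):
            if dist_from_bg[y][x] <= min_dist:
                continue
            ori = -dir_map[int(y // 16)][int(x // 16)]
            virtual_minutiae.append([x, y, ori])
            if both_orientations:
                virtual_minutiae.append([x, y, math.pi + ori])
    return np.asarray(virtual_minutiae)

The latent case would correspond to min_dist=16, both_orientations=True; the rolled case above corresponds to the defaults.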