def feature_patches(img_files, minu_files, output_path):
    """Cut minutiae-centered patches from each image and save them as numbered JPEGs.

    img_files and minu_files are parallel lists; each minutiae file is a
    comma-separated text file with one row per minutia (x, y, theta, ...).
    Patches are written to output_path as 00000.jpeg, 00001.jpeg, ...
    """
    patch_size = 160
    ori_num = 64
    # Precomputed sampling grid shared by all patch extractions.
    patch_index_v = descriptor.get_patch_index(patch_size, patch_size, ori_num,
                                               isMinu=1)

    saved = 0
    for minu_file, img_file in zip(minu_files, img_files):
        image = cv2.imread(img_file)
        minutiae = np.loadtxt(minu_file, dtype='f', delimiter=',')
        # Convert the stored minutiae angle into the extractor's convention:
        # shift by -pi/2, then flip the sign.
        minutiae[:, 2] = minutiae[:, 2] - math.pi / 2
        minutiae[:, 2] = -minutiae[:, 2]
        patches = descriptor.extract_patches(minutiae, image, patch_index_v,
                                             patch_type=6)
        for patch in patches:
            fname = "%05d" % saved + '.jpeg'
            saved = saved + 1
            cv2.imwrite(output_path + fname, patch)
def __init__(self, patch_types=None, des_model_dirs=None, minu_model_dir=None):
    """Load the minutiae model, descriptor models, and the lookup tables
    (orientation dictionary, patch index grid, Gabor filter bank) used during
    feature extraction.
    """
    self.des_models = None
    self.patch_types = patch_types
    self.minu_model = None
    self.minu_model_dir = minu_model_dir
    self.des_model_dirs = des_model_dirs

    if self.minu_model_dir is not None:
        self.minu_model = minutiae_AEC.ImportGraph(minu_model_dir)

    # Ridge-flow/spacing dictionaries for orientation-field estimation.
    (self.dict, self.spacing, self.dict_all, self.dict_ori,
     self.dict_spacing) = get_maps.construct_dictionary(ori_num=24)

    patch_size = 160
    ori_num = 64
    self.patchIndexV = descriptor.get_patch_index(patch_size, patch_size,
                                                  ori_num, isMinu=1)

    if self.des_model_dirs is not None:
        # One descriptor network per model directory.
        self.des_models = [descriptor.ImportGraph(d)
                           for d in self.des_model_dirs]
        self.patch_size = 96

    self.gabor_filters = filtering.get_gabor_filters()
def __init__(self, patch_types=None, des_model_dirs=None, minu_model_dir=None,
             enhancement_model_dir=None, ROI_model_dir=None):
    """Load the models used for latent feature extraction.

    Args:
        patch_types: patch types matched one-to-one with des_model_dirs.
        des_model_dirs: list of descriptor model directories.
        minu_model_dir: minutiae autoencoder model directory (optional).
        enhancement_model_dir: enhancement autoencoder directory (optional).
        ROI_model_dir: RCNN ROI segmentation model directory (optional).
    """
    self.des_models = None
    self.patch_types = patch_types
    self.minu_model = None
    self.minu_model_dir = minu_model_dir
    self.des_model_dirs = des_model_dirs
    self.enhancement_model_dir = enhancement_model_dir
    self.ROI_model_dir = ROI_model_dir

    # Ridge-flow/spacing dictionaries for orientation-field estimation.
    (self.dict, self.spacing, self.dict_all, self.dict_ori,
     self.dict_spacing) = get_maps.construct_dictionary(ori_num=60)

    if self.minu_model_dir is not None:
        self.minu_model = minutiae_AEC.ImportGraph(minu_model_dir)

    patchSize = 160
    oriNum = 64
    self.patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                                  isMinu=1)

    if self.des_model_dirs is not None:
        self.des_models = []
        for model_dir in self.des_model_dirs:
            # BUG FIX: the original passed the whole `des_model_dirs` list to
            # ImportGraph instead of the individual `model_dir` being iterated,
            # so every descriptor model was constructed from a list argument.
            # The sibling constructors in this file all pass `model_dir`.
            self.des_models.append(descriptor.ImportGraph(model_dir))

    if self.enhancement_model_dir is not None:
        self.enhancement_model = enhancement_AEC.ImportGraph(
            enhancement_model_dir)
    if self.ROI_model_dir is not None:
        self.ROI_model = RCNN.ImportGraph(ROI_model_dir)
def __init__(self, patch_types=None, des_model_dirs=None, minu_model_dir=None):
    """Load minutiae and descriptor models, printing progress as each one loads."""
    self.des_models = None
    self.patch_types = patch_types
    self.minu_model = None
    self.minu_model_dir = minu_model_dir
    self.des_model_dirs = des_model_dirs

    print("Loading models, this may take some time...")

    if self.minu_model_dir is not None:
        print("Loading minutiae model: " + minu_model_dir)
        self.minu_model = minutiae_AEC.ImportGraph(minu_model_dir)

    # Ridge-flow/spacing dictionaries for orientation-field estimation.
    (self.dict, self.spacing, self.dict_all, self.dict_ori,
     self.dict_spacing) = get_maps.construct_dictionary(ori_num=24)

    patchSize = 160
    oriNum = 64
    # Patch index grid is only needed when descriptor models are present.
    if des_model_dirs is not None and len(des_model_dirs) > 0:
        self.patchIndexV = descriptor.get_patch_index(patchSize, patchSize,
                                                      oriNum, isMinu=1)

    if self.des_model_dirs is not None:
        self.des_models = []
        total = len(des_model_dirs)
        for idx, model_dir in enumerate(des_model_dirs):
            print("Loading descriptor model (" + str(idx + 1) + " of " +
                  str(total) + "): " + model_dir)
            graph = descriptor.ImportGraph(model_dir,
                                           input_name="inputs:0",
                                           output_name='embedding:0')
            self.des_models.append(graph)
        self.patch_size = 96
def __init__(self, patch_types=None, des_model_dirs=None, minu_model_dirs=None,
             enhancement_model_dir=None, ROI_model_dir=None,
             coarsenet_dir=None, FineNet_dir=None):
    """Load every model used by the latent pipeline: multiple minutiae
    extractors, descriptor networks, and an optional enhancement autoencoder.
    Progress is printed as each model loads.

    NOTE(review): ROI_model_dir and FineNet_dir are stored/accepted but not
    loaded here — presumably handled elsewhere; confirm before relying on them.
    """
    self.des_models = None
    self.patch_types = patch_types
    self.minu_model = None
    self.minu_model_dirs = minu_model_dirs
    self.des_model_dirs = des_model_dirs
    self.enhancement_model_dir = enhancement_model_dir
    self.ROI_model_dir = ROI_model_dir

    # Ridge-flow/spacing dictionaries for orientation-field estimation.
    (self.dict, self.spacing, self.dict_all, self.dict_ori,
     self.dict_spacing) = get_maps.construct_dictionary(ori_num=60)

    print("Loading models, this may take some time...")

    if self.minu_model_dirs is not None:
        # One minutiae extractor per supplied directory.
        self.minu_model = []
        n_minu = len(minu_model_dirs)
        for idx, minu_model_dir in enumerate(minu_model_dirs):
            print("Loading minutiae model (" + str(idx + 1) + " of " +
                  str(n_minu) + "): " + minu_model_dir)
            self.minu_model.append(minutiae_AEC.ImportGraph(minu_model_dir))

    self.coarsenet_dir = coarsenet_dir

    patchSize = 160
    oriNum = 64
    self.patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                                  isMinu=1)

    if self.des_model_dirs is not None:
        self.des_models = []
        n_des = len(des_model_dirs)
        for idx, model_dir in enumerate(des_model_dirs):
            print("Loading descriptor model (" + str(idx + 1) + " of " +
                  str(n_des) + "): " + model_dir)
            self.des_models.append(
                descriptor.ImportGraph(model_dir,
                                       input_name="inputs:0",
                                       output_name='embedding:0'))

    if self.enhancement_model_dir is not None:
        print("Loading enhancement model: " + self.enhancement_model_dir)
        self.enhancement_model = enhancement_AEC.ImportGraph(
            enhancement_model_dir)

    print("Finished loading models.")
def main_new_minutiae(args, patch_types=None, model_dirs=None,
                      rolled_range=None):
    """Recompute minutiae descriptors for existing templates using externally
    supplied minutiae files, then write the updated templates to disk.

    args must provide: minutiae_path (list of directories), img_path,
    new_template_path, mask_path, template_path, image_type, batch_size.
    patch_types and model_dirs are parallel lists (asserted below).
    rolled_range: (start, end) index range, used only in the non-latent branch.
    """
    minutiae_path = args.minutiae_path
    img_path = args.img_path
    new_template_path = args.new_template_path
    mask_path = args.mask_path
    template_path = args.template_path
    if not os.path.exists(new_template_path):
        os.makedirs(new_template_path)
    # for latents
    isLatent = (args.image_type == 'latent')
    if isLatent:
        # One sorted list of minutiae files per minutiae directory
        # (e.g. several minutiae detectors' outputs).
        minutiae_files = []
        for i in range(len(minutiae_path)):
            minutiae_files.append(glob.glob(minutiae_path[i] + '*.txt'))
            minutiae_files[-1].sort()
        img_files = glob.glob(img_path + '*.bmp')
        img_files.sort()
        mask_files = glob.glob(mask_path + '*.bmp')
        mask_files.sort()
        template_files = glob.glob(template_path + '*.dat')
        template_files.sort()
    else:
        # NOTE(review): in this branch minutiae_files/img_files are never
        # assigned, so the assert and loop below would raise NameError —
        # this function appears to support latent input only. TODO confirm.
        template_files = [
            str(i + 1) + '.dat'
            for i in range(rolled_range[0], rolled_range[1])
        ]
    assert (len(minutiae_files) > 0)
    # Precomputed sampling grid for descriptor patch extraction.
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                             isMinu=1)
    assert (len(patch_types) == len(model_dirs))
    # One descriptor network per patch type.
    models = []
    for model_dir in model_dirs:
        models.append(ImportGraph(model_dir))
    batch_size = args.batch_size
    nrof_imgs = len(img_files)
    nrof_minutiae_set = len(minutiae_files)
    for print_ind in range(nrof_imgs):
        # minutiae templates
        img_file = img_files[print_ind]
        img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
        img = img.astype(float)
        template_file = template_files[print_ind]
        template = Template.Bin2Template_Byte(template_file,
                                              isLatent=isLatent)
        for n in range(nrof_minutiae_set):
            # Replace the n-th minutiae set with the externally detected
            # minutiae, filtered against the template's ROI mask.
            minutiae = np.loadtxt(minutiae_files[n][print_ind])
            nrof_minutiae = len(minutiae)
            mask = template.minu_template[0].mask
            # remove minutiae in the background
            h, w = mask.shape
            flag = np.ones((nrof_minutiae, ), dtype=bool)
            for i in range(nrof_minutiae):
                x = int(minutiae[i, 0])
                y = int(minutiae[i, 1])
                if y < 10 or x < 10 or x > w - 10 or y > h - 10:
                    # Too close to the image border.
                    flag[i] = False
                elif np.sum(mask[y - 1:y + 2, x - 1:x + 2]) == 0:
                    # 3x3 neighborhood lies entirely outside the ROI mask.
                    flag[i] = False
            minutiae = minutiae[flag, :]
            if len(minutiae) < 3:
                # Very few minutiae survived filtering; logged for inspection.
                print(len(minutiae))
            template.minu_template[n].des = []
            template.minu_template[n].minutiae = minutiae
            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                # Run the descriptor network in mini-batches.
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                # L2-normalize each embedding (epsilon avoids divide-by-zero).
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                template.minu_template[n].des.append(emb_array)
        # Texture templates are cleared, not recomputed: `continue` is the
        # last statement, so each iteration only resets minutiae/des.
        for n, t in enumerate(template.texture_template):
            template.texture_template[n].minutiae = []
            minutiae = None
            template.texture_template[n].des = []
            continue
        fname = new_template_path + os.path.basename(template_file)
        Template.Template2Bin_Byte_TF(fname, template, isLatent=isLatent)
def main(args, patch_types=None, model_dirs=None, rolled_range=None):
    """Recompute minutiae descriptors for each stored template using its own
    minutiae, and write the updated templates to new_template_path.

    args must provide: template_path, img_path, new_template_path,
    image_type, batch_size. patch_types and model_dirs are parallel lists.
    rolled_range: (start, end) index range, used only for non-latent input.
    """
    template_path = args.template_path
    img_path = args.img_path
    new_template_path = args.new_template_path
    if not os.path.exists(new_template_path):
        os.makedirs(new_template_path)
    # for latents
    isLatent = (args.image_type == 'latent')
    if isLatent:
        template_files = os.listdir(template_path)
        template_files.sort()
    else:
        template_files = [
            str(i + 1) + '.dat'
            for i in range(rolled_range[0], rolled_range[1])
        ]
    assert (len(template_files) > 0)
    # Precomputed sampling grid for descriptor patch extraction.
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                             isMinu=1)
    assert (len(patch_types) == len(model_dirs))
    # One descriptor network per patch type.
    models = []
    for model_dir in model_dirs:
        models.append(ImportGraph(model_dir))
    batch_size = args.batch_size
    for print_ind, file in enumerate(template_files):
        print(print_ind)
        template = Template.Bin2Template_Byte(template_path + file,
                                              isLatent=isLatent)
        if template is None:
            continue
        # minutiae templates
        img_file = img_path + file.split('.')[0] + '.bmp'
        img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
        img = img.astype(float)
        for n, t in enumerate(template.minu_template):
            minutiae = t.minutiae
            template.minu_template[n].des = []
            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                # Run the descriptor network in mini-batches.
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                # L2-normalize each embedding (epsilon avoids divide-by-zero).
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                template.minu_template[n].des.append(emb_array)
        for n, t in enumerate(template.texture_template):
            template.texture_template[n].minutiae = []
            minutiae = None
            template.texture_template[n].des = []
            continue
            # NOTE(review): everything below the `continue` above is
            # unreachable — texture-template descriptor extraction is
            # effectively disabled. Kept as-is; confirm whether it should
            # be re-enabled or deleted.
            for k, patch_type in enumerate(patch_types):
                embedding_size = models[k].embedding_size
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                nrof_patches = len(patches)
                emb_array = np.zeros((nrof_patches, embedding_size))
                nrof_batches = int(math.ceil(1.0 * nrof_patches / batch_size))
                for i in range(nrof_batches):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_patches)
                    patches_batch = patches[start_index:end_index, :, :]
                    emb_array[start_index:end_index, :] = models[k].run(
                        patches_batch)
                for i in range(nrof_patches):
                    norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                    emb_array[i, :] = emb_array[i, :] / norm
                #template.texture_template[n].des[patch_type] = emb_array
                template.texture_template[n].des.append(emb_array)
        fname = new_template_path + file
        Template.Template2Bin_Byte_TF(fname, template, isLatent=isLatent)
def main_single(args):
    """Compute minutiae descriptors for every template using a single facenet
    model loaded into a raw TF session (no per-patch-type model list).

    args must provide: template_path, img_path, new_template_path,
    image_type, batch_size, model_dir.

    NOTE(review): if args.image_type != 'latent', template_files and isLatent
    are never assigned and the code below raises NameError — latent-only path.
    """
    template_path = args.template_path
    img_path = args.img_path
    new_template_path = args.new_template_path
    # for latents
    if args.image_type == 'latent':
        template_files = os.listdir(template_path)
        template_files.sort()
        isLatent = 1
    assert (len(template_files) > 0)
    batch_size = args.batch_size
    # Precomputed sampling grid for descriptor patch extraction.
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                             isMinu=1)
    # NOTE(review): batch_size is assigned twice (here and above) — harmless
    # duplication.
    batch_size = args.batch_size
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors by their graph names.
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "batch_join:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("Add:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Runnning forward pass on testing images')
            for file in template_files:
                template = Template.Bin2Template_Byte(template_path + file,
                                                      isLatent=isLatent)
                # minutiae templates: image file shares the template basename.
                img_file = img_path + file.split('.')[0] + '.bmp'
                img = cv2.imread(img_file)  # cv2.IMREAD_GRAYSCALE
                img = img.astype(float)
                for t in template.minu_template:
                    minutiae = t.minutiae
                    patches = descriptor.extract_patches(minutiae,
                                                         img,
                                                         patchIndexV,
                                                         patch_type=6)
                    nrof_patches = len(patches)
                    emb_array = np.zeros((nrof_patches, embedding_size))
                    # Run the network in mini-batches.
                    nrof_batches = int(
                        math.ceil(1.0 * nrof_patches / batch_size))
                    for i in range(nrof_batches):
                        print(i)
                        start_index = i * batch_size
                        end_index = min((i + 1) * batch_size, nrof_patches)
                        patches_batch = patches[start_index:end_index, :, :]
                        feed_dict = {
                            images_placeholder: patches_batch,
                            phase_train_placeholder: False
                        }
                        emb_array[start_index:end_index, :] = sess.run(
                            embeddings, feed_dict=feed_dict)
                    # L2-normalize each embedding (epsilon avoids
                    # divide-by-zero).
                    for i in range(nrof_patches):
                        norm = np.linalg.norm(emb_array[i, :]) + 0.0000001
                        emb_array[i, :] = emb_array[i, :] / norm
                    print(i)
def feature_extraction_single_latent(raw_img_file, AEC_img_file, mask_file,
                                     patch_types=None, des_models=None):
    """Extract a minutiae-based template from a single latent fingerprint.

    Pipeline:
      1) Use the autoencoder-enhanced image (AEC_img) to estimate ridge flow
         and ridge spacing via STFT.
      2) Enhance/binarize/skeletonize and extract minutiae by crossing-number
         analysis, then compute a descriptor per minutia.

    Args:
        raw_img_file: path to the original latent image.
        AEC_img_file: path to the autoencoder-enhanced latent image.
        mask_file: path to the ROI mask image.
        patch_types: list of patch types for descriptor extraction.
        des_models: descriptor models matching patch_types.

    Returns:
        A template.Template holding one minutiae template.
        (BUG FIX: the original built the template but returned None.)
    """
    raw_img = io.imread(raw_img_file)
    AEC_img = io.imread(AEC_img_file)
    mask = io.imread(mask_file)

    # Suppress large-scale background structure; keep the ridge texture.
    texture_img = preprocessing.FastCartoonTexture(raw_img, sigma=2.5,
                                                   show=False)

    # Ridge orientation and frequency maps estimated from the enhanced image.
    dir_map, fre_map, rec_img = get_maps.get_maps_STFT(AEC_img,
                                                       patch_size=64,
                                                       block_size=16,
                                                       preprocess=True)

    # Contextually filtered image intended for descriptor extraction.
    descriptor_img = filtering.gabor_filtering_pixel(texture_img,
                                                     dir_map + math.pi / 2,
                                                     fre_map,
                                                     mask=mask,
                                                     block_size=16,
                                                     angle_inc=3)

    bin_img = binarization.binarization(texture_img, dir_map, block_size=16,
                                        mask=mask)
    enhanced_img = filtering.gabor_filtering_block(bin_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)
    enhanced_img = filtering.gabor_filtering_block(enhanced_img,
                                                   dir_map + math.pi / 2,
                                                   fre_map,
                                                   patch_size=64,
                                                   block_size=16)

    enhanced_AEC_img = filtering.gabor_filtering_block(AEC_img,
                                                       dir_map + math.pi / 2,
                                                       fre_map,
                                                       patch_size=64,
                                                       block_size=16)
    bin_img = binarization.binarization(enhanced_AEC_img, dir_map,
                                        block_size=16, mask=mask)

    # Skeletonize the inverted binary image and trace minutiae by
    # crossing-number analysis (R=10 suppresses minutiae near the mask edge).
    bin_img2 = 1 - bin_img
    thin_img = skeletonize(bin_img2)
    mnt, thin_img2 = crossnumber.extract_minutiae(1 - thin_img, mask=mask,
                                                  R=10)
    crossnumber.show_minutiae(thin_img, mnt)

    # Precomputed sampling grid for descriptor patch extraction.
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                             isMinu=1)

    def _to_rgb(gray):
        # Descriptor networks expect 3-channel input; replicate the gray
        # plane. BUG FIX: dtype=np.float used the alias removed in NumPy
        # 1.24 — the builtin float is the identical dtype.
        h, w = gray.shape
        out = np.empty((h, w, 3), dtype=float)
        out[:, :, :] = gray[:, :, np.newaxis]
        return out

    if len(descriptor_img.shape) == 2:
        descriptor_img = _to_rgb(descriptor_img)
    if len(enhanced_AEC_img.shape) == 2:
        enhanced_AEC_img = _to_rgb(enhanced_AEC_img)

    des = descriptor.minutiae_descriptor_extraction(enhanced_AEC_img,
                                                    mnt,
                                                    patch_types,
                                                    des_models,
                                                    patchIndexV,
                                                    batch_size=128)

    h, w = mask.shape
    blkH, blkW = dir_map.shape
    minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW,
                                          minutiae=mnt, des=des,
                                          oimg=dir_map, mask=mask)
    latent_template = template.Template()
    latent_template.add_minu_template(minu_template)
    # BUG FIX: `print des` was a Python 2 print statement (SyntaxError on
    # Python 3); the function form behaves identically on both.
    print(des)
    return latent_template
def feature_extraction(img_files, minu_files, model_dir, output_file,
                       patch_type=6):
    """Compute descriptor embeddings for minutiae-centered patches of every
    image and save the stacked embedding matrix to output_file via np.save.

    img_files and minu_files are parallel lists; each minutiae file is a
    comma-separated text file with one row per minutia (x, y, theta, ...).
    model_dir holds a facenet-style checkpoint loaded into a raw TF session.
    """
    # Precomputed sampling grid for descriptor patch extraction.
    patchSize = 160
    oriNum = 64
    patchIndexV = descriptor.get_patch_index(patchSize, patchSize, oriNum,
                                             isMinu=1)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            print('Model directory: %s' % model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(model_dir, meta_file, ckpt_file)

            # Get input and output tensors by their graph names.
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "batch_join:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("Add:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            # Accumulator: one row per extracted patch across all images.
            features = np.array([], dtype=np.float32).reshape(
                0, embedding_size)
            for minu_file, img_file in zip(minu_files, img_files):
                img = cv2.imread(img_file)
                minutiae = np.loadtxt(minu_file, dtype='f', delimiter=',')
                # Shift the minutiae angle into the extractor's convention.
                # (Unlike feature_patches, the sign flip is commented out.)
                minutiae[:, 2] = minutiae[:, 2] - math.pi / 2
                #minutiae[:, 2] = -minutiae[:, 2]
                patches = descriptor.extract_patches(minutiae,
                                                     img,
                                                     patchIndexV,
                                                     patch_type=patch_type)
                # All patches of one image are fed as a single batch.
                feed_dict = {
                    images_placeholder: patches,
                    phase_train_placeholder: False
                }
                latent_emb = sess.run(embeddings, feed_dict=feed_dict)
                features = np.vstack([features, latent_emb])
            np.save(output_file, features)