def test_process_minutiae(self):
    # Tests minutiae creation output.
    path = '../../data/Fingerprints - Set A/101_1.tif'
    img = load_image(path, True)
    img = enhance_image(img, padding=5)

    minutiae = process_minutiae(img)

    self.assertIsNotNone(minutiae, msg='ERROR: Wrong output.')
def test_enhance_image(self):
    # Tests image enhancing output and data loss.
    # Revert gray colour levels. Match scale with the raw image for comparison.
    path = '../../data/Fingerprints - Set A/101_1.tif'

    # Image loading.
    img = load_image(path, True)
    img = enhance_image(img, padding=5)

    # Minimal sanity check (assumed addition).
    self.assertIsNotNone(img, msg='ERROR: Wrong output.')
def trainData(self):
    """ Trains the selected model on the loaded dataset. """
    start = time.time()
    print(f'INFO: Loading model features. Model: {self.model.lower()}')

    if self.model.lower() == 'tree':
        for i in range(len(self.images)):
            # Extract minutiae.
            self.images[i].image_enhanced = enhance_image(
                self.images[i].image_raw, skeletonise=True)
            minutiae = process_minutiae(self.images[i].image_enhanced)

            # Confirmed point matching.
            self.images[i].profile = generate_tuple_profile(minutiae)

            # Rewriting to the loaded data.
            self.images[i].minutiae = minutiae

    elif self.model.lower() == 'orb':
        # Base data.
        print('INFO: Training skipped.')

    elif self.model.lower() == 'bf':
        for i in range(len(self.images)):
            # BFMatcher descriptors generation.
            self.images[i].image_enhanced = enhance_image(
                self.images[i].image_raw, skeletonise=False)
            points, descriptors = edge_processing(
                self.images[i].image_enhanced, threshold=self.threshold)
            # points, descriptors = minutiae_points(self.images[i].image_enhanced)
            self.images[i].descriptors = descriptors

    print(f'INFO: Training completed in {round(time.time() - start, 2)} sec')
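# Illustration only: a minimal sketch of the per-image feature extraction performed by
# the 'tree' branch of trainData above, pulled out as a standalone helper. The helper
# name `extract_tuple_profile` is hypothetical; enhance_image, process_minutiae and
# generate_tuple_profile are the library functions imported elsewhere in this repository.
def extract_tuple_profile(image_raw):
    # Enhance and skeletonise the raw fingerprint, detect minutiae,
    # then build the tuple profile used for confirmed point matching.
    image_enhanced = enhance_image(image_raw, skeletonise=True)
    minutiae = process_minutiae(image_enhanced)
    profile = generate_tuple_profile(minutiae)
    return image_enhanced, minutiae, profile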
from libs.basics import load_image, display_image
from libs.minutiae import process_minutiae, generate_tuple_profile
from libs.enhancing import enhance_image
from libs.matching import match_tuples, build_edges, edge_matching

import unittest

path_base = '../../data/Fingerprints - Set A/101_2.tif'
path_test = '../../data/Fingerprints - Set A/101_2.tif'

img_base = load_image(path_base, True)
img_base = enhance_image(img_base, padding=5)

img_test = load_image(path_test, True)
img_test = enhance_image(img_test, padding=5)

# Confirmed point matching.
TUPLE_BASE = generate_tuple_profile(process_minutiae(img_base))
TUPLE_TEST = generate_tuple_profile(process_minutiae(img_test))


class TestMatching(unittest.TestCase):

    def test_match_tuples(self):
        # Tests tuple profile matching output.
        ccpb_base, ccpb_test = match_tuples(TUPLE_BASE, TUPLE_TEST)
        print('---')

    def test_edge_matching(self):
        pass
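# Standard unittest entry point (assumed addition, not part of the original snippet)
# so the matching tests can be run directly as a script or via `python -m unittest`.
if __name__ == '__main__':
    unittest.main()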
def matchFingerprint(self, image: np.ndarray, verbose: bool = False, match_th: int = 33):
    """
    Compares the given image against the loaded templates. A similarity score
    is computed and used to determine the most likely match, if any.
    """
    if self.model.lower() == 'bf':
        # BFMatcher and MES scoring.
        # Lower scores indicate a closer match; the minimum score gives the best candidate.
        scores = {}

        # Test descriptors.
        img = enhance_image(image, skeletonise=False)
        points, descriptors = edge_processing(img, threshold=self.threshold)

        for i in range(len(self.images)):
            # Matching.
            try:
                matches = match_edge_descriptors(
                    self.images[i].descriptors, descriptors)
            except AttributeError:
                raise Exception(
                    'ERROR: Model not trained - run trainData first.')

            # Calculate score.
            score = sum([match.distance for match in matches])

            # MES (mean edge score) = sum(match distances) / len(matches).
            if len(matches) > 0:
                mes = score / len(matches)
                scores[self.images[i].img_id] = mes

        scores = sorted(scores.items(), key=operator.itemgetter(1))

        # Display the most likely match.
        results = [{
            'img_id': s[0],
            'score': round(s[1], 2),
            'match': s[1] < match_th
        } for s in scores]

        matches = [m for m in results if m['match']]
        if len(matches) == 0:
            print(f'No match found. Most similar fingerprints: {results[:5]}')
        else:
            print(f'INFO: Matches found, score: {matches}')

        return scores

    elif self.model.lower() == 'orb':
        # Basic SIFT-based ORB matcher.
        for i in self.images:
            sift_match(i.image_raw, image)

    elif self.model.lower() == 'tree':
        img_test = enhance_image(image, skeletonise=True)
        minutiae_test = process_minutiae(img_test)

        # Confirmed point matching.
        img_profile = generate_tuple_profile(minutiae_test)

        for i in range(len(self.images)):
            # Matching.
            common_points_base, common_points_test = match_tuples(
                self.images[i].profile, img_profile)

            if evaluate(common_points_base, self.images[i].minutiae, minutiae_test):
                print(f'Match with {self.images[i].img_id}')
            else:
                print(f'Not a match with {self.images[i].img_id}')

    elif self.model.lower() == 'cnn':
        pass

    else:
        print('INFO: Not implemented yet.')
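# Illustration only: the mean edge score (MES) used by the 'bf' branch of
# matchFingerprint above, written as a standalone helper. The function name
# `mean_edge_score` is hypothetical; `matches` is expected to be a list of OpenCV
# DMatch-like objects exposing a `.distance` attribute, as returned by a BFMatcher.
def mean_edge_score(matches):
    # MES = sum of match distances / number of matches; lower means more similar.
    if not matches:
        return None
    return sum(match.distance for match in matches) / len(matches)


# A candidate is reported as a match when its MES falls below `match_th` (33 by default).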