print("[INFO] computing hashes for needles...") needle_paths = list(paths.list_images(args["needles"])) # Dictionary containing needle images and their correspondent haystack image path results = {} # Loop over the needle paths for p in needle_paths: # Load the image from disk image = cv2.imread(p) # If the image is None then we could not load it from disk (so skip it) if image is None: continue # Convert the image to grayscale and compute the hash image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) imageHash = dhash(image) # Grab all image paths that match the hash matched_paths = hash_dict.get(imageHash, []) # Save the results results[p] = matched_paths # Display results found print("[INFO] input images and their correspondents:") # Loop over each subdirectory and display it for key in results: print('Image: {}'.format(key)) if len(results[key]) > 0: for img in results[key]: print('-> {}'.format(img)) else: print('-> No similar images found.')
args = vars(ap.parse_args())

# Grab the paths to the images in both folders
print("[INFO] Computing hashes for images...")
images_paths1 = list(paths.list_images(args["compare1"]))
images_paths2 = list(paths.list_images(args["compare2"]))

# Remove the `\` character from any filenames containing a space
# (assuming you're executing the code on a Unix machine)
if sys.platform != "win32":
    images_paths1 = [p.replace("\\", "") for p in images_paths1]
    images_paths2 = [p.replace("\\", "") for p in images_paths2]

# Load the input query image, compute its hash, and resize it for the
# ResNet comparison
query_image = cv2.imread(args["query"])
query_hash = convert_hash(dhash(query_image))
query_image_resized = cv2.resize(query_image, (160, 160))

# Initialize the results for both folders
results1 = {'hashing': [], 'resnet': []}
results2 = {'hashing': [], 'resnet': []}

# Loop over the image paths in the first folder
for (i, p) in enumerate(images_paths1):
    # Display progress and load the image from disk
    print("[INFO] Folder #1: Processing image {}/{}".format(
        i + 1, len(images_paths1)))
    image = cv2.imread(p)

    # If the image is None then we could not load it from disk (so skip it)
    if image is None:
        continue
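The excerpt cuts off before the actual comparison. One plausible continuation of the loop body, assuming the convert_hash and hamming helpers sketched elsewhere in this section and a hypothetical MAX_DISTANCE threshold:

    # Hypothetical continuation: hash this image and record it as a match
    # when it lies within a Hamming-distance threshold of the query hash
    h = convert_hash(dhash(image))
    if hamming(query_hash, h) <= MAX_DISTANCE:  # MAX_DISTANCE is an assumed threshold
        results1['hashing'].append(p)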
images_paths = [p.replace("\\", "") for p in images_paths]

# Initialize the dictionary that will map each image hash to a list of
# corresponding image paths
hash_dict = {}

# Loop over the image paths
for (i, p) in enumerate(images_paths):
    # Display progress and load the image from disk
    print("[INFO] processing image {}/{}".format(i + 1, len(images_paths)))
    image = cv2.imread(p)

    # If the image is None then we could not load it from disk (so skip it)
    if image is None:
        continue

    # Compute the difference hash and convert it
    h = convert_hash(dhash(image))

    # Update the list of paths stored under this hash
    list_images = hash_dict.get(h, [])
    list_images.append(p)
    hash_dict[h] = list_images

# Serialize the dictionary of hashes to disk
hash_filename = database_path + ".dict.pickle"
with open(hash_filename, "wb") as f:
    pickle.dump(hash_dict, f)
print("[INFO] Hash data saved: " + hash_filename)

# Build the VP-Tree over the hashes, using Hamming distance as the metric
print("[INFO] Generating VP-Tree...")
tree = vptree.VPTree(list(hash_dict.keys()), hamming)
tree_filename = database_path + ".tree.pickle"
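This block also assumes a convert_hash helper and a hamming function to hand to vptree.VPTree as its distance metric. Minimal sketches consistent with how they are used here (both are assumptions, not code from the excerpt):

def convert_hash(h):
    # Sketch of the assumed helper: cast the hash to a plain Python int so
    # it can be pickled and used as a dictionary key / VP-Tree point
    return int(h)

def hamming(a, b):
    # Hamming distance between two integer hashes: XOR the values and
    # count the differing bits
    return bin(int(a) ^ int(b)).count("1")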
    default=10, help="maximum Hamming distance")
args = vars(ap.parse_args())

# Load the VP-Tree and hashes dictionary from disk
print("[INFO] Loading VP-Tree and hashes...")
tree = pickle.loads(open(args["data"] + ".tree.pickle", "rb").read())
hashes = pickle.loads(open(args["data"] + ".dict.pickle", "rb").read())

# Load and display the input query image
image = cv2.imread(args["query"])
cv2.imshow("Query image", image)

# Compute the hash for the query image, then convert it
queryHash = convert_hash(dhash(image))

# Perform the search, timing how long it takes
print("[INFO] Performing search...")
start = time.time()
results = tree.get_all_in_range(queryHash, args["distance"])
results = sorted(results)
end = time.time()
print("[INFO] Search took {} seconds".format(end - start))

# Loop over the (distance, hash) results
for (d, h) in results:
    # Grab all image paths in our dataset with the same hash
    result_paths = hashes.get(h, [])
    print("[INFO] {} total image(s) with d: {}, h: {}".format(
        len(result_paths), d, h))
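To verify the matches visually, the result loop can be extended to load and display every image sharing the hash; a brief sketch (the window handling here is deliberately minimal):

    # Loop over the matched image paths, then load and show each result,
    # waiting for a keypress before moving on
    for result_path in result_paths:
        result = cv2.imread(result_path)
        cv2.imshow("Result", result)
        cv2.waitKey(0)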
        'hashing': [],
        'resnet': []
    }
}, {
    'filename': args["query22"],
    'input': cv2.imread(args["query22"]),
    'results': {
        'hashing': [],
        'resnet': []
    }
}]]

# Loop over the query pairs
for (i, p) in enumerate(queries):
    # Compute the hash for both queries in the pair and resize the inputs
    # for the ResNet comparison
    query_hash1 = convert_hash(dhash(p[0]['input']))
    query_hash2 = convert_hash(dhash(p[1]['input']))
    query_image1_resized = cv2.resize(p[0]['input'], (160, 160))
    query_image2_resized = cv2.resize(p[1]['input'], (160, 160))
    results1 = p[0]['results']
    results2 = p[1]['results']

    # Loop over the image paths
    for (j, q) in enumerate(images_paths):
        # Display progress and load the image from disk
        print("[INFO] Query #{}: Processing image {}/{}".format(
            i + 1, j + 1, len(images_paths)))
        image = cv2.imread(q)

        # If the image is None then we could not load it from disk (so skip it)
        if image is None:
            continue

        # Compute the difference hash and convert it
        h = convert_hash(dhash(image))
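As in the two-folder script above, the rest of this inner loop is not shown. A hypothetical continuation that scores the image against both query hashes, again assuming the hamming helper and a MAX_DISTANCE threshold:

        # Hypothetical continuation: keep the image if it lies within a
        # Hamming-distance threshold of either query hash
        if hamming(query_hash1, h) <= MAX_DISTANCE:  # MAX_DISTANCE is an assumed threshold
            results1['hashing'].append(q)
        if hamming(query_hash2, h) <= MAX_DISTANCE:
            results2['hashing'].append(q)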