import numpy as np

# `image` and `serial` are project-local helper modules (image alignment and
# keypoint (de)serialization utilities) assumed to live alongside this file.
import image
import serial


def calc_metric(candidate_img_blurred: np.ndarray, candidate_kpts: np.ndarray,
                candidate_length: int, url_img_blurred: np.ndarray,
                url_kpts: np.ndarray, url_length: int, kpt_pos: tuple) -> float:
    """Return the fraction of pixels that differ significantly between the two
    blurred grayscale images after aligning them on the matched keypoint pair."""
    candidate_kpts = serial.deserialize_keypoints(candidate_kpts)
    url_kpts = serial.deserialize_keypoints(url_kpts)
    m, n = kpt_pos[0], kpt_pos[1]

    # No matching keypoint pair was found: report the maximum difference.
    if m == -1:
        return 1

    # Bring both images to a common size before comparing them.
    candidate_img_blurred, url_img_blurred = image.adjust_size(candidate_img_blurred, url_img_blurred)
    c_x, c_y = candidate_img_blurred.shape
    u_x, u_y = url_img_blurred.shape

    # Offset between the matched keypoints, used to align the candidate image.
    x, y = np.array(candidate_kpts[n].pt) - np.array(url_kpts[m].pt)
    if x + c_x < 0 or x + u_x < 0:
        return 1
    if y + c_y < 0 or y + u_y < 0:
        return 1
    candidate_img_blurred = image.align_text(candidate_img_blurred, (int(x), int(y)))

    # Pixel-wise absolute difference: count pixels that differ by more than 10
    # intensity levels and normalise by the larger image size.
    candidate_img_blurred = candidate_img_blurred.astype(int)
    url_img_blurred = url_img_blurred.astype(int)
    img_diff = np.abs(candidate_img_blurred - url_img_blurred).astype(int)
    divisor = max(candidate_img_blurred.size, url_img_blurred.size)
    diff = len(np.where(img_diff > 10)[0]) / float(divisor)
    return diff

    # Unreachable: a length-based penalty that is disabled by the early return
    # above; kept as in the original source.
    penalty = abs(float(candidate_length) - url_length) / max(candidate_length, url_length)
    diff = diff / (1.0 - penalty * 10)
    return abs(diff)
def get_best_match(matches, candidate_kpts, domain_kpts):
    """Return the (domain_index, candidate_index) pair whose keypoints lie
    closest together, or (-1, -1) if `matches` is empty."""
    d_accu = float('Inf')
    best = (-1, -1)
    candidate_kpts = serial.deserialize_keypoints(candidate_kpts)
    domain_kpts = serial.deserialize_keypoints(domain_kpts)
    for match in matches:
        n, m = match[0], match[1]
        c_x_y = candidate_kpts[m].pt
        d_x_y = domain_kpts[n].pt
        # Distance measure: absolute value of the summed coordinate offsets
        # (note that opposite-sign x/y offsets can cancel each other out).
        d = abs(sum(np.array(c_x_y) - np.array(d_x_y)))
        if d < d_accu:
            d_accu = d
            best = match
    return best
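# ---------------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the matching pipeline itself).
# It assumes OpenCV (cv2) is available, that the two input images are
# grayscale numpy arrays, and that the project-local `serial` module exposes a
# `serialize_keypoints` counterpart to the `deserialize_keypoints` used above;
# that helper name is an assumption, not confirmed by this file.
#
#   import cv2
#
#   orb = cv2.ORB_create()
#   cand_kpts, _ = orb.detectAndCompute(candidate_img_blurred, None)
#   url_kpts, _ = orb.detectAndCompute(url_img_blurred, None)
#
#   # Matches are (domain_index, candidate_index) pairs, as unpacked by
#   # get_best_match above; these indices are placeholders for illustration.
#   matches = [(0, 0), (1, 2), (2, 1)]
#
#   best = get_best_match(matches,
#                         serial.serialize_keypoints(cand_kpts),
#                         serial.serialize_keypoints(url_kpts))
#   score = calc_metric(candidate_img_blurred,
#                       serial.serialize_keypoints(cand_kpts),
#                       len(cand_kpts),
#                       url_img_blurred,
#                       serial.serialize_keypoints(url_kpts),
#                       len(url_kpts),
#                       best)
#   # `score` is the fraction of pixels differing by more than 10 intensity
#   # levels after alignment; 1 means no usable match was found.
# ---------------------------------------------------------------------------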