def main():
    """Retrieve database matches for qsd1_w4 queries via HOG histograms and report map@k."""
    k = 1  # number of ranked results evaluated by map@k

    print("Reading images...")
    qs = get_imgs("datasets/qsd1_w4")
    db = get_imgs("datasets/DDBB")

    # Denoising methods available: "Gaussian", "Median", "bilateral", "FastNl"
    print("Denoising images...")
    # qs_denoised = [utils.denoise_image(img, method="FastNl") for img in tqdm(qs)]

    # Split every query image into its individual paintings.
    qs_split = [background_remover.remove_background(img) for img in qs]

    print("\nComputing histograms...")
    hogs_qs = [[utils.get_hog_histogram(painting) for painting in img]
               for img in qs_split]
    hogs_ddbb = utils.get_hog_histograms(db)

    print("\nComputing distances")
    # L1 distance between each query painting's HOG and every database HOG.
    distances = [
        [[sum(np.abs(query_hog - db_hog)) for db_hog in hogs_ddbb]
         for query_hog in image_hogs]
        for image_hogs in tqdm(hogs_qs)
    ]
    print("Done calculating hogs")

    # Keep the k database indices with the smallest distance per painting.
    predictions = [
        [utils.list_argsort(painting_dst)[:k] for painting_dst in image_dsts]
        for image_dsts in distances
    ]

    # Flatten per-painting rankings into a single list per query image.
    hypo = [
        [pred for painting_preds in image_preds for pred in painting_preds]
        for image_preds in predictions
    ]

    # Score against ground truth.
    gt = utils.get_pickle("datasets/qsd1_w4/gt_corresps.pkl")
    mapAtK = metrics.mapk(gt, hypo, k)
    print("\nMap@ " + str(k) + " is " + str(mapAtK))
def proc_and_copy_image (src, dest) :
    """Read *src*, optionally remove background and/or skin, and write an
    RGBA PNG to *dest* whose alpha channel is the foreground mask
    (255 = keep).  Returns the path actually written (extension forced
    to '.png')."""
    #print src
    img = cv2.imread(src)
    # Masks must be uint8: cv2.merge requires every channel to share the
    # image's 8-bit depth and cv2.bitwise_and expects an 8-bit mask — the
    # original float64 np.ones masks would make those calls fail.
    fore = 255 * np.ones([img.shape[0], img.shape[1]], dtype=np.uint8)
    skinfore = 255 * np.ones([img.shape[0], img.shape[1]], dtype=np.uint8)
    if args.remove_background :
        fore = remove_background(src)
    if args.remove_skin :
        skinfore = skin_detect(img)
        fore = cv2.bitwise_and(fore, fore, mask = skinfore) # the foreground mask
    b = img[:,:,0]
    g = img[:,:,1]
    r = img[:,:,2]
    # Append the foreground mask as the alpha channel.
    img_merge = cv2.merge((b,g,r,fore))
    # Force a .png extension so the alpha channel survives writing.
    dot_index = dest.rfind('.')
    dest = dest[:dot_index] + '.png'
    cv2.imwrite(dest,img_merge)
    # Return the written path, consistent with the other variant of this
    # function in the file (original implicitly returned None).
    return dest
def proc_and_copy_image (src, dest) :
    """Process image from src and write the result to dest.

    With neither flag set the file is copied verbatim; otherwise an RGBA
    PNG is written whose alpha channel is the foreground mask
    (255 = keep).  Returns the path actually written.
    """
    # Fast path: nothing to remove, just copy the file unchanged.
    if not args.remove_background and not args.remove_skin :
        shutil.copyfile(src, dest)
        return dest
    img = cv2.imread(src)
    # Masks must be uint8: cv2.merge requires every channel to share the
    # image's 8-bit depth and cv2.bitwise_and expects an 8-bit mask — the
    # original float64 np.ones masks would fail on the skin-only path.
    fore = 255 * np.ones([img.shape[0], img.shape[1]], dtype=np.uint8)
    skinfore = 255 * np.ones([img.shape[0], img.shape[1]], dtype=np.uint8)
    if args.remove_background :
        fore = remove_background(src)
    if args.remove_skin :
        skinfore = skin_detect(img)
        fore = cv2.bitwise_and(fore, fore, mask = skinfore) # the foreground mask
    b = img[:,:,0]
    g = img[:,:,1]
    r = img[:,:,2]
    # Append the foreground mask as the alpha channel.
    img_merge = cv2.merge((b,g,r,fore))
    # Force a .png extension so the alpha channel survives writing.
    dot_index = dest.rfind('.')
    dest = dest[:dot_index] + '.png'
    cv2.imwrite(dest, img_merge)
    return dest
def detect(self, image_file_path) :
    """Detect the dominant reference color in an image's foreground.

    Builds a histogram over the reference palette for the pixels kept by
    the background/skin masks, then returns a 4-tuple:
    (index of the most frequent color, the full color histogram,
     fraction of foreground pixels holding that color, the foreground mask).
    """
    img = cv2.imread(image_file_path)
    start_time = time.time()  # NOTE(review): start_time is never used below
    # generate the foreground
    background_removed = remove_background(image_file_path) # background removal
    skin_removed = skin_detect(img) # skin removal
    foreground = cv2.bitwise_and(background_removed, background_removed, mask = skin_removed) # the foreground mask
    # Ratio of pixels dropped by skin removal relative to the background-only
    # mask; +0.1 guards against division by zero on an all-zero mask.
    skin_percentage = 1 - sum(map(sum, foreground))/1.0/(sum(map(sum, background_removed))+0.1)
    # In case of clothes in skin color.
    # print "%f skin detected." % skin_percentage
    if skin_percentage > self._max_skin_percentage:
        # Skin removal ate too much of the image — fall back to the
        # background-only mask.
        # print "Too much skin"
        foreground = background_removed
    # Find the nearest reference color for each pixel and count
    color_histogram = [0] * self._color_num
    image_foreground_pixel = 0
    for i in range(len(foreground)):
        for j in range(len(foreground[0])):
            # Only count pixels the mask marks as foreground (exactly 255).
            if foreground[i][j] != 255:
                continue
            image_foreground_pixel += 1
            # img is loaded by cv2 so channels are BGR; indices [2],[1],[0]
            # read them back as R,G,B for the lookup table.
            # A negative table entry means this RGB has no cached palette
            # index yet, so compute and cache it first.
            if self._color_table_builder.rgb_to_color[img[i][j][2], img[i][j][1], img[i][j][0]] < 0 :
                self._color_table_builder.reset_color(img[i][j][2], img[i][j][1], img[i][j][0])
            color_index = int(self._color_table_builder.rgb_to_color[img[i][j][2], img[i][j][1], img[i][j][0]])
            color_histogram[color_index] += 1
    # Decay colors.
    # Down-weight configured palette entries (presumably over-common
    # colors — TODO confirm intent) before picking the winner.
    for decay_color in self._decay_color_index :
        color_histogram[decay_color] *= self._decay_percentage
    max_color_count = max(color_histogram)
    # +0.1 avoids division by zero when no foreground pixel was counted.
    return (color_histogram.index(max_color_count), color_histogram, max_color_count / (float(image_foreground_pixel)+0.1), foreground)
def main():
    """Retrieve qst1_w4 matches with ORB keypoints, masking out text boxes,
    and compare the top-k results against ground truth."""
    # K parameter for map@k
    k = 10

    # Get images and denoise query set.
    print("Getting and denoising images...")
    qs = get_imgs("datasets/qst1_w4")
    db = get_imgs("datasets/DDBB")
    qs_denoised = [denoise_imgs(img) for img in tqdm(qs)]

    # Separate paintings inside each query image into individual images
    # (this also crops away the background).
    qs_split = [
        background_remover.remove_background(img) for img in qs_denoised
    ]

    # Get masks without background and without text box of query sets.
    print("\nGetting text bounding box masks...")
    # Not needed since remove_background above already crops the background:
    # qs_bck_masks = [get_mask_background(img) for img in tqdm(qs_denoised)]
    qs_txt_infos = [[get_text_bb_info(painting) for painting in img]
                    for img in tqdm(qs_split)]
    qs_txt_masks = [[single.mask for single in qs_txt_info]
                    for qs_txt_info in qs_txt_infos]
    # Binarize the masks in place: everything below 255 is dropped.
    # NOTE(review): the >255 clamp is a no-op on uint8 data — presumably the
    # masks may arrive in a wider dtype; confirm against get_text_bb_info.
    for qs_mask in qs_txt_masks:
        for single_mask in qs_mask:
            single_mask[single_mask < 255] = 0
            single_mask[single_mask > 255] = 255
    qs_masks = [[single_mask.astype("uint8") for single_mask in qs_mask]
                for qs_mask in qs_txt_masks]

    # Detect and describe keypoints in images (text regions masked out for
    # the queries; the database uses the full image).
    print("\nDetecting and describing keypoints...")
    dt_type = cv.ORB_create()
    qs_kps = [[
        detect_keypoints(dt_type, painting, painting_mask)
        for painting, painting_mask in zip(img, mask)
    ] for img, mask in zip(qs_split, qs_masks)]
    qs_dps = [[
        describe_keypoints(dt_type, painting, painting_kp)
        for painting, painting_kp in zip(img, kp)
    ] for img, kp in zip(qs_split, qs_kps)]
    db_kps = [detect_keypoints(dt_type, img) for img in tqdm(db)]
    # total= gives tqdm a length, since zip() has none.
    db_dps = [
        describe_keypoints(dt_type, img, kp)
        for img, kp in tqdm(zip(db, db_kps), total=len(db))
    ]

    # Match images
    print("\nMatching images...")

    class Match:
        """A database candidate: summed descriptor distance + database index."""
        def __init__(self, summed_dist, idx):
            self.summed_dist = summed_dist
            self.idx = idx

    tops = []   # per query image: merged ranked database indices
    dists = []  # per query image: the corresponding distances
    # A best distance above this threshold means "no match in the database";
    # -1 is emitted for that painting instead.
    dst_thr = 35
    # For all query images
    for qs_dp in tqdm(qs_dps):
        # Get all descriptor matches between a query image and all database
        # images (outer index: database image, inner: painting).
        matches_s = [[
            match_descriptions(qs_single_painting_dp, db_dp)
            for qs_single_painting_dp in qs_dp
        ] for db_dp in db_dps]
        # Evaluate quality of matches
        matches_s_ev = [[
            evaluate_matches(painting_match) for painting_match in match
        ] for match in matches_s]
        # Wrap each summed distance with its database index for sorting.
        matches_s_cl = [[
            Match(painting_summed_dist, idx)
            for painting_summed_dist in summed_dist
        ] for idx, summed_dist in enumerate(matches_s_ev)]
        if len(qs_dp) > 1:
            # Two paintings in this query image: rank each independently.
            p1 = [match[0] for match in matches_s_cl]
            p2 = [match[1] for match in matches_s_cl]
            p1 = sorted(p1, key=lambda x: x.summed_dist)
            p2 = sorted(p2, key=lambda x: x.summed_dist)
            p1_tops = [matches.idx for matches in p1[0:k]]
            p1_dists = [matches.summed_dist for matches in p1[0:k]]
            p2_tops = [matches.idx for matches in p2[0:k]]
            p2_dists = [matches.summed_dist for matches in p2[0:k]]
            merged_tops = []
            if p1_dists[0] > dst_thr:
                # Painting 1 has no confident match: -1 takes its slot
                # (position 0), followed by painting 2's candidates.
                p2_tops.insert(0, -1)
                merged_tops = p2_tops
            elif p2_dists[0] > dst_thr:
                # Painting 2 has no confident match: -1 takes its slot
                # (position 1), after painting 1's best candidate.
                p1_tops.insert(1, -1)
                merged_tops = p1_tops
            else:
                # Both paintings matched: interleave their rankings so the
                # list alternates painting1/painting2 candidates.
                for first_top, second_top in zip(p1_tops, p2_tops):
                    merged_tops.append(first_top)
                    merged_tops.append(second_top)
            tops.append(merged_tops)
            dists.append([p1_dists, p2_dists])
        else:
            # Single painting: rank its candidates directly.
            p1 = [match[0] for match in matches_s_cl]
            p1 = sorted(p1, key=lambda x: x.summed_dist)
            p1_tops = [matches.idx for matches in p1[0:k]]
            p1_dists = [matches.summed_dist for matches in p1[0:k]]
            # Removing results with too big of a distance.
            if p1_dists[0] > dst_thr:
                p1_tops = [-1]
            tops.append(p1_tops)
            dists.append(p1_dists)

    comparing_with_ground_truth(tops, qs_txt_infos, k)