def task2(self, k=10):
    """Match the QSD1-W3 query set against the museum DB with combo histograms.

    Builds denoised, LBP-textured, multi-resolution "combo" histograms for
    the bounding-boxed query images and plain ones for the database, then
    runs the top-k correspondence search against the ground truth.
    """
    query_set = HistDataset(
        self.QSD1_W3,
        method="combo",
        masking=False,
        bbox=True,
        multires=4,
        denoise=True,
        texture="LBP",
    )
    ground_truth = get_groundtruth("datasets/qsd1_w3/gt_corresps.pkl")
    print(f"Computing normalized histograms for {self.DDBB}")
    # Materialize the DB histograms up front; tqdm shows progress.
    db_hists = list(
        tqdm(
            HistDataset(
                self.DDBB,
                masking=False,
                method="combo",
                multires=4,
                texture="LBP",
            )
        )
    )
    print("Analyzing QS2")
    find_img_corresp(query_set, ground_truth, db_hists, k)
def test_poor_read(self):
    """Smoke-read samples 11000–11999 to confirm images load without error."""
    dataset = HistDataset(cfg.IMAGE_FOLDER, cfg.LABEL_FOLDER, cfg.LABEL_2_ID)
    loader = DataLoader(
        dataset,
        batch_size=1,
        num_workers=4,
        collate_fn=lambda batch: batch[0],
    )
    # Only the 11000..12000 window is exercised here.
    for sample in itertools.islice(loader, 11000, 12000):
        img = sample.image  # accessing .image forces the read
def task5(self):
    """Export text-box coordinates and mask PNGs for QSD1-W2, then score them.

    Writes the detected bounding coordinates to QSD1/text_boxes.pkl, dumps
    each predicted box mask to QSD1/boxes/<idx>.png, and prints the mean
    IoU against the ground-truth boxes.
    """
    # Detect the text box in every query image.
    detections = [
        text_removal.getpoints2(im)
        for im in text_removal.text_remover(self.QSD1_W2)
    ]
    boundingxys = [[det.boundingxy] for det in detections]
    with open("QSD1/text_boxes.pkl", "wb") as f:
        pickle.dump(boundingxys, f)
    # Dump the predicted box masks as PNGs.  TODO bbox
    QS1 = HistDataset("datasets/qsd1_w2", bbox=True, multires=2)
    predicted_masks = [QS1.get_mask(idx) for idx in range(len(QS1))]
    for i, mask in enumerate(predicted_masks):
        cv2.imwrite(f"QSD1/boxes/{i:05d}.png", mask)
    gt = np.asarray(
        get_groundtruth("datasets/qsd1_w2/text_boxes.pkl")
    ).squeeze()
    mean_IoU = get_mean_IoU(gt, boundingxys)
    print(f"Mean IoU: {mean_IoU}")
def test_run(cfg):
    """Evaluate the saved model on the full test split described by *cfg*."""
    dataset = HistDataset(
        cfg['CLASS_TYPE'],
        get_path(cfg['TEST_FOLDER']),
        get_path(cfg['LABEL_FOLDER']),
        split=[0, 0, 100],  # 100% of the data goes to the test split
        label2id=cfg['LABEL_2_ID'],
        randomize=cfg['INIT_RANDOMIZE'],
    )
    print("Prepping Dataset ...")
    test_loader = DataLoader(
        dataset.test,
        batch_size=cfg['BATCH_SIZE'],
        num_workers=cfg['NUM_WORKERS'],
        collate_fn=batchify,
    )
    model = torch.load(cfg['MODEL_SAVE_FILE'])
    test_a_epoch(
        "ultimate_test",
        test_loader,
        model,
        cfg['ULTI_RESULT_FILE'],
        cfg,
        0,
    )
def task6(self, k=10):
    """Retrieve top-k DB matches for the multi-painting QSD2-W3 query set.

    Computes masked, denoised, multi-resolution color+LBP histograms for
    the query set and plain ones for the database, keeps the best-k
    correspondences per query, dumps them to result_qst2.pkl, prints
    map@k, and terminates the process via exit().

    Fixes over the original:
    - the large block after exit() (secondary pickle dump, mask PNG
      export, text-box export) was unreachable dead code and is removed;
      runtime behavior is unchanged since exit() always fired first.
    - ``[h for h in tqdm(...)]`` simplified to ``list(tqdm(...))``.
    """
    QS = list(
        tqdm(
            MultiHistDataset(
                self.QSD2_W3,
                masking=True,
                bbox=True,
                multires=4,
                method="color",
                texture="LBP",
                denoise=True,
            )
        )
    )
    GT = get_pickle("datasets/qsd2_w3/gt_corresps.pkl")
    DB = list(
        tqdm(
            HistDataset(
                self.DDBB,
                masking=False,
                multires=4,
                method="color",
                texture="LBP",
            )
        )
    )
    tops = find_multi_img_corresp_keep(QS, DB, k)
    dump_pickle("result_qst2.pkl", tops)
    mapAtK = metrics.mapk(GT, tops, k)
    print("Map@k is " + str(mapAtK))
    exit()  # NOTE(review): hard process exit — confirm callers expect this
def test_label_conversion(self):
    """Check that every sample's label loads/converts without raising.

    Fix: the original built the DataLoader but never iterated it, so no
    sample was ever read and the label-conversion path never ran — the
    test passed vacuously.  Consuming the loader forces each sample to
    be decoded, so any conversion failure now surfaces as an exception.
    """
    d = HistDataset(cfg.IMAGE_FOLDER, cfg.LABEL_FOLDER)
    loader = DataLoader(
        d, batch_size=1, num_workers=4, collate_fn=lambda x: x[0]
    )
    for _sample in loader:
        pass
def test_collection(self):
    """Build a collection dataset and validate its folder and image sizes."""
    collection = HistDataset().collection(cfg.IMAGE_FOLDER, cfg.LABEL_FOLDER)
    self.__test_folder(collection)
    # Every sample must pass the per-image size check.
    for sample in collection:
        self.__test_image_size(sample)