def __call__(self, probs: Tensor, target: Tensor, bounds: Tensor) -> Tensor:
    """Generate a surrogate ('fake') mask for each image with the Pathak
    generator, then delegate the actual loss computation to the parent class,
    using that surrogate mask in place of the real target.
    """
    assert simplex(probs) and simplex(target) and sset(target, [0, 1])
    assert probs.shape == target.shape

    # The surrogate masks are fixed targets, not something to backprop through
    with torch.no_grad():
        fake_mask: Tensor = torch.zeros_like(probs)
        for i, (prob, tgt, bound) in enumerate(zip(probs, target, bounds)):
            fake_mask[i] = self.pathak_generator(prob, tgt, bound)
            # Overwritten each iteration — ends up holding the size of the
            # last generated mask of the batch, as in the original code
            self.holder_size = fake_mask[i].sum()

    return super().__call__(probs, fake_mask, bounds)
def __call__(self, box_targets: Tensor) -> List[Tuple[Tensor, Tensor]]:
    """Turn per-class binary masks into (masks, bounds) pairs, computed only
    for the classes listed in self.idc.
    """
    K, W, H = box_targets.shape
    assert sset(box_targets, [0, 1])

    # Skip non-selected classes (e.g. background) up front: extracting their
    # boxes only to discard them afterwards would destroy the memory
    per_class_boxes: List[List[BoxCoords]] = []
    for k in range(K):
        per_class_boxes.append(binary2boxcoords(box_targets[k]) if k in self.idc else [])

    results: List[Tuple[Tensor, Tensor]] = [
        boxcoords2masks_bounds(class_boxes, (W, H), self.d)
        for class_boxes in per_class_boxes
    ]

    # Some images won't have any bounding box at all, so an empty result is fine;
    # but make sure the classes outside self.idc never produce one
    for k in range(K):
        if k not in self.idc:
            assert results[k][0].shape[0] == 0

    return results
def runInference(args: argparse.Namespace, pred_folder: str):
    """Compare the predictions stored in `pred_folder` against the ground
    truth of `args.gt_folder`, printing per-class dice (per-image and
    per-batch) and foreground sizes averaged over the dataset.

    Args:
        args: expects .num_classes, .gt_folder, .grp_regex and .cpu.
        pred_folder: folder containing the predicted segmentations (*.png).
    """
    device = torch.device("cuda") if torch.cuda.is_available() and not args.cpu else torch.device("cpu")

    C: int = args.num_classes

    # Let's just reuse some code: load predictions and ground truth through
    # the same dataset machinery as training
    png_transform = transforms.Compose([
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: nd / 255,  # max <= 1
        lambda nd: torch.tensor(nd, dtype=torch.float32)
    ])
    gt_transform = transforms.Compose([
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: torch.tensor(nd, dtype=torch.int64),
        partial(class2one_hot, C=C),
        itemgetter(0)
    ])

    # Dummy bounds generators: the dataset interface requires them but the
    # bounds are unused for metric computation
    bounds_gen = [(lambda *a: torch.zeros(C, 1, 2)) for _ in range(2)]

    # First one is dummy
    folders: List[Path] = [Path(pred_folder), Path(pred_folder), Path(args.gt_folder)]
    names: List[str] = map_(lambda p: str(p.name), folders[0].glob("*.png"))
    are_hots = [False, True, True]

    dt_set = SliceDataset(names,
                          folders,
                          transforms=[png_transform, gt_transform, gt_transform],
                          debug=False,
                          C=C,
                          are_hots=are_hots,
                          in_memory=False,
                          bounds_generators=bounds_gen)
    sampler = PatientSampler(dt_set, args.grp_regex)
    loader = DataLoader(dt_set, batch_sampler=sampler, num_workers=11)

    total_iteration, total_images = len(loader), len(loader.dataset)
    metrics = {"all_dices": torch.zeros((total_images, C), dtype=torch.float64, device=device),
               "batch_dices": torch.zeros((total_iteration, C), dtype=torch.float64, device=device),
               "sizes": torch.zeros((total_images, 1), dtype=torch.float64, device=device)}

    desc = ">> Computing"  # plain string: there was nothing to interpolate
    tq_iter = tqdm_(enumerate(loader), total=total_iteration, desc=desc)
    done: int = 0
    for j, (_filenames, _, pred, gt, _) in tq_iter:
        B = len(pred)
        pred = pred.to(device)
        gt = gt.to(device)
        assert simplex(pred) and sset(pred, [0, 1])
        assert simplex(gt) and sset(gt, [0, 1])

        dices: Tensor = dice_coef(pred, gt)
        b_dices: Tensor = dice_batch(pred, gt)
        assert dices.shape == (B, C)
        assert b_dices.shape == (C,), b_dices.shape

        sm_slice = slice(done, done + B)  # Values only for current batch
        metrics["all_dices"][sm_slice, ...] = dices
        # Foreground size = pixel count of class 1 per image
        # NOTE(review): assumes gt is one-hot with shape (B, C, W, H) — consistent
        # with class2one_hot above, but confirm for multi-class (C > 2) setups
        metrics["sizes"][sm_slice, :] = torch.einsum("bwh->b", gt[:, 1, ...])[..., None]
        metrics["batch_dices"][j] = b_dices
        done += B

    print(f">>> {pred_folder}")
    for key, v in metrics.items():
        print(key, map_("{:.4f}".format, v.mean(dim=0)))
def runInference(args: argparse.Namespace):
    """Evaluate every 'iter*' prediction folder under `args.pred_root` against
    `args.gt_folder`, printing dice and Hausdorff metrics per epoch, then
    saving the accumulated metric tensors as .npy files in `args.save_folder`.

    Args:
        args: expects .num_classes, .pred_root, .gt_folder, .epochs,
              .do_only (optional epoch filter) and .save_folder.
    """
    # Deliberately CPU-only: per-slice metrics are cheap and this avoids
    # grabbing a GPU (the CUDA selection line was already commented out)
    device = torch.device("cpu")

    C: int = args.num_classes

    # Let's just reuse some code: load predictions and ground truth through
    # the same dataset machinery as training
    png_transform = transforms.Compose([
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: nd / 255,  # max <= 1
        lambda nd: torch.tensor(nd, dtype=torch.float32)
    ])
    gt_transform = transforms.Compose([
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: torch.tensor(nd, dtype=torch.int64),
        partial(class2one_hot, C=C),
        itemgetter(0)
    ])

    # Dummy bounds generators: required by the dataset interface, unused here
    bounds_gen = [(lambda *a: torch.zeros(C, 1, 2)) for _ in range(2)]

    metrics = None

    # sorted(...) accepts any iterable: no need to materialize a list first
    pred_folders = sorted(Path(args.pred_root).glob('iter*'))
    assert len(pred_folders) == args.epochs, (len(pred_folders), args.epochs)
    for epoch, pred_folder in enumerate(pred_folders):
        if args.do_only and epoch not in args.do_only:
            continue

        # First one is dummy:
        folders: List[Path] = [Path(pred_folder, 'val'), Path(pred_folder, 'val'), Path(args.gt_folder)]
        names: List[str] = map_(lambda p: str(p.name), folders[0].glob("*.png"))
        are_hots = [False, True, True]

        # Spacing information intentionally disabled (metrics in voxel units)
        spacing_dict = None

        dt_set = SliceDataset(names,
                              folders,
                              transforms=[png_transform, gt_transform, gt_transform],
                              debug=False,
                              C=C,
                              are_hots=are_hots,
                              in_memory=False,
                              spacing_dict=spacing_dict,
                              bounds_generators=bounds_gen,
                              quiet=True)
        loader = DataLoader(dt_set, num_workers=2)

        total_iteration, total_images = len(loader), len(loader.dataset)

        if metrics is None:  # explicit None check: allocate once, on the first processed epoch
            # NOTE(review): sized from the first processed epoch's image count;
            # assumes every 'iter*' folder holds the same number of slices — TODO confirm
            metrics = {"all_dices": torch.zeros((args.epochs, total_images, C), dtype=torch.float64, device=device),
                       "hausdorff": torch.zeros((args.epochs, total_images, C), dtype=torch.float64, device=device)}

        desc = ">> Computing"  # plain string: there was nothing to interpolate
        tq_iter = tqdm_(enumerate(loader), total=total_iteration, desc=desc)
        done: int = 0
        for _j, (_filenames, _, pred, gt, _) in tq_iter:
            B = len(pred)
            pred = pred.to(device)
            gt = gt.to(device)
            assert simplex(pred) and sset(pred, [0, 1])
            assert simplex(gt) and sset(gt, [0, 1])

            dices: Tensor = dice_coef(pred, gt)
            assert dices.shape == (B, C)

            haussdorf_res: Tensor = haussdorf(pred, gt)
            assert haussdorf_res.shape == (B, C)

            sm_slice = slice(done, done + B)  # Values only for current batch
            metrics["all_dices"][epoch, sm_slice, ...] = dices
            metrics["hausdorff"][epoch, sm_slice, ...] = haussdorf_res
            done += B

        for key, v in metrics.items():
            print(epoch, key, map_("{:.4f}".format, v[epoch].mean(dim=0)))

    if metrics is not None:
        savedir: Path = Path(args.save_folder)
        for k, e in metrics.items():
            np.save(Path(savedir, f"{k}.npy"), e.cpu().numpy())