def __getitem__(self, index):
    section_name = self.sections[index]
    direction, number = section_name.split(sep="_")

    if direction == "i":
        im = self.seismic[int(number), :, :, :]
        lbl = self.labels[int(number), :, :]
    elif direction == "x":
        im = self.seismic[:, :, int(number), :]
        lbl = self.labels[:, int(number), :]
        im = np.swapaxes(im, 0, 1)  # from WCH to CWH

    im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

    # dump images before augmentation
    if self.debug:
        outdir = f"debug/testSectionLoaderWithDepth_{self.split}_raw"
        generate_path(outdir)
        # dump only the first image channel (depth channels excluded); lbl is already 2D
        path_prefix = f"{outdir}/index_{index}_section_{section_name}"
        image_to_disk(im[0, :, :], path_prefix + "_img.png")
        mask_to_disk(lbl, path_prefix + "_lbl.png", self.n_classes)

    if self.augmentations is not None:
        im = _transform_CHW_to_HWC(im)
        augmented_dict = self.augmentations(image=im, mask=lbl)
        im, lbl = augmented_dict["image"], augmented_dict["mask"]
        im = _transform_HWC_to_CHW(im)

    if self.is_transform:
        im, lbl = self.transform(im, lbl)

    # dump images and labels to disk after augmentation
    if self.debug:
        outdir = (
            f"debug/testSectionLoaderWithDepth_{self.split}_"
            f"{'aug' if self.augmentations is not None else 'noaug'}"
        )
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{section_name}"
        image_to_disk(np.array(im[0, :, :]), path_prefix + "_img.png")
        mask_to_disk(np.array(lbl[0, :, :]), path_prefix + "_lbl.png", self.n_classes)

    return im, lbl
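# The _transform_* helpers used above are defined elsewhere in this module.
# A minimal sketch of the axis bookkeeping they imply -- an assumption about
# their behavior, not the repo's exact implementation:
import numpy as np

def _transform_WH_to_HW_sketch(arr):
    # swap the two trailing axes: (..., W, H) -> (..., H, W)
    return np.swapaxes(arr, -2, -1)

def _transform_CHW_to_HWC_sketch(arr):
    # move the channel axis last, the layout albumentations-style augmenters expect
    return np.moveaxis(arr, 0, -1)

def _transform_HWC_to_CHW_sketch(arr):
    # move the channel axis back to the front for PyTorch
    return np.moveaxis(arr, -1, 0)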
def __getitem__(self, index):
    patch_name = self.patches[index]
    direction, idx, xdx, ddx = patch_name.split(sep="_")

    # Shift offsets the padding that is added in training
    # shift = self.patch_size if "test" not in self.split else 0
    # Remember we are cancelling the shift since we no longer pad
    shift = 0
    idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift

    if direction == "i":
        im = self.seismic[idx, :, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size]
        lbl = self.labels[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size]
    elif direction == "x":
        im = self.seismic[idx : idx + self.patch_size, :, xdx, ddx : ddx + self.patch_size]
        lbl = self.labels[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size]
        im = np.swapaxes(im, 0, 1)  # from WCH to CWH

    im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

    if self.augmentations is not None:
        im = _transform_CHW_to_HWC(im)
        augmented_dict = self.augmentations(image=im, mask=lbl)
        im, lbl = augmented_dict["image"], augmented_dict["mask"]
        im = _transform_HWC_to_CHW(im)

    # dump images and labels to disk
    if self.debug:
        outdir = f"debug/patchLoaderWithSectionDepth_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{patch_name}"
        image_to_disk(im[0, :, :], path_prefix + "_img.png")
        mask_to_disk(lbl, path_prefix + "_lbl.png", self.n_classes)

    if self.is_transform:
        im, lbl = self.transform(im, lbl)

    return im, lbl
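# Patch names are assumed to encode a direction plus corner coordinates as
# "{direction}_{inline}_{crossline}_{depth}"; a quick, purely illustrative
# round-trip check of that format (the example values are hypothetical):
patch_name = "i_250_100_40"
direction, idx, xdx, ddx = patch_name.split(sep="_")
assert (direction, int(idx), int(xdx), int(ddx)) == ("i", 250, 100, 40)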
def __getitem__(self, index):
    patch_name = self.patches[index]
    direction, idx, xdx, ddx = patch_name.split(sep="_")

    # Shift offsets the padding that is added in training
    # shift = self.patch_size if "test" not in self.split else 0
    # Remember we are cancelling the shift since we no longer pad
    shift = 0
    idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift

    if direction == "i":
        im = self.seismic[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size]
        lbl = self.labels[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size]
    elif direction == "x":
        im = self.seismic[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size]
        lbl = self.labels[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size]

    im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

    # dump raw images before augmentation
    if self.debug:
        outdir = f"debug/patchLoader_{self.split}_raw"
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{patch_name}"
        image_to_disk(im, path_prefix + "_img.png")
        mask_to_disk(lbl, path_prefix + "_lbl.png", self.n_classes)

    if self.augmentations is not None:
        augmented_dict = self.augmentations(image=im, mask=lbl)
        im, lbl = augmented_dict["image"], augmented_dict["mask"]

    if self.is_transform:
        im, lbl = self.transform(im, lbl)

    # dump images and labels to disk after augmentation
    if self.debug:
        outdir = f"debug/patchLoader_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{patch_name}"
        image_to_disk(np.array(im[0, :, :]), path_prefix + "_img.png")
        mask_to_disk(np.array(lbl[0, :, :]), path_prefix + "_lbl.png", self.n_classes)

    return im, lbl
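# Example of wiring a loader like the one above to an albumentations
# pipeline, which returns the {"image": ..., "mask": ...} dict consumed in
# __getitem__. The loader class name and constructor arguments below are
# assumptions based on the attributes the method uses:
import albumentations as A

train_aug = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.GaussNoise(p=0.1),
])
# train_set = TrainPatchLoader(config, split="train", is_transform=True,
#                              augmentations=train_aug, debug=False)
# train_loader = data.DataLoader(train_set, batch_size=16, shuffle=True)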
def __getitem__(self, index):
    section_name = self.sections[index]
    direction, number = section_name.split(sep="_")

    if direction == "i":
        im = self.seismic[int(number), :, :]
        lbl = self.labels[int(number), :, :]
    elif direction == "x":
        im = self.seismic[:, int(number), :]
        lbl = self.labels[:, int(number), :]

    im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

    if self.debug and "test" in self.split:
        outdir = f"debug/sectionLoader_{self.split}_raw"
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{section_name}"
        image_to_disk(im, path_prefix + "_img.png")
        mask_to_disk(lbl, path_prefix + "_lbl.png", self.n_classes)

    if self.augmentations is not None:
        augmented_dict = self.augmentations(image=im, mask=lbl)
        im, lbl = augmented_dict["image"], augmented_dict["mask"]

    if self.is_transform:
        im, lbl = self.transform(im, lbl)

    if self.debug and "test" in self.split:
        outdir = f"debug/sectionLoader_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
        generate_path(outdir)
        path_prefix = f"{outdir}/index_{index}_section_{section_name}"
        image_to_disk(np.array(im[0]), path_prefix + "_img.png")
        mask_to_disk(np.array(lbl[0]), path_prefix + "_lbl.png", self.n_classes)

    return im, lbl
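# image_to_disk and mask_to_disk are debug utilities defined elsewhere in the
# repo. A minimal sketch of what such dumps could look like (assumed
# behavior -- normalize to 8-bit grayscale and write a PNG -- not the actual
# implementation; note some call sites pass explicit MIN/MAX bounds):
import numpy as np
from PIL import Image

def image_to_disk_sketch(arr, path, vmin=None, vmax=None):
    vmin = float(np.min(arr)) if vmin is None else vmin
    vmax = float(np.max(arr)) if vmax is None else vmax
    scaled = np.clip((arr - vmin) / max(vmax - vmin, 1e-8), 0.0, 1.0)
    Image.fromarray((scaled * 255).astype(np.uint8)).save(path)

def mask_to_disk_sketch(mask, path, n_classes):
    # spread class ids across the 8-bit range so classes are distinguishable
    scaled = mask.astype(np.float32) / max(n_classes - 1, 1) * 255
    Image.fromarray(scaled.astype(np.uint8)).save(path)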
def _evaluate_split(
    split,
    section_aug,
    model,
    pre_processing,
    output_processing,
    device,
    running_metrics_overall,
    config,
    data_flow,
    debug=False,
):
    logger = logging.getLogger(__name__)

    TestSectionLoader = get_test_loader(config)
    test_set = TestSectionLoader(config, split=split, is_transform=True, augmentations=section_aug, debug=debug)

    n_classes = test_set.n_classes

    if debug:
        data_flow[split] = dict()
        data_flow[split]["test_section_loader_length"] = len(test_set)
        data_flow[split]["test_input_shape"] = test_set.seismic.shape
        data_flow[split]["test_label_shape"] = test_set.labels.shape
        data_flow[split]["n_classes"] = n_classes

    test_loader = data.DataLoader(test_set, batch_size=1, num_workers=config.WORKERS, shuffle=False)

    if debug:
        data_flow[split]["test_loader_length"] = len(test_loader)
        logger.info("Running in Debug/Test mode")
        take_n = 2
        test_loader = take(take_n, test_loader)
        data_flow[split]["take_n_sections"] = take_n
        pred_list, gt_list, img_list = [], [], []

    try:
        output_dir = generate_path(
            f"{config.OUTPUT_DIR}/test/{split}", git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),
        )
    except Exception:  # fall back when git metadata is unavailable
        output_dir = generate_path(f"{config.OUTPUT_DIR}/test/{split}", config.MODEL.NAME, current_datetime())

    running_metrics_split = runningScore(n_classes)

    # evaluation mode:
    with torch.no_grad():  # operations inside don't track history
        model.eval()
        for i, (images, labels) in enumerate(test_loader):
            logger.info(f"split: {split}, section: {i}")
            outputs = _patch_label_2d(
                model,
                images,
                pre_processing,
                output_processing,
                config.TRAIN.PATCH_SIZE,
                config.TEST.TEST_STRIDE,
                config.VALIDATION.BATCH_SIZE_PER_GPU,
                device,
                n_classes,
                split,
                debug,
                config.DATASET.MIN,
                config.DATASET.MAX,
            )

            pred = outputs.detach().max(1)[1].numpy()
            gt = labels.numpy()
            if debug:
                pred_list.append((pred.shape, len(np.unique(pred))))
                gt_list.append((gt.shape, len(np.unique(gt))))
                img_list.append(images.numpy().shape)

            running_metrics_split.update(gt, pred)
            running_metrics_overall.update(gt, pred)

            # dump images to disk for review
            mask_to_disk(pred.squeeze(), os.path.join(output_dir, f"{i}_pred.png"), n_classes)
            mask_to_disk(gt.squeeze(), os.path.join(output_dir, f"{i}_gt.png"), n_classes)

    if debug:
        data_flow[split]["pred_shape"] = pred_list
        data_flow[split]["gt_shape"] = gt_list
        data_flow[split]["img_shape"] = img_list

    # get scores
    score, class_iou = running_metrics_split.get_scores()

    # Log split results
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    if debug:
        for cdx in range(n_classes):
            logger.info(f'  Class_{cdx}_accuracy {score["Class Accuracy: "][cdx]:.3f}')
    else:
        for cdx, class_name in enumerate(_CLASS_NAMES):
            logger.info(f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}')

    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')
    running_metrics_split.reset()
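# How the test entry point presumably drives _evaluate_split: one call per
# test split, sharing one overall metric accumulator. A sketch under assumed
# wiring (the split names and this helper itself are illustrative):
def _run_all_splits_sketch(splits, section_aug, model, pre_processing, output_processing, device, config, debug=False):
    running_metrics_overall = runningScore(config.DATASET.NUM_CLASSES)
    data_flow = dict()
    for split in splits:  # e.g. ["test1", "test2"] for the F3 block
        _evaluate_split(
            split, section_aug, model, pre_processing, output_processing,
            device, running_metrics_overall, config, data_flow, debug=debug,
        )
    # overall scores accumulated across every split evaluated above
    return running_metrics_overall.get_scores()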
def _patch_label_2d(
    model,
    img,
    pre_processing,
    output_processing,
    patch_size,
    stride,
    batch_size,
    device,
    num_classes,
    split,
    debug,
    MIN,
    MAX,
):
    """Processes a whole section"""
    img = torch.squeeze(img)
    h, w = img.shape[-2], img.shape[-1]  # height and width

    # Pad image with patch_size/2:
    ps = int(np.floor(patch_size / 2))  # pad size
    img_p = F.pad(img, pad=(ps, ps, ps, ps), mode="constant", value=0)
    output_p = torch.zeros([1, num_classes, h + 2 * ps, w + 2 * ps])

    # generate output:
    for batch_indexes in _generate_batches(h, w, ps, patch_size, stride, batch_size=batch_size):
        batch = torch.stack(
            [
                pipe(img_p, _extract_patch(hdx, wdx, ps, patch_size), pre_processing)
                for hdx, wdx in batch_indexes
            ],
            dim=0,
        )
        model_output = model(batch.to(device))
        for (hdx, wdx), output in zip(batch_indexes, model_output.detach().cpu()):
            output = output_processing(output)
            output_p[
                :, :, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size
            ] += output

        # dump the data right before it's being put into the model and after scoring
        if debug:
            outdir = f"debug/test/batch_{split}"
            generate_path(outdir)
            model_output = model_output.detach().cpu()
            for i in range(batch.shape[0]):
                path_prefix = f"{outdir}/{batch_indexes[i][0]}_{batch_indexes[i][1]}"
                # save input image:
                image_to_disk(np.array(batch[i, 0, :, :]), path_prefix + "_img.png", MIN, MAX)
                # dump model prediction:
                mask_to_disk(model_output[i, :, :, :].argmax(dim=0).numpy(), path_prefix + "_pred.png", num_classes)
                # dump per-class model confidence values:
                for nclass in range(num_classes):
                    image_to_disk(
                        model_output[i, nclass, :, :].numpy(), path_prefix + f"_class_{nclass}_conf.png", MIN, MAX
                    )

    # crop the padding off output_p to recover the original section size
    output = output_p[:, :, ps:-ps, ps:-ps]
    return output
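# _generate_batches and _extract_patch are helpers defined elsewhere. A
# plausible sketch of the sliding-window logic they stand for (assumed, not
# the repo's exact code): enumerate patch corners over the padded section in
# `stride` steps, then chunk the corners into model-sized batches.
import itertools
from toolz import partition_all

def _generate_batches_sketch(h, w, ps, patch_size, stride, batch_size=64):
    corners = itertools.product(
        range(0, h - patch_size + ps, stride),
        range(0, w - patch_size + ps, stride),
    )
    yield from partition_all(batch_size, corners)

def _extract_patch_sketch(hdx, wdx, ps, patch_size):
    # a closure usable inside toolz.pipe: slices one patch out of the padded
    # section, offset by the pad size ps
    def extract(img_p):
        return img_p[..., hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size]
    return extract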
def _evaluate_split(
    split, section_aug, model, pre_processing, output_processing, device, running_metrics_overall, config, debug=False,
):
    logger = logging.getLogger(__name__)

    TestSectionLoader = get_test_loader(config)
    test_set = TestSectionLoader(
        config.DATASET.ROOT,
        config.DATASET.NUM_CLASSES,
        split=split,
        is_transform=True,
        augmentations=section_aug,
        debug=debug,
    )

    n_classes = test_set.n_classes

    test_loader = data.DataLoader(test_set, batch_size=1, num_workers=config.WORKERS, shuffle=False)

    if debug:
        logger.info("Running in Debug/Test mode")
        test_loader = take(2, test_loader)

    try:
        output_dir = generate_path(
            f"debug/{config.OUTPUT_DIR}_test_{split}", git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),
        )
    except Exception:  # fall back when git metadata is unavailable
        output_dir = generate_path(f"debug/{config.OUTPUT_DIR}_test_{split}", config.MODEL.NAME, current_datetime())

    running_metrics_split = runningScore(n_classes)

    # evaluation mode:
    with torch.no_grad():  # operations inside don't track history
        model.eval()
        total_iteration = 0
        for i, (images, labels) in enumerate(test_loader):
            logger.info(f"split: {split}, section: {i}")
            total_iteration = total_iteration + 1
            outputs = _patch_label_2d(
                model,
                images,
                pre_processing,
                output_processing,
                config.TRAIN.PATCH_SIZE,
                config.TEST.TEST_STRIDE,
                config.VALIDATION.BATCH_SIZE_PER_GPU,
                device,
                n_classes,
                split,
                debug,
            )

            pred = outputs.detach().max(1)[1].numpy()
            gt = labels.numpy()
            running_metrics_split.update(gt, pred)
            running_metrics_overall.update(gt, pred)

            # dump images to disk for review
            mask_to_disk(pred.squeeze(), os.path.join(output_dir, f"{i}_pred.png"), n_classes)
            mask_to_disk(gt.squeeze(), os.path.join(output_dir, f"{i}_gt.png"), n_classes)

    # get scores
    score, class_iou = running_metrics_split.get_scores()

    # Log split results
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    if debug:
        for cdx in range(n_classes):
            logger.info(f'  Class_{cdx}_accuracy {score["Class Accuracy: "][cdx]:.3f}')
    else:
        for cdx, class_name in enumerate(_CLASS_NAMES):
            logger.info(f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}')

    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')
    running_metrics_split.reset()
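# runningScore comes from the repo's metrics module. A compact sketch of the
# standard confusion-matrix accumulator behind scores such as "Pixel Acc: "
# and "Mean IoU: " (assumed equivalent in spirit, not the repo's exact code):
import numpy as np

class RunningScoreSketch:
    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion = np.zeros((n_classes, n_classes), dtype=np.int64)

    def update(self, gts, preds):
        # accumulate a confusion matrix over a batch of label/prediction maps
        for gt, pred in zip(gts, preds):
            valid = (gt >= 0) & (gt < self.n_classes)
            idx = self.n_classes * gt[valid].astype(int) + pred[valid].astype(int)
            self.confusion += np.bincount(idx, minlength=self.n_classes ** 2).reshape(
                self.n_classes, self.n_classes
            )

    def get_scores(self):
        hist = self.confusion.astype(np.float64)
        pixel_acc = np.diag(hist).sum() / hist.sum()
        iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        return {"Pixel Acc: ": pixel_acc, "Mean IoU: ": np.nanmean(iou)}, dict(enumerate(iou))

    def reset(self):
        self.confusion.fill(0)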