Example #1
def test_load_volume():
    path = "../datasets/original/train/ct/volume-0.nii"
    vol_np = utils.load_volume(path)

    # Type
    msg = "Problem in utils.load_volume: Return type is {} and should be {}."
    assert isinstance(vol_np, np.ndarray), msg.format(type(vol_np), np.ndarray)
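All of the examples on this page exercise a load_volume helper whose implementation is not shown; the test above only pins down that, given a path to a NIfTI volume, it returns a numpy.ndarray. A minimal sketch satisfying that contract, assuming nibabel is available (hypothetical, not the repository's actual code):

import nibabel as nib
import numpy as np

def load_volume(path):
    """Load a .nii / .nii.gz volume and return its voxel data as a numpy array (sketch)."""
    img = nib.load(path)                 # lazy NIfTI image proxy
    return np.asarray(img.get_fdata())   # materialize voxels as a float ndarray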
Example #2
    def view_volume(self, idx, focus):
        """
        Function to view one or multiple volumes simultaniously.

            Arguments
            --------
                idx : int
                    Index of volume to view. Assumes there exists

        """
        ## Get volumes list based on path and index.
        assert isinstance(idx, int), "idx must be an integer."
        assert focus in self._focus_options, f"focus must be among: {self._focus_options}."
        paths = self.get_paths(idx, focus)
        volslist = [load_volume(p) for p in paths.values()]
        names = list(paths.keys())

        self._remove_keymap_conflicts({'j', 'k', 's'})

        # squeeze=False keeps `axes` indexable even when only one volume is shown.
        fig, axes = plt.subplots(ncols=len(volslist), squeeze=False)
        for i, (vol, name) in enumerate(zip(volslist, names)):
            ax_i = axes[0, i]
            ax_i.volume = vol
            ax_i.index = vol.shape[0] // 2  # start at the middle slice
            ax_i.imshow(vol[ax_i.index], cmap='magma')
            ax_i.set_title(name)
        fig.canvas.mpl_connect('key_press_event', self._process_key)
        plt.subplots_adjust(hspace=0.1, wspace=0.1)
        plt.show()
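self._remove_keymap_conflicts and self._process_key are not part of this excerpt. The handler presumably implements the usual matplotlib slice-scrolling pattern, where 'j'/'k' step through the volume attached to each axis; a standalone sketch under that assumption:

def process_key(event):
    """Sketch of a slice-scrolling key handler like the _process_key bound above."""
    fig = event.canvas.figure
    for ax in fig.axes:
        if not hasattr(ax, 'volume'):
            continue
        if event.key == 'j':
            ax.index = (ax.index - 1) % ax.volume.shape[0]
        elif event.key == 'k':
            ax.index = (ax.index + 1) % ax.volume.shape[0]
        else:
            return
        ax.images[0].set_array(ax.volume[ax.index])  # display the new slice
    fig.canvas.draw()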
Example #3
def main():
    args = parse_args()

    vol = load_volume(args.input, stop=args.depth, ext='.bmp')

    with h5py.File('downsampled_histogram.h5', 'r') as f:
        hist = f['hist'][:]
    hist = np.cumsum(hist)
    vol = np.floor(255 * hist[vol])
    vol[vol > 255] = 255
    vol[vol < 0] = 0
    vol = vol.astype(np.uint8)

    if not os.path.isdir(args.output):
        os.makedirs(args.output)

    sums, counts = summate(integral_image(vol), np.asarray(args.window))

    for t in np.arange(0, 1, 0.01):
        seg = threshold_bradley_nd(vol,
                                   t=t,
                                   s=np.asarray(args.window),
                                   sums=sums,
                                   counts=counts)
        dirname = os.path.join(args.output, '{0:.2f}'.format(t))
        save_imgs(seg * 255, dirname)
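The hist dataset loaded from downsampled_histogram.h5 is turned into a cumulative histogram and used as a per-voxel lookup table, i.e. histogram equalization; for 255 * hist[vol] to land near [0, 255], the stored histogram must already be normalized to sum to 1. A self-contained version of the same remapping, building the (here hypothetical) histogram from the volume itself:

import numpy as np

def equalize_via_cdf(vol):
    """Remap uint8 intensities through their normalized cumulative histogram (sketch)."""
    hist, _ = np.histogram(vol, bins=256, range=(0, 256))
    cdf = np.cumsum(hist) / vol.size                     # CDF in [0, 1]
    lut = np.clip(np.floor(255 * cdf), 0, 255).astype(np.uint8)
    return lut[vol]                                      # same idea as hist[vol] above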
Example #4
def test_store():
    path1 = "../datasets/preprocessed_2d/train/slices/slice_00001.npy"
    path2 = "../datasets/preprocessed_2d/train/slices/slice_00002.npy"
    slice1 = torch.tensor(np.load(path1)).unsqueeze(dim=0)
    slice2 = torch.tensor(np.load(path2)).unsqueeze(dim=0)
    batch1 = torch.cat([slice1, slice2], dim=0)

    path1 = "../datasets/original/train/ct/volume-0.nii"
    path2 = "../datasets/original/train/ct/volume-1.nii"
    vol1 = torch.tensor(utils.load_volume(path1))[None, 0:20]
    vol2 = torch.tensor(utils.load_volume(path2))[None, 0:20]
    batch2 = torch.cat([vol1, vol2], dim=0)

    dst = "deleteme/"
    if not os.path.exists(dst):
        os.mkdir(dst)
    utils.store(batch1, dst, format='npy')
    utils.store(batch1, dst, format='nii')
    utils.store(batch2, dst, format='npy')
    utils.store(batch2, dst, format='nii')
    shutil.rmtree(dst)
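utils.store is only exercised here, not shown. A loudly hypothetical sketch of a writer with the same interface, using numpy for 'npy' and nibabel for 'nii' output (the file naming and the identity affine are assumptions):

import os
import numpy as np
import nibabel as nib

def store(batch, dst, format='npy'):
    """Write each item of a batch to dst as .npy or .nii files (hypothetical sketch)."""
    for i, item in enumerate(batch):
        arr = item.detach().cpu().numpy() if hasattr(item, 'detach') else np.asarray(item)
        out = os.path.join(dst, 'item_{:05d}.{}'.format(i, format))
        if format == 'npy':
            np.save(out, arr)
        elif format == 'nii':
            nib.save(nib.Nifti1Image(arr.astype(np.float32), affine=np.eye(4)), out)
        else:
            raise ValueError("Unknown format: {}".format(format))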
Example #5
def main():
    args = parse_args()

    start = time.time()

    print("Loading image volume")
    vol = load_volume(args.input)

    if args.show:
        plt.imshow(vol[0], cmap='Greys_r')
        plt.show()

    print("Prepping threshold subvolume shape")
    if len(args.shape) < len(vol.shape):
        shape = list(vol.shape[:len(vol.shape) - len(args.shape)])
        shape.extend(args.shape)
    else:
        shape = args.shape

    if len(args.step) < len(shape):
        step = list(vol.shape[:len(vol.shape) - len(args.step)])
        step.extend(args.step)
    else:
        step = args.step

    print("Thresholding subvolumes")
    step = args.step

    thresh = np.zeros(vol.shape)
    for i in range(0, vol.shape[0], step[0] if step else vol.shape[0]):
        endi = i + shape[0] if i + shape[0] < vol.shape[0] else vol.shape[0]
        for j in range(0, vol.shape[1], step[1] if step else vol.shape[1]):
            endj = j + shape[1] if j + shape[1] < vol.shape[1] else vol.shape[1]
            for k in range(0, vol.shape[2], step[2] if step else vol.shape[2]):
                endk = k + shape[2] if k + shape[2] < vol.shape[
                    2] else vol.shape[2]
                subvol = np.copy(vol[i:endi, j:endj, k:endk])
                subvol = threshold_bradley_nd(subvol,
                                              s=(4, shape[1], shape[2]),
                                              t=args.threshold)
                subvol = np.abs(1 - subvol) if np.max(subvol) > 0 else subvol
                subvol = binary_opening(subvol)
                subvol = binary_fill_holes(subvol)
                subvol[subvol > 0] = 1
                thresh[i:endi, j:endj, k:endk] += subvol

    if args.show:
        plt.imshow(thresh[0], cmap='Greys_r')
        plt.show()
    thresh[thresh > 0] = 255
    print("Saving segmentation")
    save_imgs(thresh, args.output)
    print("Running Time: {}s".format(time.time() - start))
Example #6
    def __getitem__(self, idx):
        """
            Returns
            -------
                sample : dict
                    'vol': torch.tensor of input volumetric image
                    'lab': torch.tensor of input volumetric image
                idx : int
                    Index of image as stored in files.
        """
        vol = load_volume(os.path.join(self.volpath, self.volnames[idx]))
        vol = vol[None, ...]
        lab = load_volume(os.path.join(self.labpath, self.labnames[idx]))

        vol = torch.tensor(vol, dtype=torch.float32)
        lab = torch.tensor(lab, dtype=torch.float32)

        sample = {'vol': vol, 'lab': lab}

        if self.transform:
            sample = self.transform(sample)  # keep the transformed sample
        return sample
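A __getitem__ that returns a dict of tensors like this works directly with torch.utils.data.DataLoader, whose default collation stacks each key across the batch. A tiny self-contained usage sketch with a stand-in dataset (the class and shapes below are illustrative, not the repository's):

import torch
from torch.utils.data import Dataset, DataLoader

class ToyVolumeDataset(Dataset):
    """Stand-in dataset with the same dict-style __getitem__ contract as above."""
    def __init__(self, n=4, shape=(8, 16, 16)):
        self.vols = [torch.rand(1, *shape) for _ in range(n)]
        self.labs = [torch.randint(0, 2, shape).float() for _ in range(n)]

    def __len__(self):
        return len(self.vols)

    def __getitem__(self, idx):
        return {'vol': self.vols[idx], 'lab': self.labs[idx]}

loader = DataLoader(ToyVolumeDataset(), batch_size=2, shuffle=True)
for batch in loader:
    print(batch['vol'].shape, batch['lab'].shape)  # (2, 1, 8, 16, 16) and (2, 8, 16, 16)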
Example #7
def calculate_metrics(prediction_volume,
                      gt_path,
                      classification_threshold=0.5):
    """helper function for metric calculation"""
    if gt_path is not None:
        logging.info(
            "calculating performance metrics against test dataset {}".format(
                str(gt_path)))
        ground_truth = load_volume(gt_path)
        metrics = PerformanceMetrics(
            y_true=ground_truth,
            y_pred=prediction_volume[:, :, :, 0],
            thr=classification_threshold,
        )
        performance_metrics_dict = metrics.measure_dict
    else:
        performance_metrics_dict = None

    return performance_metrics_dict
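PerformanceMetrics is not shown in this excerpt. For illustration only, crisp overlap metrics computed from a thresholded prediction typically reduce to something like the sketch below (the function name and metric set are assumptions):

import numpy as np

def crisp_overlap_metrics(y_true, y_pred, thr=0.5):
    """Dice and Jaccard of a binarized prediction against binary ground truth (sketch)."""
    t = np.asarray(y_true) > 0
    p = np.asarray(y_pred) >= thr
    inter = np.logical_and(t, p).sum()
    union = np.logical_or(t, p).sum()
    dice = 2.0 * inter / (t.sum() + p.sum() + 1e-7)
    jaccard = inter / (union + 1e-7)
    return {'dice': float(dice), 'jaccard': float(jaccard)}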
Example #8
def main():
    args = parse_args()
    images = args.input
    vol = load_volume(images, ext='.bmp')

    with h5py.File('downsampled_histogram.h5', 'r') as f:
        hist = f['hist'][:]
        hist = np.cumsum(hist)

    vol = np.floor(255 * hist[vol])
    vol[vol > 255] = 255
    vol[vol < 0] = 0
    vol = vol.astype(np.uint8)

    sel = ndi.generate_binary_structure(3, 1)
    sel[0] = 0
    sel[2] = 0

    subject = os.path.basename(args.input)
    print(subject)
    params = {}

    if args.recover_parameters:
        with open(args.parameter_file, 'r') as f:
            params = json.load(f)
            t_iris = params[subject]['t_iris']
            t_pupil = params[subject]['t_pupil']
            window_iris = tuple(params[subject]['window_iris'])
            window_pupil = tuple(params[subject]['window_pupil'])

    if args.t_iris is not None:
        t_iris = args.t_iris

    if args.t_pupil is not None:
        t_pupil = args.t_pupil

    if args.window_iris is not None:
        window_iris = tuple(args.window_iris)

    if args.window_pupil is not None:
        window_pupil = tuple(args.window_pupil)

    depth = args.depth
    radius = int(min(vol.shape[1], vol.shape[2]) / 4)

    if args.save_iris:
        iris_seg = np.zeros(vol.shape, dtype=np.uint8)
    if args.save_pupil:
        pupil_seg = np.zeros(vol.shape, dtype=np.uint8)
    seg = np.zeros(vol.shape, dtype=np.uint8)

    sums, counts = None, None

    for i in range(0, vol.shape[0], depth):
        subvol = np.copy(vol[i:i + depth])
        orig_shape = subvol.shape
        if subvol.shape[0] < depth:
            # Pad the last chunk by repeating its final slice up to `depth`.
            pad = np.repeat(
                subvol[-1].reshape(1, subvol.shape[1], subvol.shape[2]),
                depth - subvol.shape[0],
                axis=0)
            subvol = np.concatenate([subvol, pad], axis=0)

        if all([
                window_iris[j] == window_pupil[j]
                for j in range(len(window_iris))
        ]):
            sums, counts = summate(integral_image(subvol),
                                   np.asarray(window_iris))

        # Iris Segmentation
        iris = 1.0 - threshold_bradley_nd(
            subvol, t=t_iris, s=window_iris, sums=sums, counts=counts)
        iris = ndi.binary_fill_holes(iris, structure=sel)

        # Pupil Segmentation
        pupil = 1.0 - threshold_bradley_nd(
            subvol, t=t_pupil, s=window_pupil, sums=sums, counts=counts)
        pupil = ndi.binary_fill_holes(pupil, structure=sel)
        pupil = ndi.binary_erosion(pupil, structure=sel)
        pupil = ndi.binary_dilation(pupil, structure=sel)
        pupil = ndi.binary_dilation(pupil, structure=sel).astype(np.uint8)
        pupil_collapsed = (np.sum(pupil, axis=0) > 1).astype(np.uint8)
        pupil_collapsed = remove_small_objects(label(pupil_collapsed),
                                               min_size=200).astype(np.uint8)
        circle_mask = np.zeros(pupil_collapsed.shape, dtype=np.uint8)

        try:
            objs = regionprops(label(pupil_collapsed),
                               intensity_image=np.mean(subvol, axis=0).astype(
                                   np.uint8))
            for obj in objs:
                # `and` binds tighter than `or`: large-but-unsolid objects OR
                # degenerate ones (zero inertia eigenvalue sum) are discarded.
                if ((obj.convex_area > 1000 and obj.solidity < 0.5)
                        or np.sum(obj.inertia_tensor_eigvals) == 0):
                    pupil_collapsed[obj.coords[:, 0], obj.coords[:, 1]] = 0

            pupil_idx = np.argmax([
                o.area * np.abs(o.orientation) * o.solidity /
                (o.eccentricity + 1e-7) /
                (o.inertia_tensor_eigvals[0] - o.inertia_tensor_eigvals[1])
                for o in objs
            ])
            pupil_obj = objs[pupil_idx]
            circle_coords = circle(pupil_obj.centroid[0],
                                   pupil_obj.centroid[1],
                                   radius,
                                   shape=pupil_collapsed.shape)
            circle_mask[circle_coords] = 1
        except (ValueError, IndexError):
            pass

        pupil = np.logical_and(
            pupil,
            np.repeat(pupil_collapsed.reshape((1, ) + pupil_collapsed.shape),
                      pupil.shape[0],
                      axis=0))

        # Final Segmentation
        final = np.logical_xor(iris, pupil).astype(np.uint8)
        final = ndi.binary_dilation(final, structure=sel)
        final[:, circle_mask == 0] = 0

        # Save it
        seg[i:i + depth] = final[:orig_shape[0]]
        if args.save_iris:
            iris_seg[i:i + depth] += iris[:orig_shape[0]]
        if args.save_pupil:
            pupil_seg[i:i + depth] += pupil[:orig_shape[0]]

    seg[:, np.sum(seg, axis=0) < 20] = 0
    seg = ndi.binary_erosion(seg, structure=sel)
    seg = ndi.binary_erosion(seg, structure=sel).astype(np.uint8)

    outdir = os.path.join(args.output, subject)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    seg[seg.nonzero()] = 255
    save_imgs(seg, outdir, prefix=subject)

    if args.save_iris:
        outdir = os.path.join(args.output, 'iris', subject)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        iris_seg[iris_seg.nonzero()] = 255
        save_imgs(iris_seg, outdir)

    if args.save_pupil:
        outdir = os.path.join(args.output, 'pupil', subject)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        pupil_seg[pupil_seg.nonzero()] = 255
        save_imgs(pupil_seg, outdir)

    shutil.copy(args.parameter_file, args.parameter_file + '.bak')

    with open(args.parameter_file, 'w') as f:
        params[subject] = {
            't_iris': t_iris,
            't_pupil': t_pupil,
            'window_iris': window_iris,
            'window_pupil': window_pupil,
        }
        json.dump(params, f)
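threshold_bradley_nd (used here and in Examples #3 and #5, optionally with precomputed integral-image sums/counts) is not shown. Its name points at Bradley-style adaptive thresholding: each voxel is compared against a fraction of its local window mean. A compact sketch of that criterion using scipy.ndimage.uniform_filter in place of the integral-image bookkeeping; the output polarity of the real function may differ (the code above inverts it with 1.0 - ...):

import numpy as np
from scipy import ndimage as ndi

def bradley_threshold(vol, window, t=0.15):
    """Local-mean (Bradley-style) threshold: 1 where voxel >= (1 - t) * window mean (sketch)."""
    local_mean = ndi.uniform_filter(vol.astype(np.float64), size=window)
    return (vol >= local_mean * (1.0 - t)).astype(np.uint8)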
Example #9
def main():
    parser = ArgumentParser()

    parser.add_argument(
        "-i",
        "--img",
        action="store",
        type=str,
        dest="img_path_str",
        default="test_frames.tif",
        help="Input images path",
    )

    parser.add_argument(
        "-o",
        "--output",
        action="store",
        type=str,
        dest="out_path_str",
        default="out",
        help="Output path",
    )

    parser.add_argument(
        "-m",
        "--model",
        action="store",
        type=str,
        dest="model_path_str",
        default="out/model.hdf5",
        help="Model location",
    )

    parser.add_argument(
        "-g",
        "--groundtruth",
        action="store",
        type=str,
        dest="gt_path_str",
        default=None,
        help="Ground truth path, optional",
    )

    parser.add_argument(
        "--thr",
        action="store",
        type=float,
        dest="threshold",
        default=0.5,
        help="Threshold for crisp performance metrics evaluation, optional",
    )

    parser.add_argument(
        "-b",
        "--batch_size",
        action="store",
        type=int,
        dest="batch_size",
        default=60,
        help="Batch size",
    )

    parser.add_argument(
        "-t",
        "--temp",
        action="store",
        type=str,
        dest="tmp_path_str",
        default="tmp",
        help="temp directory path, optional",
    )

    parser.add_argument(
        "--descriptorpath",
        action="store",
        type=str,
        dest="descriptor_path",
        help="RunDescriptor file path",
    )

    parser.add_argument("--notes",
                        action="store",
                        type=str,
                        dest="notes",
                        help="Notes for RunDescriptor")

    # Loading args from file
    parser.add_argument("--file", type=open, action=LoadArgsFromFile)

    # Option parsing
    args = parser.parse_args()

    # Path definitions
    model_path = Path(args.model_path_str)
    img_path = Path(args.img_path_str)
    out_path = Path(args.out_path_str)
    tmp_path = Path(args.tmp_path_str)
    gt_path = Path(args.gt_path_str) if args.gt_path_str is not None else None

    # Output directories
    out_path.mkdir(exist_ok=True, parents=True)

    if args.descriptor_path is not None:
        descriptor_path = Path(args.descriptor_path)
    else:
        descriptor_path = out_path

    # Logs directory
    logs_path = out_path.joinpath("logs")
    logs_path.mkdir(exist_ok=True, parents=True)
    logfile_path = logs_path.joinpath("logging_log.log")
    fh = logging.FileHandler(str(logfile_path))
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # Custom metrics
    custom_objects = {
        "jaccard_index": jaccard_index,
        "dice_coefficient": dice_coefficient
    }

    keras_model = load_model(str(model_path), custom_objects=custom_objects)

    # Data loading
    in_volume = load_volume(img_path, expand_dims=False)

    # Prediction and Reconstruction
    predictions = TiledPredictor(input_volume=in_volume,
                                 batch_size=args.batch_size,
                                 tmp_path=tmp_path,
                                 num_rotations=0,
                                 model=keras_model)

    # Saving results
    save_volume(
        volume=predictions.out_volume[:, :, :, 0],
        out_path=out_path,
        filename=img_path.stem,
    )

    # Calculating performances on test dataset
    performance_metrics_dict = calculate_metrics(
        predictions.out_volume,
        gt_path=gt_path,
        classification_threshold=args.threshold)

    # Run descriptor output
    RunDescriptor(
        descriptor_dir_path=descriptor_path,
        entry_type="predict",
        model_type="2d",
        model_path=model_path,
        log_dir_path=logs_path,
        input_data_path=img_path,
        script_options=vars(args),
        git_repo=git.Repo(".."),
        notes=args.notes,
        predictions_path=out_path,
        ground_truth_path=gt_path,
        performance_metrics_dict=performance_metrics_dict,
    )
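LoadArgsFromFile, registered above with type=open for the --file option, is not defined in this excerpt. One common way to implement such an argparse action is sketched below (an assumption, not necessarily the project's class):

import argparse

class LoadArgsFromFile(argparse.Action):
    """Parse additional command-line options listed (whitespace-separated) in a file."""
    def __call__(self, parser, namespace, values, option_string=None):
        with values as f:            # `type=open` already handed us an open file object
            extra_args = f.read().split()
        # Feed the file contents back through the same parser into the same namespace.
        parser.parse_args(extra_args, namespace)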