Example #1
def _multi_view_predict_on(image_pair, image_pair_loader, model,
                           views, hparams, results, per_view_results,
                           out_dir, args):
    import numpy as np
    from mpunet.utils.fusion import predict_volume, map_real_space_pred
    from mpunet.interpolation.sample_grid import get_voxel_grid_real_space

    # Set up the image_pair_loader with only the given image pair
    image_pair_loader.images = [image_pair]
    n_classes = hparams["build"]["n_classes"]

    # Load views
    kwargs = hparams["fit"]
    kwargs.update(hparams["build"])
    seq = image_pair_loader.get_sequencer(views=views, **kwargs)

    # Get voxel grid in real space
    voxel_grid_real_space = get_voxel_grid_real_space(image_pair)

    # Prepare tensor to store combined prediction
    d = image_pair.image.shape[:-1]
    combined = np.empty(
        shape=(len(views), d[0], d[1], d[2], n_classes),
        dtype=np.float32
    )
    print("Predicting on brain hyper-volume of shape:", combined.shape)

    # Predict for each view
    for n_view, view in enumerate(views):
        print("\n[*] (%i/%i) View: %s" % (n_view + 1, len(views), view))
        # for each view, predict on all voxels and map the predictions
        # back into the original coordinate system

        # Sample planes from the image on a regular grid in real
        # space (scanner RAS) coordinates
        X, y, grid, inv_basis = seq.get_view_from(image_pair.id, view,
                                                  n_planes="same+20")

        # Predict on volume using model
        pred = predict_volume(model, X, axis=2, batch_size=seq.batch_size)

        # Map the real space coordinate predictions to the nearest
        # real space coordinates defined on the voxel grid
        mapped_pred = map_real_space_pred(pred, grid, inv_basis,
                                          voxel_grid_real_space,
                                          method="nearest")
        combined[n_view] = mapped_pred

        if not args.no_eval:
            _per_view_evaluation(image_id=image_pair.id,
                                 pred=pred,
                                 true=y,
                                 mapped_pred=mapped_pred,
                                 mapped_true=image_pair.labels,
                                 view=view,
                                 n_classes=n_classes,
                                 results=results,
                                 per_view_results=per_view_results,
                                 out_dir=out_dir,
                                 args=args)
    return combined
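
The 'combined' tensor returned above holds one softmax volume per view. A minimal sketch of reducing it to a single segmentation, assuming a plain mean over views (instead of mpunet's trained fusion model) and that the ImagePair exposes its nibabel affine as '.affine'; 'save_path' is illustrative:

import numpy as np
import nibabel as nib

def fuse_by_mean(combined, image_pair, save_path):
    # 'combined' has shape (n_views, dim1, dim2, dim3, n_classes)
    mean_softmax = combined.mean(axis=0)                # average over views
    segmentation = mean_softmax.argmax(-1).astype(np.uint8)
    # Save with the affine of the source image so the segmentation
    # aligns with it in real (scanner) space
    nib.save(nib.Nifti1Image(segmentation, image_pair.affine), save_path)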
Example #2
def predict_and_map(model,
                    seq,
                    image,
                    view,
                    batch_size=None,
                    voxel_grid_real_space=None,
                    targets=None,
                    eval_prob=1.0,
                    n_planes='same+20'):
    """


    Args:
        model:
        seq:
        image:
        view:
        batch_size:
        voxel_grid_real_space:
        targets:
        n_planes:

    Returns:

    """

    import numpy as np

    # Sample planes from the image on a regular grid in real
    # space (scanner RAS) coordinates
    X, y, grid, inv_basis = seq.get_view_from(image.id,
                                              view,
                                              n_planes=n_planes)

    # Predict on volume using model
    bs = seq.batch_size if batch_size is None else batch_size
    from mpunet.utils.fusion import predict_volume, map_real_space_pred
    pred = predict_volume(model, X, axis=2, batch_size=bs)

    # Map the real space coordinate predictions to the nearest
    # real space coordinates defined on the voxel grid
    if voxel_grid_real_space is None:
        from mpunet.interpolation.sample_grid import get_voxel_grid_real_space
        voxel_grid_real_space = get_voxel_grid_real_space(image)

    # Map the predicted volume to real space
    mapped = map_real_space_pred(pred, grid, inv_basis, voxel_grid_real_space)

    # Print dice scores ('dice_all' is assumed imported at module level,
    # e.g. from mpunet's evaluation metrics)
    if targets is not None and np.random.rand(1)[0] <= eval_prob:
        print("Computing evaluations...")
        print("View dice scores:   ",
              dice_all(y, pred.argmax(-1), ignore_zero=False))
        print(
            "Mapped dice scores: ",
            dice_all(targets,
                     mapped.argmax(-1).reshape(-1, 1),
                     ignore_zero=False))
    else:
        print("-- Skipping evaluation")

    return mapped
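
A hedged usage sketch for predict_and_map, assuming a trained 'model', a sequencer 'seq' and a loaded ImagePair 'image' are already prepared (e.g. as in Example #1); the view vector below is illustrative:

import numpy as np

view = np.array([0.0, 0.0, 1.0])   # a single (axial-like) view vector
mapped = predict_and_map(model=model,
                         seq=seq,
                         image=image,
                         view=view,
                         targets=image.labels.reshape(-1, 1),
                         eval_prob=0.5)   # evaluate on roughly half the calls
segmentation = mapped.argmax(-1)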
Example #3
def _run_fusion_training(sets, logger, hparams, min_val_images, is_validation,
                         views, n_classes, unet, fusion_model, early_stopping,
                         fm_batch_size, epochs, eval_prob,
                         fusion_weights_path):
    """
    TODO
    """

    for _round, _set in enumerate(sets):
        s = "Set %i/%i:\n%s" % (_round + 1, len(sets), _set)
        logger("\n%s" % highlighted(s))

        # Reload data
        images = ImagePairLoader(**hparams["val_data"])
        if len(images) < min_val_images:
            images.add_images(ImagePairLoader(**hparams["train_data"]))

        # Get list of ImagePair objects to run on
        image_set_dict = {
            m.identifier: m
            for m in images if m.identifier in _set
        }

        # Set scaler and bg values
        images.set_scaler_and_bg_values(
            bg_value=hparams.get_from_anywhere('bg_value'),
            scaler=hparams.get_from_anywhere('scaler'),
            compute_now=False)

        # Init LazyQueue and get its sequencer
        from mpunet.sequences.utils import get_sequence
        seq = get_sequence(data_queue=images,
                           is_validation=True,
                           views=views,
                           **hparams["fit"],
                           **hparams["build"])

        # Fetch points from the set images
        points_collection = []
        targets_collection = []
        N_im = len(image_set_dict)
        for num_im, image_id in enumerate(list(image_set_dict.keys())):
            logger("")
            logger(
                highlighted("(%i/%i) Running on %s (%s)" %
                            (num_im + 1, N_im, image_id,
                             "val" if is_validation[image_id] else "train")))

            with seq.image_pair_queue.get_image_by_id(image_id) as image:
                # Get voxel grid in real space
                voxel_grid_real_space = get_voxel_grid_real_space(image)

                # Get array to store predictions across all views
                targets = image.labels.reshape(-1, 1)
                points = np.empty(shape=(len(targets), len(views), n_classes),
                                  dtype=np.float32)
                points.fill(np.nan)

                # Predict on all views
                for k, v in enumerate(views):
                    print("\n%s" % "View: %s" % v)
                    points[:, k, :] = predict_and_map(
                        model=unet,
                        seq=seq,
                        image=image,
                        view=v,
                        voxel_grid_real_space=voxel_grid_real_space,
                        n_planes='same+20',
                        targets=targets,
                        eval_prob=eval_prob).reshape(-1, n_classes)

                # add to collections
                points_collection.append(points)
                targets_collection.append(targets)
            # Debug check: the queue should have released the image here
            print(image.is_loaded)

        # Stack points into one matrix
        logger("Stacking points...")
        X, y = stack_collections(points_collection, targets_collection)

        # Shuffle train
        print("Shuffling points...")
        X, y = shuffle(X, y)

        print("Getting validation set...")
        val_ind = int(0.20 * X.shape[0])
        X_val, y_val = X[:val_ind], y[:val_ind]
        X, y = X[val_ind:], y[val_ind:]

        # Prepare dice score callback for validation data
        val_cb = ValDiceScores((X_val, y_val), n_classes, 50000, logger)

        # Callbacks
        cbs = [
            val_cb,
            CSVLogger(filename="logs/fusion_training.csv",
                      separator=",",
                      append=True),
            PrintLayerWeights(fusion_model.layers[-1],
                              every=1,
                              first=1000,
                              per_epoch=True,
                              logger=logger)
        ]

        es = EarlyStopping(monitor='val_dice',
                           min_delta=0.0,
                           patience=early_stopping,
                           verbose=1,
                           mode='max')
        cbs.append(es)

        # Start training
        try:
            fusion_model.fit(X,
                             y,
                             batch_size=fm_batch_size,
                             epochs=epochs,
                             callbacks=cbs,
                             verbose=1)
        except KeyboardInterrupt:
            pass
        fusion_model.save_weights(fusion_weights_path)
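
Once trained, the fusion model maps each voxel's (n_views, n_classes) probability block to a single class distribution. A minimal sketch of applying it to the 'points' matrix of one image, assuming the point ordering follows the voxel grid used above:

# 'points' has shape (n_voxels, n_views, n_classes) as built in the loop
fused = fusion_model.predict(points, batch_size=10**4, verbose=1)

# Reshape the per-voxel distributions back onto the image grid
dim1, dim2, dim3 = image.image.shape[:-1]
segmentation = fused.reshape(dim1, dim2, dim3, n_classes).argmax(-1)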
Example #4
def predict_single(image, model, hparams, verbose=1):
    """
    A generic prediction function that sets up an ImagePairLoader object for
    the given image, prepares the image and predicts.

    Note that this function should only be used for convenience in scripts
    that work on single images at a time anyway, as batch-preparing the
    entire ImagePairLoader object prior to prediction is faster.

    NOTE: Only works with the iso_live interpolation modes at this time
    """
    mode = hparams["fit"]["intrp_style"].lower()
    assert mode in ("iso_live", "iso_live_3d")

    # Prepare image for prediction
    kwargs = hparams["fit"]
    kwargs.update(hparams["build"])

    # Remember the current verbosity so it can be restored later
    verb_mem = kwargs["verbose"]
    kwargs["verbose"] = verbose

    # Create an ImagePairLoader with only the given image
    from mpunet.image import ImagePairLoader
    image_pair_loader = ImagePairLoader(predict_mode=True,
                                        initialize_empty=True,
                                        no_log=bool(verbose))
    image_pair_loader.add_image(image)

    # Get N classes
    n_classes = kwargs["n_classes"]

    if mode == "iso_live":
        # Add views if SMMV model
        kwargs["views"] = np.load(hparams.project_path + "/views.npz")["arr_0"]

        # Get sequence object
        sequence = image_pair_loader.get_sequencer(**kwargs)

        # Get voxel grid in real space
        voxel_grid_real_space = get_voxel_grid_real_space(image)

        # Prepare tensor to store combined prediction
        d = image.image.shape
        predicted = np.empty(shape=(len(kwargs["views"]), d[0], d[1], d[2],
                                    n_classes),
                             dtype=np.float32)
        print("Predicting on brain hyper-volume of shape:", predicted.shape)

        for n_view, v in enumerate(kwargs["views"]):
            print("\nView %i/%i: %s" % (n_view + 1, len(kwargs["views"]), v))
            # Sample the volume along the view
            X, y, grid, inv_basis = sequence.get_view_from(image.id,
                                                           v,
                                                           n_planes="same+20")

            # Predict on volume using model
            pred = predict_volume(model, X, axis=2)

            # Map the real space coordinate predictions to the nearest
            # real space coordinates defined on the voxel grid
            predicted[n_view] = map_real_space_pred(pred,
                                                    grid,
                                                    inv_basis,
                                                    voxel_grid_real_space,
                                                    method="nearest")
    else:
        predicted = pred_3D_iso(
            model=model,
            sequence=image_pair_loader.get_sequencer(**kwargs),
            image=image,
            extra_boxes="3x",
            min_coverage=None)

    # Restore the original verbosity value
    kwargs["verbose"] = verb_mem

    return predicted
Example #5
def predict_single(image, model, hparams, verbose=1):
    """
    A generic prediction function that sets up an ImagePairLoader object for
    the given image, prepares the image and predicts.

    Note that this function should only be used for convenience in scripts
    that work on single images at a time anyway, as batch-preparing the
    entire ImagePairLoader object prior to prediction is faster.

    NOTE: Only works with the iso_live interpolation modes at this time
    """
    mode = hparams["fit"]["intrp_style"].lower()
    assert mode in ("iso_live", "iso_live_3d")

    # Create an ImagePairLoader with only the given image
    from mpunet.image import ImagePairLoader
    image_pair_loader = ImagePairLoader(predict_mode=True,
                                        initialize_empty=True,
                                        no_log=bool(verbose))
    image_pair_loader.add_image(image)

    # Set scaler and bg values
    image_pair_loader.set_scaler_and_bg_values(
        bg_value=hparams.get_from_anywhere('bg_value'),
        scaler=hparams.get_from_anywhere('scaler'),
        compute_now=False)

    if mode == "iso_live":
        # Init LazyQueue and get its sequencer
        seq = get_sequence(data_queue=image_pair_loader,
                           views=np.load(hparams.project_path +
                                         "/views.npz")["arr_0"],
                           is_validation=True,
                           **hparams["fit"],
                           **hparams["build"])

        with seq.image_pair_queue.get_image_by_id(image.identifier) as image:
            # Get voxel grid in real space
            voxel_grid_real_space = get_voxel_grid_real_space(image)

            # Prepare tensor to store combined prediction
            d = image.image.shape
            predicted = np.empty(shape=(len(seq.views), d[0], d[1], d[2],
                                        seq.n_classes),
                                 dtype=np.float32)
            print("Predicting on brain hyper-volume of shape:",
                  predicted.shape)

            for n_view, v in enumerate(seq.views):
                print("\nView %i/%i: %s" % (n_view + 1, len(seq.views), v))
                # Sample the volume along the view
                X, y, grid, inv_basis = seq.get_view_from(image,
                                                          v,
                                                          n_planes="same+20")

                # Predict on volume using model
                pred = predict_volume(model, X, axis=2)

                # Map the real space coordinate predictions to the nearest
                # real space coordinates defined on the voxel grid
                predicted[n_view] = map_real_space_pred(pred,
                                                        grid,
                                                        inv_basis,
                                                        voxel_grid_real_space,
                                                        method="nearest")
    else:
        # Init LazyQueue and get its sequencer
        seq = get_sequence(data_queue=image_pair_loader,
                           is_validation=True,
                           **hparams["fit"],
                           **hparams["build"])
        predicted = pred_3D_iso(model=model,
                                sequence=seq,
                                image=image,
                                extra_boxes="3x",
                                min_coverage=None)

    return predicted
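
A usage sketch for predict_single, assuming a trained 'model' and the project's 'hparams' object are already loaded, and that an ImagePair can be constructed directly from an image path (the path is illustrative):

import numpy as np
from mpunet.image import ImagePair

image = ImagePair("data/subject_01/image.nii.gz")
predicted = predict_single(image, model, hparams, verbose=1)

# In "iso_live" mode 'predicted' has shape
# (n_views, dim1, dim2, dim3, n_classes); a simple mean over views
# collapses it to one segmentation
segmentation = predicted.mean(axis=0).argmax(-1).astype(np.uint8)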