Example #1
import numpy as np
import tensorflow as tf

# get_affine, warp, to_blocks_numpy, standardize_numpy, and from_blocks_numpy
# are assumed to come from the surrounding project (e.g. nobrainer).


def _transform_and_predict(model,
                           x,
                           block_shape,
                           rotation,
                           translation=(0, 0, 0),
                           verbose=False):
    """Predict on rigidly transformed features.

    The rigid transformation is applied to the volume prior to prediction,
    and the predicted labels are transformed with the inverse warp, so that
    they are in the same space as the original input.

    Parameters
    ----------
    model: `tf.keras.Model`, model used for prediction.
    x: 3D array, volume of features.
    block_shape: tuple of length 3, shape of non-overlapping blocks to take
        from the features. This also corresponds to the input of the model, not
        including the batch or channel dimensions.
    rotation: tuple of length 3, rotation angle in radians in each dimension.
    translation: tuple of length 3, units of translation in each dimension.
    verbose: bool, whether to print progress bar.

    Returns
    -------
    Array of predictions with the same shape and in the same space as the
    original input features.
    """

    x = np.asarray(x).astype(np.float32)
    affine = get_affine(x.shape, rotation=rotation, translation=translation)
    inverse_affine = tf.linalg.inv(affine)
    x_warped = warp(x, affine, order=1)

    x_warped_blocks = to_blocks_numpy(x_warped, block_shape)
    x_warped_blocks = x_warped_blocks[..., np.newaxis]  # add grayscale channel
    x_warped_blocks = standardize_numpy(x_warped_blocks)
    y = model.predict(x_warped_blocks, batch_size=1, verbose=verbose)

    n_classes = y.shape[-1]
    if n_classes == 1:
        y = y.squeeze(-1)
    else:
        # Usually, the argmax would be taken to get the class membership of
        # each voxel, but if we get hard values, then we cannot average
        # multiple predictions.
        raise ValueError(
            "This function is not compatible with multi-class predictions.")

    y = from_blocks_numpy(y, x.shape)
    y = warp(y, inverse_affine, order=0).numpy()

    return y
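A minimal usage sketch for the function above, assuming `model` is a trained
3D segmentation model with 32**3 single-channel inputs (the model, volume,
and shapes are all placeholders, not part of the original example):

# Hypothetical test-time augmentation: average predictions over two rigid
# perturbations of the same placeholder volume.
volume = np.random.rand(128, 128, 128).astype(np.float32)
y_identity = _transform_and_predict(model, volume, (32, 32, 32),
                                    rotation=(0.0, 0.0, 0.0))
y_rotated = _transform_and_predict(model, volume, (32, 32, 32),
                                   rotation=(0.1, 0.0, 0.0))
# Both outputs are back in the input space, so they can be averaged.
y_mean = (y_identity + y_rotated) / 2.0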
Example #2
import numpy as np
import tensorflow as tf

# from_blocks_numpy is assumed to come from the surrounding project
# (e.g. nobrainer).


def save_output(output_prefix,
                model,
                data,
                volume_shape,
                block_shape,
                one_hot_label=False):
    '''Save model predictions and labels to a .npz file.

    volume_shape and block_shape are tuples of length 3. The volume and the
    blocks are assumed to be cubic, so the number of blocks per volume is
    the per-axis ratio cubed.
    '''
    num_blocks = int((volume_shape[0] / block_shape[0])**3)
    labels = np.empty(shape=(num_blocks, *block_shape))
    results = np.empty(shape=(num_blocks, *block_shape))
    data = data.unbatch().batch(1)
    for batch, (feat, label) in enumerate(data.take(num_blocks)):
        pred = model(feat)
        pred = np.argmax(pred, -1)
        if one_hot_label:
            label = tf.math.argmax(label, axis=-1)
        labels[batch, :, :, :] = label.numpy()
        results[batch, :, :, :] = pred

    labels = from_blocks_numpy(labels, volume_shape)
    results = from_blocks_numpy(results, volume_shape)
    np.savez(output_prefix, label=labels, result=results)
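A hedged usage sketch for save_output; the model and dataset below are
placeholders, and `dataset` is assumed to yield (features, labels) block
pairs the way get_dataset does in Example #4:

# Hypothetical call for a 256**3 volume split into 128**3 blocks
# (so (256 / 128)**3 = 8 blocks per volume).
save_output("eval_blocks",
            model,
            dataset,
            volume_shape=(256, 256, 256),
            block_shape=(128, 128, 128),
            one_hot_label=True)
# np.savez appends the extension, so this writes eval_blocks.npz.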
Example #3
import math

import numpy as np
import tensorflow as tf

# to_blocks_numpy and from_blocks_numpy are assumed to come from the
# surrounding project (e.g. nobrainer).


def predict_from_array(inputs,
                       model,
                       block_shape,
                       batch_size=1,
                       normalizer=None,
                       n_samples=1,
                       return_variance=False,
                       return_entropy=False):
    """Return a prediction given a filepath and an ndarray of features.

    Parameters
    ----------
    inputs: ndarray, array of features.
    model: `tf.keras.Model`, trained model.
    block_shape: tuple of length 3, shape of sub-volumes on which to
        predict.
    batch_size: int, number of sub-volumes per batch for predictions.
    normalizer: callable, function that accepts an ndarray and returns an
        ndarray. Called before separating volume into blocks.
    n_samples: int, number of predictions to sample per block. If 1, the
        single prediction is returned. Default is 1.
    return_variance: bool, if True, return the running population variance
        along with the mean. If n_samples is 1 or less, the variance is not
        returned.
    return_entropy: bool, if True, return the running entropy along with
        the mean.

    Returns
    -------
    ndarray of predictions.
    """
    if normalizer:
        features = normalizer(inputs)
    else:
        features = inputs
    if block_shape is not None:
        features = to_blocks_numpy(features, block_shape=block_shape)
    else:
        features = features[None]  # Add batch dimension.

    # Add a dimension for single channel.
    features = features[..., None]

    # Predict per block to reduce memory consumption.
    n_blocks = features.shape[0]
    n_batches = math.ceil(n_blocks / batch_size)

    if not return_variance and not return_entropy and n_samples == 1:
        outputs = model.predict(features, batch_size=1, verbose=0)
        if outputs.shape[-1] == 1:
            # Binarize according to threshold.
            outputs = outputs > 0.3
            outputs = outputs.squeeze(-1)
            # Nibabel doesn't like saving boolean arrays as Nifti.
            outputs = outputs.astype(np.uint8)
        else:
            # Hard classes for multi-class segmentation.
            outputs = np.argmax(outputs, -1)
        outputs = from_blocks_numpy(outputs, output_shape=inputs.shape)
        return outputs

    # Everything below this raise is unreachable; it is kept as a reference
    # implementation of the Bayesian sampling path.
    raise NotImplementedError(
        "Predicting from Bayesian nets is not implemented yet.")

    # The statistics are scalar per voxel, so drop the trailing channel axis.
    means = np.zeros(features.shape[:-1])
    variances = np.zeros(features.shape[:-1])
    entropies = np.zeros(features.shape[:-1])
    progbar = tf.keras.utils.Progbar(n_batches)
    progbar.update(0)
    for j in range(0, n_blocks, batch_size):

        this_x = features[j:j + batch_size]

        new_prediction = model.predict(this_x, batch_size=1, verbose=0)

        # Welford's online algorithm: track the running mean and the running
        # sum of squared deviations (M) over the Monte Carlo samples.
        prev_mean = np.zeros_like(new_prediction['probabilities'])
        curr_mean = new_prediction['probabilities']

        M = np.zeros_like(new_prediction['probabilities'])
        for n in range(1, n_samples):

            new_prediction = model.predict(this_x)
            prev_mean = curr_mean
            curr_mean = prev_mean + (new_prediction['probabilities'] -
                                     prev_mean) / float(n + 1)
            M = M + np.multiply(prev_mean - new_prediction['probabilities'],
                                curr_mean - new_prediction['probabilities'])

        means[j:j + batch_size] = np.argmax(curr_mean, axis=-1)  # max mean
        variances[j:j + batch_size] = np.sum(M / n_samples, axis=-1)
        entropies[j:j + batch_size] = -np.sum(
            np.multiply(np.log(curr_mean + 0.001), curr_mean),
            axis=-1)  # entropy
        progbar.add(1)

    total_means = from_blocks_numpy(means, output_shape=inputs.shape)
    total_variance = from_blocks_numpy(variances, output_shape=inputs.shape)
    total_entropy = from_blocks_numpy(entropies, output_shape=inputs.shape)

    # Summary statistics over the variance volume (computed but not used
    # further here).
    mean_var_voxels = np.mean(total_variance)
    std_var_voxels = np.std(total_variance)

    include_variance = ((n_samples > 1) and (return_variance))
    if include_variance:
        if return_entropy:
            return total_means, total_variance, total_entropy
        else:
            return total_means, total_variance
    else:
        if return_entropy:
            return total_means, total_entropy
        else:
            return total_means
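A usage sketch for predict_from_array covering only the implemented path
(n_samples=1, no variance or entropy). The model and input volume are
placeholders, and the z-score normalizer is just one example of a valid
callable:

def zscore(a):
    # Standardize to zero mean and unit variance.
    return (a - a.mean()) / a.std()

volume = np.random.rand(256, 256, 256).astype(np.float32)  # placeholder
prediction = predict_from_array(volume,
                                model,  # assumed trained tf.keras.Model
                                block_shape=(64, 64, 64),
                                batch_size=4,
                                normalizer=zscore)
assert prediction.shape == volume.shape
# Requesting n_samples > 1, return_variance, or return_entropy currently
# raises NotImplementedError.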
Example #4
    # Tail of a per-epoch training loop: record the evaluation accuracy and
    # print the epoch's mean metrics.
    val_accs.append(np.mean(epoch_eval_accuracy))

    print(
        "Loss: {:.3f}, Accuracy: {:.3f}, Eval-loss: {:.3f}, Eval-accuracy: {:.3f}"
        .format(np.mean(epoch_train_loss), np.mean(epoch_train_accuracy),
                np.mean(epoch_eval_loss), np.mean(epoch_eval_accuracy)))

# test the model
print("------------ test--------------")

# NOTE: this reuses train_pattern; a held-out test pattern would normally
# be passed here.
test_dataset = get_dataset(train_pattern,
                           volume_shape,
                           BATCH_SIZE,
                           block_shape,
                           n_classes,
                           training=False)
num_blocks = int((volume_shape[0] / block_shape[0])**3)
labels = np.empty(shape=(num_blocks, *block_shape))
results = np.empty(shape=(num_blocks, *block_shape))

# Assumes each dataset element yields a single (feat, label) block pair of
# shape block_shape, so each iteration fills one block.
for batch, (feat, label) in enumerate(test_dataset):
    pred = model(feat)
    pred = np.argmax(pred, -1)
    labels[batch, :, :, :] = label.numpy()
    results[batch, :, :, :] = pred

labels = from_blocks_numpy(labels, volume_shape)
results = from_blocks_numpy(results, volume_shape)
np.savez("output_var_b32", label=labels, result=results)