import numpy as np
from numpy.testing import assert_array_equal

from nobrainer.volume import from_blocks, to_blocks


def test_from_blocks():
    data = np.ones((256, 256, 256))
    blocks = to_blocks(data, (128, 128, 128))
    assert_array_equal(data, from_blocks(blocks, (256, 256, 256)))

    data = np.arange(12**3).reshape(12, 12, 12)
    blocks = to_blocks(data, (4, 4, 4))
    assert_array_equal(data, from_blocks(blocks, (12, 12, 12)))
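For reference, `to_blocks` partitions a volume into non-overlapping sub-volumes and `from_blocks` stitches them back together. Below is a minimal sketch of equivalent behavior, assuming each dimension divides evenly by the block shape; the `_sketch` names are illustrative, not nobrainer's API:

import numpy as np

def to_blocks_sketch(a, block_shape):
    # Split a 3D array into non-overlapping blocks of `block_shape`.
    x, y, z = a.shape
    bx, by, bz = block_shape
    blocks = a.reshape(x // bx, bx, y // by, by, z // bz, bz)
    return blocks.transpose(0, 2, 4, 1, 3, 5).reshape(-1, bx, by, bz)

def from_blocks_sketch(blocks, output_shape):
    # Inverse of to_blocks_sketch: reassemble the blocks into the full volume.
    x, y, z = output_shape
    _, bx, by, bz = blocks.shape
    return (blocks.reshape(x // bx, y // by, z // bz, bx, by, bz)
                  .transpose(0, 3, 1, 4, 2, 5)
                  .reshape(output_shape))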
Example #2
import math

import numpy as np
import tensorflow as tf

from nobrainer.volume import from_blocks, normalize_zero_one, to_blocks

# `_INFERENCE_CLASSES_KEY` is a module-level constant in the module this
# snippet comes from; it names the predictor output that holds class labels.
# Its value is not shown in this snippet.


def predict_from_array(inputs,
                       predictor,
                       block_shape,
                       normalizer=normalize_zero_one,
                       batch_size=4):
    """Return a prediction given an ndarray of features.

    Args:
        inputs: ndarray, array of features.
        predictor: TensorFlow Predictor object, predictor from previously
            trained model.
        block_shape: tuple of len 3, shape of blocks on which to predict.
        normalizer: callable, function that accepts an ndarray and returns an
            ndarray. Called before separating volume into blocks.
        batch_size: int, number of sub-volumes per batch for prediction.

    Returns:
        ndarray of predictions.
    """
    if normalizer:
        features = normalizer(inputs)
    else:
        features = inputs
    features = to_blocks(features, block_shape=block_shape)
    outputs = np.zeros_like(features)
    features = features[..., None]  # Add a dimension for single channel.

    # Predict per block to reduce memory consumption.
    n_blocks = features.shape[0]
    n_batches = math.ceil(n_blocks / batch_size)
    progbar = tf.keras.utils.Progbar(n_batches)
    progbar.update(0)
    for j in range(0, n_blocks, batch_size):
        outputs[j:j + batch_size] = predictor(
            {'volume': features[j:j + batch_size]})[_INFERENCE_CLASSES_KEY]
        progbar.add(1)

    return from_blocks(outputs, output_shape=inputs.shape)
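To exercise this function without a trained model, a stub that honors the predictor's input/output contract is enough. The sketch below is hypothetical: `stub_predictor` simply returns a dummy class label per voxel under the same output key the function indexes.

def stub_predictor(feed):
    # Mimics a TensorFlow Predictor: maps the 'volume' batch of blocks
    # of shape (batch, x, y, z, 1) to one integer class label per voxel.
    vol = feed['volume']
    return {_INFERENCE_CLASSES_KEY: np.zeros(vol.shape[:-1], dtype=np.int32)}

data = np.random.rand(64, 64, 64).astype(np.float32)
labels = predict_from_array(data, stub_predictor, block_shape=(32, 32, 32))
print(labels.shape)  # (64, 64, 64)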
Example #3
import numpy as np
from numpy.testing import assert_array_equal

from nobrainer import volume


def test_from_blocks():
    x = np.arange(64).reshape(4, 4, 4)
    block_shape = (2, 2, 2)
    outputs = volume.from_blocks(volume.to_blocks(x, block_shape), x.shape)
    assert_array_equal(outputs, x)
Example #4
import math

import numpy as np
import tensorflow as tf

from nobrainer.volume import from_blocks, to_blocks


def predict_from_array(inputs,
                       predictor,
                       block_shape,
                       return_variance=False,
                       return_entropy=False,
                       return_array_from_images=False,
                       n_samples=1,
                       normalizer=None,
                       batch_size=4):
    """Return a prediction given an ndarray of features.

    Args:
        inputs: ndarray, array of features.
        predictor: TensorFlow Predictor object, predictor from previously
            trained model.
        block_shape: tuple of len 3, shape of blocks on which to predict.
        return_variance: bool. If True, also return the running population
            variance along with the mean. If n_samples is 1 or less, the
            variance is not returned regardless of this flag.
        return_entropy: bool. If True, also return the running entropy
            along with the mean.
        return_array_from_images: bool. If True and the given input is an
            image, filepath, or list of filepaths, return arrays of
            [mean, variance, entropy] instead of images. If the input is
            already an array, an array is returned regardless of this flag.
        n_samples: int, number of stochastic forward passes to draw. If 1,
            a single prediction is returned.
        normalizer: callable, function that accepts an ndarray and returns an
            ndarray. Called before separating volume into blocks.
        batch_size: int, number of sub-volumes per batch for prediction.

    Returns:
        ndarray of predictions, optionally followed by variance and/or
        entropy arrays depending on the flags above.
    """

    print("Normalizer being used {n}".format(n = normalizer))
    if normalizer:
        features = normalizer(inputs)
        print(features.mean())
        print(features.std())
    else:
        features = inputs
    features = to_blocks(features, block_shape=block_shape)
    means = np.zeros_like(features)
    variances = np.zeros_like(features)
    entropies = np.zeros_like(features)

    features = features[..., None]  # Add a dimension for single channel.

    # Predict per block to reduce memory consumption.
    n_blocks = features.shape[0]
    n_batches = math.ceil(n_blocks / batch_size)
    progbar = tf.keras.utils.Progbar(n_batches)
    progbar.update(0)
    for j in range(0, n_blocks, batch_size):
        # First stochastic forward pass initializes the running statistics.
        new_prediction = predictor({'volume': features[j:j + batch_size]})
        prev_mean = np.zeros_like(new_prediction['probabilities'])
        curr_mean = new_prediction['probabilities']
        M = np.zeros_like(new_prediction['probabilities'])

        # Welford's online algorithm: fold each additional sample into the
        # running mean, and accumulate the sum of squared deviations in M.
        for n in range(1, n_samples):
            new_prediction = predictor({'volume': features[j:j + batch_size]})
            prev_mean = curr_mean
            curr_mean = prev_mean + (new_prediction['probabilities'] - prev_mean) / float(n + 1)
            M = M + np.multiply(prev_mean - new_prediction['probabilities'],
                                curr_mean - new_prediction['probabilities'])

        progbar.add(1)
        # Label with the maximum mean probability per voxel.
        means[j:j + batch_size] = np.argmax(curr_mean, axis=-1)
        # Population variance (M / n), summed over classes.
        variances[j:j + batch_size] = np.sum(M / n_samples, axis=-1)
        # Predictive entropy of the mean class probabilities.
        entropies[j:j + batch_size] = -np.sum(
            np.multiply(np.log(curr_mean + 1e-7), curr_mean), axis=-1)
    total_means = from_blocks(means, output_shape=inputs.shape)
    total_variance = from_blocks(variances, output_shape=inputs.shape)
    total_entropy = from_blocks(entropies, output_shape=inputs.shape)

    # Summary statistics over the variance volume (computed but unused here).
    mean_var_voxels = np.mean(total_variance)
    std_var_voxels = np.std(total_variance)
    include_variance = (n_samples > 1) and return_variance
    if include_variance:
        if return_entropy:
            return total_means, total_variance, total_entropy
        else:
            return total_means, total_variance
    else:
        if return_entropy:
            return total_means, total_entropy
        else:
            return total_means
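The inner loop above is Welford's online update: the running mean absorbs each new sample, and M accumulates the sum of squared deviations so that M / n is the population variance. A small standalone check of that identity, assuming only NumPy:

import numpy as np

samples = np.random.rand(100)

mean = 0.0
m2 = 0.0
for n, x in enumerate(samples, start=1):
    prev_mean = mean
    mean = prev_mean + (x - prev_mean) / n       # running mean
    m2 += (x - prev_mean) * (x - mean)           # sum of squared deviations

print(np.isclose(mean, samples.mean()))              # True
print(np.isclose(m2 / len(samples), samples.var()))  # True: population variance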