def test_dice():
    shape = (2, 10)

    foo = np.zeros(shape, dtype=np.float64)
    foo[:, 4:] = 1

    bar = np.zeros(shape, dtype=np.float64)
    bar[0, :7] = 1
    bar[1, :5] = 1

    true_dices = np.zeros(foo.shape[0])
    for idx in range(foo.shape[0]):
        true_dices[idx] = sp.spatial.distance.dice(foo[idx].flatten(),
                                                   bar[idx].flatten())

    with tf.Session() as sess:
        u_ = tf.placeholder(tf.float64)
        v_ = tf.placeholder(tf.float64)
        dice_coeffs = dice(u_, v_, axis=-1)

        test_dices = sess.run(dice_coeffs, feed_dict={u_: foo, v_: bar})

    # Test TensorFlow implementation.
    np.testing.assert_almost_equal(1 - test_dices, true_dices)

    # Test NumPy implementation.
    test_dices_np = dice_numpy(foo, bar, axis=-1)
    np.testing.assert_almost_equal(1 - test_dices_np, true_dices)
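

# For reference, a minimal NumPy sketch of the (soft) Dice coefficient these
# tests exercise: 2 * sum(u * v) / (sum(u) + sum(v)), reduced over `axis`.
# This is an illustrative re-implementation, not the library's `dice` or
# `dice_numpy`.
def _dice_reference(u, v, axis=-1):
    u = np.asarray(u, dtype=np.float64)
    v = np.asarray(v, dtype=np.float64)
    intersection = np.sum(u * v, axis=axis)
    return 2.0 * intersection / (np.sum(u, axis=axis) + np.sum(v, axis=axis))
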
def test_generalized_dice():
    shape = (8, 32, 32, 32, 16)
    x = np.zeros(shape)
    y = np.zeros(shape)
    assert_array_equal(metrics.generalized_dice(x, y), np.ones(shape[0]))

    shape = (8, 32, 32, 32, 16)
    x = np.ones(shape)
    y = np.ones(shape)
    assert_array_equal(metrics.generalized_dice(x, y), np.ones(shape[0]))

    shape = (8, 32, 32, 32, 16)
    x = np.ones(shape)
    y = np.zeros(shape)
    # Why aren't the scores exactly zero? Could it be the propagation of
    # floating-point inaccuracies when summing?
    assert_allclose(metrics.generalized_dice(x, y),
                    np.zeros(shape[0]),
                    atol=1e-03)

    x = np.ones((4, 32, 32, 32, 1), dtype=np.float64)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    # Dice is similar to generalized Dice for one class. The weight factor
    # makes the generalized form slightly different from Dice.
    gd = metrics.generalized_dice(x, y, axis=(1, 2, 3)).numpy()
    dd = metrics.dice(x, y, axis=(1, 2, 3, 4)).numpy()
    assert_allclose(gd, dd, rtol=1e-02)  # is this close enough?
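

# For reference, the generalized Dice score exercised above weights each
# class c by the inverse squared volume of the reference labels,
# w_c = 1 / (sum_i r_{c,i})**2, and computes
#   GDS = 2 * (sum_c w_c * sum_i p_{c,i} * r_{c,i})
#           / (sum_c w_c * (sum_i p_{c,i} + sum_i r_{c,i})).
# This is the standard formulation; the library may add small epsilon terms
# for numerical stability, which is consistent with the tolerances above.
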
def test_tversky():
    shape = (4, 32, 32, 32, 1)
    y_pred = np.random.rand(*shape).astype(np.float64)
    y_true = np.random.randint(2, size=shape).astype(np.float64)

    # Test that tversky and dice are the same when alpha = beta = 0.5.
    dice = metrics.dice(y_true, y_pred).numpy()
    tversky = metrics.tversky(y_true,
                              y_pred,
                              axis=(1, 2, 3),
                              alpha=0.5,
                              beta=0.5).numpy()
    assert_allclose(dice, tversky)

    # Test that tversky and jaccard are the same when alpha = beta = 1.0.
    jaccard = metrics.jaccard(y_true, y_pred).numpy()
    tversky = metrics.tversky(y_true,
                              y_pred,
                              axis=(1, 2, 3),
                              alpha=1.,
                              beta=1.).numpy()
    assert_allclose(jaccard, tversky)

    with pytest.raises(ValueError):
        metrics.tversky([0., 0., 1.], [1., 0., 1.], axis=0)
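

# The identities exercised above, written out (standard Tversky index; the
# library's exact smoothing terms may differ):
#   TI = TP / (TP + alpha * FP + beta * FN)
#   alpha = beta = 0.5  ->  2*TP / (2*TP + FP + FN)  (Dice)
#   alpha = beta = 1.0  ->  TP / (TP + FP + FN)      (Jaccard)
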
def test_dice():
    x = np.zeros(4)
    y = np.zeros(4)
    out = metrics.dice(x, y, axis=None).numpy()
    assert_allclose(out, 1)

    x = np.ones(4)
    y = np.ones(4)
    out = metrics.dice(x, y, axis=None).numpy()
    assert_allclose(out, 1)

    x = [0., 0., 1., 1.]
    y = [1., 1., 1., 1.]
    out = metrics.dice(x, y, axis=None).numpy()
    ref = 1. - scipy.spatial.distance.dice(x, y)
    assert_allclose(out, ref)
    jac_out = metrics.jaccard(x, y, axis=None).numpy()
    assert_allclose(out, 2. * jac_out / (1. + jac_out))

    x = [0., 0., 1., 1.]
    y = [1., 1., 0., 0.]
    out = metrics.dice(x, y, axis=None).numpy()
    ref = 1. - scipy.spatial.distance.dice(x, y)
    assert_allclose(out, ref, atol=1e-07)
    assert_allclose(out, 0, atol=1e-07)

    x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    dices = np.empty(x.shape[0])
    for i in range(x.shape[0]):
        dices[i] = 1. - scipy.spatial.distance.dice(x[i].flatten(),
                                                    y[i].flatten())
    assert_allclose(metrics.dice(x, y, axis=(1, 2, 3, 4)), dices)
def model_fn(features, labels, mode, params, config=None):
    """HighRes3DNet model function.

    Args:
        features: 5D float `Tensor`, input tensor. This is the first item
            returned from the `input_fn` passed to `train`, `evaluate`, and
            `predict`. Use `NDHWC` format.
        labels: 4D float `Tensor`, labels tensor. This is the second item
            returned from the `input_fn` passed to `train`, `evaluate`, and
            `predict`. Labels should not be one-hot encoded.
        mode: Optional. Specifies if this is training, evaluation, or
            prediction.
        params: `dict` of parameters.
            - n_classes: (required) number of classes to classify.
            - optimizer: instance of a TensorFlow optimizer. Required if
                training.
            - one_batchnorm_per_resblock: (default false) if true, only apply
                the first batch normalization layer in each residually
                connected block. Empirically, using only the first batch
                normalization layer allowed the model to be trained on 128**3
                float32 inputs.
            - dropout_rate: (default 0) value between 0 and 1, dropout rate
                applied immediately before the last convolution layer. If 0
                or false, dropout is not applied.
        config: configuration object.

    Returns:
        `tf.estimator.EstimatorSpec`

    Raises:
        `ValueError` if required parameters are not in `params`.
    """
    volume = features
    if isinstance(volume, dict):
        volume = features['volume']

    required_keys = {'n_classes'}
    default_params = {
        'optimizer': None,
        'one_batchnorm_per_resblock': False,
        'dropout_rate': 0,
    }
    check_required_params(params=params, required_keys=required_keys)
    set_default_params(params=params, defaults=default_params)
    check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)

    tf.logging.debug("Parameters for model:")
    tf.logging.debug(params)

    training = mode == tf.estimator.ModeKeys.TRAIN

    with tf.variable_scope('conv_0'):
        x = tf.layers.conv3d(volume, filters=16, kernel_size=3, padding='SAME')
    with tf.variable_scope('batchnorm_0'):
        x = tf.layers.batch_normalization(x,
                                          training=training,
                                          fused=FUSED_BATCH_NORM)
    with tf.variable_scope('relu_0'):
        x = tf.nn.relu(x)

    layer_num = 0
    one_batchnorm = params['one_batchnorm_per_resblock']

    # 16-filter residually connected blocks.
    for ii in range(3):
        layer_num += 1
        x = _resblock(x,
                      mode=mode,
                      layer_num=layer_num,
                      filters=16,
                      kernel_size=3,
                      dilation_rate=1,
                      one_batchnorm=one_batchnorm)

    # 32-filter residually connected blocks. Pad inputs immediately before
    # first elementwise sum to match shape of last dimension.
    layer_num += 1
    paddings = [[0, 0], [0, 0], [0, 0], [0, 0], [8, 8]]
    x = _resblock(x,
                  mode=mode,
                  layer_num=layer_num,
                  filters=32,
                  kernel_size=3,
                  dilation_rate=2,
                  paddings=paddings,
                  one_batchnorm=one_batchnorm)
    for ii in range(2):
        layer_num += 1
        x = _resblock(x,
                      mode=mode,
                      layer_num=layer_num,
                      filters=32,
                      kernel_size=3,
                      dilation_rate=2,
                      one_batchnorm=one_batchnorm)

    # 64-filter residually connected blocks. Pad inputs immediately before
    # first elementwise sum to match shape of last dimension.
    layer_num += 1
    paddings = [[0, 0], [0, 0], [0, 0], [0, 0], [16, 16]]
    x = _resblock(x,
                  mode=mode,
                  layer_num=layer_num,
                  filters=64,
                  kernel_size=3,
                  dilation_rate=4,
                  paddings=paddings,
                  one_batchnorm=one_batchnorm)
    for ii in range(2):
        layer_num += 1
        x = _resblock(x,
                      mode=mode,
                      layer_num=layer_num,
                      filters=64,
                      kernel_size=3,
                      dilation_rate=4,
                      one_batchnorm=one_batchnorm)

    with tf.variable_scope('conv_1'):
        x = tf.layers.conv3d(x, filters=80, kernel_size=1, padding='SAME')

    if params['dropout_rate']:
        x = tf.layers.dropout(x,
                              rate=params['dropout_rate'],
                              training=training)

    with tf.variable_scope('logits'):
        logits = tf.layers.conv3d(x,
                                  filters=params['n_classes'],
                                  kernel_size=1,
                                  padding='SAME')

    predictions = tf.nn.softmax(logits=logits)
    predicted_classes = tf.argmax(logits, axis=-1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes,
            'probabilities': predictions,
            'logits': logits
        }
        # Outputs for SavedModel.
        export_outputs = {
            'outputs': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions,
                                          export_outputs=export_outputs)

    onehot_labels = tf.one_hot(labels, params['n_classes'])
    # loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=onehot_labels, logits=logits)
    # loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    # loss = losses.dice(labels=labels, predictions=predictions[..., 1], axis=(1, 2, 3))
    loss = losses.tversky(labels=onehot_labels,
                          predictions=predictions,
                          axis=(1, 2, 3))
    # loss = losses.generalized_dice(labels=onehot_labels, predictions=predictions, axis=(1, 2, 3))

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          eval_metric_ops={
                                              'dice':
                                              metrics.streaming_dice(
                                                  labels,
                                                  predicted_classes,
                                                  axis=(1, 2, 3)),
                                          })

    assert mode == tf.estimator.ModeKeys.TRAIN, "unknown mode key {}".format(
        mode)

    global_step = tf.train.get_global_step()
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = params['optimizer'].minimize(loss, global_step=global_step)

    # Get Dice score of each class.
    dice_coefficients = tf.reduce_mean(metrics.dice(onehot_labels,
                                                    tf.one_hot(
                                                        tf.argmax(predictions,
                                                                  axis=-1),
                                                        params['n_classes']),
                                                    axis=(1, 2, 3)),
                                       axis=0)

    logging_hook = tf.train.LoggingTensorHook(
        {
            "loss": loss,
            "dice": dice_coefficients
        }, every_n_iter=100)

    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      training_hooks=[logging_hook])
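

# A minimal usage sketch for the HighRes3DNet `model_fn` above (TF 1.x
# Estimator API). The toy input_fn, shapes, model_dir, and hyperparameter
# values below are illustrative assumptions, not part of the original code.
def _toy_input_fn():
    volumes = tf.random_uniform((1, 32, 32, 32, 1), dtype=tf.float32)
    labels = tf.zeros((1, 32, 32, 32), dtype=tf.int32)
    return tf.data.Dataset.from_tensors((volumes, labels)).repeat()


def _train_toy_highres3dnet():
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir='/tmp/highres3dnet_toy',
        params={'n_classes': 2,
                'optimizer': tf.train.AdamOptimizer(1e-3),
                'one_batchnorm_per_resblock': True,
                'dropout_rate': 0.25})
    estimator.train(input_fn=_toy_input_fn, max_steps=1)
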
def dice(y_true, y_pred, axis=(1, 2, 3, 4)):
    return 1.0 - metrics.dice(y_true=y_true, y_pred=y_pred, axis=axis)


# The original enclosing `def` for this evaluation snippet is missing; the
# name and arguments below are inferred from the variables the body uses.
def evaluate(eval_pattern, model_path, volume_shape, batch_size, n_classes):
    block_shape = (128, 128, 128)

    # Prepare the evaluation dataset.
    dataset_evaluate = get_dataset(pattern=eval_pattern,
                                   volume_shape=volume_shape,
                                   batch=batch_size,
                                   block_shape=block_shape,
                                   n_classes=n_classes,
                                   train=False)
    # Load the saved model and recompile it with the ELBO loss.
    model = tf.keras.models.load_model(model_path, compile=False)
    optimizer = tf.keras.optimizers.Adam()
    loss_fn = ELBO(model=model,
                   num_examples=np.prod(block_shape),
                   reduction=tf.keras.losses.Reduction.NONE)
    model.compile(loss=loss_fn,
                  optimizer=optimizer,
                  experimental_run_tf_function=False)

    # dice_scores = []
    i = 0
    for data in dataset_evaluate.take(10):
        i += 1
        # result = predict(data, model_path, block_shape=block_shape, n_samples=1)
        result = model.predict_on_batch(data)
        eval_error = model.test_on_batch(data)
        # print("batch {}, predicted value {}".format(i, result))
        (feat, label) = data
        label = tf.one_hot(label, depth=n_classes)
        dice_score = tf.reduce_mean(dice(label, result, axis=(1, 2, 3)))
        # print("batch {}, actual label {}".format(i, result))
        print("batch {}, eval loss {}, dice score {}".format(
            i, eval_error, dice_score))


def model_fn(features, labels, mode, params, config=None):
    """3D U-Net model function.

    Args:
        features: 5D float `Tensor`, input volume in `NDHWC` format.
        labels: 4D `Tensor` of integer labels (not one-hot encoded).
        mode: Specifies if this is training, evaluation, or prediction.
        params: `dict` of parameters.
            - n_classes: (required) number of classes to classify.
            - optimizer: instance of a TensorFlow optimizer. Required if
                training.
            - batchnorm: (default true) if true, apply batch normalization
                in each convolution block.
        config: configuration object.

    Returns:
        `tf.estimator.EstimatorSpec`

    Raises:
        `ValueError` if required parameters are not in `params`.
    """
    volume = features
    if isinstance(volume, dict):
        volume = features['volume']

    required_keys = {'n_classes'}
    default_params = {
        'optimizer': None,
        'batchnorm': True,
    }

    check_required_params(params=params, required_keys=required_keys)
    set_default_params(params=params, defaults=default_params)
    check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)

    bn = params['batchnorm']

    # start encoding
    shortcut_1 = _conv_block(volume,
                             filters1=32,
                             filters2=64,
                             mode=mode,
                             layer_num=0,
                             batchnorm=bn)

    with tf.variable_scope('maxpool_1'):
        x = tf.layers.max_pooling3d(inputs=shortcut_1,
                                    pool_size=(2, 2, 2),
                                    strides=(2, 2, 2),
                                    padding='same')

    shortcut_2 = _conv_block(x,
                             filters1=64,
                             filters2=128,
                             mode=mode,
                             layer_num=1,
                             batchnorm=bn)

    with tf.variable_scope('maxpool_2'):
        x = tf.layers.max_pooling3d(inputs=shortcut_2,
                                    pool_size=(2, 2, 2),
                                    strides=(2, 2, 2),
                                    padding='same')

    shortcut_3 = _conv_block(x,
                             filters1=128,
                             filters2=256,
                             mode=mode,
                             layer_num=2,
                             batchnorm=bn)

    with tf.variable_scope('maxpool_3'):
        x = tf.layers.max_pooling3d(inputs=shortcut_3,
                                    pool_size=(2, 2, 2),
                                    strides=(2, 2, 2),
                                    padding='same')

    x = _conv_block(x,
                    filters1=256,
                    filters2=512,
                    mode=mode,
                    layer_num=3,
                    batchnorm=bn)

    # start decoding
    with tf.variable_scope("upconv_0"):
        x = tf.layers.conv3d_transpose(inputs=x,
                                       filters=512,
                                       kernel_size=(2, 2, 2),
                                       strides=(2, 2, 2),
                                       kernel_regularizer=_regularizer)

    with tf.variable_scope('concat_1'):
        x = tf.concat((shortcut_3, x), axis=-1)

    x = _conv_block(x,
                    filters1=256,
                    filters2=256,
                    mode=mode,
                    layer_num=4,
                    batchnorm=bn)

    with tf.variable_scope("upconv_1"):
        x = tf.layers.conv3d_transpose(inputs=x,
                                       filters=256,
                                       kernel_size=(2, 2, 2),
                                       strides=(2, 2, 2),
                                       kernel_regularizer=_regularizer)

    with tf.variable_scope('concat_2'):
        x = tf.concat((shortcut_2, x), axis=-1)

    x = _conv_block(x,
                    filters1=128,
                    filters2=128,
                    mode=mode,
                    layer_num=5,
                    batchnorm=bn)

    with tf.variable_scope("upconv_2"):
        x = tf.layers.conv3d_transpose(inputs=x,
                                       filters=128,
                                       kernel_size=(2, 2, 2),
                                       strides=(2, 2, 2),
                                       kernel_regularizer=_regularizer)

    with tf.variable_scope('concat_3'):
        x = tf.concat((shortcut_1, x), axis=-1)

    x = _conv_block(x,
                    filters1=64,
                    filters2=64,
                    mode=mode,
                    layer_num=6,
                    batchnorm=bn)

    with tf.variable_scope('logits'):
        logits = tf.layers.conv3d(inputs=x,
                                  filters=params['n_classes'],
                                  kernel_size=(1, 1, 1),
                                  padding='same',
                                  activation=None,
                                  kernel_regularizer=_regularizer)
    # end decoding

    with tf.variable_scope('predictions'):
        predictions = tf.nn.softmax(logits=logits)

    class_ids = tf.argmax(logits, axis=-1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': class_ids,
            'probabilities': predictions,
            'logits': logits
        }
        # Outputs for SavedModel.
        export_outputs = {
            'outputs': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions,
                                          export_outputs=export_outputs)

    onehot_labels = tf.one_hot(labels, params['n_classes'])

    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=onehot_labels,
                                           logits=logits)
    # loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    l2_loss = tf.losses.get_regularization_loss()
    loss += l2_loss

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          eval_metric_ops={
                                              'dice':
                                              metrics.streaming_dice(labels,
                                                                     class_ids,
                                                                     axis=(1,
                                                                           2,
                                                                           3)),
                                          })

    assert mode == tf.estimator.ModeKeys.TRAIN

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = params['optimizer'].minimize(
            loss, global_step=tf.train.get_global_step())

    dice_coefficients = tf.reduce_mean(
        metrics.dice(onehot_labels,
                     tf.one_hot(class_ids, params['n_classes']),
                     axis=(1, 2, 3)),
        axis=0)

    logging_hook = tf.train.LoggingTensorHook(
        {
            "loss": loss,
            "dice": dice_coefficients
        }, every_n_iter=100)

    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      training_hooks=[logging_hook])
def run(block_shape, dropout_typ, model_name):

    # Constants
    root_path = '/om/user/satra/kwyk/tfrecords/'
    # to run the code on Satori
    #root_path = "/nobackup/users/abizeul/kwyk/tfrecords/"

    train_pattern = root_path + 'data-train_shard-*.tfrec'
    eval_pattern = root_path + "data-evaluate_shard-*.tfrec"

    n_classes = 115
    volume_shape = (256, 256, 256)
    EPOCHS = 1
    BATCH_SIZE_PER_REPLICA = 1

    # Set up the multi-GPU strategy.
    strategy = tf.distribute.MirroredStrategy()
    print("Number of replicas {}".format(strategy.num_replicas_in_sync))
    GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync

    # Create a `tf.data.Dataset` instance.
    dataset_train = get_dataset(train_pattern, volume_shape, GLOBAL_BATCH_SIZE,
                                block_shape, n_classes)
    dataset_eval = get_dataset(eval_pattern, volume_shape, GLOBAL_BATCH_SIZE,
                               block_shape, n_classes)

    # Distribute dataset.
    train_dist_dataset = strategy.experimental_distribute_dataset(
        dataset_train)

    # Create a checkpoint directory to store the checkpoints.
    checkpoint_dir = os.path.join("training_files", model_name,
                                  "training_checkpoints")
    # Name of the checkpoint files
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")

    with strategy.scope():
        optimizer = tf.keras.optimizers.Adam(1e-03)
        model = variational_meshnet(n_classes=n_classes,
                                    input_shape=block_shape + (1, ),
                                    filters=96,
                                    dropout=dropout_typ,
                                    is_monte_carlo=True,
                                    receptive_field=129)
        loss_fn = losses.ELBO(model=model,
                              num_examples=np.prod(block_shape),
                              reduction=tf.keras.losses.Reduction.NONE)
        #dice_metric = generalized_dice()
        checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
        model.compile(loss=loss_fn,
                      optimizer=optimizer,
                      experimental_run_tf_function=False)

        # training loop
        train_loss = []
        start = time()
        for epoch in range(EPOCHS):
            print('Epoch number ', epoch)
            i = 0
            for data in dataset_train:
                i += 1
                error = model.train_on_batch(data)
                train_loss.append(error)
                print('Batch {}, error : {}'.format(i, error))

            checkpoint.save(checkpoint_prefix)
        training_time = time() - start

        # evaluating loop
        print("---------- evaluating ----------")
        i = 0
        eval_loss = []
        dice_scores = []
        for data in dataset_eval:
            i += 1
            eval_error = model.test_on_batch(data)
            eval_loss.append(eval_error)
            print('Batch {}, eval_loss : {}'.format(i, eval_error))

            # calculate dice
            result = model.predict_on_batch(data)
            (feat, label) = data
            label = tf.one_hot(label, depth=n_classes)
            dice_scores.append(
                tf.reduce_mean(dice(label, result,
                                    axis=(1, 2, 3))).numpy().tolist())

        # Save model and variables
        variables = {
            "train_loss": train_loss,
            "eval_loss": eval_loss,
            "eval_dice": dice_scores
        }
        file_path = os.path.join("training_files", model_name,
                                 "data-{}.json".format(model_name))
        with open(file_path, 'w') as fp:
            json.dump(variables, fp, indent=4)

        #model_name="kwyk_128_full.h5"
        #saved_model_path=os.path.join("./training_files",model_name,"saved_model/{}.h5".format(model_name))
        #model.save(saved_model_path, save_format='h5')
        saved_model_path = os.path.join("./training_files", model_name,
                                        "saved_model/")
        model.save(saved_model_path, save_format='tf')

    return training_time
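

# An illustrative invocation of `run` above; the dropout type and model name
# are placeholder values (use any dropout type accepted by
# `variational_meshnet`), not taken from the original script.
# training_time = run(block_shape=(128, 128, 128),
#                     dropout_typ='bernoulli',
#                     model_name='kwyk_toy')
# print('training took {:.1f} seconds'.format(training_time))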