def test_generalized_dice():
    shape = (8, 32, 32, 32, 16)
    x = np.zeros(shape)
    y = np.zeros(shape)
    assert_array_equal(losses.generalized_dice(x, y), np.zeros(shape[0]))

    shape = (8, 32, 32, 32, 16)
    x = np.ones(shape)
    y = np.ones(shape)
    assert_array_equal(losses.generalized_dice(x, y), np.zeros(shape[0]))

    shape = (8, 32, 32, 32, 16)
    x = np.ones(shape)
    y = np.zeros(shape)
    # Why aren't the losses exactly one? Could it be the propagation of floating
    # point inaccuracies when summing?
    assert_allclose(losses.generalized_dice(x, y), np.ones(shape[0]), atol=1e-03)
    assert_allclose(
        losses.GeneralizedDice(axis=(1, 2, 3))(x, y),
        losses.generalized_dice(x, y))

    x = np.ones((4, 32, 32, 32, 1), dtype=np.float64)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    # Dice is similar to generalized Dice for one class. The weight factor
    # makes the generalized form slightly different from Dice.
    gd = losses.generalized_dice(x, y, axis=(1, 2, 3)).numpy()
    dd = losses.dice(x, y, axis=(1, 2, 3, 4)).numpy()
    assert_allclose(gd, dd, rtol=1e-02)  # is this close enough?
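
# For reference, a rough numpy sketch of the arithmetic the generalized Dice
# tests above assume (Sudre et al., 2017), with per-class weights equal to the
# inverse squared reference volume. This helper is hypothetical and only
# illustrative; ``losses.generalized_dice`` may place its smoothing term
# differently or handle degenerate (all-empty) classes with extra care.
def _reference_generalized_dice(y_true, y_pred, axis=(1, 2, 3), eps=1e-7):
    # Sum over the spatial axes, leaving shape (batch, classes).
    w = 1.0 / (np.sum(y_true, axis=axis) ** 2 + eps)
    intersection = np.sum(w * np.sum(y_true * y_pred, axis=axis), axis=-1)
    total = np.sum(w * np.sum(y_true + y_pred, axis=axis), axis=-1)
    # One loss value per sample in the batch.
    return 1.0 - (2.0 * intersection + eps) / (total + eps)
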
def test_dice():
    x = np.zeros(4)
    y = np.zeros(4)
    out = losses.dice(x, y, axis=None).numpy()
    assert_allclose(out, 0)

    x = np.ones(4)
    y = np.ones(4)
    out = losses.dice(x, y, axis=None).numpy()
    assert_allclose(out, 0)

    x = [0., 0., 1., 1.]
    y = [1., 1., 1., 1.]
    out = losses.dice(x, y, axis=None).numpy()
    ref = scipy.spatial.distance.dice(x, y)
    assert_allclose(out, ref)

    x = [0., 0., 1., 1.]
    y = [1., 1., 0., 0.]
    out = losses.dice(x, y, axis=None).numpy()
    ref = scipy.spatial.distance.dice(x, y)
    assert_allclose(out, ref)
    assert_allclose(out, 1)

    x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    dices = np.empty(x.shape[0])
    for i in range(x.shape[0]):
        dices[i] = scipy.spatial.distance.dice(x[i].flatten(), y[i].flatten())
    assert_allclose(losses.dice(x, y, axis=(1, 2, 3, 4)), dices, rtol=1e-05)
    assert_allclose(losses.Dice(axis=(1, 2, 3, 4))(x, y), dices.mean(), rtol=1e-05)
    assert_allclose(losses.Dice(axis=(1, 2, 3, 4))(y, x), dices.mean(), rtol=1e-05)
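
# For context: ``scipy.spatial.distance.dice`` returns the Dice dissimilarity
# (c_TF + c_FT) / (2 * c_TT + c_TF + c_FT) for boolean vectors, which for hard
# 0/1 inputs equals the soft Dice loss 1 - 2 * sum(x * y) / (sum(x) + sum(y))
# that the comparisons above rely on. The helper below is a hypothetical numpy
# sketch of that soft form, not part of the library under test.
def _reference_soft_dice(x, y, eps=1e-7):
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    return 1.0 - (2.0 * np.sum(x * y) + eps) / (np.sum(x) + np.sum(y) + eps)
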
def test_dice_tf1():
    # Graph-mode (TF1 API) variant of the Dice test, exercising the
    # ``labels``/``predictions`` signature and the ``reduction`` argument.
    with tf.Session() as sess:
        shape = (2, 4, 4)
        dtype = np.float32
        labels = np.zeros(shape=shape, dtype=dtype)
        predictions = np.zeros(shape=shape, dtype=dtype)
        labels[0, :2, :2] = 1
        labels[1, 2:, 2:] = 1
        predictions[0, 1:3, 1:3] = 1
        predictions[1, :, 2:] = 1

        ll = sess.run(
            losses.dice(
                labels=labels, predictions=predictions, axis=(1, 2),
                reduction='none'))
        assert np.allclose(ll[0], 0.75)
        assert np.allclose(ll[1], 0.3333333333)

        assert np.allclose(
            sess.run(
                losses.dice(labels=labels, predictions=predictions, axis=(1, 2))),
            0.5416667)

        zeros = np.zeros((2, 2), dtype=np.float32)
        ones = np.ones((2, 2), dtype=np.float32)

        # Perfect (all zero).
        ll = sess.run(losses.dice(labels=zeros, predictions=zeros, axis=(1,)))
        assert np.allclose(ll, 0)

        # Perfect (all one).
        ll = sess.run(losses.dice(labels=ones, predictions=ones, axis=(1,)))
        assert np.allclose(ll, 0)

        # All wrong.
        ll = sess.run(losses.dice(labels=zeros, predictions=ones, axis=(1,)))
        assert np.allclose(ll, 1)
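
# Hand-check of the expected values in the graph-mode test above, assuming the
# per-sample loss is 1 - 2|A ∩ B| / (|A| + |B|):
#   sample 0: labels and predictions each have a 2x2 block of ones (4 each)
#             overlapping in exactly 1 voxel -> 1 - 2*1 / (4 + 4) = 0.75
#   sample 1: labels have 4 ones, predictions have 8, overlapping in 4
#             -> 1 - 2*4 / (4 + 8) = 1/3
#   batch mean: (0.75 + 1/3) / 2 ~= 0.5416667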