import numpy as np
import tensorflow as tf

def test_masked_binary_crossentropy():
    # Labels of -1 are masked out; the two unmasked entries are predicted
    # exactly, so the loss should be ~0.
    y_true = np.array([-1., -1., 0., 1.])
    y_pred = np.array([1., 0., 0., 1.])
    mbc = masked_binary_crossentropy(y_true, y_pred)
    assert isinstance(mbc, tf.Tensor)
    assert np.sum(mbc.numpy()) < 1e-5
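The function under test is not shown in this section. Here is a minimal sketch consistent with the test above, assuming a label of -1 marks a missing entry and that the label_smoothing keyword matches the one used further down; it is an illustration, not the original implementation.

import tensorflow as tf

def masked_binary_crossentropy(y_true, y_pred, label_smoothing=0.0):
    # Sketch only: entries labeled -1 are treated as missing and
    # excluded from the loss.
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    mask = tf.cast(tf.not_equal(y_true, -1.0), tf.float32)
    if label_smoothing:
        y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
    # Clip predictions so the logs stay finite.
    eps = 1e-7
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
    bce = -(y_true * tf.math.log(y_pred)
            + (1.0 - y_true) * tf.math.log(1.0 - y_pred))
    # Average over the unmasked entries only.
    return tf.reduce_sum(bce * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)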
def training_step(x, y):
    # `model`, `opt`, and `focal_loss` are defined at module scope.
    with tf.GradientTape() as tape:
        pred = model(x, training=True)
        if focal_loss:
            loss = masked_binary_focal_loss(y, pred, 2)
        else:
            loss = masked_binary_crossentropy(y, pred)
    # Gradients are taken after the tape context exits.
    variables = model.trainable_variables
    grads = tape.gradient(loss, variables)
    opt.apply_gradients(zip(grads, variables))
    return loss
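A hypothetical driver loop for training_step; `train_ds` (an assumed tf.data.Dataset of (x, y) batches) and the number of epochs are not from the source.

# Hypothetical usage sketch; names assumed, not from the original code.
for epoch in range(10):
    for x_batch, y_batch in train_ds:
        batch_loss = training_step(x_batch, y_batch)
    print(f"epoch {epoch}: loss={float(batch_loss):.4f}")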
def test_masked_focal_loss():
    y_true = np.array([-1., -1., 0., 1.], dtype=np.float32)
    y_pred = np.array([1., 0., 0.25, 0.75], dtype=np.float32)
    mbc = masked_binary_crossentropy(y_true, y_pred)
    focal_loss_gamma_0 = masked_binary_focal_loss(y_true, y_pred, 0)
    focal_loss_gamma_2 = masked_binary_focal_loss(y_true, y_pred, 2)
    assert isinstance(focal_loss_gamma_0, tf.Tensor)
    # at gamma=0, focal loss collapses to crossentropy loss
    assert abs(mbc.numpy() - focal_loss_gamma_0.numpy()) < 1e-5
    # at higher gamma values the loss is pushed down
    assert mbc.numpy() > focal_loss_gamma_2.numpy()
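As with the crossentropy above, a hedged sketch of masked_binary_focal_loss that satisfies these assertions: at gamma=0 it reduces to the masked crossentropy, and higher gamma down-weights well-classified examples via the (1 - p_t)^gamma factor. Again an illustration, not the original implementation.

def masked_binary_focal_loss(y_true, y_pred, gamma):
    # Sketch only: modulate the masked crossentropy terms by (1 - p_t)**gamma,
    # where p_t is the predicted probability of the true class.
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    mask = tf.cast(tf.not_equal(y_true, -1.0), tf.float32)
    # Replace masked labels (-1) with 0 so the per-element math stays finite;
    # those entries are zeroed out by the mask anyway.
    y_true = tf.maximum(y_true, 0.0)
    eps = 1e-7
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
    p_t = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
    bce = -tf.math.log(p_t)
    focal = ((1.0 - p_t) ** gamma) * bce
    return tf.reduce_sum(focal * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)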
def loss(y_true, y_pred):
    return masked_binary_crossentropy(
        y_true, y_pred, label_smoothing=self.label_smoothing)
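Since this method reads self.label_smoothing, it presumably lives on a model or trainer class. One hypothetical way to hand the bound method to Keras (the `trainer` and `model` names are assumptions, not from the source):

# Hypothetical wiring; Keras accepts any callable with a
# (y_true, y_pred) signature as a loss.
model.compile(optimizer=tf.keras.optimizers.Adam(), loss=trainer.loss)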