def _weighted_loss(y_true, y_pred, sample_weight):
    def _standardize(losses, sample_weight):
        # broadcast `sample_weight` and `losses` to compatible ranks so the
        # elementwise product below is well-defined
        if isinstance(sample_weight, np.ndarray):
            if (sample_weight.shape[-1] != y_true.shape[-1]) and (
                    sample_weight.ndim < y_true.ndim):
                sample_weight = np.expand_dims(sample_weight, -1)
            if losses.ndim < sample_weight.ndim:
                losses = np.expand_dims(losses, -1)
        return losses, sample_weight

    yt, yp = _maybe_cast_to_tensor(y_true, y_pred)
    losses = K.eval(getattr(keras_losses, name)(yt, yp))
    # negative is standard in TF2, but was positive in TF1
    if name == 'cosine_similarity' and (not TF_2 and TF_KERAS):
        losses = -losses
    losses, sample_weight = _standardize(losses, sample_weight)
    return np.mean(losses * sample_weight)
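
# Hedged usage sketch (not part of the original helpers): illustrates the
# equality `_weighted_loss` is meant to reproduce, assuming TF2 eager
# execution; `mean_squared_error` stands in for whatever loss `name`
# resolves to in the enclosing scope, and the function name is hypothetical.
def _example_weighted_loss_check():
    import numpy as np
    import tensorflow as tf

    y_true = np.random.random((4, 3)).astype('float32')
    y_pred = np.random.random((4, 3)).astype('float32')
    sample_weight = np.array([1., 1., 2., .5], dtype='float32')

    # reference value: per-sample losses, weighted, then averaged
    per_sample = tf.keras.losses.mean_squared_error(y_true, y_pred).numpy()
    expected = np.mean(per_sample * sample_weight)

    # same quantity as reported by the Keras loss object (its default AUTO
    # reduction resolves to SUM_OVER_BATCH_SIZE here, i.e. a weighted mean)
    got = tf.keras.losses.MeanSquaredError()(
        y_true, y_pred, sample_weight=sample_weight).numpy()
    np.testing.assert_allclose(got, expected, rtol=1e-5)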
def _fn(y_true, y_pred):
    yt, yp = _maybe_cast_to_tensor(y_true, y_pred)
    # sample_weight makes no sense for keras `metrics`
    return K.eval(getattr(keras_metrics, name)(yt, yp))
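
# Hedged usage sketch (not part of the original helpers): the unweighted
# metric path, assuming TF2 eager execution; `binary_accuracy` stands in for
# the metric that `name` resolves to in the enclosing scope, and the function
# name is hypothetical.
def _example_metric_check():
    import numpy as np
    import tensorflow as tf

    y_true = np.array([[1.], [0.], [1.], [1.]], dtype='float32')
    y_pred = np.array([[.8], [.3], [.4], [.9]], dtype='float32')

    # per-sample metric values, analogous to what `_fn` returns
    per_sample = tf.keras.metrics.binary_accuracy(y_true, y_pred).numpy()
    assert per_sample.shape == (4,)
    np.testing.assert_allclose(per_sample.mean(), 0.75)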