Example #1
File: losses.py Project: rgcl/astroNN
def categorical_crossentropy(y_true, y_pred, from_logits=False):
    """
    Categorical cross-entropy between an output tensor and a target tensor, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param from_logits: Whether y_pred is in logits space. If you want to pass logits, use from_logits=True
    :type from_logits: boolean
    :return: Categorical Cross-Entropy
    :rtype: tf.Tensor
    :History: 2018-Jan-14 - Written - Henry Leung (University of Toronto)
    """
    # calculate correction term first
    correction = magic_correction_term(y_true)

    # Deal with magic number
    y_true = tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true),
                      y_true)

    # Note: tf.nn.softmax_cross_entropy_with_logits_v2 expects logits; we expect probabilities by default.
    if not from_logits:
        epsilon_tensor = tf.cast(tf.constant(keras.backend.epsilon()),
                                 tf.float32)
        # scale preds so that the class probas of each sample sum to 1
        y_pred /= tf.reduce_sum(y_pred, len(y_pred.get_shape()) - 1, True)
        # manual computation of crossentropy
        y_pred = tf.clip_by_value(y_pred, epsilon_tensor, 1. - epsilon_tensor)
        return -tf.reduce_sum(y_true * tf.math.log(y_pred),
                              len(y_pred.get_shape()) - 1) * correction
    else:
        return tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=y_true, logits=y_pred) * correction
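A minimal usage sketch in eager mode, assuming the astroNN import paths below (MAGIC_NUMBER is -9999. in astroNN's config):

import tensorflow as tf
from astroNN.config import MAGIC_NUMBER
from astroNN.nn.losses import categorical_crossentropy

# one-hot targets; the second sample has one label masked by MAGIC_NUMBER
y_true = tf.constant([[0., 1., 0.], [1., 0., MAGIC_NUMBER]])
y_pred = tf.constant([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])  # probabilities
print(categorical_crossentropy(y_true, y_pred, from_logits=False).numpy())
# per-sample losses; the second row is scaled by its 3/2 magic correction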
Example #2
    def test_loss_magic(self):
        # =============Magic correction term============= #
        with tf.device("/cpu:0"), context.eager_mode():
            y_true = tf.constant([[2., MAGIC_NUMBER, MAGIC_NUMBER],
                                  [2., MAGIC_NUMBER, 4.]])
            npt.assert_array_equal(
                magic_correction_term(y_true).numpy(), [3., 1.5])
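Inferring from the expected values above, magic_correction_term scales each sample by (number of labels) / (number of non-magic labels), so that zeroed-out magic entries do not dilute the mean. An illustrative re-implementation, not astroNN's actual code:

import tensorflow as tf

MAGIC_NUMBER = -9999.  # astroNN's default magic number

def magic_correction_term_sketch(y_true):
    num_labels = tf.cast(tf.shape(y_true)[-1], tf.float32)
    num_valid = tf.reduce_sum(
        tf.cast(tf.not_equal(y_true, MAGIC_NUMBER), tf.float32), axis=-1)
    return num_labels / num_valid

y_true = tf.constant([[2., MAGIC_NUMBER, MAGIC_NUMBER], [2., MAGIC_NUMBER, 4.]])
print(magic_correction_term_sketch(y_true).numpy())  # [3., 1.5]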
Example #3
File: losses.py Project: rgcl/astroNN
def binary_crossentropy(y_true, y_pred, from_logits=False):
    """
    Binary cross-entropy between an output tensor and a target tensor, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param from_logits: Whether y_pred is in logits space. If you want to pass logits, use from_logits=True
    :type from_logits: boolean
    :return: Binary Cross-Entropy
    :rtype: tf.Tensor
    :History: 2018-Jan-14 - Written - Henry Leung (University of Toronto)
    """
    # Note: tf.nn.sigmoid_cross_entropy_with_logits expects logits; we expect probabilities by default.
    if not from_logits:
        epsilon_tensor = tf.cast(tf.constant(keras.backend.epsilon()),
                                 tf.float32)
        # transform back to logits
        y_pred = tf.clip_by_value(y_pred, epsilon_tensor, 1. - epsilon_tensor)
        y_pred = tf.math.log(y_pred / (1. - y_pred))

    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true,
                                                            logits=y_pred)
    corrected_cross_entropy = tf.where(tf.equal(y_true, MAGIC_NUMBER),
                                       tf.zeros_like(cross_entropy),
                                       cross_entropy)

    return tf.reduce_mean(corrected_cross_entropy,
                          axis=-1) * magic_correction_term(y_true)
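The from_logits=False branch recovers logits through the inverse sigmoid before handing off to tf.nn.sigmoid_cross_entropy_with_logits; a quick round-trip check (sketch):

import tensorflow as tf

p = tf.constant([0.1, 0.5, 0.9])
logits = tf.math.log(p / (1. - p))  # inverse sigmoid, as in the branch above
print(tf.sigmoid(logits).numpy())   # ~[0.1, 0.5, 0.9]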
Example #4
def mean_squared_logarithmic_error(y_true, y_pred, sample_weight=None):
    """
    Calculate mean squared logarithmic error, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param sample_weight: Sample weights
    :type sample_weight: Union(tf.Tensor, tf.Variable, list)
    :return: Mean Squared Logarithmic Error
    :rtype: tf.Tensor
    :History: 2018-Feb-17 - Written - Henry Leung (University of Toronto)
    """
    tf_inf = tf.cast(tf.constant(1) / tf.constant(0), tf.float32)
    epsilon_tensor = tf.cast(tf.constant(tfk.backend.epsilon()), tf.float32)

    first_log = tf.math.log(
        tf.clip_by_value(y_pred, epsilon_tensor, tf_inf) + 1.)
    second_log = tf.math.log(
        tf.clip_by_value(y_true, epsilon_tensor, tf_inf) + 1.)
    log_diff = tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true),
                        tf.square(first_log - second_log))
    losses = tf.reduce_mean(log_diff, axis=-1) * magic_correction_term(y_true)
    return weighted_loss(losses, sample_weight)
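As a sanity check against the MSLE test value in Example #18 (y_true row [1., MAGIC_NUMBER, 1.], y_pred row [1., 0., 0.]): only the last element contributes, since the first entry matches and the magic entry is zeroed out. A quick sketch of the arithmetic, ignoring the tiny epsilon clip:

import numpy as np

sq = (np.log(0. + 1.) - np.log(1. + 1.)) ** 2  # the only non-zero term
print(sq / 3. * 1.5)  # mean over 3 labels x 3/2 magic correction ~ 0.24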
Example #5
File: losses.py Project: rgcl/astroNN
def robust_binary_crossentropy(y_true, y_pred, logit_var):
    """
    Calculate robust binary cross-entropy, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction in logits space
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param logit_var: Predictive variance in logits space
    :type logit_var: Union(tf.Tensor, tf.Variable)
    :return: Robust Binary Cross-Entropy
    :rtype: tf.Tensor
    :History: 2018-Mar-15 - Written - Henry Leung (University of Toronto)
    """
    variance_depressor = tf.reduce_mean(
        tf.exp(logit_var) - tf.ones_like(logit_var))
    undistorted_loss = binary_crossentropy(y_true, y_pred, from_logits=True)
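    # Note: Normal takes scale as a standard deviation, so logit_var is used directly as the scale here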
    dist = distributions.Normal(loc=y_pred, scale=logit_var)

    mc_result = tf.map_fn(lambda x: -tf.nn.elu(
        undistorted_loss - binary_crossentropy(y_true, x, from_logits=True)),
                          dist.sample([25]),
                          dtype=tf.float32)

    variance_loss = tf.reduce_mean(mc_result, axis=0) * undistorted_loss

    return (variance_loss + undistorted_loss +
            variance_depressor) * magic_correction_term(y_true)
Example #6
File: losses.py Project: henrysky/astroNN
def robust_binary_crossentropy(y_true, y_pred, logit_var, sample_weight):
    """
    Calculate robust binary cross-entropy, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction in logits space
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param logit_var: Predictive variance in logits space
    :type logit_var: Union(tf.Tensor, tf.Variable)
    :param sample_weight: Sample weights
    :type sample_weight: Union(tf.Tensor, tf.Variable, list)
    :return: Robust Binary Cross-Entropy
    :rtype: tf.Tensor
    :History: 2018-Mar-15 - Written - Henry Leung (University of Toronto)
    """
    variance_depressor = tf.reduce_mean(tf.exp(logit_var) - tf.ones_like(logit_var))
    undistorted_loss = binary_crossentropy(y_true, y_pred, from_logits=True)
    dist = tfd.Normal(loc=y_pred, scale=logit_var)

    mc_num = 25
    batch_size = tf.shape(y_pred)[0]
    label_size = tf.shape(y_pred)[-1]
    mc_result = -tf.nn.elu(
        tf.tile(undistorted_loss, [mc_num])
        - binary_crossentropy(tf.tile(y_true, [mc_num, 1]),
                              tf.reshape(dist.sample([mc_num]),
                                         (batch_size * mc_num, label_size)),
                              from_logits=True))

    variance_loss = tf.reduce_mean(tf.reshape(mc_result, (mc_num, batch_size)),
                                   axis=0) * undistorted_loss

    losses = (variance_loss + undistorted_loss + variance_depressor) * magic_correction_term(y_true)
    return weighted_loss(losses, sample_weight)
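Compared with Example #5, this henrysky/astroNN version vectorizes the 25 Monte Carlo samples by tiling the batch and reshaping, instead of looping with tf.map_fn, and it threads sample weights through weighted_loss.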
Example #7
File: losses.py Project: rgcl/astroNN
def robust_mse(y_true, y_pred, variance, labels_err):
    """
    Calculate mean squared error with predictive variance, taking known label error into account, for a Bayesian Neural Network

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param variance: Predictive Variance
    :type variance: Union(tf.Tensor, tf.Variable)
    :param labels_err: Known labels error, give zeros if unknown/unavailable
    :type labels_err: Union(tf.Tensor, tf.Variable)
    :return: Robust Mean Squared Error, can be used directly with Tensorflow
    :rtype: tf.Tensor
    :History: 2018-April-07 - Written - Henry Leung (University of Toronto)
    """
    # labels_err still contains magic_number
    labels_err_y = tf.where(tf.equal(y_true, MAGIC_NUMBER),
                            tf.zeros_like(y_true), labels_err)
    # The neural net predicts log(var), so take exp, add the known label variance, and take log back
    y_pred_corrected = tf.math.log(tf.exp(variance) + tf.square(labels_err_y))

    wrapper_output = tf.where(
        tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true),
        0.5 * tf.square(y_true - y_pred) * (tf.exp(-y_pred_corrected)) +
        0.5 * y_pred_corrected)

    return tf.reduce_mean(wrapper_output,
                          axis=-1) * magic_correction_term(y_true)
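With zero label error, the expression above reduces to the usual heteroscedastic Gaussian negative log-likelihood, 0.5 * (y_true - y_pred)**2 * exp(-variance) + 0.5 * variance. A small numeric sketch with hypothetical values:

import tensorflow as tf

y_true = tf.constant([[2.0, 4.0]])
y_pred = tf.constant([[2.5, 4.0]])
log_var = tf.constant([[0.0, 0.0]])  # predicted log-variance, i.e. var = 1
labels_err = tf.zeros_like(y_true)   # no known label error

s = tf.math.log(tf.exp(log_var) + tf.square(labels_err))
loss = 0.5 * tf.square(y_true - y_pred) * tf.exp(-s) + 0.5 * s
print(tf.reduce_mean(loss, axis=-1).numpy())  # [0.0625]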
Example #8
    def test_loss_magic(self):
        # =============Magic correction term============= #
        y_true = tf.constant([[2., MAGIC_NUMBER, MAGIC_NUMBER],
                              [2., MAGIC_NUMBER, 4.]])

        npt.assert_array_equal(
            magic_correction_term(y_true).eval(session=get_session()),
            [3., 1.5])
Example #9
def mean_error(y_true, y_pred):
    """
    Calculate mean error as a way to get the bias in prediction, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :return: Mean Error
    :rtype: tf.Tensor
    :History: 2018-May-22 - Written - Henry Leung (University of Toronto)
    """
    return tf.reduce_mean(tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true), y_true - y_pred),
                          axis=-1) * magic_correction_term(y_true)
Example #10
def mean_squared_error(y_true, y_pred):
    """
    Calculate mean squared error loss, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :return: Mean Squared Error
    :rtype: tf.Tensor
    :History: 2017-Nov-16 - Written - Henry Leung (University of Toronto)
    """
    return tf.reduce_mean(tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true),
                                   tf.square(y_true - y_pred)), axis=-1) * magic_correction_term(y_true)
Example #11
def mean_absolute_error(y_true, y_pred):
    """
    Calculate mean absolute error, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :return: Mean Absolute Error
    :rtype: tf.Tensor
    :History: 2018-Jan-14 - Written - Henry Leung (University of Toronto)
    """
    return tf.reduce_mean(tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true),
                                   tf.abs(y_true - y_pred)), axis=-1) * magic_correction_term(y_true)
Example #12
File: losses.py Project: henrysky/astroNN
def categorical_accuracy(y_true, y_pred):
    """
    Calculate categorical accuracy, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :return: Categorical Classification Accuracy
    :rtype: tf.Tensor
    :History: 2018-Jan-21 - Written - Henry Leung (University of Toronto)
    """
    y_true = tf.where(magic_num_check(y_true), tf.zeros_like(y_true), y_true)
    return tf.cast(tf.equal(tf.argmax(y_true, axis=-1), tf.argmax(y_pred, axis=-1)),
                   tf.float32) * magic_correction_term(y_true)
Example #13
File: losses.py Project: henrysky/astroNN
def mean_squared_error(y_true, y_pred, sample_weight=None):
    """
    Calculate mean squared error loss, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param sample_weight: Sample weights
    :type sample_weight: Union(tf.Tensor, tf.Variable, list)
    :return: Mean Squared Error
    :rtype: tf.Tensor
    :History: 2017-Nov-16 - Written - Henry Leung (University of Toronto)
    """
    losses = tf.reduce_mean(tf.where(magic_num_check(y_true), tf.zeros_like(y_true),
                                     tf.square(y_true - y_pred)), axis=-1) * magic_correction_term(y_true)
    return weighted_loss(losses, sample_weight)
Example #14
File: losses.py Project: henrysky/astroNN
def mean_error(y_true, y_pred, sample_weight=None):
    """
    Calculate mean error as a way to get the bias in prediction, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param sample_weight: Sample weights
    :type sample_weight: Union(tf.Tensor, tf.Variable, list)
    :return: Mean Error
    :rtype: tf.Tensor
    :History: 2018-May-22 - Written - Henry Leung (University of Toronto)
    """
    losses = tf.reduce_mean(tf.where(magic_num_check(y_true), tf.zeros_like(y_true), y_true - y_pred),
                            axis=-1) * magic_correction_term(y_true)
    return weighted_loss(losses, sample_weight)
Example #15
def mean_percentage_error(y_true, y_pred):
    """
    Calculate mean percentage error, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :return: Mean Percentage Error
    :rtype: tf.Tensor
    :History: 2018-Jun-06 - Written - Henry Leung (University of Toronto)
    """
    tf_inf = tf.cast(tf.constant(1) / tf.constant(0), tf.float32)
    epsilon_tensor = tf.cast(tf.constant(keras.backend.epsilon()), tf.float32)

    diff = (y_true - y_pred) / tf.clip_by_value(y_true, epsilon_tensor, tf_inf)
    diff_corrected = tf.where(tf.equal(y_true, MAGIC_NUMBER), tf.zeros_like(y_true), diff)
    return 100. * tf.reduce_mean(diff_corrected, axis=-1) * magic_correction_term(y_true)
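A quick numeric check of the percentage-error formula with no magic entries (sketch):

import tensorflow as tf

y_true = tf.constant([[1., 1.]])
y_pred = tf.constant([[1., 0.]])
diff = (y_true - y_pred) / y_true
print((100. * tf.reduce_mean(diff, axis=-1)).numpy())  # [50.]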
Example #16
File: losses.py Project: henrysky/astroNN
def mean_percentage_error(y_true, y_pred, sample_weight=None):
    """
    Calculate mean percentage error, ignoring the magic number

    :param y_true: Ground Truth
    :type y_true: Union(tf.Tensor, tf.Variable)
    :param y_pred: Prediction
    :type y_pred: Union(tf.Tensor, tf.Variable)
    :param sample_weight: Sample weights
    :type sample_weight: Union(tf.Tensor, tf.Variable, list)
    :return: Mean Percentage Error
    :rtype: tf.Tensor
    :History: 2018-Jun-06 - Written - Henry Leung (University of Toronto)
    """
    tf_inf = tf.cast(tf.constant(1) / tf.constant(0), tf.float32)
    epsilon_tensor = tf.cast(tf.constant(tfk.backend.epsilon()), tf.float32)

    diff = (y_true - y_pred) / tf.clip_by_value(y_true, epsilon_tensor, tf_inf)
    diff_corrected = tf.where(magic_num_check(y_true), tf.zeros_like(y_true), diff)
    losses = 100. * tf.reduce_mean(diff_corrected, axis=-1) * magic_correction_term(y_true)
    return weighted_loss(losses, sample_weight)
Example #17
File: losses.py Project: rgcl/astroNN
    def binary_accuracy_internal(y_true, y_pred):
        if from_logits:
            y_pred = tf.nn.sigmoid(y_pred)
        return tf.reduce_mean(tf.cast(tf.equal(y_true, tf.round(y_pred)),
                                      tf.float32),
                              axis=-1) * magic_correction_term(y_true)
Example #18
    def test_loss_func(self):
        # make sure custom reduce_var works
        var_array = [1, 2, 3, 4, 5]
        self.assertEqual(reduce_var(tf.Variable(var_array)).eval(session=get_session()), np.var(var_array))

        # =============Magic correction term============= #
        y_true = tf.Variable([[2., MAGIC_NUMBER, MAGIC_NUMBER], [2., MAGIC_NUMBER, 4.]])
        npt.assert_array_equal(magic_correction_term(y_true).eval(session=get_session()), [3., 1.5])

        # =============MSE/MAE============= #
        y_pred = tf.Variable([[2., 3., 4.], [2., 3., 7.]])
        y_pred_2 = tf.Variable([[2., 9., 4.], [2., 0., 7.]])
        y_true = tf.Variable([[2., MAGIC_NUMBER, 4.], [2., MAGIC_NUMBER, 4.]])
        npt.assert_almost_equal(mean_absolute_error(y_true, y_pred).eval(session=get_session()), [0., 3. / 2.])
        npt.assert_almost_equal(mean_squared_error(y_true, y_pred).eval(session=get_session()), [0., 9. / 2])

        # make sure neural network prediction won't matter for magic number term
        npt.assert_almost_equal(mean_absolute_error(y_true, y_pred).eval(session=get_session()),
                                mean_absolute_error(y_true, y_pred_2).eval(session=get_session()))
        npt.assert_almost_equal(mean_squared_error(y_true, y_pred).eval(session=get_session()),
                                mean_squared_error(y_true, y_pred_2).eval(session=get_session()))

        # =============Mean Error============= #
        y_pred = tf.Variable([[1., 3., 4.], [2., 3., 7.]])
        y_true = tf.Variable([[2., MAGIC_NUMBER, 3.], [2., MAGIC_NUMBER, 7.]])
        npt.assert_almost_equal(mean_error(y_true, y_pred).eval(session=get_session()), [0., 0.])

        # =============Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [0., MAGIC_NUMBER, 1.]])
        npt.assert_array_equal(categorical_accuracy(y_true, y_pred).eval(session=get_session()), [1., 0.])
        npt.assert_almost_equal(binary_accuracy(from_logits=False)(y_true, y_pred).eval(session=get_session()),
                                [1. / 2., 0.])

        # =============Abs Percentage Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])

        npt.assert_array_almost_equal(mean_absolute_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      [50., 50.], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_absolute_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      mean_absolute_percentage_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Percentage Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])

        npt.assert_array_almost_equal(mean_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      [50., 50.], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      mean_percentage_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Mean Squared Log Error============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])
        npt.assert_array_almost_equal(mean_squared_logarithmic_error(y_true, y_pred).eval(session=get_session()),
                                      [0.24, 0.24], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_squared_logarithmic_error(y_true, y_pred).eval(session=get_session()),
                                      mean_squared_logarithmic_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Zeros Loss============= #
        y_pred = tf.Variable([[1., 0., 0.], [5., -9., 2.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])
        npt.assert_array_almost_equal(zeros_loss(y_true, y_pred).eval(session=get_session()), [0., 0.])