Example #1
    def test_loss_accuracy(self):
        # =============Accuracy============= #
        y_pred = tf.constant([[1., 0., 0.], [1., 0., 0.]])
        y_true = tf.constant([[1., MAGIC_NUMBER, 1.], [0., MAGIC_NUMBER, 1.]])

        npt.assert_array_equal(categorical_accuracy(y_true, y_pred).numpy(), [1., 0.])
        npt.assert_almost_equal(binary_accuracy(y_true, y_pred).numpy(), [1. / 2., 0.])
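
The snippet above is not self-contained; a minimal sketch of the setup it presumably relies on (the astroNN import paths are an assumption inferred from the MAGIC_NUMBER-masked metrics, and npt is the conventional alias for numpy.testing):

    import numpy.testing as npt
    import tensorflow as tf

    from astroNN.config import MAGIC_NUMBER  # sentinel marking missing labels (assumed path)
    from astroNN.nn.losses import binary_accuracy, categorical_accuracy  # assumed path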
Example #2
    def compile(self, optimizer=None, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None):
        # use an explicitly passed optimizer; otherwise fall back to a default Adam
        if optimizer is not None:
            self.optimizer = optimizer
        elif self.optimizer is None or self.optimizer == 'adam':
            self.optimizer = Adam(lr=self.lr, beta_1=self.beta_1, beta_2=self.beta_2, epsilon=self.optimizer_epsilon,
                                  decay=0.0)

        # pick the last-layer activation, loss, and default metrics for the task
        if self.task == 'regression':
            self._last_layer_activation = 'linear'
            loss_func = mean_squared_error
            if self.metrics is None:
                self.metrics = [mean_absolute_error, mean_error]
        elif self.task == 'classification':
            self._last_layer_activation = 'softmax'
            loss_func = categorical_crossentropy
            if self.metrics is None:
                self.metrics = [categorical_accuracy]
        elif self.task == 'binary_classification':
            self._last_layer_activation = 'sigmoid'
            loss_func = binary_crossentropy
            if self.metrics is None:
                self.metrics = [binary_accuracy(from_logits=False)]
        else:
            raise RuntimeError('Only "regression", "classification" and "binary_classification" are supported')

        self.keras_model = self.model()

        self.keras_model.compile(loss=loss_func, optimizer=self.optimizer, metrics=self.metrics, loss_weights=None)

        return None
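
A minimal standalone sketch of the task-dispatch pattern this compile method implements (hypothetical names, not the library's API): each task maps to a last-layer activation, a loss, and default metrics, and anything else is rejected.

    TASK_CONFIG = {
        'regression': ('linear', 'mse', ['mae']),
        'classification': ('softmax', 'categorical_crossentropy', ['categorical_accuracy']),
        'binary_classification': ('sigmoid', 'binary_crossentropy', ['binary_accuracy']),
    }

    def resolve_task(task):
        """Return (last-layer activation, loss, default metrics) for a task name."""
        try:
            return TASK_CONFIG[task]
        except KeyError:
            raise RuntimeError('Only "regression", "classification" and '
                               '"binary_classification" are supported')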
Example #3
    def test_loss_accuracy(self):
        # =============Accuracy============= #
        y_pred = tf.constant([[1., 0., 0.], [1., 0., 0.]])
        y_true = tf.constant([[1., MAGIC_NUMBER, 1.], [0., MAGIC_NUMBER, 1.]])

        npt.assert_array_equal(categorical_accuracy(y_true, y_pred).eval(session=get_session()), [1., 0.])
        npt.assert_almost_equal(binary_accuracy(from_logits=False)(y_true, y_pred).eval(session=get_session()),
                                [1. / 2., 0.])
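
Unlike Example #1, this variant evaluates tensors through a TF1 graph session instead of eagerly. A sketch of that pattern, assuming TensorFlow 1.x where tf.keras.backend.get_session is available:

    import tensorflow as tf

    sess = tf.keras.backend.get_session()  # shared Keras session (TF1 only)
    t = tf.constant([1., 0., 0.])
    print(t.eval(session=sess))            # equivalent to sess.run(t)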
Example #4
    def test_loss_accuracy(self):
        # =============Accuracy============= #
        with tf.device("/cpu:0"), context.eager_mode():
            y_pred = tf.constant([[1., 0., 0.], [1., 0., 0.]])
            y_true = tf.constant([[1., MAGIC_NUMBER, 1.],
                                  [0., MAGIC_NUMBER, 1.]])

            npt.assert_array_equal(
                categorical_accuracy(y_true, y_pred).numpy(), [1., 0.])
            npt.assert_almost_equal(
                binary_accuracy(y_true, y_pred).numpy(), [1. / 2., 0.])
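
The context name used above presumably comes from TensorFlow's internal eager module; a hedged guess at the import, based on the context.eager_mode() call (an internal TF 1.x API, not a stable public one):

    import tensorflow as tf
    from tensorflow.python.eager import context  # assumed source of `context`

    with tf.device("/cpu:0"), context.eager_mode():
        print(tf.constant([1., 0.]).numpy())  # .numpy() only works while eager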
Example #5
    def compile(self, optimizer=None,
                loss=None,
                metrics=None,
                weighted_metrics=None,
                loss_weights=None,
                sample_weight_mode=None):
        # use an explicitly passed optimizer; otherwise fall back to a default Adam
        if optimizer is not None:
            self.optimizer = optimizer
        elif self.optimizer is None or self.optimizer == 'adam':
            self.optimizer = Adam(lr=self.lr, beta_1=self.beta_1, beta_2=self.beta_2, epsilon=self.optimizer_epsilon,
                                  decay=0.0)
        # pick the last-layer activation for the task unless one was set already
        if self.task == 'regression':
            if self._last_layer_activation is None:
                self._last_layer_activation = 'linear'
        elif self.task == 'classification':
            if self._last_layer_activation is None:
                self._last_layer_activation = 'softmax'
        elif self.task == 'binary_classification':
            if self._last_layer_activation is None:
                self._last_layer_activation = 'sigmoid'
        else:
            raise RuntimeError('Only "regression", "classification" and "binary_classification" are supported')

        self.keras_model, self.keras_model_predict, output_loss, variance_loss = self.model()

        # task-specific default metrics
        if self.task == 'regression':
            default_metrics = [mean_absolute_error, mean_error]
        elif self.task == 'classification':
            default_metrics = [categorical_accuracy]
        else:  # binary_classification (anything else was rejected above)
            default_metrics = [binary_accuracy(from_logits=True)]

        # prefer explicitly passed metrics, then previously set ones, then the task defaults
        self.metrics = metrics or self.metrics or default_metrics

        self.keras_model.compile(loss={'output': output_loss, 'variance_output': variance_loss},
                                 optimizer=self.optimizer,
                                 metrics={'output': self.metrics},
                                 weighted_metrics=weighted_metrics,
                                 loss_weights={'output': .5,
                                               'variance_output': .5} if not loss_weights else loss_weights,
                                 sample_weight_mode=sample_weight_mode)
        return None
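
This variant compiles a two-headed model (output plus variance_output) and splits the loss evenly between the heads unless loss_weights is given. A hypothetical functional model with matching output names, to make the wiring concrete (layer sizes are made up):

    from tensorflow.keras.layers import Dense, Input
    from tensorflow.keras.models import Model

    inputs = Input(shape=(8,))
    hidden = Dense(16, activation='relu')(inputs)
    output = Dense(1, name='output')(hidden)                    # prediction head
    variance_output = Dense(1, name='variance_output')(hidden)  # predictive-variance head
    model = Model(inputs=inputs, outputs=[output, variance_output])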
Example #6
    def test_loss_func(self):
        # make sure custom reduce_var works
        var_array = [1, 2, 3, 4, 5]
        self.assertEqual(reduce_var(tf.Variable(var_array)).eval(session=get_session()), np.var(var_array))

        # =============Magic correction term============= #
        y_true = tf.Variable([[2., MAGIC_NUMBER, MAGIC_NUMBER], [2., MAGIC_NUMBER, 4.]])
        npt.assert_array_equal(magic_correction_term(y_true).eval(session=get_session()), [3., 1.5])

        # =============MSE/MAE============= #
        y_pred = tf.Variable([[2., 3., 4.], [2., 3., 7.]])
        y_pred_2 = tf.Variable([[2., 9., 4.], [2., 0., 7.]])
        y_true = tf.Variable([[2., MAGIC_NUMBER, 4.], [2., MAGIC_NUMBER, 4.]])
        npt.assert_almost_equal(mean_absolute_error(y_true, y_pred).eval(session=get_session()), [0., 3. / 2.])
        npt.assert_almost_equal(mean_squared_error(y_true, y_pred).eval(session=get_session()), [0., 9. / 2])

        # make sure neural network prediction won't matter for magic number term
        npt.assert_almost_equal(mean_absolute_error(y_true, y_pred).eval(session=get_session()),
                                mean_absolute_error(y_true, y_pred_2).eval(session=get_session()))
        npt.assert_almost_equal(mean_squared_error(y_true, y_pred).eval(session=get_session()),
                                mean_squared_error(y_true, y_pred_2).eval(session=get_session()))

        # =============Mean Error============= #
        y_pred = tf.Variable([[1., 3., 4.], [2., 3., 7.]])
        y_true = tf.Variable([[2., MAGIC_NUMBER, 3.], [2., MAGIC_NUMBER, 7.]])
        npt.assert_almost_equal(mean_error(y_true, y_pred).eval(session=get_session()), [0., 0.])

        # =============Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [0., MAGIC_NUMBER, 1.]])
        npt.assert_array_equal(categorical_accuracy(y_true, y_pred).eval(session=get_session()), [1., 0.])
        npt.assert_almost_equal(binary_accuracy(from_logits=False)(y_true, y_pred).eval(session=get_session()),
                                [1. / 2., 0.])

        # =============Abs Percentage Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])

        npt.assert_array_almost_equal(mean_absolute_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      [50., 50.], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_absolute_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      mean_absolute_percentage_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Percentage Accuracy============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])

        npt.assert_array_almost_equal(mean_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      [50., 50.], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_percentage_error(y_true, y_pred).eval(session=get_session()),
                                      mean_percentage_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Mean Squared Log Error============= #
        y_pred = tf.Variable([[1., 0., 0.], [1., 0., 0.]])
        y_pred_2 = tf.Variable([[1., 9., 0.], [1., -1., 0.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])
        npt.assert_array_almost_equal(mean_squared_logarithmic_error(y_true, y_pred).eval(session=get_session()),
                                      [0.24, 0.24], decimal=3)
        # make sure neural network prediction won't matter for magic number term
        npt.assert_array_almost_equal(mean_squared_logarithmic_error(y_true, y_pred).eval(session=get_session()),
                                      mean_squared_logarithmic_error(y_true, y_pred_2).eval(session=get_session()),
                                      decimal=3)

        # =============Zeros Loss============= #
        y_pred = tf.Variable([[1., 0., 0.], [5., -9., 2.]])
        y_true = tf.Variable([[1., MAGIC_NUMBER, 1.], [1., MAGIC_NUMBER, 1.]])
        npt.assert_array_almost_equal(zeros_loss(y_true, y_pred).eval(session=get_session()), [0., 0.])
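
The asserted values pin down what magic_correction_term computes: the ratio of total labels to non-missing labels per sample, so that rows containing MAGIC_NUMBER entries are not under-weighted. A minimal re-implementation consistent with the expected [3., 1.5] above (the real function may handle edge cases such as all-missing rows differently, and the sentinel value is an assumption):

    import tensorflow as tf

    MAGIC_NUMBER = -9999.  # assumed sentinel for missing labels

    def magic_correction_term_sketch(y_true):
        # per-row ratio: total label count / count of non-missing labels
        valid = tf.cast(tf.not_equal(y_true, MAGIC_NUMBER), tf.float32)
        num_labels = tf.cast(tf.shape(y_true)[-1], tf.float32)
        return num_labels / tf.reduce_sum(valid, axis=-1)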