Example #1
0
 def test_sum_reduction(self):
     """SUM reduction with a scalar sample weight yields the weighted total."""
     loss_fn = losses.MeanAbsoluteError(
         reduction=losses_utils.Reduction.SUM)
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
     result = loss_fn(labels, preds, sample_weight=2.3)
     assert np.isclose(K.eval(result), 25.29999, atol=1e-3)
Example #2
0
 def test_invalid_sample_weight(self):
     """A sample_weight whose shape cannot broadcast against y must raise."""
     loss_fn = losses.MeanAbsoluteError()
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
     bad_weights = K.constant([3, 6, 5, 0], shape=(2, 2))
     with pytest.raises(Exception):
         loss_fn(labels, preds, sample_weight=bad_weights)
Example #3
0
 def test_no_reduction(self):
     """Reduction.NONE returns one weighted loss value per sample row."""
     loss_fn = losses.MeanAbsoluteError(
         reduction=losses_utils.Reduction.NONE)
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
     per_sample = loss_fn(labels, preds, sample_weight=2.3)
     assert np.allclose(K.eval(per_sample), [10.7333, 14.5666], atol=1e-3)
Example #4
0
 def test_sample_weighted(self):
     """Per-row weights scale each sample's contribution to the mean loss."""
     loss_fn = losses.MeanAbsoluteError()
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
     row_weights = K.constant([1.2, 3.4], shape=(2, 1))
     result = loss_fn(labels, preds, sample_weight=row_weights)
     assert np.isclose(K.eval(result), 81.4 / 6, atol=1e-3)
Example #5
0
 def test_timestep_weighted(self):
     """A (batch, timestep) weight tensor weights every timestep separately."""
     loss_fn = losses.MeanAbsoluteError()
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
     step_weights = K.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
     result = loss_fn(labels, preds, sample_weight=step_weights)
     assert np.isclose(K.eval(result), 13.833, atol=1e-3)
Example #6
0
    def train(self):
        """Compile the stored model with Adam + MAE loss and fit it.

        Trains on ``[self.X, self.Xaux] -> self.Y`` for ``self.epochs``
        epochs with batch size ``self.batch``. Validation data is passed
        to ``fit`` only when ``Xval``, ``Yval`` and ``Xauxval`` are all
        provided. The trained model is stored back on ``self.model``.

        Returns:
            The Keras ``History`` object produced by ``model.fit``.
        """
        # Getting the untrained model
        model = self.model

        # Initiating the optimizer
        optimizer = keras.optimizers.Adam(learning_rate=self.lr)

        # Compiling the model
        model.compile(loss=losses.MeanAbsoluteError(), optimizer=optimizer)

        # Use the short-circuiting boolean `and` instead of bitwise `&`:
        # `&` evaluates every operand and is not the Python idiom for
        # combining boolean conditions.
        has_validation = (
            self.Xval is not None
            and self.Yval is not None
            and self.Xauxval is not None
        )

        if has_validation:
            history = model.fit(
                [self.X, self.Xaux],
                self.Y,
                epochs=self.epochs,
                batch_size=self.batch,
                validation_data=([self.Xval, self.Xauxval], self.Yval),
                shuffle=False)
        else:
            history = model.fit(
                [self.X, self.Xaux],
                self.Y,
                epochs=self.epochs,
                batch_size=self.batch,
                shuffle=False)

        # Saving to original model attribute in the class
        self.model = model

        # Returning the training history
        return history
Example #7
0

def _get_multi_io_model():
    """Build a two-input / two-output model sharing a single Bias layer."""
    input_a = layers.Input(shape=(1,), name='input_1')
    input_b = layers.Input(shape=(1,), name='input_2')
    shared_bias = testing_utils.Bias(name='output')
    return keras.Model([input_a, input_b],
                       [shared_bias(input_a), shared_bias(input_b)])


@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
    dict(testcase_name='string', value='mae'),
    dict(testcase_name='built_in_fn', value=losses.mae),
    dict(testcase_name='built_in_class', value=losses.MeanAbsoluteError()),
    dict(testcase_name='custom_fn', value=my_mae),
    dict(testcase_name='custom_class', value=MyMeanAbsoluteError()),
    dict(testcase_name='list_of_strings', value=['mae', 'mae']),
    dict(testcase_name='list_of_built_in_fns', value=[losses.mae, losses.mae]),
    dict(testcase_name='list_of_built_in_classes',
         value=[losses.MeanAbsoluteError(),
                losses.MeanAbsoluteError()]),
    dict(testcase_name='list_of_custom_fns', value=[my_mae, my_mae]),
    dict(testcase_name='list_of_custom_classes',
         value=[MyMeanAbsoluteError(),
                MyMeanAbsoluteError()]),
    dict(testcase_name='dict_of_string',
         value={
             'output': 'mae',
             'output_1': 'mae',
Example #8
0
 def test_scalar_weighted(self):
     """A scalar sample weight multiplies the mean absolute error."""
     loss_fn = losses.MeanAbsoluteError()
     labels = K.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
     preds = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
     result = loss_fn(labels, preds, sample_weight=2.3)
     assert np.isclose(K.eval(result), 12.65, atol=1e-3)
Example #9
0
 def test_all_correct_unweighted(self):
     """Identical predictions and targets produce exactly zero loss."""
     loss_fn = losses.MeanAbsoluteError()
     targets = K.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
     result = loss_fn(targets, targets)
     assert np.isclose(K.eval(result), 0.0, atol=1e-3)
Example #10
0
 def test_config(self):
     """Constructor arguments are reflected on the loss object."""
     loss_fn = losses.MeanAbsoluteError(
         reduction=losses_utils.Reduction.SUM, name='mae_1')
     assert loss_fn.name == 'mae_1'
     assert loss_fn.reduction == losses_utils.Reduction.SUM
def _get_multi_io_model():
    """Create a multi-input / multi-output model with one shared Bias layer."""
    first_input = layers.Input(shape=(1,), name="input_1")
    second_input = layers.Input(shape=(1,), name="input_2")
    bias_layer = test_utils.Bias(name="output")
    outputs = [bias_layer(first_input), bias_layer(second_input)]
    return keras.Model([first_input, second_input], outputs)


@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
    [
        dict(testcase_name="string", value="mae"),
        dict(testcase_name="built_in_fn", value=losses.mae),
        dict(testcase_name="built_in_class", value=losses.MeanAbsoluteError()),
        dict(testcase_name="custom_fn", value=my_mae),
        dict(testcase_name="custom_class", value=MyMeanAbsoluteError()),
        dict(testcase_name="list_of_strings", value=["mae", "mae"]),
        dict(
            testcase_name="list_of_built_in_fns", value=[losses.mae, losses.mae]
        ),
        dict(
            testcase_name="list_of_built_in_classes",
            value=[losses.MeanAbsoluteError(), losses.MeanAbsoluteError()],
        ),
        dict(testcase_name="list_of_custom_fns", value=[my_mae, my_mae]),
        dict(
            testcase_name="list_of_custom_classes",
            value=[MyMeanAbsoluteError(), MyMeanAbsoluteError()],
        ),