Example 1
def _get_simple_bias_model(self):
    # Single-weight model: one Bias layer applied to a 1-D input.
    model = test_utils.get_model_from_layers([test_utils.Bias()],
                                             input_shape=(1,))
    model.compile(keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
                  'mae',
                  run_eagerly=test_utils.should_run_eagerly())
    return model
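Every example on this page relies on test_utils.Bias, which is not shown here. A minimal sketch of such a layer, assuming it simply adds a trainable, zero-initialized bias to its input (the real Keras test utility may differ in detail):

import tensorflow as tf
from tensorflow import keras

class Bias(keras.layers.Layer):
    """Adds a trainable bias (initialized to zero) to its input."""

    def build(self, input_shape):
        # One weight per trailing feature; zero-initialized so the layer
        # starts out as an identity mapping.
        self.bias = self.add_weight('bias', (input_shape[-1],),
                                    initializer='zeros')

    def call(self, inputs):
        return inputs + self.bias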
Example 2
def _get_multi_io_model():
    # Two inputs share a single Bias layer, producing two outputs.
    inp_1 = layers.Input(shape=(1,), name='input_1')
    inp_2 = layers.Input(shape=(1,), name='input_2')
    d = test_utils.Bias(name='output')
    out_1 = d(inp_1)
    out_2 = d(inp_2)
    return keras.Model([inp_1, inp_2], [out_1, out_2])
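A quick sketch of how this multi-output model might be trained, with one MAE loss per output; the sample data here is illustrative, not from the original test:

import numpy as np

model = _get_multi_io_model()
model.compile('sgd', loss=['mae', 'mae'])
x = np.ones((6, 1))   # fed to both inputs
y = np.zeros((6, 1))  # target for both outputs
model.fit([x, x], [y, y], batch_size=3, epochs=2)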
Example 3
def get_model_and_train_step():
    # Targets enter the graph as a second input so that symbolic loss
    # tensors can be built with add_loss; the same MAE is added twice.
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(MAE()(targets, outputs))
    model.add_loss(tf.reduce_mean(mae(targets, outputs)))
    return get_ctl_train_step(model)
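get_ctl_train_step is a helper from the surrounding test file and is not shown here. A minimal sketch of what such a custom-training-loop step could look like; the optimizer choice and exact behavior are assumptions:

import tensorflow as tf

def get_ctl_train_step(model):
    optimizer = tf.keras.optimizers.SGD(0.05)

    def train_step(x, y):
        with tf.GradientTape() as tape:
            model([x, y])                  # forward pass populates model.losses
            loss = tf.add_n(model.losses)  # sum of all add_loss terms
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    return train_step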
Example 4
def test_invalid_variable_input(self):
    # Passing a tf.Variable directly to add_loss is rejected; the regex
    # below matches the library's error message verbatim.
    inputs = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    with self.assertRaisesRegex(
            ValueError,
            'Expected a symbolic Tensors or a callable for the loss value'):
        model.add_loss(model.weights[0])
Example 5
def multi_input_functional():
    """Functional Model that adds its inputs and then adds a bias."""
    input_1 = keras.Input(shape=(1,))
    input_2 = keras.Input(shape=(1,))
    input_3 = keras.Input(shape=(1,))
    added = keras.layers.Add()([input_1, input_2, input_3])
    output = test_utils.Bias()(added)
    return keras.Model([input_1, input_2, input_3], output)
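A quick illustration of calling this model; the sample data is made up for this note, and the output value assumes the zero-initialized Bias sketched under Example 1:

import numpy as np

model = multi_input_functional()
a = np.array([[1.0]])
b = np.array([[2.0]])
c = np.array([[3.0]])
# With a zero bias, the output is simply 1 + 2 + 3 = 6.
print(model.predict([a, b, c]))  # [[6.]]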
Example 6
def test_invalid_constant_input(self):
    # As with variables (Example 4), a bare constant is rejected.
    inputs = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    with self.assertRaisesRegex(
            ValueError,
            'Expected a symbolic Tensors or a callable for the loss value',
    ):
        model.add_loss(1.0)
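The error message in Examples 4 and 6 names the accepted alternatives: a symbolic tensor built from the model's graph, or a zero-argument callable. A sketch of the callable workaround for both rejected cases, continuing from the model above:

# A callable defers evaluation to training time, so variables and
# constants are both acceptable when wrapped this way:
model.add_loss(lambda: tf.reduce_sum(model.weights[0]))
model.add_loss(lambda: tf.constant(1.0))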
Example 7
def get_model_and_train_step():
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)

    def callable_loss():
        # Evaluated lazily on every training step.
        return tf.reduce_sum(model.weights)

    model.add_loss(callable_loss)
    return get_ctl_train_step(model)
Example 8
def test_add_entropy_loss_on_functional_model(self):
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(losses.binary_crossentropy(targets, outputs))
    model.compile('sgd', run_eagerly=test_utils.should_run_eagerly())
    # Training must not warn about missing gradients for the Bias weight.
    with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
        model.fit([self.x, self.y], batch_size=3, epochs=5)
        self.assertNotIn('Gradients do not exist for variables',
                         str(mock_log.call_args))
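This test guards against a loss added via add_loss becoming disconnected from the graph: if the cross-entropy term did not depend on the Bias weight, Keras would log "Gradients do not exist for variables" during fit, and the mock on logging.warning would capture it.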
Example 9
def test_loss_on_model_fit(self):
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(MAE()(targets, outputs))
    model.add_loss(tf.reduce_mean(mae(targets, outputs)))
    model.compile(optimizer_v2.gradient_descent.SGD(0.05),
                  run_eagerly=test_utils.should_run_eagerly())

    history = model.fit([self.x, self.y], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2],
                        1e-3)
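The expected loss curve follows from simple arithmetic, assuming the test fixture makes the initial MAE equal to 1 and each epoch is a single batch: the two identical MAE terms give a starting loss of 2.0, their combined gradient with respect to the bias has magnitude 2, so SGD with learning rate 0.05 moves the bias by 0.1 per epoch and the total loss drops by 0.2 per epoch: 2.0, 1.8, 1.6, 1.4, 1.2.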
Example 10
def test_loss_callable_on_model_fit(self):
    model = test_utils.get_model_from_layers([test_utils.Bias()],
                                             input_shape=(1,))

    def callable_loss():
        return tf.reduce_sum(model.weights)

    model.add_loss(callable_loss)
    model.compile(optimizer_v2.gradient_descent.SGD(0.1),
                  run_eagerly=test_utils.should_run_eagerly())

    history = model.fit(self.x, batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4],
                        1e-3)
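Here the only loss is the bias weight itself, which starts at zero; its gradient is exactly 1, so with learning rate 0.1 and (assuming the fixture gives one batch per epoch) the bias, and hence the loss, decreases by 0.1 per epoch: 0.0, -0.1, -0.2, -0.3, -0.4. Such a loss is unbounded below; the test only checks five epochs.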
Example 11
def test_loss_with_sample_weight_on_model_fit(self):
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    sw = Input(shape=(1,))
    outputs = test_utils.Bias()(inputs)
    model = Model([inputs, targets, sw], outputs)
    # Sample weights are fed as a third model input: once through the
    # loss object's sample_weight argument, once applied by hand.
    model.add_loss(MAE()(targets, outputs, sw))
    model.add_loss(3 * tf.reduce_mean(sw * mae(targets, outputs)))
    model.compile(optimizer_v2.gradient_descent.SGD(0.025),
                  run_eagerly=test_utils.should_run_eagerly())

    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4],
                        1e-3)
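This generalizes Example 9: assuming an initial MAE of 1 and fixture weights self.w averaging 1, the weighted term contributes 1.0 and the hand-scaled term 3.0, for a starting loss of 4.0 with a combined gradient of magnitude 4; the smaller learning rate (0.025) then moves the bias by 0.1 per epoch, so the loss again falls linearly, by 0.4 per epoch.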
Example 12
def __init__(self):
    super(MyLayer, self).__init__()
    self.bias = test_utils.Bias()
Example 13
def __init__(self):
    super(MyModel, self).__init__()
    self.bias = test_utils.Bias()
Example 14
def __init__(self):
    super().__init__()
    self.bias = test_utils.Bias()
Example 15
def __init__(self):
    super().__init__()
    self.add = keras.layers.Add()
    self.bias = test_utils.Bias()
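Examples 12 through 15 are constructor fragments from subclassed layers and models. A sketch of how the last fragment might round out into a complete subclassed model; only __init__ comes from the original, and the call signature is an assumption:

from tensorflow import keras

class MyModel(keras.Model):

    def __init__(self):
        super().__init__()
        self.add = keras.layers.Add()
        self.bias = test_utils.Bias()

    def call(self, inputs):
        # Sum the list of inputs, then apply the trainable bias.
        return self.bias(self.add(inputs))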