Example 1
def _get_multi_io_model():
    # Two inputs share a single Bias layer, producing two outputs.
    inp_1 = layers.Input(shape=(1,), name='input_1')
    inp_2 = layers.Input(shape=(1,), name='input_2')
    d = testing_utils.Bias(name='output')
    out_1 = d(inp_1)
    out_2 = d(inp_2)
    return keras.Model([inp_1, inp_2], [out_1, out_2])
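Every snippet on this page relies on testing_utils.Bias, a layer from TensorFlow's private Keras test utilities that is never shown here. A minimal sketch consistent with how it is used in these examples, assuming a single zero-initialized trainable weight added to the input:

import tensorflow as tf

class Bias(tf.keras.layers.Layer):
    """Adds one trainable scalar, initialized to zero (identity at first)."""

    def build(self, input_shape):
        self.bias = self.add_weight('bias', (1,), initializer='zeros')

    def call(self, inputs):
        return inputs + self.bias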
Example 2

def _get_simple_bias_model(self):
    model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                                input_shape=(1,))
    model.compile(keras.optimizer_v2.gradient_descent.SGD(0.1),
                  'mae',
                  run_eagerly=testing_utils.should_run_eagerly())
    return model
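testing_utils.get_model_from_layers is another private TensorFlow test helper; in the real test suite it builds a functional, sequential, or subclassed model depending on the test parameterization. A simplified stand-in that matches how it is used here:

import tensorflow as tf

def get_model_from_layers(layer_list, input_shape=None):
    # Simplified: the real helper also covers functional/subclassed variants.
    model = tf.keras.Sequential(layer_list)
    if input_shape is not None:
        model.build((None,) + tuple(input_shape))
    return model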
Example 3
def get_model_and_train_step():
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(MAE()(targets, outputs))
    model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
    return get_ctl_train_step(model)
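Examples 3 and 7 hand the model to get_ctl_train_step, which is not shown on this page. A plausible sketch of such a custom-training-loop step, assuming an SGD optimizer and that the total loss is the sum of model.losses (the terms registered via add_loss):

import tensorflow as tf

def get_ctl_train_step(model):
    # Hypothetical reconstruction; the real helper is not shown here.
    optimizer = tf.keras.optimizers.SGD(0.05)

    def train_step(x, y, w=None):
        with tf.GradientTape() as tape:
            # Calling the model populates model.losses with the add_loss terms.
            model([x, y] if w is None else [x, y, w])
            loss = tf.reduce_sum(model.losses)
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        return loss

    return train_step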
Example 4
def multi_input_functional():
  """Functional Model that adds its inputs and then adds a bias."""
  input_1 = keras.Input(shape=(1,))
  input_2 = keras.Input(shape=(1,))
  input_3 = keras.Input(shape=(1,))
  added = keras.layers.Add()([input_1, input_2, input_3])
  output = testing_utils.Bias()(added)
  return keras.Model([input_1, input_2, input_3], output)
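A quick usage sketch (hypothetical driver code, not part of the original test): with the Bias weight still at its zero initialization, the model simply sums its three inputs.

import numpy as np

model = multi_input_functional()
x = np.ones((2, 1), dtype='float32')
# Bias starts at zero, so each row is 1 + 1 + 1 + 0 = 3.
print(model.predict([x, x, x]))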
Example 5

def test_invalid_variable_input(self):
  with context.eager_mode():
    inputs = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    # add_loss rejects a bare variable; it expects a tensor or a callable.
    with self.assertRaisesRegex(
        ValueError,
        'Expected a symbolic Tensors or a callable for the loss value'):
      model.add_loss(model.weights[0])
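If a loss genuinely needs to read a variable, the pattern the error message points to (and which Example 7 uses) is a zero-argument callable, so the value is recomputed on every call:

# Instead of model.add_loss(model.weights[0]):
model.add_loss(lambda: math_ops.reduce_sum(model.weights[0]))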
Example 6
def test_invalid_constant_input(self):
    inputs = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    with self.assertRaisesRegex(
            ValueError,
            'Expected a symbolic Tensors or a callable for the loss value'):
        model.add_loss(1.)
Example 7
def get_model_and_train_step():
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)

    def callable_loss():
        return math_ops.reduce_sum(model.weights)

    model.add_loss(callable_loss)
    return get_ctl_train_step(model)
Example 8
def test_add_entropy_loss_on_functional_model(self):
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(losses.binary_crossentropy(targets, outputs))
    model.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
    with test.mock.patch.object(logging, 'warning') as mock_log:
        model.fit([self.x, self.y], batch_size=3, epochs=5)
        self.assertNotIn('Gradients do not exist for variables',
                         str(mock_log.call_args))
Example 9

def test_loss_on_model_fit(self):
  inputs = Input(shape=(1,))
  targets = Input(shape=(1,))
  outputs = testing_utils.Bias()(inputs)
  model = Model([inputs, targets], outputs)
  model.add_loss(MAE()(targets, outputs))
  model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
  model.compile(
      optimizer_v2.gradient_descent.SGD(0.05),
      run_eagerly=testing_utils.should_run_eagerly())

  history = model.fit([self.x, self.y], batch_size=3, epochs=5)
  self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
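The asserted curve can be sanity-checked by hand, under the assumption (the fixture data is not shown on this page) that self.x is ones, self.y is zeros, and the whole dataset fits in one batch of 3, so each epoch takes a single SGD step:

# Hypothetical data, consistent with the asserted loss curve.
# outputs = x + bias; total loss = MAE + mean(mae) = 2 * |y - (x + bias)|
bias = 0.0
for epoch in range(5):
    loss = 2 * abs(0.0 - (1.0 + bias))  # two identical MAE terms, y = 0, x = 1
    print(round(loss, 1))               # 2.0, 1.8, 1.6, 1.4, 1.2
    bias -= 0.05 * 2.0                  # lr * gradient (each MAE term contributes 1)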
Example 10

def test_loss_callable_on_model_fit(self):
  model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                              input_shape=(1,))

  def callable_loss():
    return math_ops.reduce_sum(model.weights)

  model.add_loss(callable_loss)
  model.compile(
      optimizer_v2.gradient_descent.SGD(0.1),
      run_eagerly=testing_utils.should_run_eagerly())

  history = model.fit(self.x, batch_size=3, epochs=5)
  self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)
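The negative values are expected: the only loss is the bias itself, so gradient descent just pushes the weight down by lr * 1 each step. A quick check, assuming one batch (and hence one SGD step) per epoch:

bias = 0.0
for epoch in range(5):
    print(round(bias, 1))  # matches the recorded losses: 0.0, -0.1, ..., -0.4
    bias -= 0.1 * 1.0      # lr * d(reduce_sum(bias))/d(bias)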
Example 11

def test_loss_with_sample_weight_on_model_fit(self):
  inputs = Input(shape=(1,))
  targets = Input(shape=(1,))
  sw = Input(shape=(1,))
  outputs = testing_utils.Bias()(inputs)
  model = Model([inputs, targets, sw], outputs)
  model.add_loss(MAE()(targets, outputs, sw))
  model.add_loss(3 * math_ops.reduce_mean(sw * mae(targets, outputs)))
  model.compile(
      optimizer_v2.gradient_descent.SGD(0.025),
      run_eagerly=testing_utils.should_run_eagerly())

  history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
  self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
Example 12
def __init__(self):
    super(MyLayer, self).__init__()
    self.bias = testing_utils.Bias()
Example 13
def __init__(self):
  super(MultiInputSubclassed, self).__init__()
  self.add = keras.layers.Add()
  self.bias = testing_utils.Bias()