Example #1
  def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v1.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
      model_v1.fit(x, y, batch_size=5, epochs=1)

      model_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v2.set_weights(model_v1.get_weights())
      model_v2.compile(opt_v2, loss='categorical_crossentropy', metrics=[])
      model_v2._make_train_function()
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())

      hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
                          rtol=1e-5, atol=1e-5)
      self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
                          rtol=1e-5, atol=1e-5)
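Every example in this list builds its model with testing_utils.get_small_sequential_mlp(num_hidden, num_classes, input_dim=None). The helper's source is not shown here; the following is a minimal sketch inferred from the assertions in these tests (two layers, four weight variables, deferred build when input_dim is omitted, and a sigmoid head for the single-unit case in Example #8) rather than the actual TensorFlow implementation:

def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
  # Hypothetical reconstruction of the test helper: a two-layer MLP,
  # consistent with the tests expecting len(model.layers) == 2 and
  # len(model.weights) == 4.
  model = keras.models.Sequential()
  if input_dim:
    # Fixed input shape: weight variables are created immediately.
    model.add(keras.layers.Dense(num_hidden, activation='relu',
                                 input_dim=input_dim))
  else:
    # No input shape: the build is deferred until the model sees data.
    model.add(keras.layers.Dense(num_hidden, activation='relu'))
  activation = 'sigmoid' if num_classes == 1 else 'softmax'
  model.add(keras.layers.Dense(num_classes, activation=activation))
  return model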
Example #2
  def testNumericEquivalenceForAmsgrad(self):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())

      opt_k_v1 = optimizers.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)

      model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[])
      model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[])

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example #3
  def test_sequential_nesting(self):
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)

    model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
Example #4
 def get_model():
   if deferred:
     model = testing_utils.get_small_sequential_mlp(10, 4)
   else:
     model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3)
   model.compile(
       optimizer='rmsprop',
       loss='categorical_crossentropy',
       metrics=['accuracy'])
   return model
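get_model closes over a deferred flag from its enclosing test, toggling between a deferred input shape and a fixed input_dim=3. A hypothetical call site exercising both branches (names and shapes assumed, not from the source):

# Hypothetical harness: train the deferred-build and fixed-shape variants
# on the same 3-feature, 4-class data.
for deferred in (True, False):
  model = get_model()
  x = np.random.random((10, 3))
  y = keras.utils.to_categorical(np.random.randint(4, size=(10, 1)), 4)
  model.fit(x, y, epochs=1, verbose=0)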
Example #5
  def test_sequential_nesting(self):
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)

    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
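Several examples pass run_eagerly=testing_utils.should_run_eagerly() to compile(). Presumably this reports whether the surrounding parameterized test combination requested eager execution; a pinned stand-in for running a snippet outside that harness (assumed, not from the source):

def should_run_eagerly():
  # Stand-in: the real harness reflects the parameterized run mode;
  # pin it to False (graph mode) for standalone runs.
  return False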
Example #6
  def test_sequential_build_deferred(self):
    model = testing_utils.get_small_sequential_mlp(4, 5)

    model.build((None, 10))
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 4)

    # Test with nested model
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)

    model.build((None, 10))
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 8)
Example #7
  def test_training_and_eval_methods_on_iterators_single_io(self, model):
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics)

    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()

    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(iterator, steps=2, verbose=1)
    model.predict(iterator, steps=2)

    # Test with validation data
    model.fit(iterator,
              epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=iterator, validation_steps=2)
    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)

    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(
          iterator,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)

    # Test invalid usage
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(iterator, iterator,
                epochs=1, steps_per_epoch=2, verbose=0)

    with self.assertRaisesRegexp(
        ValueError, 'you should specify the `steps_per_epoch` argument'):
      model.fit(iterator, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.evaluate(iterator, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.predict(iterator, verbose=0)
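Example #7 can also build its model with testing_utils.get_small_functional_mlp; a sketch mirroring the sequential helper above, assumed rather than taken from the source:

def get_small_functional_mlp(num_hidden, num_classes, input_dim):
  # Hypothetical functional-API twin of get_small_sequential_mlp.
  inputs = keras.layers.Input(shape=(input_dim,))
  x = keras.layers.Dense(num_hidden, activation='relu')(inputs)
  outputs = keras.layers.Dense(num_classes, activation='softmax')(x)
  return keras.models.Model(inputs, outputs)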
Example #8
  def test_EarlyStopping_with_baseline(self):
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])

      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
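Example #8 and several later callback tests reference module-level constants (TRAIN_SAMPLES, NUM_CLASSES, and so on). Illustrative values that make those snippets self-contained; the actual values in the source may differ:

# Assumed module-level test constants (values illustrative only).
TRAIN_SAMPLES = 10
TEST_SAMPLES = 20
INPUT_DIM = 3
NUM_HIDDEN = 5
NUM_CLASSES = 2
BATCH_SIZE = 5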
Example #9
  def test_sequential_deferred_build_serialization(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertFalse(model.built)

    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.train_on_batch(x, y)
    self.assertTrue(model.built)

    config = model.get_config()
    self.assertIn('build_input_shape', config)

    new_model = keras.models.Sequential.from_config(config)
    self.assertEqual(len(new_model.layers), 2)
    self.assertEqual(len(new_model.weights), 4)
Example #10
  def test_Tensorboard_eager(self):
    temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='binary_crossentropy',
        optimizer=adam.AdamOptimizer(0.01),
        metrics=['accuracy'])

    cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]

    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    self.assertTrue(os.path.exists(temp_dir))
Example #11
  def testOptimizerWithCallableVarList(self):
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = keras.utils.to_categorical(y)

    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()

    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights

    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
      self.evaluate(train_op)
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
Example #12
  def test_sequential_pop(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    model = testing_utils.get_small_sequential_mlp(
        num_hidden, num_classes, input_dim)
    model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)
    model.pop()
    self.assertEqual(len(model.layers), 1)
    self.assertEqual(model.output_shape, (None, num_hidden))
    model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
    y = np.random.random((batch_size, num_hidden))
    model.fit(x, y, epochs=1)

    # Test popping single-layer model
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
    model.pop()
    self.assertEqual(model.layers, [])
    self.assertEqual(model.outputs, None)

    # Invalid use case
    model = keras.models.Sequential()
    with self.assertRaises(TypeError):
      model.pop()
Example #13
  def test_TensorBoard_with_ReduceLROnPlateau(self):
    with self.cached_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          keras.callbacks.TensorBoard(log_dir=temp_dir)
      ]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      assert os.path.exists(temp_dir)
Example #14
  def test_sequential_deferred_build_with_dataset_iterators(self):
    num_hidden = 5
    input_dim = 3
    num_classes = 2
    num_samples = 50
    steps_per_epoch = 10

    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertEqual(len(model.layers), 2)
    self.assertEqual(len(model.weights), 0)
    self.assertFalse(model.built)

    x = array_ops.ones((num_samples, input_dim))
    y = array_ops.zeros((num_samples, num_classes))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)

    model.fit(iterator, epochs=1, steps_per_epoch=steps_per_epoch)
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 2 * 2)
    self.assertFalse(model._is_graph_network)
Example #15
  def test_evaluate_generator_method(self, model_type):
    if model_type == 'sequential':
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=3, num_classes=4, input_dim=2)
    else:
      model = testing_utils.get_small_functional_mlp(
          num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer='sgd',
        metrics=['mae', metrics_module.CategoricalAccuracy()])
    model.summary()

    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             verbose=1,
                             use_multiprocessing=True)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)
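Examples #15, #40, and #43 consume custom_generator(), which is not shown. A plausible stand-in matching the 2-feature, 4-class models used there; mode=1 yields inputs only, matching the predict-only calls in Example #40 (assumed, not from the source):

def custom_generator(mode=2):
  # Hypothetical stand-in: endless batches for input_dim=2, num_classes=4.
  x = np.random.random((10, 2)).astype(np.float32)
  y = np.random.random((10, 4)).astype(np.float32)
  while True:
    if mode == 1:
      yield x  # inputs only, for predict_generator
    else:
      yield x, y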
Example #16
 def make_model():
   np.random.seed(1337)
   model = testing_utils.get_small_sequential_mlp(
       num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
   model.compile(
       loss='categorical_crossentropy',
       optimizer=keras.optimizers.SGD(lr=0.1),
       metrics=['accuracy'])
   return model
Example #17
  def test_build_before_fit(self):
    # Fix for b/112433577
    model = testing_utils.get_small_sequential_mlp(4, 5)
    model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))

    model.build((None, 6))

    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
Example #18
  def testNumericEquivalenceForNesterovMomentum(self):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      model_tf = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_tf.set_weights(model_k_v2.get_weights())

      opt_k_v1 = optimizers.SGD(momentum=0.9, nesterov=True)
      opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
      opt_tf = momentum.MomentumOptimizer(
          learning_rate=0.01, momentum=0.9, use_nesterov=True)

      model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[])
      model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[])
      model_tf.compile(opt_tf, loss='categorical_crossentropy', metrics=[])

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example #19
    def test_build_before_fit(self):
        # Fix for b/112433577
        model = testing_utils.get_small_sequential_mlp(4, 5)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())

        model.build((None, 6))

        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #20
  def test_specify_input_signature(self):
    model = testing_utils.get_small_sequential_mlp(10, 3, None)
    inputs = array_ops.ones((8, 5))

    with self.assertRaisesRegexp(ValueError, 'input shapes have not been set'):
      saving_utils.trace_model_call(model)

    fn = saving_utils.trace_model_call(
        model, [tensor_spec.TensorSpec(shape=[None, 5], dtype=dtypes.float32)])
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}
    self._assert_all_close(expected_outputs, signature_outputs)
Example #22
    def test_save_load_without_compile_sequential(self):
        sequential_model = testing_utils.get_small_sequential_mlp(
            num_hidden=1, num_classes=2, input_dim=3)
        tiledb_uri = os.path.join(self.get_temp_dir(), "model_array")
        tiledb_model_obj = TensorflowTileDB(uri=tiledb_uri)
        tiledb_model_obj.save(model=sequential_model, include_optimizer=False)
        loaded_model = tiledb_model_obj.load(compile_model=False)
        data = np.random.rand(100, 3)

        # Assert model predictions are equal
        np.testing.assert_array_equal(loaded_model.predict(data),
                                      sequential_model.predict(data))
Example #23
 def test_sequential_deferred_manual_build(self):
   model = testing_utils.get_small_sequential_mlp(4, 5)
   self.assertFalse(model.built)
   model(array_ops.zeros([1, 2]))
   self.assertTrue(model.built)
   self.assertEqual(len(model.outputs), 0)
   model.compile('rmsprop',
                 loss='mse',
                 run_eagerly=testing_utils.should_run_eagerly())
   self.assertEqual(len(model.outputs), 0)
   model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
   self.assertEqual(len(model.outputs), 1)
Example #24
    def testOptimizersCompatibility(self, opt_str, test_weights):
        np.random.seed(1331)
        with self.cached_session():
            train_samples = 20
            input_dim = 3
            num_classes = 2
            (x,
             y), _ = testing_utils.get_test_data(train_samples=train_samples,
                                                 test_samples=10,
                                                 input_shape=(input_dim, ),
                                                 num_classes=num_classes)
            y = keras.utils.to_categorical(y)

            num_hidden = 5
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)

            old_mode = os.environ.get('TF2_BEHAVIOR', None)
            # Disable tf2 to create V1 optimizer.
            disable_tf2()
            if opt_str == 'momentum':
                opt_v1 = optimizers.SGD(momentum=0.9)
            else:
                opt_v1 = optimizers.get(opt_str)

            # Test compile and fit with v1 optimizer.
            model.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
            model.fit(x, y, batch_size=5, epochs=1)
            model_dir = tempfile.mkdtemp()
            gfile.MakeDirs(model_dir)
            file_name = os.path.join(model_dir, 'model.h5')
            model.save(file_name)

            enable_tf2()
            # Test load and fit with v2 optimizer.
            model_2 = saving.load_model(file_name)
            opt_v2 = model_2.optimizer
            self.assertIsInstance(opt_v2, optimizer_v2.OptimizerV2)
            # set_weights is called inside load_model, but any exception there
            # is swallowed; this call checks that the weights can be set
            # correctly.
            if test_weights:
                opt_v2.set_weights(opt_v1.get_weights())

            hist_1 = model.fit(x, y, batch_size=5, epochs=1, shuffle=False)
            hist_2 = model_2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
            self.assertAllClose(model.get_weights(), model_2.get_weights())
            self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'])

            if old_mode is not None:
                os.environ['TF2_BEHAVIOR'] = old_mode
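Examples #24 and #29 call disable_tf2() and enable_tf2(), which are not shown. A minimal sketch, assuming they simply flip the TF2_BEHAVIOR environment variable that the test saves and restores around the run:

# Assumed helpers: toggle the switch that makes optimizers.get() return
# a V1 (TF1) or V2 optimizer.
def disable_tf2():
  os.environ['TF2_BEHAVIOR'] = '0'

def enable_tf2():
  os.environ['TF2_BEHAVIOR'] = '1'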
Example #25
    def test_sequential_can_use_graph_functions(self):
        model = testing_utils.get_small_sequential_mlp(4, 3)
        self.assertTrue(model._can_use_graph_functions)
        inner_model = testing_utils.get_small_sequential_mlp(4, 5)
        model.add(inner_model)

        self.assertTrue(model._can_use_graph_functions)

        inner_model_two = testing_utils.get_small_sequential_mlp(5, 7)
        self.assertTrue(inner_model_two._can_use_graph_functions)

        layer = keras.layers.Lambda(lambda x: x)
        layer._can_use_graph_functions = False
        inner_model_two.add(layer)
        self.assertFalse(inner_model_two._can_use_graph_functions)

        model.add(inner_model_two)
        self.assertFalse(model._can_use_graph_functions)

        model.pop()
        self.assertTrue(model._can_use_graph_functions)
Example #26
 def test_sequential_deferred_manual_build(self):
     model = testing_utils.get_small_sequential_mlp(4, 5)
     self.assertFalse(model.built)
     model(array_ops.zeros([1, 2]))
     self.assertTrue(model.built)
     self.assertEqual(len(model.outputs), 0)
     model.compile('rmsprop',
                   loss='mse',
                   run_eagerly=testing_utils.should_run_eagerly())
     self.assertEqual(len(model.outputs), 0)
     model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
     self.assertEqual(len(model.outputs), 1)
Example #27
    def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
        np.random.seed(1331)
        with self.cached_session():
            train_samples = 20
            input_dim = 3
            num_classes = 2
            (x,
             y), _ = testing_utils.get_test_data(train_samples=train_samples,
                                                 test_samples=10,
                                                 input_shape=(input_dim, ),
                                                 num_classes=num_classes)
            y = keras.utils.to_categorical(y)

            num_hidden = 5
            model_v1 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_v1.compile(opt_v1,
                             loss='categorical_crossentropy',
                             metrics=[])
            model_v1.fit(x, y, batch_size=5, epochs=1)

            model_v2 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_v2.set_weights(model_v1.get_weights())
            model_v2.compile(opt_v2,
                             loss='categorical_crossentropy',
                             metrics=[])
            model_v2._make_train_function()
            if test_weights:
                opt_v2.set_weights(opt_v1.get_weights())

            hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
            hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
            self.assertAllClose(model_v1.get_weights(), model_v2.get_weights())
            self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'])
Example #28
  def test_build_before_fit(self):
    # Fix for b/112433577
    model = testing_utils.get_small_sequential_mlp(4, 5)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())

    model.build((None, 6))

    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
Example #29
  def testOptimizersCompatibility(self, opt_str, test_weights, test_numeric):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)

      old_mode = os.environ.get('TF2_BEHAVIOR', None)
      # Disable tf2 to create V1 optimizer.
      disable_tf2()
      if opt_str == 'momentum':
        opt_v1 = optimizers.SGD(momentum=0.9)
      else:
        opt_v1 = optimizers.get(opt_str)

      # Test compile and fit with v1 optimizer.
      model.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
      model.fit(x, y, batch_size=5, epochs=1)
      model_dir = tempfile.mkdtemp()
      gfile.MakeDirs(model_dir)
      file_name = os.path.join(model_dir, 'model.h5')
      model.save(file_name)

      enable_tf2()
      # Test load and fit with v2 optimizer.
      model_2 = saving.load_model(file_name)
      opt_v2 = model_2.optimizer
      self.assertIsInstance(opt_v2, optimizer_v2.OptimizerV2)
      # set_weights is called inside load_model, but any exception there is
      # swallowed; this call checks that the weights can be set correctly.
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())
      if test_numeric:
        hist_1 = model.fit(x, y, batch_size=5, epochs=1, shuffle=False)
        hist_2 = model_2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
        self.assertAllClose(model.get_weights(), model_2.get_weights())
        self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'])

      if old_mode is not None:
        os.environ['TF2_BEHAVIOR'] = old_mode
Example #30
  def test_Tensorboard_histogram_summaries_with_generator(self):
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    def generator():
      x = np.random.randn(10, 100).astype(np.float32)
      y = np.random.randn(10, 10).astype(np.float32)
      while True:
        yield x, y

    with self.cached_session():
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=10, num_classes=10, input_dim=100)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]

      # fit with validation generator
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=2,
          validation_data=generator(),
          validation_steps=2,
          callbacks=cbks,
          verbose=0)

      with self.assertRaises(ValueError):
        # fit with validation generator but no
        # validation_steps
        model.fit_generator(
            generator(),
            steps_per_epoch=2,
            epochs=2,
            validation_data=generator(),
            callbacks=cbks,
            verbose=0)

      self.assertTrue(os.path.exists(tmpdir))
Example #32
  def test_sequential_model_fails_with_dict_inputs(self):
    num_classes = 5
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=num_classes)
    model.compile(
        rmsprop.RMSPropOptimizer(learning_rate=0.001),
        metrics=['acc'],
        weighted_metrics=['mae'],
        loss='categorical_crossentropy')

    x = {'dense_input': np.random.random((10, 1))}
    y = np.random.randint(num_classes, size=(10, 1))

    with self.assertRaisesRegexp(
        ValueError, 'Passing a dictionary input to a Sequential Model which '
        'doesn\'t have FeatureLayer as the first layer is an error'):
      model.fit(x, y, batch_size=5, epochs=1)
Example #33
    def test_sequential_model_fails_with_dict_inputs(self):
        num_classes = 5
        model = testing_utils.get_small_sequential_mlp(num_hidden=10,
                                                       num_classes=num_classes)
        model.compile(rmsprop.RMSPropOptimizer(learning_rate=0.001),
                      metrics=['acc'],
                      weighted_metrics=['mae'],
                      loss='categorical_crossentropy')

        x = {'dense_input': np.random.random((10, 1))}
        y = np.random.randint(num_classes, size=(10, 1))

        with self.assertRaisesRegexp(
                ValueError,
                'Passing a dictionary input to a Sequential Model which '
                'doesn\'t have FeatureLayer as the first layer is an error'):
            model.fit(x, y, batch_size=5, epochs=1)
Example #34
  def test_LearningRateScheduler(self):
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])

      cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      assert (
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.2) < keras.backend.epsilon()

      cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      assert (
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
Example #36
  def test_dataset_with_sparse_labels(self, model):
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)

    for loss in ['sparse_categorical_crossentropy',
                 losses_impl.sparse_softmax_cross_entropy]:
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      model.compile(optimizer, loss)

      inputs = np.zeros((10, 3), dtype=np.float32)
      targets = np.random.randint(0, 4, size=10, dtype=np.int32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
Example #38
  def test_sequential_model_fails_with_dict_inputs(self):
    num_classes = 5
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=num_classes)
    model.compile(
        'rmsprop',
        metrics=['acc'],
        weighted_metrics=['mae'],
        loss='categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    x = {'dense_input': np.random.random((10, 1))}
    y = np.random.randint(num_classes, size=(10, 1))

    with self.assertRaisesRegexp(
        ValueError, 'Passing a dictionary input to a Sequential Model which '
        'doesn\'t have FeatureLayer as the first layer is an error'):
      model.fit(x, y, batch_size=5, epochs=1)
Example #39
    def test_sequential_deferred_build_with_np_arrays(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSPropOptimizer(1e-3),
                      metrics=[keras.metrics.CategoricalAccuracy()])
        self.assertEqual(len(model.layers), 2)
        self.assertEqual(len(model.weights), 0)
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.fit(x, y, epochs=1)
        self.assertTrue(model.built)
        self.assertFalse(model._is_graph_network)
        self.assertEqual(len(model.weights), 2 * 2)
Example #40
    def test_predict_generator_method(self, model_type):
        if model_type == 'sequential':
            model = testing_utils.get_small_sequential_mlp(num_hidden=3,
                                                           num_classes=4,
                                                           input_dim=2)
        else:
            model = testing_utils.get_small_functional_mlp(num_hidden=3,
                                                           num_classes=4,
                                                           input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
Example #41
  def test_sequential_deferred_build_with_np_arrays(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSPropOptimizer(1e-3),
        metrics=[keras.metrics.CategoricalAccuracy()])
    self.assertEqual(len(model.layers), 2)
    self.assertEqual(len(model.weights), 0)
    self.assertFalse(model.built)

    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)
    self.assertTrue(model.built)
    self.assertFalse(model._is_graph_network)
    self.assertEqual(len(model.weights), 2 * 2)
Example #42
 def test_clone_optimizer_in_different_graph(self):
   with ops.Graph().as_default():
     with self.session():
       model = testing_utils.get_small_sequential_mlp(3, 4)
       optimizer = keras.optimizer_v2.adam.Adam()
       model.compile(
           optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy],
           )
       model.fit(
           x=np.array([[1., 2., 3., 4.]]),
           y=np.array([[1., 1., 1., 1.]]),
           epochs=1)
       optimizer_config = optimizer.get_config()
   with ops.Graph().as_default():
     with self.session():
       with self.assertRaisesRegex(ValueError, 'Cannot use the given session'):
         models.clone_and_build_model(model, compile_clone=True)
       # The optimizer_config object allows the model to be cloned in a
       # different graph.
       models.clone_and_build_model(model, compile_clone=True,
                                    optimizer_config=optimizer_config)
Example #43
    def test_fit_generator_method(self, model_type):
        if model_type == 'sequential':
            model = testing_utils.get_small_sequential_mlp(num_hidden=3,
                                                           num_classes=4,
                                                           input_dim=2)
        else:
            model = testing_utils.get_small_functional_mlp(num_hidden=3,
                                                           num_classes=4,
                                                           input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)
Example #44
  def test_fit_generator_with_callback(self):

    class TestCallback(keras.callbacks.Callback):

      def set_model(self, model):
        # Check the model's graph for the optimizer operations that
        # _make_train_function adds under a named scope for the
        # optimizer. This ensures the full model is populated before the
        # set_model callback is called.
        optimizer_name_scope = 'training/' + model.optimizer.__class__.__name__
        graph_def = ops.get_default_graph().as_graph_def()
        for node in graph_def.node:
          if node.name.startswith(optimizer_name_scope):
            return
        raise RuntimeError('The optimizer operations are not present in the '
                           'model graph when the Callback.set_model function '
                           'is called')
    np.random.seed(1337)

    def generator():
      x = np.random.randn(10, 100).astype(np.float32)
      y = np.random.randn(10, 10).astype(np.float32)
      while True:
        yield x, y

    with self.cached_session():
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=10, num_classes=10, input_dim=100)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=1,
          validation_data=generator(),
          validation_steps=2,
          callbacks=[TestCallback()],
          verbose=0)
Example #46
  def test_load_compiled_metrics(self):
    model = testing_utils.get_small_sequential_mlp(1, 3)

    # Compile with dense categorical accuracy
    model.compile('rmsprop', 'mse', 'acc')
    x = np.random.random((5, 10)).astype(np.float32)
    y_true = np.random.random((5, 3)).astype(np.float32)
    model.train_on_batch(x, y_true)

    model.save(self.path, include_optimizer=True, save_format='tf')
    revived = keras_load.load(self.path, compile=True)
    self.assertAllClose(model.test_on_batch(x, y_true),
                        revived.test_on_batch(x, y_true))

    # Compile with sparse categorical accuracy
    model.compile('rmsprop', 'mse', 'acc')
    y_true = np.random.randint(0, 3, (5, 1)).astype(np.float32)
    model.train_on_batch(x, y_true)
    model.save(self.path, include_optimizer=True, save_format='tf')
    revived = keras_load.load(self.path, compile=True)
    self.assertAllClose(model.test_on_batch(x, y_true),
                        revived.test_on_batch(x, y_true))
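Example #46 relies on self.path and a keras_load module from its test harness; a hypothetical fixture supplying the save path (class and attribute names assumed, not from the source):

# Hypothetical fixture: the test assumes self.path points at a fresh
# temp location for each save/load round trip.
def setUp(self):
  super(LoadCompiledMetricsTest, self).setUp()
  self.path = os.path.join(self.get_temp_dir(), 'saved_model')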
Example #47
    def test_sequential_deferred_build_with_np_arrays(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertEqual(len(model.layers), 2)
        with self.assertRaisesRegex(
                ValueError, 'Weights for model .* have not yet been created'):
            len(model.weights)
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.fit(x, y, epochs=1)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2 * 2)
Example #48
  def test_EarlyStopping(self):
    with self.cached_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])

      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
Example #49
  def test_sequential_pop(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    model = testing_utils.get_small_sequential_mlp(
        num_hidden, num_classes, input_dim)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)
    model.pop()
    self.assertEqual(len(model.layers), 1)
    self.assertEqual(model.output_shape, (None, num_hidden))
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    y = np.random.random((batch_size, num_hidden))
    model.fit(x, y, epochs=1)

    # Test popping single-layer model
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
    model.pop()
    self.assertEqual(model.layers, [])
    self.assertEqual(model.outputs, None)

    # Invalid use case
    model = keras.models.Sequential()
    with self.assertRaises(TypeError):
      model.pop()
Example #51
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            model = testing_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM)
            model.compile(loss='binary_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            cbks = [
                keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.5,
                                                  patience=4,
                                                  verbose=1),
                callbacks_v1.TensorBoard(log_dir=temp_dir)
            ]

            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            assert os.path.exists(temp_dir)
Example #52
    def test_sequential_deferred_build_serialization(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSPropOptimizer(1e-3),
                      metrics=[keras.metrics.CategoricalAccuracy()])
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.train_on_batch(x, y)
        self.assertTrue(model.built)

        config = model.get_config()
        self.assertIn('build_input_shape', config)

        new_model = keras.models.Sequential.from_config(config)
        self.assertEqual(len(new_model.layers), 2)
        self.assertEqual(len(new_model.weights), 4)
Example #53
    def test_save_load_with_compile_sequential(self):
        sequential_model = testing_utils.get_small_sequential_mlp(
            num_hidden=1, num_classes=2, input_dim=3)
        sequential_model = add_optimizer(sequential_model)
        tiledb_uri = os.path.join(self.get_temp_dir(), "model_array")
        tiledb_model_obj = TensorflowTileDB(uri=tiledb_uri)
        tiledb_model_obj.save(model=sequential_model, include_optimizer=True)
        loaded_model = tiledb_model_obj.load(compile_model=True)
        data = np.random.rand(100, 3)

        model_opt_weights = batch_get_value(
            getattr(sequential_model.optimizer, "weights"))
        loaded_opt_weights = batch_get_value(
            getattr(loaded_model.optimizer, "weights"))

        # Assert optimizer weights are equal
        for weight_model, weight_loaded_model in zip(model_opt_weights,
                                                     loaded_opt_weights):
            np.testing.assert_array_equal(weight_model, weight_loaded_model)

        # Assert model predictions are equal
        np.testing.assert_array_equal(loaded_model.predict(data),
                                      sequential_model.predict(data))
Example #54
    def test_training_and_eval_methods_on_iterators_single_io(self, model):
        if model == 'functional':
            model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        elif model == 'subclass':
            model = testing_utils.get_small_sequential_mlp(1, 4)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(iterator, steps=2, verbose=1)
        model.predict(iterator, steps=2)

        # Test with validation data
        model.fit(iterator,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=iterator,
                  validation_steps=2)
        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(iterator,
                      iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(
                ValueError,
                'you should specify the `steps_per_epoch` argument'):
            model.fit(iterator, epochs=1, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.evaluate(iterator, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.predict(iterator, verbose=0)
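
The error paths above encode the rule that a dataset or iterator carries its own targets, so it cannot be combined with validation_split, sample_weight, or a separate y. A minimal TF2-style sketch, assuming tf.keras with tf.data, where the Dataset is passed straight to fit():

    import numpy as np
    import tensorflow as tf

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)

    # Targets travel inside the dataset, so fit() takes no separate y.
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat().batch(10)

    model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
    model.evaluate(dataset, steps=2, verbose=0)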
Example #55
 def test_sequential_shape_inference_deferred(self):
     model = testing_utils.get_small_sequential_mlp(4, 5)
     output_shape = model.compute_output_shape((None, 7))
     self.assertEqual(tuple(output_shape.as_list()), (None, 5))
Example #56
 def setUp(self):
     self.model = testing_utils.get_small_sequential_mlp(1, 2, 3)
     self.subclassed_model = testing_utils.get_small_subclass_mlp(1, 2)
Example #57
 def setUp(self):
   super(TestSaveModel, self).setUp()
   self.model = testing_utils.get_small_sequential_mlp(1, 2, 3)
   self.subclassed_model = testing_utils.get_small_subclass_mlp(1, 2)
Example #58
    def test_LearningRateScheduler(self):
        with self.cached_session():
            np.random.seed(1337)
            batch_size = 5
            num_classes = 2
            input_dim = 3

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=10,
                test_samples=10,
                input_shape=(input_dim, ),
                num_classes=num_classes)
            y_test = tf.keras.utils.to_categorical(y_test)
            y_train = tf.keras.utils.to_categorical(y_train)
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=5, num_classes=num_classes, input_dim=input_dim)

            epochs = [1, 2]
            callback = LearningRateScheduler(1, 1, epochs, num_warmup_steps=1)
            assert callback.slope == 1 - 1e-2 / 3

            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=4,
                      verbose=0)

            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                0.01)

            # Here the epoch scheduling won't apply because the warmup hasn't finished yet.
            num_warmup_steps = 16
            init_lr = 1e-2 / 3
            callback = LearningRateScheduler(1,
                                             16,
                                             epochs,
                                             num_warmup_steps=num_warmup_steps)
            expected_slope = (1 - init_lr * 0.5) / num_warmup_steps
            assert callback.slope == expected_slope

            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=2,
                      verbose=0)

            # Two epochs of 2 steps each (train_samples=10, batch_size=5) give a total of 4 steps.
            expected_lr = init_lr * 0.5 + expected_slope * 3
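            # Warmup arithmetic (an assumption about this scheduler's step
            # indexing): the learning rate starts at init_lr * 0.5 and gains
            # expected_slope per step, so after the 4 warmup steps above the
            # slope has been applied 3 times.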
            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                expected_lr)

            callback = LearningRateScheduler(1,
                                             16,
                                             epochs,
                                             num_warmup_steps=num_warmup_steps,
                                             use_warmup=False)
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=2,
                      verbose=0)
            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                0.01)
Example #59
    def testNumericEquivalenceForNesterovMomentum(self):
        np.random.seed(1331)
        with self.cached_session():
            train_samples = 20
            input_dim = 3
            num_classes = 2
            (x, y), _ = testing_utils.get_test_data(
                train_samples=train_samples,
                test_samples=10,
                input_shape=(input_dim,),
                num_classes=num_classes)
            y = keras.utils.to_categorical(y)

            num_hidden = 5
            model_k_v1 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_k_v2 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_k_v2.set_weights(model_k_v1.get_weights())
            model_tf = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_tf.set_weights(model_k_v2.get_weights())

            opt_k_v1 = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
            opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
            opt_tf = momentum.MomentumOptimizer(learning_rate=0.001,
                                                momentum=0.9,
                                                use_nesterov=True)

            model_k_v1.compile(opt_k_v1,
                               loss='categorical_crossentropy',
                               metrics=[])
            model_k_v2.compile(opt_k_v2,
                               loss='categorical_crossentropy',
                               metrics=[])
            model_tf.compile(opt_tf,
                             loss='categorical_crossentropy',
                             metrics=[])

            hist_k_v1 = model_k_v1.fit(x,
                                       y,
                                       batch_size=5,
                                       epochs=10,
                                       shuffle=False)
            hist_k_v2 = model_k_v2.fit(x,
                                       y,
                                       batch_size=5,
                                       epochs=10,
                                       shuffle=False)
            hist_tf = model_tf.fit(x,
                                   y,
                                   batch_size=5,
                                   epochs=10,
                                   shuffle=False)

            self.assertAllClose(model_k_v1.get_weights(),
                                model_tf.get_weights())
            self.assertAllClose(model_k_v1.get_weights(),
                                model_k_v2.get_weights())
            self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
            self.assertAllClose(hist_k_v1.history['loss'],
                                hist_tf.history['loss'])
            self.assertAllClose(hist_k_v1.history['loss'],
                                hist_k_v2.history['loss'])
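
As a reference for why the three optimizers above should agree numerically, here is one common formulation of the Nesterov momentum update; this is a standard textbook sketch, not code taken from any of the three implementations:

    import numpy as np

    def nesterov_step(theta, velocity, grad, lr=0.001, momentum=0.9):
        # Update the velocity, then apply the Nesterov 'lookahead' step.
        velocity = momentum * velocity - lr * grad
        theta = theta + momentum * velocity - lr * grad
        return theta, velocity

    theta, velocity = np.ones(3), np.zeros(3)
    theta, velocity = nesterov_step(theta, velocity, grad=np.full(3, 0.1))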