Example #1
  def testNumericEquivalenceForNesterovMomentum(self):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with testing_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      model_tf = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_tf.set_weights(model_k_v2.get_weights())

      opt_k_v1 = optimizer_v1.SGD(momentum=0.9, nesterov=True)
      opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
      opt_tf = tf.compat.v1.train.MomentumOptimizer(
          learning_rate=0.01, momentum=0.9, use_nesterov=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_tf.compile(
          opt_tf,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example #2
 def get_model():
     # `deferred` is a flag supplied by the enclosing test: when True the MLP
     # is built without an input_dim, so weight creation is deferred until the
     # model first sees data.
     if deferred:
         model = testing_utils.get_small_sequential_mlp(10, 4)
     else:
         model = testing_utils.get_small_sequential_mlp(10,
                                                        4,
                                                        input_dim=3)
     model.compile(optimizer='rmsprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
     return model
Example #3
    def test_sequential_nesting(self):
        model = testing_utils.get_small_sequential_mlp(4, 3)
        inner_model = testing_utils.get_small_sequential_mlp(4, 5)
        model.add(inner_model)

        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())
        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #4
    def test_sequential_build_deferred(self):
        model = testing_utils.get_small_sequential_mlp(4, 5)

        model.build((None, 10))
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 4)

        # Test with nested model
        model = testing_utils.get_small_sequential_mlp(4, 3)
        inner_model = testing_utils.get_small_sequential_mlp(4, 5)
        model.add(inner_model)

        model.build((None, 10))
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 8)
Example #5
    def test_Tensorboard_eager(self):
        temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES)
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
                                                       num_classes=NUM_CLASSES,
                                                       input_dim=INPUT_DIM)
        model.compile(loss='binary_crossentropy',
                      optimizer=tf.compat.v1.train.AdamOptimizer(0.01),
                      metrics=['accuracy'])

        cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]

        model.fit(x_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  validation_data=(x_test, y_test),
                  callbacks=cbks,
                  epochs=2,
                  verbose=0)

        self.assertTrue(os.path.exists(temp_dir))
Example #6
    def test_sequential_pop(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes,
                                                       input_dim)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())
        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.fit(x, y, epochs=1)
        model.pop()
        self.assertEqual(len(model.layers), 1)
        self.assertEqual(model.output_shape, (None, num_hidden))
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())
        y = np.random.random((batch_size, num_hidden))
        model.fit(x, y, epochs=1)

        # Test popping single-layer model
        model = keras.models.Sequential()
        model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
        model.pop()
        self.assertEqual(model.layers, [])
        self.assertEqual(model.outputs, None)

        # Invalid use case
        model = keras.models.Sequential()
        with self.assertRaises(TypeError):
            model.pop()
Example #7
  def testOptimizerWithCallableVarList(self):
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = np_utils.to_categorical(y)

    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()

    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights

    with self.assertRaisesRegex(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not tf.executing_eagerly():
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
      self.evaluate(train_op)
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
Example #8
    def test_sequential_deferred_build_serialization(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.train_on_batch(x, y)
        self.assertTrue(model.built)

        config = model.get_config()
        new_model = keras.models.Sequential.from_config(config)
        new_model.compile(loss='mse',
                          optimizer='rmsprop',
                          metrics=[keras.metrics.CategoricalAccuracy()],
                          run_eagerly=testing_utils.should_run_eagerly())
        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        new_model.train_on_batch(x, y)
        self.assertEqual(len(new_model.layers), 2)
        self.assertEqual(len(new_model.weights), 4)
Example #9
    def test_sequential_deferred_build_with_dataset_iterators(self):
        num_hidden = 5
        input_dim = 3
        num_classes = 2
        num_samples = 50
        steps_per_epoch = 10

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertEqual(len(model.layers), 2)
        with self.assertRaisesRegex(
                ValueError, 'Weights for model .* have not yet been created'):
            len(model.weights)
        self.assertFalse(model.built)

        x = tf.ones((num_samples, input_dim))
        y = tf.zeros((num_samples, num_classes))
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=steps_per_epoch)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2 * 2)
Example #10
  def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with testing_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v1.compile(
          opt_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_v1.fit(x, y, batch_size=5, epochs=1)

      model_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v2.set_weights(model_v1.get_weights())
      model_v2.compile(
          opt_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      if not tf.compat.v1.executing_eagerly_outside_functions():
        model_v2._make_train_function()
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())

      hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
                          rtol=1e-5, atol=1e-5)
      self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
                          rtol=1e-5, atol=1e-5)
Example #11
 def test_sequential_deferred_manual_build(self):
     model = testing_utils.get_small_sequential_mlp(4, 5)
     self.assertFalse(model.built)
     model(tf.zeros([1, 2]))
     self.assertTrue(model.built)
     model.compile('rmsprop',
                   loss='mse',
                   run_eagerly=testing_utils.should_run_eagerly())
     model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
Example #12
    def test_build_before_fit(self):
        # Fix for b/112433577
        model = testing_utils.get_small_sequential_mlp(4, 5)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())

        model.build((None, 6))

        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #13
  def test_specify_input_signature(self):
    model = testing_utils.get_small_sequential_mlp(10, 3, None)
    inputs = tf.ones((8, 5))

    with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
      saving_utils.trace_model_call(model)

    fn = saving_utils.trace_model_call(
        model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)])
    signature_outputs = fn(inputs)
    if model.output_names:
      expected_outputs = {model.output_names[0]: model(inputs)}
    else:
      expected_outputs = {'output_1': model(inputs)}
    self._assert_all_close(expected_outputs, signature_outputs)
Example #14
    def test_Tensorboard_histogram_summaries_with_generator(self):
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        def generator():
            x = np.random.randn(10, 100).astype(np.float32)
            y = np.random.randn(10, 10).astype(np.float32)
            while True:
                yield x, y

        with tf.Graph().as_default(), self.cached_session():
            model = testing_utils.get_small_sequential_mlp(num_hidden=10,
                                                           num_classes=10,
                                                           input_dim=100)
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            tsb = callbacks_v1.TensorBoard(log_dir=tmpdir,
                                           histogram_freq=1,
                                           write_images=True,
                                           write_grads=True,
                                           batch_size=5)
            cbks = [tsb]

            # fit with validation generator
            model.fit_generator(generator(),
                                steps_per_epoch=2,
                                epochs=2,
                                validation_data=generator(),
                                validation_steps=2,
                                callbacks=cbks,
                                verbose=0)

            with self.assertRaises(ValueError):
                # fit with validation generator but no
                # validation_steps
                model.fit_generator(generator(),
                                    steps_per_epoch=2,
                                    epochs=2,
                                    validation_data=generator(),
                                    callbacks=cbks,
                                    verbose=0)

            self.assertTrue(os.path.exists(tmpdir))
Example #15
 def test_clone_optimizer_in_different_graph(self):
   with tf.Graph().as_default():
     with self.session():
       model = testing_utils.get_small_sequential_mlp(3, 4)
       optimizer = keras.optimizer_v2.adam.Adam()
       model.compile(
           optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy],
           )
       model.fit(
           x=np.array([[1., 2., 3., 4.]]),
           y=np.array([[1., 1., 1., 1.]]),
           epochs=1)
       optimizer_config = optimizer.get_config()
   with tf.Graph().as_default():
     with self.session():
       with self.assertRaisesRegex(ValueError, 'Cannot use the given session'):
         models.clone_and_build_model(model, compile_clone=True)
       # The optimizer_config object allows the model to be cloned in a
       # different graph.
       models.clone_and_build_model(model, compile_clone=True,
                                    optimizer_config=optimizer_config)
Example #16
    def test_load_compiled_metrics(self):
        model = testing_utils.get_small_sequential_mlp(1, 3)

        # Compile with dense categorical accuracy
        model.compile('rmsprop', 'mse', 'acc')
        x = np.random.random((5, 10)).astype(np.float32)
        y_true = np.random.random((5, 3)).astype(np.float32)
        model.train_on_batch(x, y_true)

        model.save(self.path, include_optimizer=True, save_format='tf')
        revived = keras_load.load(self.path, compile=True)
        self.assertAllClose(model.test_on_batch(x, y_true),
                            revived.test_on_batch(x, y_true))

        # Compile with sparse categorical accuracy
        model.compile('rmsprop', 'mse', 'acc')
        y_true = np.random.randint(0, 3, (5, 1)).astype(np.float32)
        model.train_on_batch(x, y_true)
        model.save(self.path, include_optimizer=True, save_format='tf')
        revived = keras_load.load(self.path, compile=True)
        self.assertAllClose(model.test_on_batch(x, y_true),
                            revived.test_on_batch(x, y_true))
Example #17
    def test_sequential_deferred_build_with_np_arrays(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertEqual(len(model.layers), 2)
        with self.assertRaisesRegex(
                ValueError, 'Weights for model .* have not yet been created'):
            len(model.weights)
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.fit(x, y, epochs=1)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2 * 2)
Example #18
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            model = testing_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM)
            model.compile(loss='binary_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            cbks = [
                callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.5,
                                            patience=4,
                                            verbose=1),
                callbacks_v1.TensorBoard(log_dir=temp_dir)
            ]

            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            assert os.path.exists(temp_dir)
Example #19
 def test_sequential_shape_inference_deferred(self):
     model = testing_utils.get_small_sequential_mlp(4, 5)
     output_shape = model.compute_output_shape((None, 7))
     self.assertEqual(tuple(output_shape.as_list()), (None, 5))
Example #20
File: save_test.py  Project: ohsdba/keras
 def setUp(self):
   super(TestSaveModel, self).setUp()
   self.model = testing_utils.get_small_sequential_mlp(1, 2, 3)
   self.subclassed_model = testing_utils.get_small_subclass_mlp(1, 2)
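
All of the examples above rely on the testing_utils.get_small_sequential_mlp helper from Keras's internal testing utilities. The exact implementation is not shown here; the sketch below is only an approximation inferred from the call sites above (the argument order num_hidden, num_classes, input_dim=None matches how the examples invoke it, but the layer activations are assumptions).

# Hedged sketch of the helper used throughout these examples.
# The activations ('relu', 'softmax') are assumptions; the two-Dense-layer
# structure and the optional input_dim are inferred from the tests above.
from tensorflow import keras


def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
    model = keras.Sequential()
    if input_dim is not None:
        # Fixed input shape: weights are created immediately at build time.
        model.add(keras.layers.Dense(num_hidden, activation='relu',
                                     input_dim=input_dim))
    else:
        # Deferred build: weights are created on the first call/fit,
        # which is what the "deferred" tests above exercise.
        model.add(keras.layers.Dense(num_hidden, activation='relu'))
    model.add(keras.layers.Dense(num_classes, activation='softmax'))
    return model

With either form, the model exposes four weight tensors once built (kernel and bias for each Dense layer), which is why several tests assert len(model.weights) == 4 per two-layer MLP.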