  def test_EarlyStopping_with_baseline(self):
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

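      # With the default patience of 0, training should stop after the first
      # epoch because accuracy does not improve past the 0.5 baseline.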
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

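      # With patience=3, at least `patience` epochs run before early stopping
      # can trigger.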
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_TensorBoard_with_ReduceLROnPlateau(self):
    with self.cached_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

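      # Using ReduceLROnPlateau and TensorBoard together should not raise; the
      # test only checks that training completes and the log dir exists.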
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          keras.callbacks.TensorBoard(log_dir=temp_dir)
      ]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      assert os.path.exists(temp_dir)
  def test_vector_classification_shared_model(self):
    # Test that functional models that feature internal updates
    # and internal losses can be shared.
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(10,),
          num_classes=2)
      y_train = keras.utils.to_categorical(y_train)

      inputs = keras.layers.Input(x_train.shape[1:])
      x = keras.layers.Dense(16,
                             activation='relu',
                             kernel_regularizer=keras.regularizers.l2(1e-5),
                             bias_regularizer=keras.regularizers.l2(1e-5),
                             input_shape=x_train.shape[1:])(inputs)
      x = keras.layers.BatchNormalization()(x)
      base_model = keras.models.Model(inputs, x)

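      # Reuse the shared base model (which carries BatchNormalization updates
      # and L2 regularization losses) inside a second functional model.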
      x = keras.layers.Input(x_train.shape[1:])
      y = base_model(x)
      y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
      model = keras.models.Model(x, y)
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.Adam(lr=0.1),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
  def test_temporal_sample_weights(self):
    num_classes = 5
    weighted_class = 3
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    timesteps = 3

    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(num_classes),
            input_shape=(timesteps, input_dim)))
    model.add(keras.layers.Activation('softmax'))

    np.random.seed(1337)
    (_, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_train = y_train.copy()
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)

    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 2.

    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 2.
    with self.assertRaises(ValueError):
      model.compile(
          loss='binary_crossentropy',
          optimizer=RMSPropOptimizer(learning_rate=0.001),
          sample_weight_mode='temporal')
  def test_invalid_loss_or_metrics(self):
    num_classes = 5
    train_samples = 1000
    test_samples = 1000
    input_dim = 5

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(1337)

    (x_train, y_train), (_, _) = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)

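    # Targets with twice as many columns as the model's output are rejected.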
    with self.assertRaises(ValueError):
      model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))

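    # `set(0)` itself raises TypeError ('int' object is not iterable).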
    with self.assertRaises(TypeError):
      model.compile(loss='categorical_crossentropy',
                    optimizer=RMSPropOptimizer(learning_rate=0.001),
                    metrics=set(0))

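    # Invalid compile arguments: no loss and an unrecognized optimizer name.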
    with self.assertRaises(ValueError):
      model.compile(loss=None,
                    optimizer='rms')
  def test_RemoteMonitorWithJsonPayload(self):
    if requests is None:
      self.skipTest('`requests` required to run this test')
    with self.test_session():
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.np_utils.to_categorical(y_test)
      y_train = keras.utils.np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]

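      # Patch `requests.post` so the monitor does not make real HTTP calls.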
      with test.mock.patch.object(requests, 'post'):
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1)
  def test_Tensorboard_eager(self):
    with self.test_session():
      temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='binary_crossentropy',
          optimizer=adam.AdamOptimizer(0.01),
          metrics=['accuracy'])

      cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      self.assertTrue(os.path.exists(temp_dir))
  def test_TF_LearningRateScheduler_GradientDescent(self):
    with self.test_session():
      with context.eager_mode():
        np.random.seed(1337)
        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM,),
            num_classes=NUM_CLASSES)
        y_test = keras.utils.to_categorical(y_test)
        y_train = keras.utils.to_categorical(y_train)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
        model.compile(
            loss='categorical_crossentropy',
            optimizer=GradientDescentOptimizer(1e-3),
            metrics=['accuracy'])
        # Schedule the learning rate as 1 / (1 + epoch).
        cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
        # After 5 epochs the scheduled rate is 1 / (1 + 4) = 0.2; check that
        # the wrapped TF optimizer picked it up.
        opt_lr = model.optimizer.optimizer._learning_rate
        self.assertLess(
            float(keras.backend.get_value(
                Variable(opt_lr))) - 0.2, keras.backend.epsilon())
  def test_EarlyStopping_with_baseline(self):
    with self.test_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])

      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_TerminateOnNaN(self):
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')

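      # The huge constant initializer drives the loss to inf on the first
      # epoch, so TerminateOnNaN should stop training immediately.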
      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      assert len(loss) == 1
      assert loss[0] == np.inf
  def test_invalid_ionames_error(self):
    (x_train, y_train), (_, _) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=100,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    def invalid_input_name_input_fn():
      input_dict = {'invalid_input_name': x_train}
      return input_dict, y_train

    def invalid_output_name_input_fn():
      input_dict = {'input_1': x_train}
      output_dict = {'invalid_output_name': y_train}
      return input_dict, output_dict

    model = simple_functional_model()
    model.compile(
        loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    with self.test_session():
      est_keras = keras_lib.model_to_estimator(
          keras_model=model, config=self._config)

    with self.test_session():
      with self.assertRaises(ValueError):
        est_keras.train(input_fn=invalid_input_name_input_fn, steps=100)

      with self.assertRaises(ValueError):
        est_keras.train(input_fn=invalid_output_name_input_fn, steps=100)
  def testKerasAndTFRNNLayerOutputComparison(self):
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 20
    (x_train, _), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
    fix_weights_generator.build((None, input_shape))
    weights = fix_weights_generator.get_weights()

    with self.session(graph=ops_lib.Graph()) as sess:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      cell = keras.layers.SimpleRNNCell(output_shape)
      tf_out, tf_state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      cell.set_weights(weights)
      [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
    with self.session(graph=ops_lib.Graph()) as sess:
      k_input = keras.Input(shape=(timestep, input_shape),
                            dtype=dtypes.float32)
      cell = keras.layers.SimpleRNNCell(output_shape)
      layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
      keras_out = layer(k_input)
      cell.set_weights(weights)
      k_out, k_state = sess.run(keras_out, {k_input: x_train})
    self.assertAllClose(tf_out, k_out)
    self.assertAllClose(tf_state, k_state)
  def test_keras_model_with_lstm(self):
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)

    K.set_session(session.Session(config=self.config))
    layer = UnifiedLSTM(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs, unused_runtime = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
  def test_image_classification_sequential(self):
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(12, 12, 3),
          num_classes=2)
      y_train = keras.utils.to_categorical(y_train)

      model = keras.models.Sequential()
      model.add(keras.layers.Conv2D(
          4, 3,
          padding='same',
          activation='relu',
          input_shape=x_train.shape[1:]))
      model.add(keras.layers.Conv2D(
          8, 3,
          padding='same',
          activation='relu'))
      model.add(keras.layers.Conv2D(
          16, 3,
          padding='same',
          activation='relu'))
      model.add(keras.layers.Flatten())
      model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
  def test_keras_model_with_gru(self):
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)

    layer = keras.layers.UnifiedGRU(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
def _test_optimizer(optimizer, target=0.75):
  np.random.seed(1337)
  (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000,
                                                      test_samples=200,
                                                      input_shape=(10,),
                                                      num_classes=2)
  y_train = keras.utils.to_categorical(y_train)
  model = _get_model(x_train.shape[1], 20, y_train.shape[1])
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
  assert history.history['acc'][-1] >= target
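  # Round-trip the optimizer config through serialize/deserialize and compare.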
  config = keras.optimizers.serialize(optimizer)
  optim = keras.optimizers.deserialize(config)
  new_config = keras.optimizers.serialize(optim)
  new_config['class_name'] = new_config['class_name'].lower()
  assert config == new_config

  # Test constraints.
  model = keras.models.Sequential()
  dense = keras.layers.Dense(10,
                             input_shape=(x_train.shape[1],),
                             kernel_constraint=lambda x: 0. * x + 1.,
                             bias_constraint=lambda x: 0. * x + 2.,
                             activation='relu')
  model.add(dense)
  model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  model.train_on_batch(x_train[:10], y_train[:10])
  kernel, bias = dense.get_weights()
  np.testing.assert_allclose(kernel, 1., atol=1e-3)
  np.testing.assert_allclose(bias, 2., atol=1e-3)
  def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v1.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
      model_v1.fit(x, y, batch_size=5, epochs=1)

      model_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v2.set_weights(model_v1.get_weights())
      model_v2.compile(opt_v2, loss='categorical_crossentropy', metrics=[])
      model_v2._make_train_function()
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())

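      # Train both models one more epoch without shuffling and check that
      # weights and losses stay numerically close.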
      hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
                          rtol=1e-5, atol=1e-5)
      self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
                          rtol=1e-5, atol=1e-5)
  def testOptimizerWithCallableVarList(self):
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = keras.utils.to_categorical(y)

    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()

    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights

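    # The model's variables do not exist until it is built, so calling
    # var_list() before minimize() should fail.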
    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
      self.evaluate(train_op)
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
  def test_timeseries_classification_sequential_tf_rnn(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(4, 10),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    model = keras.models.Sequential()
    model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
                               input_shape=x_train.shape[1:]))
    model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],
                                                activation='softmax',
                                                dtype=dtypes.float32)))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(x_train, y_train, epochs=15, batch_size=10,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(x_train, y_train)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(x_train)
    self.assertEqual(predictions.shape, (x_train.shape[0], 2))
  def testNumericEquivalenceForAmsgrad(self):
    np.random.seed(1331)
    with self.cached_session():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = keras.utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())

      opt_k_v1 = optimizers.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)

      model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[])
      model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[])

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
  def test_serialization_v2_model(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    model = keras.Sequential([
        keras.layers.Flatten(input_shape=x_train.shape[1:]),
        keras.layers.Dense(10, activation=nn.relu),
        # To mimic 'tf.nn.softmax' used in TF 2.x.
        keras.layers.Dense(y_train.shape[-1], activation=nn.softmax_v2),
    ])

    # Check if 'softmax' is in model.get_config().
    last_layer_activation = model.get_layer(index=2).get_config()['activation']
    self.assertEqual(last_layer_activation, 'softmax')

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizer_v2.adam.Adam(0.005),
                  metrics=['accuracy'],
                  run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x_train, y_train, epochs=2, batch_size=10,
              validation_data=(x_train, y_train),
              verbose=2)

    output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
    keras.saving.saved_model.export_saved_model(model, output_path)
    loaded_model = keras.saving.saved_model.load_from_saved_model(output_path)
    self.assertEqual(model.summary(), loaded_model.summary())
  def test_vector_classification(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    model = testing_utils.get_model_from_layers(
        [keras.layers.Dense(16, activation='relu'),
         keras.layers.Dropout(0.1),
         keras.layers.Dense(y_train.shape[-1], activation='softmax')],
        input_shape=x_train.shape[1:])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(x_train, y_train, epochs=10, batch_size=10,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(x_train, y_train)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(x_train)
    self.assertEqual(predictions.shape, (x_train.shape[0], 2))
  def testRNNWithKerasGRUCell(self):
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.GRUCell(output_shape)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))

      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)

      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})

      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
  def test_video_classification_functional(self):
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(4, 8, 8, 3),
          num_classes=3)
      y_train = keras.utils.to_categorical(y_train)

      inputs = keras.layers.Input(shape=x_train.shape[1:])
      x = keras.layers.TimeDistributed(
          keras.layers.Conv2D(4, 3, activation='relu'))(inputs)
      x = keras.layers.BatchNormalization()(x)
      x = keras.layers.TimeDistributed(keras.layers.GlobalMaxPooling2D())(x)
      x = keras.layers.Conv1D(8, 3, activation='relu')(x)
      x = keras.layers.Flatten()(x)
      outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)

      model = keras.models.Model(inputs, outputs)
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
  def test_multi_inputs_multi_outputs(self):
    np.random.seed(_RANDOM_SEED)
    (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=(16,),
        num_classes=3)
    np.random.seed(_RANDOM_SEED)
    (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=(16,),
        num_classes=2)
    np.random.seed(_RANDOM_SEED)
    (input_m_train, _), (input_m_test, _) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=(8,),
        num_classes=2)

    c_train = keras.utils.to_categorical(c_train)
    c_test = keras.utils.to_categorical(c_test)
    d_train = keras.utils.to_categorical(d_train)
    d_test = keras.utils.to_categorical(d_test)

    def train_input_fn():
      input_dict = {'input_a': a_train, 'input_b': b_train,
                    'input_m': input_m_train > 0}
      output_dict = {'dense_2': c_train, 'dense_3': d_train}
      return input_dict, output_dict

    def eval_input_fn():
      input_dict = {'input_a': a_test, 'input_b': b_test,
                    'input_m': input_m_test > 0}
      output_dict = {'dense_2': c_test, 'dense_3': d_test}
      return input_dict, output_dict

    with self.test_session():
      model = multi_inputs_multi_outputs_model()
      est_keras = keras_lib.model_to_estimator(
          keras_model=model, config=self._config)
      before_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
      after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
  def get_data(self):
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=10,
        test_samples=10,
        input_shape=(DATA_DIM,),
        num_classes=NUM_CLASSES)
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
    return (x_train, y_train), (x_test, y_test)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.test_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      def data_generator():
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index

      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])

      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # On Windows, \r\n line endings may produce an empty line after
          # each row; skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_unifiedRNN_with_cond(self):
    # This test is to demonstrate the graph rewrite of grappler plugin under
    # the condition that the function returns different number of internal
    # states.
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 1

    with self.cached_session(config=self.config, use_gpu=True) as sess:
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train, output_shape)

      layer = UnifiedLSTM(rnn_state_size)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape), name='predict')

      zeros = array_ops.zeros([batch, output_shape])
      dummy_runtime = constant_op.constant(
          'unknown', dtype=dtypes.string, name='runtime')
      a = constant_op.constant(0)
      b = constant_op.constant(1)
      # Will always run the lstm layer.
      outputs, runtime = control_flow_ops.cond(
          gen_math_ops.less(a, b),
          lambda: layer(inputs),
          lambda: (zeros, dummy_runtime))
      loss = losses.softmax_cross_entropy(predict, outputs)
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      train_op = optimizer.minimize(loss)

      sess.run([variables.global_variables_initializer()])
      existing_loss = 0

      for _ in range(epoch):
        loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
            inputs: x_train,
            predict: y_train
        })
        if test.is_gpu_available():
          self.assertEqual(runtime_value, b'cudnn')
        else:
          self.assertEqual(runtime_value, b'cpu')
        # Make sure the loss is updated for every epoch
        # (layer weights properly updated).
        self.assertNotEqual(existing_loss, loss_value)
        existing_loss = loss_value
  def _test_optimizer(self, optimizer, target=0.75):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    model = _get_model(x_train.shape[1], 20, y_train.shape[1])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['accuracy'])
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations), 0)
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations),
        126)  # 63 steps per epoch
    self.assertGreaterEqual(history.history['acc'][-1], target)
    config = keras.optimizers.serialize(optimizer)
    optim = keras.optimizers.deserialize(config)
    new_config = keras.optimizers.serialize(optim)
    new_config['class_name'] = new_config['class_name'].lower()
    new_config['config'].pop('name', None)
    if 'amsgrad' not in config['config']:
      new_config['config'].pop('amsgrad', None)
    if 'decay' in new_config['config'] and 'schedule_decay' in config['config']:
      new_config['config']['schedule_decay'] = new_config['config'].pop('decay')
    if 'momentum' not in config['config']:
      new_config['config'].pop('momentum', None)
    if 'centered' not in config['config']:
      new_config['config'].pop('centered', None)
    self.assertDictEqual(config, new_config)

    # Test constraints.
    model = keras.models.Sequential()
    dense = keras.layers.Dense(
        10,
        input_shape=(x_train.shape[1],),
        kernel_constraint=lambda x: 0. * x + 1.,
        bias_constraint=lambda x: 0. * x + 2.,
        activation='relu')
    model.add(dense)
    model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['accuracy'])
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations),
        126)  # Using same optimizer from before
    model.train_on_batch(x_train[:10], y_train[:10])
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations), 127)
    kernel, bias = dense.get_weights()
    np.testing.assert_allclose(kernel, 1., atol=1e-3)
    np.testing.assert_allclose(bias, 2., atol=1e-3)
  def test_sample_weights(self):
    num_classes = 5
    batch_size = 5
    weighted_class = 3
    train_samples = 300
    test_samples = 300
    input_dim = 5

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))

    np.random.seed(43)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_train = y_train.copy()
    y_train = keras.utils.to_categorical(y_train, num_classes)

    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 4.

    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 4.

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        sample_weight=sample_weight)
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        sample_weight=sample_weight,
        validation_split=0.1)
    model.train_on_batch(
        x_train[:batch_size],
        y_train[:batch_size],
        sample_weight=sample_weight[:batch_size])
    model.test_on_batch(
        x_train[:batch_size],
        y_train[:batch_size],
        sample_weight=sample_weight[:batch_size])
    def test_stop_training_csv(self):
        # Test that using the CSVLogger callback with the TerminateOnNaN callback
        # does not result in invalid CSVs.
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        with self.cached_session():
            fp = os.path.join(tmpdir, 'test.csv')
            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)

            y_test = keras.utils.to_categorical(y_test)
            y_train = keras.utils.to_categorical(y_train)
            cbks = [
                keras.callbacks.TerminateOnNaN(),
                keras.callbacks.CSVLogger(fp)
            ]
            model = keras.models.Sequential()
            for _ in range(5):
                model.add(
                    keras.layers.Dense(2,
                                       input_dim=INPUT_DIM,
                                       activation='relu'))
            model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
            model.compile(loss='mean_squared_error', optimizer='rmsprop')

            def data_generator():
                i = 0
                max_batch_index = len(x_train) // BATCH_SIZE
                tot = 0
                while 1:
                    if tot > 3 * len(x_train):
                        yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                               np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
                    else:
                        yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                               y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
                    i += 1
                    tot += 1
                    i %= max_batch_index

            history = model.fit_generator(data_generator(),
                                          len(x_train) // BATCH_SIZE,
                                          validation_data=(x_test, y_test),
                                          callbacks=cbks,
                                          epochs=20)
            loss = history.history['loss']
            assert len(loss) > 1
            assert loss[-1] == np.inf or np.isnan(loss[-1])

            values = []
            with open(fp) as f:
                for x in csv.reader(f):
                    # On Windows, \r\n line endings may produce an empty line
                    # after each row; skip empty lines.
                    if x:
                        values.append(x)
            assert 'nan' in values[-1], 'The last epoch was not logged.'
    def test_ReduceLROnPlateau(self):
        with self.test_session():
            np.random.seed(1337)
            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = keras.utils.to_categorical(y_test)
            y_train = keras.utils.to_categorical(y_train)

            def make_model():
                np.random.seed(1337)
                model = keras.models.Sequential()
                model.add(
                    keras.layers.Dense(NUM_HIDDEN,
                                       input_dim=INPUT_DIM,
                                       activation='relu'))
                model.add(keras.layers.Dense(NUM_CLASSES,
                                             activation='softmax'))

                model.compile(loss='categorical_crossentropy',
                              optimizer=keras.optimizers.SGD(lr=0.1),
                              metrics=['accuracy'])
                return model

            model = make_model()
            # This should reduce the LR after the first epoch (due to the
            # high min_delta).
            cbks = [
                keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.1,
                                                  min_delta=10,
                                                  patience=1,
                                                  cooldown=5)
            ]
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=5,
                      verbose=0)
            self.assertAllClose(float(
                keras.backend.get_value(model.optimizer.lr)),
                                0.01,
                                atol=1e-4)

            model = make_model()
            cbks = [
                keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.1,
                                                  min_delta=0,
                                                  patience=1,
                                                  cooldown=5)
            ]
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=5,
                      verbose=2)
            self.assertAllClose(float(
                keras.backend.get_value(model.optimizer.lr)),
                                0.1,
                                atol=1e-4)
    def test_sequential_save_and_pop(self):
        # Test the following sequence of actions:
        # - construct a Sequential model and train it
        # - save it
        # - load it
        # - pop its last layer and add a new layer instead
        # - continue training
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)
        model = keras.Sequential([
            keras.layers.Dense(16, activation='relu'),
            keras.layers.Dropout(0.1),
            keras.layers.Dense(y_train.shape[-1], activation='softmax')
        ])
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.
                      should_run_tf_function())
        model.fit(x_train,
                  y_train,
                  epochs=1,
                  batch_size=10,
                  validation_data=(x_train, y_train),
                  verbose=2)
        model = self._save_and_reload_model(model)

        # TODO(b/134537740): model.pop doesn't update model outputs properly when
        # model.outputs is already defined, so just set to `None` for now.
        model.inputs = None
        model.outputs = None

        model.pop()
        model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))

        # TODO(b/134523282): There is a bug with Sequential models, so the model
        # must be marked as compiled=False to ensure the next compile goes through.
        model._is_compiled = False

        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.
                      should_run_tf_function())
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        model = self._save_and_reload_model(model)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
  def test_evaluate_multi_io_model(self):
    input_a = keras.layers.Input(shape=(16,), name='input_a')
    input_b = keras.layers.Input(shape=(16,), name='input_b')
    dense = keras.layers.Dense(8, name='dense_1')
    interm_a = dense(input_a)
    interm_b = dense(input_b)
    merged = keras.layers.concatenate([interm_a, interm_b], name='merge')
    output_a = keras.layers.Dense(
        3, activation='softmax', name='dense_2')(
            merged)
    output_b = keras.layers.Dense(
        2, activation='softmax', name='dense_3')(
            merged)
    keras_model = keras.models.Model(
        inputs=[input_a, input_b], outputs=[output_a, output_b])
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics={
            'dense_2': 'categorical_accuracy',
            'dense_3': 'categorical_accuracy'
        })

    np.random.seed(_RANDOM_SEED)
    (x_train_1, y_train_1), (x_test_1, y_test_1) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=(16,),
        num_classes=3)
    (x_train_2, y_train_2), (x_test_2, y_test_2) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=(16,),
        num_classes=2)
    y_train_1 = np_utils.to_categorical(y_train_1)
    y_test_1 = np_utils.to_categorical(y_test_1)
    y_train_2 = np_utils.to_categorical(y_train_2)
    y_test_2 = np_utils.to_categorical(y_test_2)

    keras_model.fit((x_train_1, x_train_2), (y_train_1, y_train_2), epochs=1)
    keras_eval = keras_model.evaluate((x_test_1, x_test_2),
                                      (y_test_1, y_test_2),
                                      batch_size=32)

    def input_fn():
      ds = tf.compat.v1.data.Dataset.from_tensor_slices(
          ((x_test_1, x_test_2), (y_test_1, y_test_2)))
      return ds.batch(128)

    keras_est = keras_lib.model_to_estimator(
        keras_model=keras_model, config=self._config)
    est_eval = keras_est.evaluate(input_fn=input_fn)

    def verify_correctness(metric_names):
      for i, metric_name in enumerate(metric_names):
        if i < 3:  # TODO(b/148461691): Investigate 1% diff in loss.
          continue
        self.assertAlmostEqual(
            keras_eval[i],
            est_eval[metric_name],
            places=4,
            msg='%s mismatch, keras model: %s, estimator: %s' %
            (metric_name, keras_eval[i], est_eval[metric_name]))

    verify_correctness([
        'loss', 'dense_2_loss', 'dense_3_loss', 'dense_2_categorical_accuracy',
        'dense_3_categorical_accuracy'
    ])

    metric_names_map = {
        'dense_2_categorical_accuracy': 'acc_1',
        'dense_3_categorical_accuracy': 'acc_2',
    }
    keras_est = keras_lib.model_to_estimator(
        keras_model=keras_model,
        config=self._config,
        metric_names_map=metric_names_map)
    est_eval = keras_est.evaluate(input_fn=input_fn)
    verify_correctness(
        ['loss', 'dense_2_loss', 'dense_3_loss', 'acc_1', 'acc_2'])
  def test_CSVLogger(self):
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')

      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks

      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      with open(filepath) as csvfile:
        output = ' '.join(csvfile.readlines())
        assert len(re.findall('epoch', output)) == 1

      os.remove(filepath)
  def test_ModelCheckpoint(self):
    if h5py is None:
      return  # Skip test if models cannot be saved.

    with self.cached_session():
      np.random.seed(1337)

      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      filepath = os.path.join(temp_dir, 'checkpoint.h5')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      # case 1
      monitor = 'val_loss'
      save_best_only = False
      mode = 'auto'

      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])

      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)

      # case 2
      mode = 'min'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)

      # case 3
      mode = 'max'
      monitor = 'val_acc'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)

      # case 4
      save_best_only = True
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)

      # Case: metric not available.
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor='unknown',
              save_best_only=True)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # File won't be written.
      assert not os.path.exists(filepath)

      # case 5
      save_best_only = False
      period = 2
      mode = 'auto'

      filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode,
              period=period)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=4,
          verbose=1)
      assert os.path.exists(filepath.format(epoch=2))
      assert os.path.exists(filepath.format(epoch=4))
      os.remove(filepath.format(epoch=2))
      os.remove(filepath.format(epoch=4))
      assert not os.path.exists(filepath.format(epoch=1))
      assert not os.path.exists(filepath.format(epoch=3))

      # Invalid use: this will raise a warning but not an Exception.
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode='unknown')
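
# A standalone sketch of the templated-filepath behavior from case 5 above:
# with period=2 the callback saves only every second epoch, filling in the
# '{epoch:02d}' placeholder at each save. Assumes h5py is installed.
import numpy as np
from tensorflow import keras

x = np.random.random((20, 4))
y = keras.utils.to_categorical(np.random.randint(0, 2, size=(20,)), 2)
model = keras.models.Sequential(
    [keras.layers.Dense(2, activation='softmax', input_shape=(4,))])
model.compile(loss='categorical_crossentropy', optimizer='sgd')

ckpt = keras.callbacks.ModelCheckpoint('ckpt.{epoch:02d}.h5', period=2)
model.fit(x, y, epochs=4, callbacks=[ckpt], verbose=0)
# ckpt.02.h5 and ckpt.04.h5 now exist; ckpt.01.h5 and ckpt.03.h5 do not.
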
  def test_TensorBoard(self):
    np.random.seed(1337)

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def data_generator(train):
      if train:
        max_batch_index = len(x_train) // BATCH_SIZE
      else:
        max_batch_index = len(x_test) // BATCH_SIZE
      i = 0
      while 1:
        if train:
          yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        else:
          yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        i += 1
        i %= max_batch_index

    # case: Sequential
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=temp_dir, histogram_freq=1, write_images=True,
          write_grads=True, batch_size=5)
      cbks = [tsb]

      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)

      # fit with validation data and accuracy
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      # fit generator with validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)

      # fit generator without validation data
      # histogram_freq must be zero
      tsb.histogram_freq = 0
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          callbacks=cbks,
          verbose=0)

      # fit generator with validation data and accuracy
      tsb.histogram_freq = 1
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)

      # fit generator without validation data and accuracy
      tsb.histogram_freq = 0
      model.fit_generator(
          data_generator(True), len(x_train), epochs=2, callbacks=cbks)
      assert os.path.exists(temp_dir)
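
# A minimal standalone sketch of the TensorBoard callback exercised above
# (TF-1.x era tf.keras arguments; note histogram_freq > 0 requires
# validation data, which the next test asserts).
import numpy as np
from tensorflow import keras

x = np.random.random((20, 4))
y = keras.utils.to_categorical(np.random.randint(0, 2, size=(20,)), 2)
model = keras.models.Sequential(
    [keras.layers.Dense(2, activation='softmax', input_shape=(4,))])
model.compile(loss='categorical_crossentropy', optimizer='sgd')

tb = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1,
                                 write_images=True)
model.fit(x, y, validation_data=(x, y), epochs=1, callbacks=[tb], verbose=0)
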
  def test_TensorBoard_histogram_freq_must_have_validation_data(self):
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.cached_session():
      filepath = os.path.join(tmpdir, 'logs')

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          else:
            yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          i %= max_batch_index

      inp = keras.Input((INPUT_DIM,))
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model(inputs=inp, outputs=output)
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(
            log_dir=filepath,
            histogram_freq=histogram_freq,
            write_images=True, write_grads=True,
            batch_size=5)]

      # fit w/o validation data should raise ValueError if histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit(
            x_train, y_train, batch_size=BATCH_SIZE, callbacks=cbs, epochs=3)

      for cb in cbs:
        cb.on_train_end()

      # fit generator without validation data should raise ValueError if
      # histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit_generator(
            data_generator(True), len(x_train), epochs=2, callbacks=cbs)

      for cb in cbs:
        cb.on_train_end()

      # Make sure file writer cache is clear to avoid failures during cleanup.
      writer_cache.FileWriterCache.clear()
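
# A standalone sketch of the failure mode asserted above: fitting with
# histogram_freq > 0 but no validation data raises a ValueError in this
# TF-1.x era tf.keras.
import numpy as np
from tensorflow import keras

x = np.random.random((20, 4))
y = keras.utils.to_categorical(np.random.randint(0, 2, size=(20,)), 2)
model = keras.models.Sequential(
    [keras.layers.Dense(2, activation='softmax', input_shape=(4,))])
model.compile(loss='categorical_crossentropy', optimizer='sgd')

tb = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1)
try:
  model.fit(x, y, epochs=1, callbacks=[tb], verbose=0)  # no validation_data
except ValueError:
  pass  # expected: histogram summaries need validation data
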
  def test_TensorBoard_multi_input_output(self):
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.cached_session():
      filepath = os.path.join(tmpdir, 'logs')

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          else:
            yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          i += 1
          i %= max_batch_index

      inp1 = keras.Input((INPUT_DIM,))
      inp2 = keras.Input((INPUT_DIM,))
      inp = keras.layers.add([inp1, inp2])
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model([inp1, inp2], [output1, output2])
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(log_dir=filepath,
                                            histogram_freq=histogram_freq,
                                            write_images=True, write_grads=True,
                                            batch_size=5)]

      # fit without validation data
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0), epochs=3)

      # fit with validation data and accuracy
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1), epochs=2)

      # fit generator without validation data
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          callbacks=callbacks_factory(histogram_freq=0))

      # fit generator with validation data and accuracy
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          validation_data=([x_test] * 2, [y_test] * 2),
                          callbacks=callbacks_factory(histogram_freq=1))
      assert os.path.isdir(filepath)
  def test_Tensorboard_histogram_summaries_in_test_function(self):

    class FileWriterStub(object):

      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
        self.steps_seen = []

      def add_summary(self, summary, global_step):
        summary_obj = summary_pb2.Summary()

        # ensure a valid Summary proto is being sent
        if isinstance(summary, bytes):
          summary_obj.ParseFromString(summary)
        else:
          assert isinstance(summary, summary_pb2.Summary)
          summary_obj = summary

        # keep track of steps seen for the merged_summary op,
        # which contains the histogram summaries
        if len(summary_obj.value) > 1:
          self.steps_seen.append(global_step)

      def flush(self):
        pass

      def close(self):
        pass

    def _init_writer(obj):
      obj.writer = FileWriterStub(obj.log_dir)

    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      keras.callbacks.TensorBoard._init_writer = _init_writer
      tsb = keras.callbacks.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]

      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)

      self.assertAllEqual(tsb.writer.steps_seen, [0, 0.5, 1, 1.5, 2, 2.5])
    def testNumericEquivalenceForNesterovMomentum(self):
        np.random.seed(1331)
        with self.cached_session():
            train_samples = 20
            input_dim = 3
            num_classes = 2
            (x,
             y), _ = testing_utils.get_test_data(train_samples=train_samples,
                                                 test_samples=10,
                                                 input_shape=(input_dim, ),
                                                 num_classes=num_classes)
            y = keras.utils.to_categorical(y)

            num_hidden = 5
            model_k_v1 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_k_v2 = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_k_v2.set_weights(model_k_v1.get_weights())
            model_tf = testing_utils.get_small_sequential_mlp(
                num_hidden=num_hidden,
                num_classes=num_classes,
                input_dim=input_dim)
            model_tf.set_weights(model_k_v2.get_weights())

            opt_k_v1 = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
            opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
            opt_tf = momentum.MomentumOptimizer(learning_rate=0.001,
                                                momentum=0.9,
                                                use_nesterov=True)

            model_k_v1.compile(opt_k_v1,
                               loss='categorical_crossentropy',
                               metrics=[])
            model_k_v2.compile(opt_k_v2,
                               loss='categorical_crossentropy',
                               metrics=[])
            model_tf.compile(opt_tf,
                             loss='categorical_crossentropy',
                             metrics=[])

            hist_k_v1 = model_k_v1.fit(x,
                                       y,
                                       batch_size=5,
                                       epochs=10,
                                       shuffle=False)
            hist_k_v2 = model_k_v2.fit(x,
                                       y,
                                       batch_size=5,
                                       epochs=10,
                                       shuffle=False)
            hist_tf = model_tf.fit(x,
                                   y,
                                   batch_size=5,
                                   epochs=10,
                                   shuffle=False)

            self.assertAllClose(model_k_v1.get_weights(),
                                model_tf.get_weights())
            self.assertAllClose(model_k_v1.get_weights(),
                                model_k_v2.get_weights())
            self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
            self.assertAllClose(hist_k_v1.history['loss'],
                                hist_tf.history['loss'])
            self.assertAllClose(hist_k_v1.history['loss'],
                                hist_k_v2.history['loss'])
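
# A numpy sketch of the Nesterov momentum update (Keras-v1 formulation) that
# the three optimizers above are expected to agree on numerically.
import numpy as np

lr, mu = 0.001, 0.9
w = np.array([1.0])      # parameter
v = np.zeros_like(w)     # velocity, initialized to zero
g = np.array([0.5])      # gradient for this step
v = mu * v - lr * g      # velocity update
w = w + mu * v - lr * g  # Nesterov lookahead parameter update
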
    def test_ReduceLROnPlateau(self):
        with self.cached_session():
            np.random.seed(1337)
            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = keras.utils.to_categorical(y_test)
            y_train = keras.utils.to_categorical(y_train)

            def make_model():
                random_seed.set_random_seed(1234)
                np.random.seed(1337)
                model = testing_utils.get_small_sequential_mlp(
                    num_hidden=NUM_HIDDEN,
                    num_classes=NUM_CLASSES,
                    input_dim=INPUT_DIM)
                model.compile(loss='categorical_crossentropy',
                              optimizer=keras.optimizers.SGD(lr=0.1))
                return model

            # TODO(psv): Make sure the callback works correctly when min_delta is
            # set as 0. Test fails when the order of this callback and assertion is
            # interchanged.
            model = make_model()
            cbks = [
                keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.1,
                                                  min_delta=0,
                                                  patience=1,
                                                  cooldown=5)
            ]
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=5,
                      verbose=0)
            self.assertAllClose(float(
                keras.backend.get_value(model.optimizer.lr)),
                                0.1,
                                atol=1e-4)

            model = make_model()
            # This should reduce the LR after the first epoch (due to the
            # large min_delta, no improvement ever registers).
            cbks = [
                keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.1,
                                                  min_delta=10,
                                                  patience=1,
                                                  cooldown=5)
            ]
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=5,
                      verbose=2)
            self.assertAllClose(float(
                keras.backend.get_value(model.optimizer.lr)),
                                0.01,
                                atol=1e-4)
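
# A standalone sketch driving ReduceLROnPlateau by hand, to make the reduction
# in the second case above visible without a full fit() loop: with
# min_delta=10 no epoch ever counts as an improvement, so the LR is multiplied
# by `factor` once `patience` epochs elapse.
from tensorflow import keras

model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(1,))])
model.compile(loss='mse', optimizer=keras.optimizers.SGD(lr=0.1))

cb = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                       min_delta=10, patience=1)
cb.set_model(model)
cb.on_train_begin()
cb.on_epoch_end(0, logs={'val_loss': 1.0})  # establishes the best value
cb.on_epoch_end(1, logs={'val_loss': 0.9})  # not a > min_delta improvement
print(keras.backend.get_value(model.optimizer.lr))  # ~0.01 after reduction
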
    def test_case_optimizer(self):
        np.random.seed(1337)
        (x_train,
         y_train), (x_test,
                    y_test) = testing_utils.get_test_data(train_samples=1000,
                                                          test_samples=0,
                                                          input_shape=(10, ),
                                                          num_classes=2)

        y_train = tf.keras.utils.to_categorical(y_train)

        model = lq_testing_utils.get_small_bnn_model(x_train.shape[1], 20,
                                                     y_train.shape[1])

        bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
        adam = tf.keras.optimizers.Adam(0.01)
        case_optimizer = lq.optimizers.CaseOptimizer(
            (lq.optimizers.Bop.is_binary_variable, bop),
            default_optimizer=adam,
        )

        model.compile(
            loss="categorical_crossentropy",
            optimizer=case_optimizer,
            metrics=["accuracy"],
        )

        def scheduler(x):
            return 1.0 / (1.0 + x)

        cbk_gamma_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[0],
            hyperparameter="gamma",
            verbose=1,
        )
        cbk_threshold_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[0],
            hyperparameter="threshold",
            verbose=1,
        )
        cbk_lr_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[1],
            hyperparameter="lr",
            verbose=1,
        )

        num_epochs = 3
        model.fit(
            x_train,
            y_train,
            epochs=num_epochs,
            batch_size=16,
            callbacks=[
                cbk_gamma_scheduler, cbk_lr_scheduler, cbk_threshold_scheduler
            ],
            verbose=0,
        )

        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.optimizers[0].gamma),
            scheduler(num_epochs - 1),
            decimal=8,
        )

        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(
                model.optimizer.optimizers[0].threshold),
            scheduler(num_epochs - 1),
            decimal=8,
        )

        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.optimizers[1].lr),
            scheduler(num_epochs - 1),
            decimal=8,
        )
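
# The assertions above reduce to simple arithmetic: after num_epochs = 3, each
# scheduled hyperparameter should equal scheduler(num_epochs - 1) =
# 1 / (1 + 2) = 1/3.
def scheduler(x):
    return 1.0 / (1.0 + x)

assert abs(scheduler(3 - 1) - 1.0 / 3.0) < 1e-12
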
    def test_time_major_and_go_backward(self, time_major, go_backwards):
        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 100

        x_train = np.random.random((batch, timestep, input_shape))

        def build_model(layer_cls):
            inputs = keras.layers.Input(shape=[timestep, input_shape],
                                        dtype=dtypes.float32)
            layer = layer_cls(rnn_state_size,
                              recurrent_activation='sigmoid',
                              time_major=time_major,
                              return_sequences=True,
                              go_backwards=go_backwards)
            if time_major:
                converted_input = keras.layers.Lambda(
                    lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
                outputs = layer(converted_input)
                outputs = keras.layers.Lambda(
                    lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
            else:
                outputs = layer(inputs)
            return keras.models.Model(inputs, outputs)

        lstm_model = build_model(rnn_v1.LSTM)
        y_ref = lstm_model.predict(x_train)
        weights = lstm_model.get_weights()

        lstm_v2_model = build_model(rnn.LSTM)
        lstm_v2_model.set_weights(weights)
        y = lstm_v2_model.predict(x_train)

        self.assertAllClose(y, y_ref)

        input_shape = 10
        rnn_state_size = 8
        output_shape = 8
        timestep = 4
        batch = 100
        epoch = 10

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=batch,
                                                   test_samples=0,
                                                   input_shape=(timestep,
                                                                input_shape),
                                                   num_classes=output_shape)
        y_train = np_utils.to_categorical(y_train, output_shape)

        layer = rnn.LSTM(rnn_state_size)

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=dtypes.float32)

        outputs = layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile('rmsprop', loss='mse')
        model.fit(x_train, y_train, epochs=epoch)
        model.evaluate(x_train, y_train)
        model.predict(x_train)
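
# A numpy sketch of the time-major conversion the Lambda layers above perform:
# (batch, timestep, features) <-> (timestep, batch, features) is a transpose
# of the first two axes.
import numpy as np

x = np.zeros((100, 4, 10))         # (batch, timestep, input_shape)
x_tm = np.transpose(x, [1, 0, 2])  # (timestep, batch, input_shape)
assert x_tm.shape == (4, 100, 10)
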
    def _test_optimizer(self, optimizer, target=0.75):
        if context.executing_eagerly():
            self.skipTest('v1 optimizer does not run in eager mode')
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000,
                                                            test_samples=200,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)
        model = _get_model(x_train.shape[1], 20, y_train.shape[1])
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 0)
        history = model.fit(x_train,
                            y_train,
                            epochs=2,
                            batch_size=16,
                            verbose=0)
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # 63 steps per epoch
        self.assertGreaterEqual(history.history['acc'][-1], target)
        config = keras.optimizers.serialize(optimizer)
        optim = keras.optimizers.deserialize(config)
        new_config = keras.optimizers.serialize(optim)
        new_config['class_name'] = new_config['class_name'].lower()
        new_config['config'].pop('name', None)
        if 'amsgrad' not in config['config']:
            new_config['config'].pop('amsgrad', None)
        if 'decay' in new_config['config'] and 'schedule_decay' in config[
                'config']:
            new_config['config']['schedule_decay'] = new_config['config'].pop(
                'decay')
        if 'momentum' not in config['config']:
            new_config['config'].pop('momentum', None)
        if 'centered' not in config['config']:
            new_config['config'].pop('centered', None)
        self.assertDictEqual(config, new_config)

        # Test constraints.
        model = keras.models.Sequential()
        dense = keras.layers.Dense(10,
                                   input_shape=(x_train.shape[1], ),
                                   kernel_constraint=lambda x: 0. * x + 1.,
                                   bias_constraint=lambda x: 0. * x + 2.,
                                   activation='relu')
        model.add(dense)
        model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'],
                      run_eagerly=testing_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # Using same optimizer from before
        model.train_on_batch(x_train[:10], y_train[:10])
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 127)
        kernel, bias = dense.get_weights()
        np.testing.assert_allclose(kernel, 1., atol=1e-3)
        np.testing.assert_allclose(bias, 2., atol=1e-3)
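
# A standalone sketch of the serialize/deserialize round trip checked above.
# Class-name casing and defaulted keys may differ across the round trip, which
# is why the test normalizes them before comparing the dicts.
from tensorflow import keras

config = keras.optimizers.serialize(keras.optimizers.SGD(lr=0.01))
optim = keras.optimizers.deserialize(config)
roundtrip = keras.optimizers.serialize(optim)
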
    def test_LearningRateScheduler(self):
        with self.cached_session():
            np.random.seed(1337)
            batch_size = 5
            num_classes = 2
            input_dim = 3

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=10,
                test_samples=10,
                input_shape=(input_dim, ),
                num_classes=num_classes)
            y_test = tf.keras.utils.to_categorical(y_test)
            y_train = tf.keras.utils.to_categorical(y_train)
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=5, num_classes=num_classes, input_dim=input_dim)

            epochs = [1, 2]
            callback = LearningRateScheduler(1, 1, epochs, num_warmup_steps=1)
            assert callback.slope == 1 - 1e-2 / 3

            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=4,
                      verbose=0)

            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                0.01)

            # Here the epoch-based scheduling won't apply because warmup
            # hasn't completed.
            num_warmup_steps = 16
            init_lr = 1e-2 / 3
            callback = LearningRateScheduler(1,
                                             16,
                                             epochs,
                                             num_warmup_steps=num_warmup_steps)
            expected_slope = (1 - init_lr * 0.5) / num_warmup_steps
            assert callback.slope == expected_slope

            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=2,
                      verbose=0)

            # The 2 epochs run a total of 4 steps (train_samples = 10 with
            # batch_size = 5).
            expected_lr = init_lr * 0.5 + expected_slope * 3
            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                expected_lr)

            callback = LearningRateScheduler(1,
                                             16,
                                             epochs,
                                             num_warmup_steps=num_warmup_steps,
                                             use_warmup=False)
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test),
                      callbacks=[callback],
                      epochs=2,
                      verbose=0)
            self.assertAllClose(tf.keras.backend.get_value(model.optimizer.lr),
                                0.01)
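
# The warmup arithmetic asserted above, standalone: with init_lr = 1e-2 / 3
# and num_warmup_steps = 16, two epochs of 2 steps each leave the LR three
# slope increments above the halved initial rate.
init_lr = 1e-2 / 3
num_warmup_steps = 16
expected_slope = (1 - init_lr * 0.5) / num_warmup_steps
expected_lr = init_lr * 0.5 + expected_slope * 3
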
    def test_class_weight_invalid_use_case(self):
        num_classes = 5
        train_samples = 1000
        test_samples = 1000
        input_dim = 5
        timesteps = 3

        model = keras.models.Sequential()
        model.add(
            keras.layers.TimeDistributed(keras.layers.Dense(num_classes),
                                         input_shape=(timesteps, input_dim)))
        model.add(keras.layers.Activation('softmax'))
        model.compile(loss='binary_crossentropy',
                      optimizer=RMSPropOptimizer(learning_rate=0.001))

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=train_samples,
                                                   test_samples=test_samples,
                                                   input_shape=(input_dim, ),
                                                   num_classes=num_classes)
        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)
        class_weight = dict([(i, 1.) for i in range(num_classes)])

        del class_weight[1]
        with self.assertRaises(ValueError):
            model.fit(x_train,
                      y_train,
                      epochs=0,
                      verbose=0,
                      class_weight=class_weight)

        with self.assertRaises(ValueError):
            model.compile(loss='binary_crossentropy',
                          optimizer=RMSPropOptimizer(learning_rate=0.001),
                          sample_weight_mode=[])

        # Build multi-output model
        x = keras.Input((3, ))
        y1 = keras.layers.Dense(4, name='1')(x)
        y2 = keras.layers.Dense(4, name='2')(x)
        model = keras.models.Model(x, [y1, y2])
        model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001),
                      loss='mse')
        x_np = np.random.random((10, 3))
        y_np = np.random.random((10, 4))
        w_np = np.random.random((10, ))
        # This will work
        model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
        # These will not
        with self.assertRaises(ValueError):
            model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
        with self.assertRaises(TypeError):
            model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
        with self.assertRaises(ValueError):
            bad_w_np = np.random.random((11, ))
            model.fit(x_np, [y_np, y_np],
                      epochs=1,
                      sample_weight={'1': bad_w_np})
        with self.assertRaises(ValueError):
            bad_w_np = np.random.random((10, 2))
            model.fit(x_np, [y_np, y_np],
                      epochs=1,
                      sample_weight={'1': bad_w_np})
        with self.assertRaises(ValueError):
            bad_w_np = np.random.random((10, 2, 2))
            model.fit(x_np, [y_np, y_np],
                      epochs=1,
                      sample_weight={'1': bad_w_np})
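
# A minimal sketch of the invalid class_weight case above: the dict must cover
# every class id present in the labels, so removing one entry makes fit()
# raise a ValueError.
num_classes = 5
class_weight = {i: 1.0 for i in range(num_classes)}
del class_weight[1]  # class 1 is now unweighted -> invalid
assert 1 not in class_weight
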
    def test_TensorBoard(self):
        np.random.seed(1337)

        temp_dir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, temp_dir)

        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES)
        y_test = keras.utils.to_categorical(y_test)
        y_train = keras.utils.to_categorical(y_train)

        def data_generator(train):
            if train:
                max_batch_index = len(x_train) // BATCH_SIZE
            else:
                max_batch_index = len(x_test) // BATCH_SIZE
            i = 0
            while 1:
                if train:
                    yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                           y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
                else:
                    yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                           y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
                i += 1
                i %= max_batch_index

        class DummyStatefulMetric(keras.layers.Layer):
            def __init__(self, name='dummy_stateful_metric', **kwargs):
                super(DummyStatefulMetric, self).__init__(name=name, **kwargs)
                self.stateful = True
                self.state = keras.backend.variable(value=0, dtype='int32')

            def reset_states(self):
                pass

            def __call__(self, y_true, y_pred):
                return self.state

        # case: Sequential
        with self.cached_session():
            model = keras.models.Sequential()
            model.add(
                keras.layers.Dense(NUM_HIDDEN,
                                   input_dim=INPUT_DIM,
                                   activation='relu'))
            # non_trainable_weights: moving_variance, moving_mean
            model.add(keras.layers.BatchNormalization())
            model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy',
                                   DummyStatefulMetric()])
            tsb = keras.callbacks.TensorBoard(log_dir=temp_dir,
                                              histogram_freq=1,
                                              write_images=True,
                                              write_grads=True,
                                              batch_size=5)
            cbks = [tsb]

            # fit with validation data
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=3,
                      verbose=0)

            # fit with validation data and accuracy
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            # fit generator with validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=(x_test, y_test),
                                callbacks=cbks,
                                verbose=0)

            # fit generator without validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=cbks,
                                verbose=0)

            # fit generator with validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=(x_test, y_test),
                                callbacks=cbks,
                                verbose=0)

            # fit generator without validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=cbks)
            assert os.path.exists(temp_dir)
    def test_class_weights(self):
        num_classes = 5
        batch_size = 5
        weighted_class = 3
        train_samples = 300
        test_samples = 300
        input_dim = 5

        model = keras.models.Sequential()
        model.add(keras.layers.Dense(10, input_shape=(input_dim, )))
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.Dense(num_classes))
        model.add(keras.layers.Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSPropOptimizer(learning_rate=0.001))

        np.random.seed(1337)
        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=train_samples,
            test_samples=test_samples,
            input_shape=(input_dim, ),
            num_classes=num_classes)
        int_y_test = y_test.copy()
        int_y_train = y_train.copy()
        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        test_ids = np.where(int_y_test == np.array(weighted_class))[0]

        class_weight = dict([(i, 1.) for i in range(num_classes)])
        class_weight[weighted_class] = 4.

        sample_weight = np.ones((y_train.shape[0]))
        sample_weight[int_y_train == weighted_class] = 4.

        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=2,
                  verbose=0,
                  class_weight=class_weight,
                  validation_data=(x_train, y_train, sample_weight))
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=2,
                  verbose=0,
                  class_weight=class_weight)
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=2,
                  verbose=0,
                  class_weight=class_weight,
                  validation_split=0.1)

        model.train_on_batch(x_train[:batch_size],
                             y_train[:batch_size],
                             class_weight=class_weight)
        ref_score = model.evaluate(x_test, y_test, verbose=0)
        score = model.evaluate(x_test[test_ids, :],
                               y_test[test_ids, :],
                               verbose=0)
        self.assertLess(score, ref_score)
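
# A numpy sketch of the equivalence the test above relies on:
# class_weight[c] = 4. weights every sample of class c by 4., exactly the
# sample_weight array constructed in parallel.
import numpy as np

int_y = np.array([0, 3, 3, 1])
sample_weight = np.ones(len(int_y))
sample_weight[int_y == 3] = 4.
assert sample_weight.tolist() == [1., 4., 4., 1.]
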
  def test_unifiedRNN_with_cond(self):
    # This test demonstrates the grappler plugin's graph rewrite when the two
    # branches of a cond return different numbers of internal states.
    rewrites = rewriter_config_pb2.RewriterConfig()
    rewrites.function_optimization = rewriter_config_pb2.RewriterConfig.OFF
    customer_optimizer = rewrites.custom_optimizers.add()
    customer_optimizer.name = 'ExperimentalImplementationSelector'
    rewrites.min_graph_nodes = -1
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrites)
    config = config_pb2.ConfigProto(graph_options=graph_options)

    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 1

    with ops.Graph().as_default(), session.Session(config=config) as sess:
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)

      layer = UnifiedLSTM(rnn_state_size)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape), name='predict')

      zeros = array_ops.zeros([batch, output_shape])
      dummy_runtime = constant_op.constant(
          'unknown', dtype=dtypes.string, name='runtime')
      a = constant_op.constant(0)
      b = constant_op.constant(1)
      # Will always run the lstm layer.
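      # (a < b is constant-True, so the true branch runs every time; the false
      # branch exists only so the two branches return differently structured
      # internal state, which is what exercises the grappler rewrite.)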
      outputs, runtime = control_flow_ops.cond(
          gen_math_ops.less(a, b),
          lambda: layer(inputs),
          lambda: (zeros, dummy_runtime))
      loss = losses.softmax_cross_entropy(predict, outputs)
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      train_op = optimizer.minimize(loss)

      sess.run([variables.global_variables_initializer()])
      existing_loss = 0

      for _ in range(epoch):
        loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
            inputs: x_train,
            predict: y_train
        })
        if test.is_gpu_available():
          self.assertEqual(runtime_value, b'cudnn')
        else:
          self.assertEqual(runtime_value, b'cpu')
        # Make sure the loss is updated for every epoch
        # (layer weights properly updated).
        self.assertNotEqual(existing_loss, loss_value)
        existing_loss = loss_value