Example #1
    def test_Tensorboard_eager(self):
        temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES)
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
                                                       num_classes=NUM_CLASSES,
                                                       input_dim=INPUT_DIM)
        model.compile(loss='binary_crossentropy',
                      optimizer=tf.compat.v1.train.AdamOptimizer(0.01),
                      metrics=['accuracy'])

        cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]

        model.fit(x_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  validation_data=(x_test, y_test),
                  callbacks=cbks,
                  epochs=2,
                  verbose=0)

        self.assertTrue(os.path.exists(temp_dir))
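
These snippets are excerpted from Keras test files and reference module-level
constants that are defined elsewhere in those files. For readers who want to
run an example in isolation, here is a plausible standalone set of
definitions (the values below are assumptions, not the originals):

TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
NUM_CLASSES = 2
NUM_HIDDEN = 5
BATCH_SIZE = 5
EPOCHS = 1
DATA_DIM = INPUT_DIM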
Example #2
  def testOptimizerWithCallableVarList(self):
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = np_utils.to_categorical(y)

    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()

    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights

    with self.assertRaisesRegex(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not tf.executing_eagerly():
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
      self.evaluate(train_op)
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
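
The point of the callable var_list is that the variables need not exist when
minimize() is called. The same pattern with the public TF 2.x API, as a
minimal eager-mode sketch (the tf.keras names here are assumptions, not taken
from the test above):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
opt = tf.keras.optimizers.Adam()

x = tf.random.normal((8, 3))
y = tf.random.normal((8, 2))

loss = lambda: tf.reduce_mean(tf.square(model(x) - y))
var_list = lambda: model.trainable_weights  # resolved lazily, after build

# The first loss() call builds the model, so var_list() is non-empty by the
# time the optimizer resolves it.
opt.minimize(loss, var_list)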
Example #3
  def test_timeseries_classification_sequential_tf_rnn(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(4, 10),
        num_classes=2)
    y_train = np_utils.to_categorical(y_train)

    with base_layer.keras_style_scope():
      model = keras.models.Sequential()
      model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
                                 input_shape=x_train.shape[1:]))
      model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],
                                                  activation='softmax',
                                                  dtype=tf.float32)))
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizer_v2.adam.Adam(0.005),
          metrics=['acc'],
          run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit(x_train, y_train, epochs=15, batch_size=10,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(x_train, y_train)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(x_train)
    self.assertEqual(predictions.shape, (x_train.shape[0], 2))
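
For reference, an equivalent model built purely from the public tf.keras API;
a sketch assuming TF 2.x, where tf.keras.layers.LSTMCell and GRUCell stand in
for the legacy rnn_cell classes wrapped above:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.RNN(tf.keras.layers.LSTMCell(5), return_sequences=True,
                        input_shape=(4, 10)),
    tf.keras.layers.RNN(tf.keras.layers.GRUCell(2, activation='softmax')),
])
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['acc'])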
Example #4
    def test_keras_model_with_gru(self):
        input_shape = 10
        rnn_state_size = 8
        output_shape = 8
        timestep = 4
        batch = 100
        epoch = 10

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=batch,
                                                   test_samples=0,
                                                   input_shape=(timestep,
                                                                input_shape),
                                                   num_classes=output_shape)
        y_train = np_utils.to_categorical(y_train, output_shape)

        layer = rnn.GRU(rnn_state_size)

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=tf.float32)

        outputs = layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile('rmsprop', loss='mse')
        model.fit(x_train, y_train, epochs=epoch)
        model.evaluate(x_train, y_train)
        model.predict(x_train)
Example #5
    def test_serialization_v2_model(self):
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)

        model = keras.Sequential([
            keras.layers.Flatten(input_shape=x_train.shape[1:]),
            keras.layers.Dense(10, activation=tf.nn.relu),
            # To mimic 'tf.nn.softmax' used in TF 2.x.
            keras.layers.Dense(y_train.shape[-1], activation=tf.math.softmax),
        ])

        # Check that the activation is recorded as 'softmax' in the layer
        # config.
        last_layer_activation = model.get_layer(
            index=2).get_config()['activation']
        self.assertEqual(last_layer_activation, 'softmax')

        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['accuracy'],
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(x_train,
                  y_train,
                  epochs=2,
                  batch_size=10,
                  validation_data=(x_train, y_train),
                  verbose=2)

        output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
        model.save(output_path, save_format='tf')
        loaded_model = keras.models.load_model(output_path)
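        # Note: Model.summary() prints and returns None, so the check below
        # effectively verifies that both summaries print without error.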
        self.assertEqual(model.summary(), loaded_model.summary())
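
The serialization behavior the test relies on, in isolation: a layer given
tf.math.softmax records the plain string 'softmax' in its config, so the
round trip is lossless. A minimal sketch (public API, TF 2.x assumed):

import tensorflow as tf

layer = tf.keras.layers.Dense(2, activation=tf.math.softmax)
config = layer.get_config()
print(config['activation'])  # 'softmax', per the assertion in the test above
restored = tf.keras.layers.Dense.from_config(config)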
Example #6
    def test_vector_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)

        model = testing_utils.get_model_from_layers(
            [
                keras.layers.Dense(16, activation='relu'),
                keras.layers.Dropout(0.1),
                keras.layers.Dense(y_train.shape[-1], activation='softmax')
            ],
            input_shape=x_train.shape[1:])
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #7
  def _test_runtime_with_model(self, model):

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)

    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())

    existing_loss = 0
    for _ in range(self.epoch):
      history = model.fit(x_train, y_train)
      loss_value = history.history['loss'][0]

      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value

    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
Example #8
  def test_LSTM_runtime_with_mask(self):
    if tf.test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')

    # Masking will affect which backend is selected based on whether the mask
    # is strictly right padded.
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)

    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=tf.float32)
    masked_inputs = keras.layers.Masking()(inputs)

    outputs, runtime = layer(masked_inputs)
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF models do not work with scalar model outputs, especially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: tf.compat.v1.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)

    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())

    model.fit(x_train, y_train)

    # Verify unpadded data.
    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

    # Update x/y to be right padded by setting the last timestep to 0
    x_train[:, -1, :] = 0
    y_train[:, -1] = 0
    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

    # Further update x/y to be mixed-padded (masked in the middle), and verify
    # that only the CPU kernel can be selected.
    x_train[:, -3, :] = 0
    y_train[:, -3] = 0
    _, runtime_value = model.predict(x_train)
    self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
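
The padding distinction this test exercises, illustrated standalone: the
cuDNN kernel is only eligible when the mask is strictly right padded, so a
masked gap in the middle forces the CPU path. A small self-contained sketch:

def is_right_padded(mask):
    # Once a timestep is masked (0), every later timestep must be masked too.
    seen_pad = False
    for step in mask:
        if step == 0:
            seen_pad = True
        elif seen_pad:
            return False
    return True

print(is_right_padded([1, 1, 1, 0]))  # True  -> GPU kernel eligible
print(is_right_padded([1, 1, 0, 1]))  # False -> CPU fallback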
Example #9
  def get_data(self):
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=10,
        test_samples=10,
        input_shape=(DATA_DIM,),
        num_classes=NUM_CLASSES)
    y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
    return (x_train, y_train), (x_test, y_test)
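
For readers without the Keras test utilities on hand, a stand-in for
testing_utils.get_test_data with the same signature; this is a sketch only
(the real helper draws class-correlated data rather than uniform noise):

import numpy as np

def get_test_data(train_samples, test_samples, input_shape, num_classes,
                  random_seed=None):
    if random_seed is not None:
        np.random.seed(random_seed)
    total = train_samples + test_samples
    x = np.random.random((total,) + input_shape).astype(np.float32)
    y = np.random.randint(0, num_classes, size=(total,))
    return ((x[:train_samples], y[:train_samples]),
            (x[train_samples:], y[train_samples:]))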
Example #10
  def testNumericEquivalenceForNesterovMomentum(self):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with testing_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      model_tf = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_tf.set_weights(model_k_v2.get_weights())

      opt_k_v1 = optimizer_v1.SGD(momentum=0.9, nesterov=True)
      opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
      opt_tf = tf.compat.v1.train.MomentumOptimizer(
          learning_rate=0.01, momentum=0.9, use_nesterov=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_tf.compile(
          opt_tf,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
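
The update rule all three optimizers are expected to agree on, written out in
plain NumPy. This is my reading of TF's Nesterov-momentum variant; treat the
exact formula as an assumption to check against the optimizer docs:

import numpy as np

def nesterov_sgd_step(var, accum, grad, lr=0.01, momentum=0.9):
    accum = momentum * accum + grad
    var = var - lr * (grad + momentum * accum)
    return var, accum

var, accum = np.array([1.0]), np.array([0.0])
var, accum = nesterov_sgd_step(var, accum, grad=np.array([0.5]))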
Example #11
  def _benchmark_performance_with_standard_cudnn_impl(self):
    if not tf.test.is_gpu_available():
      self.skipTest('performance test will only run on GPU')

    mode = 'eager' if tf.executing_eagerly() else 'graph'
    batch = 64
    num_batch = 10
    test_config = {
        'input_shape': 128,
        'rnn_state_size': 64,
        'output_shape': 64,
        'timestep': 50,
        'batch': batch,
        'epoch': 20,
        # The performance of the warmup epoch is ignored.
        'warmup_epoch': 1,
    }
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=(batch * num_batch),
        test_samples=0,
        input_shape=(test_config['timestep'], test_config['input_shape']),
        num_classes=test_config['output_shape'])
    y_train = np_utils.to_categorical(y_train, test_config['output_shape'])

    cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
        test_config, x_train, y_train)
    lstm_v2_sec_per_epoch = self._time_performance_run_unifed_lstm_gpu(
        test_config, x_train, y_train)
    normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
        test_config, x_train, y_train)

    cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch
    v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch

    self.report_benchmark(name='keras_cudnn_lstm_' + mode,
                          wall_time=cudnn_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_lstm_v2_' + mode,
                          wall_time=lstm_v2_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_canonical_lstm_' + mode,
                          wall_time=normal_lstm_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)

    logging.info('Expect the performance of LSTM V2 to be within 80% of '
                 'CuDNN LSTM; got {0:.2f}%'.format(cudnn_vs_v2 * 100))
    logging.info('Expect the performance of LSTM V2 to be more than 5 times '
                 'that of the normal LSTM; got {0:.2f}x'.format(v2_vs_normal))
Example #12
    def test_gru_v2_feature_parity_with_canonical_gru(self):
        if tf.test.is_built_with_rocm():
            self.skipTest('Skipping the test as ROCm MIOpen does not '
                          'support padded input yet.')

        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 20

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=batch,
                                                   test_samples=0,
                                                   input_shape=(timestep,
                                                                input_shape),
                                                   num_classes=rnn_state_size,
                                                   random_seed=87654321)
        y_train = np_utils.to_categorical(y_train, rnn_state_size)
        # For the last two samples of the training data, we zero out the last
        # timestep to simulate variable-length sequences for the masking test.
        x_train[-2:, -1, :] = 0.0
        y_train[-2:] = 0

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=tf.float32)
        masked_input = keras.layers.Masking()(inputs)
        gru_layer = rnn_v1.GRU(rnn_state_size,
                               recurrent_activation='sigmoid',
                               reset_after=True)
        output = gru_layer(masked_input)
        gru_model = keras.models.Model(inputs, output)
        weights = gru_model.get_weights()
        y_1 = gru_model.predict(x_train)
        gru_model.compile('rmsprop', 'mse')
        gru_model.fit(x_train, y_train)
        y_2 = gru_model.predict(x_train)

        with testing_utils.device(should_use_gpu=True):
            cudnn_layer = rnn.GRU(rnn_state_size,
                                  recurrent_activation='sigmoid',
                                  reset_after=True)
            cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
        cudnn_model.set_weights(weights)
        y_3 = cudnn_model.predict(x_train)
        cudnn_model.compile('rmsprop', 'mse')
        cudnn_model.fit(x_train, y_train)
        y_4 = cudnn_model.predict(x_train)

        self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5)
        self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5)
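
For context, tf.keras.layers.GRU only dispatches to the fused cuDNN kernel
when a specific constructor configuration is kept, which is why the example
pins recurrent_activation and reset_after. A reminder sketch, with the
conditions paraphrased from the TF documentation:

import tensorflow as tf

# cuDNN-eligible configuration: these must hold, along with use_bias=True,
# unroll=False, recurrent_dropout=0, and strictly right-padded inputs.
gru = tf.keras.layers.GRU(8,
                          activation='tanh',
                          recurrent_activation='sigmoid',
                          reset_after=True)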
Example #13
    def test_sequential_save_and_pop(self):
        # Test the following sequence of actions:
        # - construct a Sequential model and train it
        # - save it
        # - load it
        # - pop its last layer and add a new layer instead
        # - continue training
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)
        model = keras.Sequential([
            keras.layers.Dense(16, activation='relu'),
            keras.layers.Dropout(0.1),
            keras.layers.Dense(y_train.shape[-1], activation='softmax')
        ])
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(x_train,
                  y_train,
                  epochs=1,
                  batch_size=10,
                  validation_data=(x_train, y_train),
                  verbose=2)
        model = self._save_and_reload_model(model)

        model.pop()
        model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        model = self._save_and_reload_model(model)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #14
def assert_regression_works(reg):
  np.random.seed(42)
  (x_train, y_train), (x_test, _) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)

  reg.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)

  score = reg.score(x_train, y_train, batch_size=BATCH_SIZE)
  assert np.isscalar(score) and np.isfinite(score)

  preds = reg.predict(x_test, batch_size=BATCH_SIZE)
  assert preds.shape == (TEST_SAMPLES,)
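
A plausible reg to pass in: the scikit-learn wrapper that shipped with
tf.keras (deprecated in later releases). The build function and its reuse of
the constants sketched earlier are assumptions, not part of the original test:

import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor

def build_regressor():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM,
                              activation='relu'),
        tf.keras.layers.Dense(1),
    ])
    model.compile(optimizer='sgd', loss='mse')
    return model

assert_regression_works(KerasRegressor(build_fn=build_regressor))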
Example #15
  def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with testing_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v1.compile(
          opt_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_v1.fit(x, y, batch_size=5, epochs=1)

      model_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v2.set_weights(model_v1.get_weights())
      model_v2.compile(
          opt_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      if not tf.compat.v1.executing_eagerly_outside_functions():
        model_v2._make_train_function()
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())

      hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
                          rtol=1e-5, atol=1e-5)
      self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
                          rtol=1e-5, atol=1e-5)
Example #16
    def test_vector_classification_shared_model(self):
        # Test that Sequential models that feature internal updates
        # and internal losses can be shared.
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)

        base_model = testing_utils.get_model_from_layers(
            [
                keras.layers.Dense(
                    16,
                    activation='relu',
                    kernel_regularizer=keras.regularizers.l2(1e-5),
                    bias_regularizer=keras.regularizers.l2(1e-5)),
                keras.layers.BatchNormalization()
            ],
            input_shape=x_train.shape[1:])
        x = keras.layers.Input(x_train.shape[1:])
        y = base_model(x)
        y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
        model = keras.models.Model(x, y)
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertLen(model.losses, 2)
        if not tf.executing_eagerly():
            self.assertLen(model.get_updates_for(x), 2)
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #17
def assert_classification_works(clf):
  np.random.seed(42)
  (x_train, y_train), (x_test, _) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)

  clf.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)

  score = clf.score(x_train, y_train, batch_size=BATCH_SIZE)
  assert np.isscalar(score) and np.isfinite(score)

  preds = clf.predict(x_test, batch_size=BATCH_SIZE)
  assert preds.shape == (TEST_SAMPLES,)
  for prediction in np.unique(preds):
    assert prediction in range(NUM_CLASSES)

  proba = clf.predict_proba(x_test, batch_size=BATCH_SIZE)
  assert proba.shape == (TEST_SAMPLES, NUM_CLASSES)
  assert np.allclose(np.sum(proba, axis=1), np.ones(TEST_SAMPLES))
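
And a matching classifier, with the same caveats as the regressor sketch
above (KerasClassifier and the build function are assumptions):

import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

def build_classifier():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM,
                              activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'),
    ])
    model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy')
    return model

assert_classification_works(KerasClassifier(build_fn=build_classifier))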
Example #18
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            model = testing_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM)
            model.compile(loss='binary_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            cbks = [
                callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.5,
                                            patience=4,
                                            verbose=1),
                callbacks_v1.TensorBoard(log_dir=temp_dir)
            ]

            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            assert os.path.exists(temp_dir)
Example #19
    def test_reset_after_GRU(self):
        num_samples = 2
        timesteps = 3
        embedding_dim = 4
        units = 2

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=num_samples,
                                                   test_samples=0,
                                                   input_shape=(timesteps,
                                                                embedding_dim),
                                                   num_classes=units)
        y_train = np_utils.to_categorical(y_train, units)

        inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
        gru_layer = keras.layers.GRU(units, reset_after=True)
        output = gru_layer(inputs)
        gru_model = keras.models.Model(inputs, output)
        gru_model.compile('rmsprop',
                          'mse',
                          run_eagerly=testing_utils.should_run_eagerly())
        gru_model.fit(x_train, y_train)
        gru_model.predict(x_train)
Example #20
    def test_TensorBoard_multi_input_output(self):
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        with tf.Graph().as_default(), self.cached_session():
            filepath = os.path.join(tmpdir, 'logs')

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            def data_generator(train):
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while 1:
                    if train:
                        # simulate multi-input/output models
                        yield ([x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    else:
                        yield ([x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    i += 1
                    i %= max_batch_index

            inp1 = input_layer.Input((INPUT_DIM, ))
            inp2 = input_layer.Input((INPUT_DIM, ))
            inp = layers.add([inp1, inp2])
            hidden = layers.Dense(2, activation='relu')(inp)
            hidden = layers.Dropout(0.1)(hidden)
            output1 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
            output2 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
            model = training.Model([inp1, inp2], [output1, output2])
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            # we must generate new callbacks for each test, as they aren't stateless
            def callbacks_factory(histogram_freq):
                return [
                    callbacks_v1.TensorBoard(log_dir=filepath,
                                             histogram_freq=histogram_freq,
                                             write_images=True,
                                             write_grads=True,
                                             batch_size=5)
                ]

            # fit without validation data
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      callbacks=callbacks_factory(histogram_freq=0),
                      epochs=3)

            # fit with validation data and accuracy
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      validation_data=([x_test] * 2, [y_test] * 2),
                      callbacks=callbacks_factory(histogram_freq=1),
                      epochs=2)

            # fit generator without validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=callbacks_factory(histogram_freq=0))

            # fit generator with validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=([x_test] * 2, [y_test] * 2),
                                callbacks=callbacks_factory(histogram_freq=1))
            assert os.path.isdir(filepath)
Example #21
    def test_Tensorboard_histogram_summaries_in_test_function(self):
        class FileWriterStub(object):
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
                self.steps_seen = []

            def add_summary(self, summary, global_step):
                summary_obj = tf.compat.v1.Summary()

                # ensure a valid Summary proto is being sent
                if isinstance(summary, bytes):
                    summary_obj.ParseFromString(summary)
                else:
                    assert isinstance(summary, tf.compat.v1.Summary)
                    summary_obj = summary

                # keep track of steps seen for the merged_summary op,
                # which contains the histogram summaries
                if len(summary_obj.value) > 1:
                    self.steps_seen.append(global_step)

            def flush(self):
                pass

            def close(self):
                pass

        def _init_writer(obj, _):
            obj.writer = FileWriterStub(obj.log_dir)

        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES)
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN,
                             input_dim=INPUT_DIM,
                             activation='relu'))
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
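            # Monkey-patch TensorBoard's writer factory so the stub above
            # captures summaries instead of writing real event files.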
            callbacks_v1.TensorBoard._init_writer = _init_writer
            tsb = callbacks_v1.TensorBoard(log_dir=tmpdir,
                                           histogram_freq=1,
                                           write_images=True,
                                           write_grads=True,
                                           batch_size=5)
            cbks = [tsb]

            # fit with validation data
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=3,
                      verbose=0)

            self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
Example #22
  def test_time_major_and_go_backward(self, time_major, go_backwards):
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100

    x_train = np.random.random((batch, timestep, input_shape))

    def build_model(layer_cls):
      inputs = keras.layers.Input(
          shape=[timestep, input_shape], dtype=tf.float32)
      layer = layer_cls(rnn_state_size,
                        recurrent_activation='sigmoid',
                        time_major=time_major,
                        return_sequences=True,
                        go_backwards=go_backwards)
      if time_major:
        converted_input = keras.layers.Lambda(
            lambda t: tf.compat.v1.transpose(t, [1, 0, 2]))(inputs)
        outputs = layer(converted_input)
        outputs = keras.layers.Lambda(
            lambda t: tf.compat.v1.transpose(t, [1, 0, 2]))(outputs)
      else:
        outputs = layer(inputs)
      return keras.models.Model(inputs, outputs)

    lstm_model = build_model(rnn_v1.LSTM)
    y_ref = lstm_model.predict(x_train)
    weights = lstm_model.get_weights()

    lstm_v2_model = build_model(rnn.LSTM)
    lstm_v2_model.set_weights(weights)
    y = lstm_v2_model.predict(x_train)

    self.assertAllClose(y, y_ref)

    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = np_utils.to_categorical(y_train, output_shape)

    layer = rnn.LSTM(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
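
The time-major conversion done by the Lambda layers above, in isolation:
Keras RNN layers consume batch-major [batch, time, features] input by
default, so a time_major layer needs the first two axes swapped on the way
in and back again on the way out:

import tensorflow as tf

batch_major = tf.random.normal((100, 4, 10))       # [batch, time, features]
time_major = tf.transpose(batch_major, [1, 0, 2])  # [time, batch, features]
round_trip = tf.transpose(time_major, [1, 0, 2])   # back to batch-major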
Example #23
    def _test_optimizer(self, optimizer, target=0.75):
        if tf.executing_eagerly():
            self.skipTest('v1 optimizer does not run in eager mode')
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000,
                                                            test_samples=200,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)
        model = _get_model(x_train.shape[1], 20, y_train.shape[1])
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 0)
        history = model.fit(x_train,
                            y_train,
                            epochs=2,
                            batch_size=16,
                            verbose=0)
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # 63 steps per epoch
        self.assertGreaterEqual(history.history['acc'][-1], target)
        config = keras.optimizers.serialize(optimizer)
        optim = keras.optimizers.deserialize(config)
        new_config = keras.optimizers.serialize(optim)
        new_config['class_name'] = new_config['class_name'].lower()
        new_config['config'].pop('name', None)
        if 'amsgrad' not in config['config']:
            new_config['config'].pop('amsgrad', None)
        if 'decay' in new_config['config'] and 'schedule_decay' in config[
                'config']:
            new_config['config']['schedule_decay'] = new_config['config'].pop(
                'decay')
        if 'momentum' not in config['config']:
            new_config['config'].pop('momentum', None)
        if 'centered' not in config['config']:
            new_config['config'].pop('centered', None)
        self.assertDictEqual(config, new_config)

        # Test constraints.
        model = keras.models.Sequential()
        dense = keras.layers.Dense(10,
                                   input_shape=(x_train.shape[1], ),
                                   kernel_constraint=lambda x: 0. * x + 1.,
                                   bias_constraint=lambda x: 0. * x + 2.,
                                   activation='relu')
        model.add(dense)
        model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'],
                      run_eagerly=testing_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # Using same optimizer from before
        model.train_on_batch(x_train[:10], y_train[:10])
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 127)
        kernel, bias = dense.get_weights()
        np.testing.assert_allclose(kernel, 1., atol=1e-3)
        np.testing.assert_allclose(bias, 2., atol=1e-3)
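
Where the magic numbers 126 and 127 come from: 1000 training samples at
batch_size=16 give ceil(1000 / 16) = 63 optimizer steps per epoch, so two
epochs advance iterations to 126, and the single train_on_batch call that
follows makes it 127:

import math

steps_per_epoch = math.ceil(1000 / 16)  # 63
assert steps_per_epoch * 2 == 126
assert steps_per_epoch * 2 + 1 == 127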
Example #24
    def test_TensorBoard(self):
        np.random.seed(1337)

        temp_dir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES)
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        def data_generator(train):
            if train:
                max_batch_index = len(x_train) // BATCH_SIZE
            else:
                max_batch_index = len(x_test) // BATCH_SIZE
            i = 0
            while 1:
                if train:
                    yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                           y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
                else:
                    yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                           y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
                i += 1
                i %= max_batch_index

        # case: Sequential
        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN,
                             input_dim=INPUT_DIM,
                             activation='relu'))
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
            tsb = callbacks_v1.TensorBoard(log_dir=temp_dir,
                                           histogram_freq=1,
                                           write_images=True,
                                           write_grads=True,
                                           batch_size=5)
            cbks = [tsb]

            # fit with validation data
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=3,
                      verbose=0)

            # fit with validation data and accuracy
            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            # fit generator with validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=(x_test, y_test),
                                callbacks=cbks,
                                verbose=0)

            # fit generator without validation data
            # histogram_freq must be zero
            tsb.histogram_freq = 0
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=cbks,
                                verbose=0)

            # fit generator with validation data and accuracy
            tsb.histogram_freq = 1
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=(x_test, y_test),
                                callbacks=cbks,
                                verbose=0)

            # fit generator without validation data and accuracy
            tsb.histogram_freq = 0
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=cbks)
            assert os.path.exists(temp_dir)