Example #1
0
  def test_cudnnrnn_bidirectional(self):
    """Exercises Bidirectional(CuDNNGRU) across several model-building styles.

    Covers: a plain Sequential model, config/JSON round-trip, stacked
    bidirectional layers, the functional API, and a stateful variant.
    Skipped when no CUDA GPU is available, since CuDNN layers need one.
    """
    if not test.is_gpu_available(cuda_only=True):
      return
    with self.test_session(use_gpu=True):
      layer_cls = keras.layers.CuDNNGRU
      num_samples = 2
      input_dim = 2
      num_timesteps = 2
      units = 2
      merge_mode = 'concat'

      data = np.random.random((num_samples, num_timesteps, input_dim))
      # 'concat' merge doubles the output width; other modes keep it.
      label_dim = 2 * units if merge_mode == 'concat' else units
      labels = np.random.random((num_samples, label_dim))

      # Sequential model.
      model = keras.Sequential()
      model.add(
          keras.layers.Bidirectional(
              layer_cls(units),
              merge_mode=merge_mode,
              input_shape=(None, input_dim)))
      model.compile(
          loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
      model.fit(data, labels, epochs=1, batch_size=1)

      # Config and JSON round-trip.
      model.get_config()
      model = keras.models.model_from_json(model.to_json())
      model.summary()

      # Stacked bidirectional layers.
      model = keras.Sequential()
      model.add(
          keras.layers.Bidirectional(
              layer_cls(units, return_sequences=True),
              merge_mode=merge_mode,
              input_shape=(None, input_dim)))
      model.add(
          keras.layers.Bidirectional(layer_cls(units), merge_mode=merge_mode))
      model.compile(
          loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
      model.fit(data, labels, epochs=1, batch_size=1)

      # Functional API.
      inputs = keras.Input((num_timesteps, input_dim))
      outputs = keras.layers.Bidirectional(
          layer_cls(units), merge_mode=merge_mode)(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
      model.fit(data, labels, epochs=1, batch_size=1)

      # Bidirectional and stateful (fixed batch size of 1).
      inputs = keras.Input(batch_shape=(1, num_timesteps, input_dim))
      outputs = keras.layers.Bidirectional(
          layer_cls(units, stateful=True), merge_mode=merge_mode)(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
      model.fit(data, labels, epochs=1, batch_size=1)
  def test_metrics_correctness_with_iterator(self):
    """Checks the accuracy metric reported by evaluate() on dataset iterators.

    First evaluates against seeded random labels (rounded accuracy 0.5),
    then against all-zero labels (accuracy exactly 0).
    """
    model = keras.Sequential([
        keras.layers.Dense(
            8, activation='relu', input_dim=4, kernel_initializer='ones'),
        keras.layers.Dense(
            1, activation='sigmoid', kernel_initializer='ones'),
    ])
    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy'],
        optimizer=RMSPropOptimizer(learning_rate=0.001))

    np.random.seed(123)
    features = np.random.randint(10, size=(100, 4)).astype(np.float32)
    targets = np.random.randint(2, size=(100, 1)).astype(np.float32)
    iterator = dataset_ops.Dataset.from_tensor_slices(
        (features, targets)).batch(10).make_one_shot_iterator()
    # outs[0] is the loss; outs[1] is the accuracy metric.
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)

    # With all-zero targets no prediction matches, so accuracy is 0.
    targets = np.zeros((100, 1), dtype=np.float32)
    iterator = dataset_ops.Dataset.from_tensor_slices(
        (features, targets)).repeat(100).batch(10).make_one_shot_iterator()
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(outs[1], 0.)
  def test_generator_methods(self):
    """Smoke-tests fit/evaluate/predict with a Python generator as input."""
    model = keras.Sequential([keras.layers.Dense(4, input_shape=(3,))])
    model.compile(
        RMSPropOptimizer(learning_rate=0.001), 'mse', metrics=['mae'])

    features = np.random.random((10, 3))
    targets = np.random.random((10, 4))

    def data_gen():
      # Infinite generator yielding the same (x, y) batch of 10 samples.
      while True:
        yield features, targets

    model.fit_generator(data_gen(), steps_per_epoch=3, epochs=1)
    model.evaluate_generator(data_gen(), steps=3)
    predictions = model.predict_generator(data_gen(), steps=3)
    # 3 steps of 10 samples each -> 30 rows, output width 4.
    self.assertEqual(predictions.shape, (30, 4))
Example #4
0
 def test_loss_correctness(self):
   """Compares the final training loss against a fixed reference value.

   All-ones initializers, constant inputs, seeded labels, and a fixed
   learning rate make the run deterministic, so eager and graph training
   can both be checked against the same constant.
   """
   model = keras.Sequential([
       keras.layers.Dense(
           3, activation='relu', input_dim=4, kernel_initializer='ones'),
       keras.layers.Dense(
           2, activation='softmax', kernel_initializer='ones'),
   ])
   model.compile(
       loss='sparse_categorical_crossentropy',
       optimizer=RMSPropOptimizer(learning_rate=0.001))
   inputs = np.ones((100, 4))
   np.random.seed(123)
   # randint(0, 1) samples from [0, 1), so every label is class 0.
   labels = np.random.randint(0, 1, size=(100, 1))
   history = model.fit(inputs, labels, epochs=1, batch_size=10)
   final_loss = history.history['loss'][-1]
   self.assertEqual(np.around(final_loss, decimals=4), 0.6173)
 def test_metrics_correctness(self):
   """Checks the 'acc' metric against constant targets.

   With all-ones weights and all-ones inputs the predictions are constant,
   so accuracy is 1.0 against all-ones targets and 0.0 against zeros.
   """
   model = keras.Sequential([
       keras.layers.Dense(
           3, activation='relu', input_dim=4, kernel_initializer='ones'),
       keras.layers.Dense(
           1, activation='sigmoid', kernel_initializer='ones'),
   ])
   model.compile(
       loss='mae',
       metrics=['acc'],
       optimizer=RMSPropOptimizer(learning_rate=0.001))
   inputs = np.ones((100, 4))
   # outs[1] is the accuracy metric.
   self.assertEqual(model.evaluate(inputs, np.ones((100, 1)))[1], 1.)
   self.assertEqual(model.evaluate(inputs, np.zeros((100, 1)))[1], 0.)
 def test_loss_correctness_with_iterator(self):
   """Compares training loss against a fixed reference, fed via an iterator.

   Same deterministic setup as the array-input variant (ones initializers,
   seeded labels, fixed learning rate), but the data arrives through a
   tf.data one-shot iterator.
   """
   model = keras.Sequential([
       keras.layers.Dense(
           3, activation='relu', input_dim=4, kernel_initializer='ones'),
       keras.layers.Dense(
           2, activation='softmax', kernel_initializer='ones'),
   ])
   model.compile(
       loss='sparse_categorical_crossentropy',
       optimizer=RMSPropOptimizer(learning_rate=0.001))
   inputs = np.ones((100, 4), dtype=np.float32)
   np.random.seed(123)
   # randint(0, 1) samples from [0, 1), so every label is class 0.
   labels = np.random.randint(0, 1, size=(100, 1))
   iterator = dataset_ops.Dataset.from_tensor_slices(
       (inputs, labels)).repeat(100).batch(10).make_one_shot_iterator()
   history = model.fit(iterator, epochs=1, steps_per_epoch=10)
   self.assertEqual(
       np.around(history.history['loss'][-1], decimals=4), 0.6173)