Example #1
File: lstm_test.py Project: ttigong/keras
    def test_lstm_v2_feature_parity_with_canonical_lstm(self):
        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 20

        (x_train,
         y_train), _ = test_utils.get_test_data(train_samples=batch,
                                                test_samples=0,
                                                input_shape=(timestep,
                                                             input_shape),
                                                num_classes=rnn_state_size,
                                                random_seed=87654321)
        y_train = np_utils.to_categorical(y_train, rnn_state_size)
        # For the last two batch items of the test data, we zero out the last
        # timestep to simulate variable-length sequences and exercise masking.
        x_train[-2:, -1, :] = 0.0
        y_train[-2:] = 0

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=tf.float32)
        masked_input = keras.layers.Masking()(inputs)
        lstm_layer = lstm_v1.LSTM(rnn_state_size,
                                  recurrent_activation='sigmoid')
        output = lstm_layer(masked_input)
        lstm_model = keras.models.Model(inputs, output)
        weights = lstm_model.get_weights()
        y_1 = lstm_model.predict(x_train)
        lstm_model.compile('rmsprop', 'mse')
        lstm_model.fit(x_train, y_train)
        y_2 = lstm_model.predict(x_train)

        with test_utils.device(should_use_gpu=True):
            cudnn_layer = lstm.LSTM(rnn_state_size)
            cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
        cudnn_model.set_weights(weights)
        y_3 = cudnn_model.predict(x_train)
        cudnn_model.compile('rmsprop', 'mse')
        cudnn_model.fit(x_train, y_train)
        y_4 = cudnn_model.predict(x_train)

        self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
        self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
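Note: this comparison works because Masking() treats an all-zero timestep as padding, so zeroing x_train[-2:, -1, :] simulates sequences that are one step shorter. A minimal standalone sketch of that behavior (illustrative, not part of the test file):

    import numpy as np
    import tensorflow as tf
    from tensorflow import keras

    x = np.random.random((2, 4, 10)).astype("float32")
    x[:, -1, :] = 0.0  # zero out the last timestep, as the test does
    mask = keras.layers.Masking().compute_mask(tf.constant(x))
    print(mask.numpy())  # last column is False: that timestep is skipped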
Example #2
  def testNumericEquivalenceForAmsgrad(self):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = test_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = test_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = test_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())

      opt_k_v1 = optimizer_v1.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=test_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=test_utils.should_run_eagerly())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
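What AMSGrad changes relative to plain Adam is that the second-moment denominator can never shrink. A textbook-style numpy step as a reminder (a sketch; Keras' bias-correction details differ slightly from this formulation):

    import numpy as np

    def amsgrad_step(w, g, m, v, v_hat, t, lr=1e-3, b1=0.9, b2=0.999, eps=1e-7):
        m = b1 * m + (1 - b1) * g          # first moment
        v = b2 * v + (1 - b2) * g * g      # second moment
        v_hat = np.maximum(v_hat, v)       # AMSGrad: keep the running max
        m_hat = m / (1 - b1 ** t)          # bias correction
        w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
        return w, m, v, v_hat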
Example #3
    def test_vector_classification_shared_model(self):
        # Test that Sequential models that feature internal updates
        # and internal losses can be shared.
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(10, ),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        base_model = test_utils.get_model_from_layers(
            [
                keras.layers.Dense(
                    16,
                    activation='relu',
                    kernel_regularizer=keras.regularizers.l2(1e-5),
                    bias_regularizer=keras.regularizers.l2(1e-5)),
                keras.layers.BatchNormalization()
            ],
            input_shape=x_train.shape[1:])
        x = keras.layers.Input(x_train.shape[1:])
        y = base_model(x)
        y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
        model = keras.models.Model(x, y)
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=test_utils.should_run_eagerly())
        self.assertLen(model.losses, 2)
        if not tf.executing_eagerly():
            self.assertLen(model.get_updates_for(x), 2)
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
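The assertLen(model.losses, 2) above holds because each weight regularizer registers one penalty tensor on the layer. A standalone illustration (assumed input shape):

    from tensorflow import keras

    layer = keras.layers.Dense(
        16,
        kernel_regularizer=keras.regularizers.l2(1e-5),
        bias_regularizer=keras.regularizers.l2(1e-5))
    layer.build((None, 10))
    print(len(layer.losses))  # 2: one l2 penalty for the kernel, one for the bias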
Example #4
def assert_classification_works(clf):
  np.random.seed(42)
  (x_train, y_train), (x_test, _) = test_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)

  clf.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)

  score = clf.score(x_train, y_train, batch_size=BATCH_SIZE)
  assert np.isscalar(score) and np.isfinite(score)

  preds = clf.predict(x_test, batch_size=BATCH_SIZE)
  assert preds.shape == (TEST_SAMPLES,)
  for prediction in np.unique(preds):
    assert prediction in range(NUM_CLASSES)

  proba = clf.predict_proba(x_test, batch_size=BATCH_SIZE)
  assert proba.shape == (TEST_SAMPLES, NUM_CLASSES)
  assert np.allclose(np.sum(proba, axis=1), np.ones(TEST_SAMPLES))
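This helper reads module-level constants defined elsewhere in its test file. Plausible values, assumed here only to make the snippet self-contained (not copied from the source):

    TRAIN_SAMPLES = 10
    TEST_SAMPLES = 10
    INPUT_DIM = 3
    NUM_CLASSES = 2
    BATCH_SIZE = 5
    EPOCHS = 1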
Example #5
    def test_image_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=100,
            test_samples=0,
            input_shape=(10, 10, 3),
            num_classes=2,
        )
        y_train = utils.to_categorical(y_train)

        layers = [
            keras.layers.Conv2D(4, 3, padding="same", activation="relu"),
            keras.layers.Conv2D(8, 3, padding="same"),
            keras.layers.BatchNormalization(),
            keras.layers.Conv2D(8, 3, padding="same"),
            keras.layers.Flatten(),
            keras.layers.Dense(y_train.shape[-1], activation="softmax"),
        ]
        model = test_utils.get_model_from_layers(
            layers, input_shape=x_train.shape[1:]
        )
        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        history = model.fit(
            x_train,
            y_train,
            epochs=10,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )
        self.assertGreater(history.history["val_acc"][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
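A quick sanity check on the layer stack above: with padding="same" the spatial dimensions are preserved, so only the channel count changes before Flatten (sketch):

    from tensorflow import keras

    m = keras.Sequential([
        keras.layers.InputLayer(input_shape=(10, 10, 3)),
        keras.layers.Conv2D(4, 3, padding="same", activation="relu"),
        keras.layers.Conv2D(8, 3, padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(8, 3, padding="same"),
        keras.layers.Flatten(),
    ])
    print(m.output_shape)  # (None, 800) == 10 * 10 * 8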
Example #6
    def test_serialization_v2_model(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(10, ),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        model = keras.Sequential([
            keras.layers.Flatten(input_shape=x_train.shape[1:]),
            keras.layers.Dense(10, activation=tf.nn.relu),
            # To mimic 'tf.nn.softmax' used in TF 2.x.
            keras.layers.Dense(y_train.shape[-1], activation=tf.math.softmax),
        ])

        # Check if 'softmax' is in model.get_config().
        last_layer_activation = model.get_layer(
            index=2).get_config()["activation"]
        self.assertEqual(last_layer_activation, "softmax")

        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["accuracy"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.fit(
            x_train,
            y_train,
            epochs=2,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )

        output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
        model.save(output_path, save_format="tf")
        loaded_model = keras.models.load_model(output_path)
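        # Note: summary() prints the summary and returns None, so this
        # equality check only verifies that both calls run without error.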
        self.assertEqual(model.summary(), loaded_model.summary())
Example #7
    def test_keras_model_with_gru(self):
        epoch = 10

        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=self.batch,
            test_samples=0,
            input_shape=(self.timestep, self.input_shape),
            num_classes=self.output_shape,
        )
        y_train = np_utils.to_categorical(y_train, self.output_shape)

        layer = keras.layers.GRU(self.rnn_state_size)

        inputs = keras.layers.Input(shape=[self.timestep, self.input_shape],
                                    dtype=tf.float32)

        outputs = layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile("rmsprop", loss="mse")
        model.fit(x_train, y_train, epochs=epoch)
        model.evaluate(x_train, y_train)
        model.predict(x_train)
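The test reads its dimensions from self; a plausible setUp providing them (values assumed for illustration, not copied from the source file):

    def setUp(self):
        super().setUp()
        self.batch = 10
        self.timestep = 8
        self.input_shape = 4
        self.output_shape = 8
        self.rnn_state_size = 8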
Example #8
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

            (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            model = test_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM)
            model.compile(loss='binary_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            cbks = [
                callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.5,
                                            patience=4,
                                            verbose=1),
                callbacks_v1.TensorBoard(log_dir=temp_dir)
            ]

            model.fit(x_train,
                      y_train,
                      batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=cbks,
                      epochs=2,
                      verbose=0)

            assert os.path.exists(temp_dir)
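For reference, ReduceLROnPlateau(factor=0.5, patience=4) halves the learning rate once val_loss has failed to improve for four consecutive epochs. A stripped-down sketch of that logic (ignoring min_delta and cooldown):

    best, wait, lr = float("inf"), 0, 0.01
    for val_loss in [1.0, 0.9, 0.9, 0.9, 0.9, 0.9]:
        if val_loss < best:
            best, wait = val_loss, 0
        else:
            wait += 1
            if wait >= 4:  # patience exhausted
                lr *= 0.5
                wait = 0
    print(lr)  # 0.005 after the plateau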
Example #9
    def test_reset_after_gru(self):
        num_samples = 2
        timesteps = 3
        embedding_dim = 4
        units = 2

        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=num_samples,
            test_samples=0,
            input_shape=(timesteps, embedding_dim),
            num_classes=units,
        )
        y_train = np_utils.to_categorical(y_train, units)

        inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
        gru_layer = keras.layers.GRU(units, reset_after=True)
        output = gru_layer(inputs)
        gru_model = keras.models.Model(inputs, output)
        gru_model.compile("rmsprop",
                          "mse",
                          run_eagerly=test_utils.should_run_eagerly())
        gru_model.fit(x_train, y_train)
        gru_model.predict(x_train)
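reset_after=True selects the cuDNN-compatible GRU formulation, which applies the reset gate after the recurrent matmul and therefore carries separate input and recurrent biases. A small structural check (sketch):

    from tensorflow import keras

    gru = keras.layers.GRU(2, reset_after=True)
    gru.build((None, 3, 4))
    print(gru.cell.bias.shape)  # (2, 6): input and recurrent biases, 3 * units each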
Example #10
    def test_timeseries_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=100,
            test_samples=0,
            input_shape=(4, 10),
            num_classes=2,
        )
        y_train = utils.to_categorical(y_train)

        layers = [
            keras.layers.LSTM(5, return_sequences=True),
            keras.layers.GRU(y_train.shape[-1], activation="softmax"),
        ]
        model = test_utils.get_model_from_layers(
            layers, input_shape=x_train.shape[1:]
        )
        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        history = model.fit(
            x_train,
            y_train,
            epochs=15,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )
        self.assertGreater(history.history["val_acc"][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #11
    def test_timeseries_classification_sequential_tf_rnn(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(4, 10),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        with base_layer.keras_style_scope():
            model = keras.models.Sequential()
            model.add(
                keras.layers.RNN(legacy_cells.LSTMCell(5),
                                 return_sequences=True,
                                 input_shape=x_train.shape[1:]))
            model.add(
                keras.layers.RNN(
                    legacy_cells.GRUCell(y_train.shape[-1],
                                         activation='softmax',
                                         dtype=tf.float32)))
            model.compile(
                loss='categorical_crossentropy',
                optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
                metrics=['acc'],
                run_eagerly=test_utils.should_run_eagerly())

        history = model.fit(x_train,
                            y_train,
                            epochs=15,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #12
    def _test_runtime_with_model(self, model):
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=self.batch,
            test_samples=0,
            input_shape=(self.timestep, self.input_shape),
            num_classes=self.output_shape,
        )
        y_train = np_utils.to_categorical(y_train, self.output_shape)

        model.compile(optimizer="sgd", loss=["categorical_crossentropy", None])

        existing_loss = 0
        for _ in range(self.epoch):
            history = model.fit(x_train, y_train)
            loss_value = history.history["loss"][0]

            self.assertNotEqual(existing_loss, loss_value)
            existing_loss = loss_value

        _, runtime_value = model.predict(x_train)
        if tf.test.is_gpu_available():
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
        else:
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
Example #13
    def test_TensorBoard(self):
        np.random.seed(1337)

        temp_dir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES,
        )
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        def data_generator(train):
            if train:
                max_batch_index = len(x_train) // BATCH_SIZE
            else:
                max_batch_index = len(x_test) // BATCH_SIZE
            i = 0
            while True:
                if train:
                    yield (
                        x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                        y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                    )
                else:
                    yield (
                        x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                        y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                    )
                i += 1
                i %= max_batch_index

        # case: Sequential
        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN,
                             input_dim=INPUT_DIM,
                             activation="relu"))
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            tsb = callbacks_v1.TensorBoard(
                log_dir=temp_dir,
                histogram_freq=1,
                write_images=True,
                write_grads=True,
                batch_size=5,
            )
            cbks = [tsb]

            # fit with validation data
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=3,
                verbose=0,
            )

            # fit with validation data and accuracy
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )

            # fit generator with validation data
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                verbose=0,
            )

            # fit generator without validation data
            # histogram_freq must be zero
            tsb.histogram_freq = 0
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                callbacks=cbks,
                verbose=0,
            )

            # fit generator with validation data and accuracy
            tsb.histogram_freq = 1
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                verbose=0,
            )

            # fit generator without validation data and accuracy
            tsb.histogram_freq = 0
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=cbks)
            assert os.path.exists(temp_dir)
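fit_generator is deprecated in TF 2.x; the hand-rolled data_generator above is roughly equivalent to this tf.data pipeline over the same arrays (sketch):

    import tensorflow as tf

    ds = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
          .batch(BATCH_SIZE)
          .repeat())
    # model.fit(ds, steps_per_epoch=len(x_train) // BATCH_SIZE, epochs=2)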
Example #14
    def test_Tensorboard_histogram_summaries_in_test_function(self):
        class FileWriterStub:
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
                self.steps_seen = []

            def add_summary(self, summary, global_step):
                summary_obj = tf.compat.v1.Summary()

                # ensure a valid Summary proto is being sent
                if isinstance(summary, bytes):
                    summary_obj.ParseFromString(summary)
                else:
                    assert isinstance(summary, tf.compat.v1.Summary)
                    summary_obj = summary

                # keep track of steps seen for the merged_summary op,
                # which contains the histogram summaries
                if len(summary_obj.value) > 1:
                    self.steps_seen.append(global_step)

            def flush(self):
                pass

            def close(self):
                pass

        def _init_writer(obj, _):
            obj.writer = FileWriterStub(obj.log_dir)

        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM, ),
            num_classes=NUM_CLASSES,
        )
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)

        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN,
                             input_dim=INPUT_DIM,
                             activation="relu"))
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            callbacks_v1.TensorBoard._init_writer = _init_writer
            tsb = callbacks_v1.TensorBoard(
                log_dir=tmpdir,
                histogram_freq=1,
                write_images=True,
                write_grads=True,
                batch_size=5,
            )
            cbks = [tsb]

            # fit with validation data
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=3,
                verbose=0,
            )

            self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
Example #15
    def test_TensorBoard_multi_input_output(self):
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        with tf.Graph().as_default(), self.cached_session():
            filepath = os.path.join(tmpdir, "logs")

            (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES,
            )
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            def data_generator(train):
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while True:
                    if train:
                        # simulate multi-input/output models
                        yield (
                            [x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] * 2,
                            [y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] * 2,
                        )
                    else:
                        yield (
                            [x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] * 2,
                            [y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] * 2,
                        )
                    i += 1
                    i %= max_batch_index

            inp1 = input_layer.Input((INPUT_DIM, ))
            inp2 = input_layer.Input((INPUT_DIM, ))
            inp = layers.add([inp1, inp2])
            hidden = layers.Dense(2, activation="relu")(inp)
            hidden = layers.Dropout(0.1)(hidden)
            output1 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
            output2 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
            model = training.Model([inp1, inp2], [output1, output2])
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )

            # we must generate new callbacks for each test, as they aren't stateless
            def callbacks_factory(histogram_freq):
                return [
                    callbacks_v1.TensorBoard(
                        log_dir=filepath,
                        histogram_freq=histogram_freq,
                        write_images=True,
                        write_grads=True,
                        batch_size=5,
                    )
                ]

            # fit without validation data
            model.fit(
                [x_train] * 2,
                [y_train] * 2,
                batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0),
                epochs=3,
            )

            # fit with validation data and accuracy
            model.fit(
                [x_train] * 2,
                [y_train] * 2,
                batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1),
                epochs=2,
            )

            # fit generator without validation data
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                callbacks=callbacks_factory(histogram_freq=0),
            )

            # fit generator with validation data and accuracy
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1),
            )
            assert os.path.isdir(filepath)
Example #16
    def _test_optimizer(self, optimizer, target=0.75):
        if tf.executing_eagerly():
            self.skipTest('v1 optimizer does not run in eager mode')
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=1000,
                                                         test_samples=200,
                                                         input_shape=(10, ),
                                                         num_classes=2)
        y_train = np_utils.to_categorical(y_train)
        model = _get_model(x_train.shape[1], 20, y_train.shape[1])
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'],
                      run_eagerly=test_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 0)
        history = model.fit(x_train,
                            y_train,
                            epochs=2,
                            batch_size=16,
                            verbose=0)
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # 63 steps per epoch
        self.assertGreaterEqual(history.history['acc'][-1], target)
        config = keras.optimizers.serialize(optimizer)
        optim = keras.optimizers.deserialize(config)
        new_config = keras.optimizers.serialize(optim)
        new_config['class_name'] = new_config['class_name'].lower()
        new_config['config'].pop('name', None)
        if 'amsgrad' not in config['config']:
            new_config['config'].pop('amsgrad', None)
        if 'decay' in new_config['config'] and 'schedule_decay' in config[
                'config']:
            new_config['config']['schedule_decay'] = new_config['config'].pop(
                'decay')
        if 'momentum' not in config['config']:
            new_config['config'].pop('momentum', None)
        if 'centered' not in config['config']:
            new_config['config'].pop('centered', None)
        self.assertDictEqual(config, new_config)

        # Test constraints.
        model = keras.models.Sequential()
        dense = keras.layers.Dense(10,
                                   input_shape=(x_train.shape[1], ),
                                   kernel_constraint=lambda x: 0. * x + 1.,
                                   bias_constraint=lambda x: 0. * x + 2.,
                                   activation='relu')
        model.add(dense)
        model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'],
                      run_eagerly=test_utils.should_run_eagerly())
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # Using same optimizer from before
        model.train_on_batch(x_train[:10], y_train[:10])
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 127)
        kernel, bias = dense.get_weights()
        np.testing.assert_allclose(kernel, 1., atol=1e-3)
        np.testing.assert_allclose(bias, 2., atol=1e-3)
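The block of pops above exists because the v1 and v2 optimizers do not share an exact config schema. For a single optimizer whose get_config/from_config pair is lossless, the round trip is the identity (illustrative sketch with a v2 optimizer):

    from tensorflow import keras

    cfg = keras.optimizers.serialize(keras.optimizers.get("rmsprop"))
    opt = keras.optimizers.deserialize(cfg)
    assert keras.optimizers.serialize(opt) == cfg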
Example #17
    def test_time_major_and_go_backward(self, time_major, go_backwards):
        input_shape = 10
        rnn_state_size = 8
        timestep = 4
        batch = 100

        x_train = np.random.random((batch, timestep, input_shape))

        def build_model(layer_cls):
            inputs = keras.layers.Input(shape=[timestep, input_shape],
                                        dtype=tf.float32)
            layer = layer_cls(rnn_state_size,
                              recurrent_activation='sigmoid',
                              time_major=time_major,
                              return_sequences=True,
                              go_backwards=go_backwards)
            if time_major:
                converted_input = keras.layers.Lambda(
                    lambda t: tf.transpose(t, [1, 0, 2]))(inputs)
                outputs = layer(converted_input)
                outputs = keras.layers.Lambda(
                    lambda t: tf.transpose(t, [1, 0, 2]))(outputs)
            else:
                outputs = layer(inputs)
            return keras.models.Model(inputs, outputs)

        lstm_model = build_model(rnn_v1.LSTM)
        y_ref = lstm_model.predict(x_train)
        weights = lstm_model.get_weights()

        lstm_v2_model = build_model(rnn.LSTM)
        lstm_v2_model.set_weights(weights)
        y = lstm_v2_model.predict(x_train)

        self.assertAllClose(y, y_ref)

        input_shape = 10
        rnn_state_size = 8
        output_shape = 8
        timestep = 4
        batch = 100
        epoch = 10

        (x_train,
         y_train), _ = test_utils.get_test_data(train_samples=batch,
                                                test_samples=0,
                                                input_shape=(timestep,
                                                             input_shape),
                                                num_classes=output_shape)
        y_train = np_utils.to_categorical(y_train, output_shape)

        layer = rnn.LSTM(rnn_state_size)

        inputs = keras.layers.Input(shape=[timestep, input_shape],
                                    dtype=tf.float32)

        outputs = layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile('rmsprop', loss='mse')
        model.fit(x_train, y_train, epochs=epoch)
        model.evaluate(x_train, y_train)
        model.predict(x_train)
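The Lambda layers above convert between batch-major and time-major layouts; the conversion is just a swap of the first two axes (sketch):

    import numpy as np

    x = np.zeros((100, 4, 10))         # batch-major: (batch, timestep, feature)
    x_tm = np.transpose(x, [1, 0, 2])  # time-major:  (timestep, batch, feature)
    print(x_tm.shape)                  # (4, 100, 10)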
Example #18
    def _test_optimizer(self, optimizer, target=0.75):
        if tf.executing_eagerly():
            self.skipTest("v1 optimizer does not run in eager mode")
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=1000,
            test_samples=200,
            input_shape=(10, ),
            num_classes=2,
        )
        y_train = np_utils.to_categorical(y_train)
        model = _get_model(x_train.shape[1], 20, y_train.shape[1])
        model.compile(
            loss="categorical_crossentropy",
            optimizer=optimizer,
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 0)
        history = model.fit(x_train,
                            y_train,
                            epochs=2,
                            batch_size=16,
                            verbose=0)
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # 63 steps per epoch
        self.assertGreaterEqual(history.history["acc"][-1], target)
        config = keras.optimizers.serialize(optimizer)
        optim = keras.optimizers.deserialize(config)
        new_config = keras.optimizers.serialize(optim)
        new_config["class_name"] = new_config["class_name"].lower()
        new_config["config"].pop("name", None)
        if "amsgrad" not in config["config"]:
            new_config["config"].pop("amsgrad", None)
        if ("decay" in new_config["config"]
                and "schedule_decay" in config["config"]):
            new_config["config"]["schedule_decay"] = new_config["config"].pop(
                "decay")
        if "momentum" not in config["config"]:
            new_config["config"].pop("momentum", None)
        if "centered" not in config["config"]:
            new_config["config"].pop("centered", None)
        self.assertDictEqual(config, new_config)

        # Test constraints.
        model = keras.models.Sequential()
        dense = keras.layers.Dense(
            10,
            input_shape=(x_train.shape[1], ),
            kernel_constraint=lambda x: 0.0 * x + 1.0,
            bias_constraint=lambda x: 0.0 * x + 2.0,
            activation="relu",
        )
        model.add(dense)
        model.add(keras.layers.Dense(y_train.shape[1], activation="softmax"))
        model.compile(
            loss="categorical_crossentropy",
            optimizer=optimizer,
            metrics=["accuracy"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations),
            126)  # Using same optimizer from before
        model.train_on_batch(x_train[:10], y_train[:10])
        np.testing.assert_equal(
            keras.backend.get_value(model.optimizer.iterations), 127)
        kernel, bias = dense.get_weights()
        np.testing.assert_allclose(kernel, 1.0, atol=1e-3)
        np.testing.assert_allclose(bias, 2.0, atol=1e-3)