Example #1
    def test_generator_methods(self):
        model = keras.Sequential()
        model.add(keras.layers.Dense(4, input_shape=(3, )))
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        model.compile(optimizer,
                      'mse',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        x = np.random.random((10, 3))
        y = np.random.random((10, 4))

        def iterator():
            while True:
                yield x, y

        model.fit_generator(iterator(), steps_per_epoch=3, epochs=1)
        model.evaluate_generator(iterator(), steps=3)
        out = model.predict_generator(iterator(), steps=3)
        # 3 steps of batch size 10 -> 30 predictions
        self.assertEqual(out.shape, (30, 4))
Example #2
    def test_subclass_nested_in_graph(self):
        num_classes = 2
        num_samples = 100
        input_dim = 50

        model = get_nested_model_3(input_dim=input_dim,
                                   num_classes=num_classes)
        model.compile(loss='mse',
                      optimizer=RMSPropOptimizer(learning_rate=0.001),
                      metrics=['acc'])

        x = np.ones((num_samples, input_dim))
        y = np.zeros((num_samples, num_classes))

        model.fit(x, y, epochs=2, batch_size=32, verbose=0)
        _ = model.evaluate(x, y, verbose=0)

        self.assertEqual(len(model.weights), 16)
        self.assertEqual(len(model.non_trainable_weights), 4)
        self.assertEqual(len(model.trainable_weights), 12)
Example #3
  def test_iterators_running_out_of_data(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics,
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(2)
    # 20 samples batched by 10 yield only 2 batches, but fit() below asks for
    # 3 steps, so the iterator runs out of data and a warning is logged.
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)

    with test.mock.patch.object(logging, 'warning') as mock_log:
      model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'dataset iterator ran out of data')
Example #4
def keras_model_fn(hyperparameters):
    model = Sequential()

    model.add(Conv2D(64, kernel_size=(3, 3), input_shape=(HEIGHT, WIDTH, DEPTH), activation="relu", name="inputs",
                     padding="same"))
    model.add(MaxPooling2D())

    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu", padding="same"))
    model.add(MaxPooling2D())

    model.add(Conv2D(96, kernel_size=(3, 3), activation="relu", padding="same"))
    model.add(Flatten())

    model.add(Dense(256, activation="relu"))
    model.add(Dense(2, activation="softmax"))

    opt = RMSPropOptimizer(learning_rate=hyperparameters['learning_rate'], decay=hyperparameters['decay'])

    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
    return model
Example #5
  def test_timedistributed_dense(self):
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(2), input_shape=(3, 4)))
    model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
    model.fit(
        np.random.random((10, 3, 4)),
        np.random.random((10, 3, 2)),
        epochs=1,
        batch_size=10)

    # test config
    model.get_config()

    # check whether the model variables are present in the
    # checkpointable list of objects
    checkpointed_objects = set(checkpointable_util.list_objects(model))
    for v in model.variables:
      self.assertIn(v, checkpointed_objects)
Example #6
    def test_generator_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        def custom_generator():
            while True:
                yield np.ones([10, 10], np.float32), np.ones([10, 1],
                                                             np.float32)

        inputs = keras.layers.Input(shape=(10, ))
        x = keras.layers.Dense(10, activation='relu')(inputs)
        outputs = keras.layers.Dense(1, activation='sigmoid')(x)
        model = keras.Model(inputs, outputs)

        model.compile(RMSPropOptimizer(0.001), 'binary_crossentropy')
        model.fit(custom_generator(),
                  steps_per_epoch=2,
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(custom_generator(), steps=2)
        model.predict(custom_generator(), steps=2)
Example #7
 def test_loss_correctness(self):
     # Test that training loss is the same in eager and graph
     # (by comparing it to a reference value in a deterministic case)
     model = keras.Sequential()
     model.add(
         keras.layers.Dense(3,
                            activation='relu',
                            input_dim=4,
                            kernel_initializer='ones'))
     model.add(
         keras.layers.Dense(2,
                            activation='softmax',
                            kernel_initializer='ones'))
     model.compile(loss='sparse_categorical_crossentropy',
                   optimizer=RMSPropOptimizer(learning_rate=0.001))
     x = np.ones((100, 4))
     np.random.seed(123)
     y = np.random.randint(0, 1, size=(100, 1))
     history = model.fit(x, y, epochs=1, batch_size=10)
     self.assertAlmostEqual(history.history['loss'][-1], 0.6173, 4)
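The reference value can be sanity-checked by hand. With kernel_initializer='ones', zero biases (the Keras default), and all-ones inputs, both class logits are equal before any update, so the first-batch loss is ln 2 ≈ 0.6931; the 0.6173 that fit() reports is the average over the epoch's 10 batches as RMSProp lowers the loss. A minimal check of the initial value, under those assumptions:

import numpy as np

# hidden = relu(1 * 4) = 4 for each of the 3 units, so both class logits are
# 3 * 4 = 12; equal logits give softmax [0.5, 0.5], and with all targets 0 the
# sparse categorical cross-entropy of the first batch is -log(0.5) = ln(2).
print(np.log(2.0))  # ~0.6931, before any RMSProp update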
Example #8
    def test_saving(self):

        num_classes = (2, 3)
        num_samples = 100
        input_dim = 50

        x1 = np.ones((num_samples, input_dim))
        x2 = np.ones((num_samples, input_dim))
        y1 = np.zeros((num_samples, num_classes[0]))
        y2 = np.zeros((num_samples, num_classes[1]))

        model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
        model.compile(loss='mse',
                      optimizer=RMSPropOptimizer(learning_rate=0.001))
        model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
        y_ref_1, y_ref_2 = model.predict([x1, x2])

        tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
        model.save_weights(tf_format_name)
        if h5py is not None:
            hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
            model.save_weights(hdf5_format_name)

        model = MultiIOTestModel(num_classes=num_classes, use_bn=True)

        if h5py is not None:
            with self.assertRaises(ValueError):
                model.load_weights(hdf5_format_name)

        model.load_weights(tf_format_name)

        y1, y2 = model.predict([x1, x2])
        self.assertAllClose(y_ref_1, y1, atol=1e-5)
        self.assertAllClose(y_ref_2, y2, atol=1e-5)

        if h5py is not None:
            model.load_weights(hdf5_format_name)

            y1, y2 = model.predict([x1, x2])
            self.assertAllClose(y_ref_1, y1, atol=1e-5)
            self.assertAllClose(y_ref_2, y2, atol=1e-5)
Example #9
 def test_loss_correctness_with_iterator(self):
   # Test that training loss is the same in eager and graph
   # (by comparing it to a reference value in a deterministic case)
   model = keras.Sequential()
   model.add(
       keras.layers.Dense(
           3, activation='relu', input_dim=4, kernel_initializer='ones'))
   model.add(
       keras.layers.Dense(2, activation='softmax', kernel_initializer='ones'))
   model.compile(
       loss='sparse_categorical_crossentropy',
       optimizer=RMSPropOptimizer(learning_rate=0.001))
   x = np.ones((100, 4), dtype=np.float32)
   np.random.seed(123)
   y = np.random.randint(0, 1, size=(100, 1))
   dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
   dataset = dataset.repeat(100)
   dataset = dataset.batch(10)
   iterator = dataset.make_one_shot_iterator()
   history = model.fit(iterator, epochs=1, steps_per_epoch=10)
   self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)
Example #10
    def test_dataset_with_sample_weights(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        sample_weights = np.ones((10), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
Example #11
    def test_single_io_workflow_with_dataset_iterators(self):
        num_classes = 2
        num_samples = 10
        input_dim = 50

        with self.test_session():
            model = SimpleTestModel(num_classes=num_classes,
                                    use_dp=True,
                                    use_bn=True)
            model.compile(loss='mse',
                          optimizer=RMSPropOptimizer(learning_rate=0.001))

            x = np.ones((num_samples, input_dim))
            y = np.zeros((num_samples, num_classes))
            dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
            dataset = dataset.repeat(100)
            dataset = dataset.batch(10)
            iterator = dataset.make_one_shot_iterator()

            model.fit(iterator, epochs=2, steps_per_epoch=10, verbose=0)
            _ = model.evaluate(iterator, steps=10, verbose=0)
Example #12
  def test_specify_state_with_masking(self):
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    inputs = keras.Input((timesteps, embedding_dim))
    _ = keras.layers.Masking()(inputs)
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)

    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(0.01))

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
Example #13
  def test_model_methods_with_eager_tensors_single_io(self):
    x = keras.layers.Input(shape=(3,), name='input')
    y = keras.layers.Dense(4, name='dense')(x)
    model = keras.Model(x, y)

    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)

    inputs = keras.backend.zeros(shape=(10, 3))
    targets = keras.backend.zeros(shape=(10, 4))

    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
    model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
    model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
              validation_data=(inputs, targets))
    model.evaluate(inputs, targets, batch_size=2, verbose=0)
    model.predict(inputs, batch_size=2)
    model.train_on_batch(inputs, targets)
    model.test_on_batch(inputs, targets)
Example #14
    def test_generator_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        def ones_generator():
            while True:
                yield np.ones([10, 10], np.float32), np.ones([10, 1],
                                                             np.float32)

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(RMSPropOptimizer(0.001),
                      'binary_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(ones_generator(),
                  steps_per_epoch=2,
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(ones_generator(), steps=2)
        model.predict(ones_generator(), steps=2)
Example #15
  def test_calling_model_on_same_dataset(self):
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    # Call fit with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    # Finalize the graph to make sure new ops aren't added when calling on the
    # same dataset
    ops.get_default_graph().finalize()
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
Example #16
 def test_metrics_correctness(self):
     model = keras.Sequential()
     model.add(
         keras.layers.Dense(3,
                            activation='relu',
                            input_dim=4,
                            kernel_initializer='ones'))
     model.add(
         keras.layers.Dense(1,
                            activation='sigmoid',
                            kernel_initializer='ones'))
     model.compile(loss='mae',
                   metrics=['acc'],
                   optimizer=RMSPropOptimizer(learning_rate=0.001))
     x = np.ones((100, 4))
     y = np.ones((100, 1))
     outs = model.evaluate(x, y)
     self.assertEqual(outs[1], 1.)
     y = np.zeros((100, 1))
     outs = model.evaluate(x, y)
     self.assertEqual(outs[1], 0.)
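The expected accuracies follow directly from the all-ones initialization: each of the three hidden units outputs relu(1 * 4) = 4, the output logit is 3 * 4 = 12, and sigmoid(12) ≈ 1, so every prediction rounds to class 1. Accuracy is therefore 1.0 against all-ones targets and 0.0 against all-zeros targets. A quick check, assuming zero biases (the Keras default):

import numpy as np

# sigmoid(12) is effectively 1, so every prediction thresholds to class 1
print(1.0 / (1.0 + np.exp(-12.0)))  # ~0.999994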
Example #17
def keras_model_fn(hyperparameters):
    """keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.
    The model will be transformed into a TensorFlow Estimator before training and it will be saved in a 
    TensorFlow Serving SavedModel at the end of training.

    Args:
        hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow 
                         training script.
    Returns: A compiled Keras model
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    
    opt = RMSPropOptimizer(learning_rate=hyperparameters['learning_rate'], decay=hyperparameters['decay'])

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    return model
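For context, a minimal sketch of how such a model fn might be smoke-tested locally; the constant values, hyperparameter values, and the explicit model_to_estimator call are illustrative assumptions (SageMaker normally performs the Estimator conversion described in the docstring):

import tensorflow as tf

# Hypothetical values: HEIGHT, WIDTH, DEPTH and NUM_CLASSES must be defined at
# module level before keras_model_fn runs; 'decay' here feeds RMSProp's decay
# argument.
HEIGHT, WIDTH, DEPTH, NUM_CLASSES = 32, 32, 3, 10

model = keras_model_fn({'learning_rate': 1e-4, 'decay': 0.9})
estimator = tf.keras.estimator.model_to_estimator(keras_model=model)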
Example #18
def keras_model_fn(hyperparameters):
    
    model = Sequential()

    model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    
    opt = RMSPropOptimizer(learning_rate=hyperparameters['learning_rate'], decay=hyperparameters['decay'])

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    return model
Example #19
def get_model(
    implementation,
    filters,
    kernel_size,
    strides,
    layers,
    num_classes,
    data_format,
):
    model = keras.Sequential()

    if len(kernel_size) == 1:
        lc_layer = keras.layers.LocallyConnected1D
    elif len(kernel_size) == 2:
        lc_layer = keras.layers.LocallyConnected2D
    else:
        raise NotImplementedError(kernel_size)

    for _ in range(layers):
        model.add(
            lc_layer(
                padding="valid",
                kernel_initializer=keras.initializers.random_normal(),
                bias_initializer=keras.initializers.random_normal(),
                filters=filters,
                strides=strides,
                kernel_size=kernel_size,
                activation=keras.activations.relu,
                data_format=data_format,
                implementation=implementation,
            ))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(num_classes))
    model.compile(
        optimizer=RMSPropOptimizer(0.01),
        metrics=[keras.metrics.categorical_accuracy],
        loss=keras.losses.CategoricalCrossentropy(from_logits=True),
    )
    return model
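A hypothetical invocation, to make the parameter-heavy signature concrete; all argument values below are illustrative and not from the original source:

# A 2-tuple kernel_size selects LocallyConnected2D (a 1-tuple would select the
# 1D variant); implementation switches between the layer's internal strategies.
model = get_model(
    implementation=1,
    filters=4,
    kernel_size=(3, 3),
    strides=(1, 1),
    layers=2,
    num_classes=10,
    data_format='channels_last',
)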
Example #20
    def test_get_next_op_created_once(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        # Finalize graph to make sure we are not appending another iterator
        # get_next op in the graph.
        ops.get_default_graph().finalize()
        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
Example #21
    def test_multi_io_workflow_with_tensors(self):
        num_classes = (2, 3)
        num_samples = 10
        input_dim = 50

        with self.cached_session():
            model = MultiIOTestModel(num_classes=num_classes,
                                     use_dp=True,
                                     use_bn=True)
            model.compile(loss='mse',
                          optimizer=RMSPropOptimizer(learning_rate=0.001))

            x1 = array_ops.ones((num_samples, input_dim))
            x2 = array_ops.ones((num_samples, input_dim))
            y1 = array_ops.zeros((num_samples, num_classes[0]))
            y2 = array_ops.zeros((num_samples, num_classes[1]))

            model.fit([x1, x2], [y1, y2],
                      epochs=2,
                      steps_per_epoch=10,
                      verbose=0)
            _ = model.evaluate([x1, x2], [y1, y2], steps=10, verbose=0)
Example #22
  def test_inference_methods(self):
    # test predict, evaluate, test_on_batch, predict_on_batch
    # on different input types: list, dict
    num_classes = (2, 3)
    num_samples = 100
    input_dim = 50

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.evaluate([x1, x2], [y1, y2])
    model.test_on_batch([x1, x2], [y1, y2])

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.predict([x1, x2])

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.predict_on_batch([x1, x2])
Example #23
  def test_graph_nested_in_subclass(self):
    num_classes = 2
    num_samples = 100
    input_dim = 50

    with self.test_session():
      model = NestedTestModel2(num_classes=num_classes)
      model.compile(loss='mse',
                    optimizer=RMSPropOptimizer(learning_rate=0.001),
                    metrics=['acc'])

      x = np.ones((num_samples, input_dim))
      y = np.zeros((num_samples, num_classes))

      model.fit(x, y, epochs=2, batch_size=32, verbose=0)
      _ = model.evaluate(x, y, verbose=0)

      self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
      self.assertEqual(len(model.non_trainable_weights),
                       2 + len(model.test_net.non_trainable_weights))
      self.assertEqual(len(model.trainable_weights),
                       6 + len(model.test_net.trainable_weights))
Example #24
  def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
    num_classes = (2, 3)
    num_samples = 1000
    input_dim = 50

    with self.cached_session():
      model = MultiIOTestModel(num_classes=num_classes,
                               use_dp=True,
                               use_bn=True)
      model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))

      x1 = np.ones((num_samples, input_dim))
      x2 = np.ones((num_samples, input_dim))
      y1 = np.zeros((num_samples, num_classes[0]))
      y2 = np.zeros((num_samples, num_classes[1]))

      x2_placeholder = array_ops.placeholder(
          dtype='float32', shape=(None, input_dim))
      model._set_inputs([x1, x2_placeholder])

      model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
      _ = model.evaluate([x1, x2], [y1, y2], verbose=0)
Example #25
    def test_bidirectional(self):
        rnn = keras.layers.SimpleRNN
        samples = 2
        dim = 2
        timesteps = 2
        output_dim = 2
        with self.cached_session():
            for mode in ['sum', 'concat', 'ave', 'mul']:
                x = np.random.random((samples, timesteps, dim))
                target_dim = 2 * output_dim if mode == 'concat' else output_dim
                y = np.random.random((samples, target_dim))

                # test with Sequential model
                model = keras.models.Sequential()
                model.add(
                    keras.layers.Bidirectional(rnn(output_dim),
                                               merge_mode=mode,
                                               input_shape=(timesteps, dim)))
                model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
                model.fit(x, y, epochs=1, batch_size=1)

                # check whether the model variables are present in the
                # checkpointable list of objects
                checkpointed_objects = set(
                    checkpointable_util.list_objects(model))
                for v in model.variables:
                    self.assertIn(v, checkpointed_objects)

                # test compute output shape
                ref_shape = model.layers[-1].output.get_shape()
                shape = model.layers[-1].compute_output_shape(
                    (None, timesteps, dim))
                self.assertListEqual(shape.as_list(), ref_shape.as_list())

                # test config
                model.get_config()
                model = keras.models.model_from_json(model.to_json())
                model.summary()
Example #26
    def test_reset_after_GRU(self):
        num_samples = 2
        timesteps = 3
        embedding_dim = 4
        units = 2

        (x_train,
         y_train), _ = testing_utils.get_test_data(train_samples=num_samples,
                                                   test_samples=0,
                                                   input_shape=(timesteps,
                                                                embedding_dim),
                                                   num_classes=units)
        y_train = keras.utils.to_categorical(y_train, units)

        inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
        gru_layer = keras.layers.GRU(units, reset_after=True)
        output = gru_layer(inputs)
        gru_model = keras.models.Model(inputs, output)
        gru_model.compile(RMSPropOptimizer(0.01),
                          'mse',
                          run_eagerly=testing_utils.should_run_eagerly())
        gru_model.fit(x_train, y_train)
        gru_model.predict(x_train)
Example #27
    def test_metrics_correctness_with_iterator(self):
        layers = [
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'),
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones')
        ]

        model = testing_utils.get_model_from_layers(layers, (4, ))

        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer=RMSPropOptimizer(learning_rate=0.001),
                      run_eagerly=testing_utils.should_run_eagerly())

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

        y = np.zeros((100, 1), dtype=np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
Example #28
    def test_calling_model_on_same_dataset(self):
        if ((not testing_utils.should_run_eagerly())
                and testing_utils.get_model_type() == 'subclass'
                and context.executing_eagerly()):
            self.skipTest('b/120673224')

        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        # Call fit with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
        # Finalize the graph to make sure new ops aren't added when calling on the
        # same dataset
        ops.get_default_graph().finalize()
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
Example #29
    def test_training_and_eval_methods_on_iterators_single_io(self, model):
        if model == 'functional':
            model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        elif model == 'subclass':
            model = testing_utils.get_small_sequential_mlp(1, 4)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(iterator, steps=2, verbose=1)
        model.predict(iterator, steps=2)

        # Test with validation data
        model.fit(iterator,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=iterator,
                  validation_steps=2)
        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(iterator,
                      iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(
                ValueError,
                'you should specify the `steps_per_epoch` argument'):
            model.fit(iterator, epochs=1, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.evaluate(iterator, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.predict(iterator, verbose=0)
Example #30
def layer_test(layer_cls,
               kwargs=None,
               input_shape=None,
               input_dtype=None,
               input_data=None,
               expected_output=None,
               expected_output_dtype=None):
    """Test routine for a layer with a single input and single output.

  Arguments:
    layer_cls: Layer class object.
    kwargs: Optional dictionary of keyword arguments for instantiating the
      layer.
    input_shape: Input shape tuple.
    input_dtype: Data type of the input data.
    input_data: Numpy array of input data.
    expected_output: Numpy array of the expected output, compared against the
      layer's actual output.
    expected_output_dtype: Data type expected for the output.

  Returns:
    The output data (Numpy array) returned by the layer, for additional
    checks to be done by the calling code.
  """
    if input_data is None:
        assert input_shape
        if not input_dtype:
            input_dtype = 'float32'
        input_data_shape = list(input_shape)
        for i, e in enumerate(input_data_shape):
            if e is None:
                input_data_shape[i] = np.random.randint(1, 4)
        input_data = 10 * np.random.random(input_data_shape)
        if input_dtype[:5] == 'float':
            input_data -= 0.5
        input_data = input_data.astype(input_dtype)
    elif input_shape is None:
        input_shape = input_data.shape
    if input_dtype is None:
        input_dtype = input_data.dtype
    if expected_output_dtype is None:
        expected_output_dtype = input_dtype

    # instantiation
    kwargs = kwargs or {}
    layer = layer_cls(**kwargs)

    # test get_weights, set_weights at layer level
    weights = layer.get_weights()
    layer.set_weights(weights)

    # test instantiation from weights
    if 'weights' in tf_inspect.getargspec(layer_cls.__init__).args:
        kwargs['weights'] = weights
        layer = layer_cls(**kwargs)

    # test in functional API
    x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
    y = layer(x)
    if keras.backend.dtype(y) != expected_output_dtype:
        raise AssertionError(
            'When testing layer %s, for input %s, found output '
            'dtype=%s but expected to find %s.\nFull kwargs: %s' %
            (layer_cls.__name__, x, keras.backend.dtype(y),
             expected_output_dtype, kwargs))
    # check shape inference
    model = keras.models.Model(x, y)
    expected_output_shape = tuple(
        layer.compute_output_shape(
            tensor_shape.TensorShape(input_shape)).as_list())
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            if expected_dim != actual_dim:
                raise AssertionError(
                    'When testing layer %s, for input %s, found output_shape='
                    '%s but expected to find %s.\nFull kwargs: %s' %
                    (layer_cls.__name__, x, actual_output_shape,
                     expected_output_shape, kwargs))
    if expected_output is not None:
        np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)

    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = keras.models.Model.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        output = recovered_model.predict(input_data)
        np.testing.assert_allclose(output, actual_output, rtol=1e-3)

    # test training mode (e.g. useful for dropout tests)
    model.compile(RMSPropOptimizer(0.01), 'mse')
    model.train_on_batch(input_data, actual_output)

    # test as first layer in Sequential API
    layer_config = layer.get_config()
    layer_config['batch_input_shape'] = input_shape
    layer = layer.__class__.from_config(layer_config)

    model = keras.models.Sequential()
    model.add(layer)
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            if expected_dim != actual_dim:
                raise AssertionError(
                    'When testing layer %s, for input %s, found output_shape='
                    '%s but expected to find %s.\nFull kwargs: %s' %
                    (layer_cls.__name__, x, actual_output_shape,
                     expected_output_shape, kwargs))
    if expected_output is not None:
        np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)

    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = keras.models.Sequential.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        output = recovered_model.predict(input_data)
        np.testing.assert_allclose(output, actual_output, rtol=1e-3)

    # for further checks in the caller function
    return actual_output
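A minimal usage sketch (this call is hypothetical, not from the original source): it runs a Dense layer through the functional-API, Sequential, serialization, and training checks above, with random input generated from input_shape:

# Exercises keras.layers.Dense end to end on a random float32 batch of
# shape (2, 4); returns the layer's output for any further checks.
output = layer_test(
    keras.layers.Dense,
    kwargs={'units': 3},
    input_shape=(2, 4))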