Example #1
    def test_multi_output_model_with_none_masking(self):

        with self.test_session():

            def func(x):
                return [x * 0.2, x * 0.3]

            def output_shape(input_shape):
                return [input_shape, input_shape]

            i = keras.layers.Input(shape=(3, 2, 1))
            o = keras.layers.Lambda(function=func,
                                    output_shape=output_shape)(i)

            self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))
            self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))

            o = keras.layers.add(o)
            model = keras.Model(i, o)

            i2 = keras.layers.Input(shape=(3, 2, 1))
            o2 = model(i2)
            model2 = keras.Model(i2, o2)

            x = np.random.random((4, 3, 2, 1))
            out = model2.predict(x)
            assert out.shape == (4, 3, 2, 1)
            self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
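These snippets are excerpted from the TensorFlow Keras test-suite, so module-level imports are omitted. A minimal preamble that would make Example #1 runnable inside a `tf.test.TestCase` subclass (an assumption; the original file headers are not part of this listing):

# Presumed imports shared by the snippets below (assumption: the tests
# alias Keras from the TensorFlow source tree).
import numpy as np
from tensorflow.python import keras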
Example #2
  def test_cudnnrnn_bidirectional(self):
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        rnn = keras.layers.CuDNNGRU
        samples = 2
        dim = 2
        timesteps = 2
        output_dim = 2
        mode = 'concat'

        x = np.random.random((samples, timesteps, dim))
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))

        # test with Sequential model
        model = keras.Sequential()
        model.add(
            keras.layers.Bidirectional(
                rnn(output_dim), merge_mode=mode, input_shape=(None, dim)))
        model.compile(
            loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
        model.fit(x, y, epochs=1, batch_size=1)

        # test config
        model.get_config()
        model = keras.models.model_from_json(model.to_json())
        model.summary()

        # test stacked bidirectional layers
        model = keras.Sequential()
        model.add(
            keras.layers.Bidirectional(
                rnn(output_dim, return_sequences=True),
                merge_mode=mode,
                input_shape=(None, dim)))
        model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(
            loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
        model.fit(x, y, epochs=1, batch_size=1)

        # test with functional API
        inputs = keras.Input((timesteps, dim))
        outputs = keras.layers.Bidirectional(
            rnn(output_dim), merge_mode=mode)(
                inputs)
        model = keras.Model(inputs, outputs)
        model.compile(
            loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
        model.fit(x, y, epochs=1, batch_size=1)

        # Bidirectional and stateful
        inputs = keras.Input(batch_shape=(1, timesteps, dim))
        outputs = keras.layers.Bidirectional(
            rnn(output_dim, stateful=True), merge_mode=mode)(
                inputs)
        model = keras.Model(inputs, outputs)
        model.compile(
            loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
        model.fit(x, y, epochs=1, batch_size=1)
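Beyond the shared preamble, Example #2 references `test` and `RMSPropOptimizer`; the likely imports (an assumption based on the TensorFlow test tree) are:

# Presumed additional imports for Example #2.
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer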
Example #3
    def test_Bidirectional_with_constants_layer_passing_initial_state(self):
        with self.test_session():
            # Test basic case.
            x = keras.Input((5, 5))
            c = keras.Input((3, ))
            s_for = keras.Input((32, ))
            s_bac = keras.Input((32, ))
            cell = _RNNCellWithConstants(32)
            custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
            y = layer(x, initial_state=[s_for, s_bac], constants=c)
            model = keras.Model([x, s_for, s_bac, c], y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch([
                np.zeros((6, 5, 5)),
                np.zeros((6, 32)),
                np.zeros((6, 32)),
                np.zeros((6, 3))
            ], np.zeros((6, 64)))

            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            s_fw_np = np.random.random((6, 32))
            s_bk_np = np.random.random((6, 32))
            c_np = np.random.random((6, 3))
            y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            weights = model.get_weights()
            config = layer.get_config()

            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer(x, initial_state=[s_for, s_bac], constants=c)
            model = keras.Model([x, s_for, s_bac, c], y)
            model.set_weights(weights)
            y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

            # Verify that state is used
            y_np_2_different_s = model.predict(
                [x_np, s_fw_np + 10., s_bk_np + 10., c_np])
            assert np.mean(y_np - y_np_2_different_s) != 0

            # Test flat list inputs
            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.Bidirectional.from_config(
                    copy.deepcopy(config))
            y = layer([x, s_for, s_bac, c])
            model = keras.Model([x, s_for, s_bac, c], y)
            model.set_weights(weights)
            y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
            self.assertAllClose(y_np, y_np_3, atol=1e-4)
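Example #3 also needs `import copy` and a `_RNNCellWithConstants` helper defined elsewhere in the test module. The following is a hypothetical sketch of such a cell, modeled on the pattern used in Keras RNN tests (the weight names and initializers are assumptions): a minimal cell whose output mixes the current input, the previous state, and the constant.

import copy  # used by Example #3 for copy.deepcopy


class _RNNCellWithConstants(keras.layers.Layer):
  """Hypothetical sketch of the helper cell used in Example #3."""

  def __init__(self, units, **kwargs):
    self.units = units
    self.state_size = units
    super(_RNNCellWithConstants, self).__init__(**kwargs)

  def build(self, input_shape):
    # When constants are passed to the wrapping RNN layer, `input_shape`
    # arrives as [inputs_shape, constants_shape].
    input_shape, constant_shape = input_shape
    self.input_kernel = self.add_weight(
        shape=(input_shape[-1], self.units),
        initializer='uniform', name='kernel')
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        initializer='uniform', name='recurrent_kernel')
    self.constant_kernel = self.add_weight(
        shape=(constant_shape[-1], self.units),
        initializer='uniform', name='constant_kernel')
    self.built = True

  def call(self, inputs, states, constants):
    [prev_output] = states
    [constant] = constants
    output = (keras.backend.dot(inputs, self.input_kernel) +
              keras.backend.dot(prev_output, self.recurrent_kernel) +
              keras.backend.dot(constant, self.constant_kernel))
    return output, [output]

  def get_config(self):
    # Needed so Bidirectional.from_config can rebuild the cell.
    config = {'units': self.units}
    base_config = super(_RNNCellWithConstants, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))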
Example #4
    def test_saving_model_with_long_weights_names(self):
        if h5py is None:
            return  # Skip test if models cannot be saved.

        with self.test_session():
            x = keras.Input(shape=(2, ), name='nested_model_input')
            f = x
            for i in range(4):
                f = keras.layers.Dense(2,
                                       name='nested_model_dense_%d' % (i, ))(f)
            # This layer name will make the `weight_names`
            # HDF5 attribute blow out of proportion.
            f = keras.layers.Dense(2,
                                   name='nested_model_output' + ('x' *
                                                                 (2**14)))(f)
            nested_model = keras.Model(inputs=[x],
                                       outputs=[f],
                                       name='nested_model')

            x = keras.Input(shape=(2, ), name='outer_model_input')
            f = nested_model(x)
            f = keras.layers.Dense(2, name='outer_model_output')(f)

            model = keras.Model(inputs=[x], outputs=[f])
            model.compile(loss='mse', optimizer='adam', metrics=['acc'])

            x = np.random.random((1, 2))
            y = np.random.random((1, 2))
            model.train_on_batch(x, y)
            out = model.predict(x)

            fd, fname = tempfile.mkstemp('.h5')
            keras.models.save_model(model, fname)
            model = keras.models.load_model(fname)

            # Check that the HDF5 file contains a chunked array
            # of weight names.
            with h5py.File(fname, 'r') as h5file:
                num_weight_arrays = len([
                    attr
                    for attr in h5file['model_weights']['nested_model'].attrs
                    if attr.startswith('weight_names')
                ])
            # The chunking of the weight names array should have happened.
            self.assertGreater(num_weight_arrays, 0)
            out2 = model.predict(x)
            self.assertAllClose(out, out2, atol=1e-05)

            # Cleanup
            os.close(fd)
            os.remove(fname)
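Examples #4, #14, and #15 guard on `h5py is None`, which presumably mirrors the optional-import pattern used in the Keras sources (an assumption):

# Presumed imports for the model-saving examples (#4, #14, #15).
import os
import tempfile

try:
  import h5py
except ImportError:
  h5py = None  # the tests skip themselves when h5py is unavailable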
Example #5
    def test_using_tf_layers_in_keras_functional_model(self):
        with self.test_session():
            np.random.seed(1337)
            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=200,
                test_samples=100,
                input_shape=(10, ),
                num_classes=2)
            y_train = keras.utils.to_categorical(y_train)
            y_test = keras.utils.to_categorical(y_test)

            inputs = keras.Input(shape=(10, ))
            x = tf_core_layers.Dense(32, activation=nn.relu)(inputs)
            outputs = tf_core_layers.Dense(2, activation=nn.softmax)(x)
            model = keras.Model(inputs, outputs)
            model.summary()

            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])
            history = model.fit(x_train,
                                y_train,
                                epochs=10,
                                batch_size=16,
                                validation_data=(x_test, y_test),
                                verbose=0)
            self.assertGreater(history.history['val_acc'][-1], 0.85)
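Example #5 mixes `tf.layers` into a Keras functional model; `tf_core_layers`, `nn`, and `testing_utils` are presumably aliased as in the TensorFlow test tree (an assumption):

# Presumed additional imports for Example #5.
from tensorflow.python.keras import testing_utils
from tensorflow.python.layers import core as tf_core_layers
from tensorflow.python.ops import nn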
Example #6
def get_nested_model_3(input_dim, num_classes):
  # A functional-API model with a subclassed model inside.
  # NOTE: this requires the inner subclass to implement `compute_output_shape`.

  inputs = keras.Input(shape=(input_dim,))
  x = keras.layers.Dense(32, activation='relu')(inputs)
  x = keras.layers.BatchNormalization()(x)

  class Inner(keras.Model):

    def __init__(self):
      super(Inner, self).__init__()
      self.dense1 = keras.layers.Dense(32, activation='relu')
      self.dense2 = keras.layers.Dense(5, activation='relu')
      self.bn = keras.layers.BatchNormalization()

    def call(self, inputs):
      x = self.dense1(inputs)
      x = self.dense2(x)
      return self.bn(x)

    def compute_output_shape(self, input_shape):
      return tensor_shape.TensorShape((input_shape[0], 5))

  test_model = Inner()
  x = test_model(x)  # pylint: disable=not-callable
  outputs = keras.layers.Dense(num_classes)(x)
  return keras.Model(inputs, outputs, name='nested_model_3')
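`tensor_shape` in Example #6 is presumably the TensorFlow framework module (an assumption):

# Presumed additional import for Example #6.
from tensorflow.python.framework import tensor_shape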
Example #7
def get_functional_graph_model(input_dim, num_classes):
  # A simple functional-API model (a.k.a. graph network)
  inputs = keras.Input(shape=(input_dim,))
  x = keras.layers.Dense(32, activation='relu')(inputs)
  x = keras.layers.BatchNormalization()(x)
  outputs = keras.layers.Dense(num_classes)(x)
  return keras.Model(inputs, outputs)
Example #8
    def test_Bidirectional_state_reuse(self):
        rnn = keras.layers.LSTM
        samples = 2
        dim = 5
        timesteps = 3
        units = 3

        with self.test_session():
            inputs = keras.Input((timesteps, dim))
            layer = keras.layers.Bidirectional(
                rnn(units, return_state=True, return_sequences=True))
            outputs = layer(inputs)
            output, state = outputs[0], outputs[1:]

            # test passing invalid initial_state: passing a tensor
            with self.assertRaises(ValueError):
                output = keras.layers.Bidirectional(rnn(units))(
                    output, initial_state=state[0])

            # test valid usage: passing a list
            output = keras.layers.Bidirectional(rnn(units))(
                output, initial_state=state)
            model = keras.Model(inputs, output)
            inputs = np.random.rand(samples, timesteps, dim)
            outputs = model.predict(inputs)
Example #9
    def test_Bidirectional_dropout(self):
        rnn = keras.layers.LSTM
        samples = 2
        dim = 5
        timesteps = 3
        units = 3
        merge_mode = 'sum'
        x = [np.random.rand(samples, timesteps, dim)]

        with self.test_session():
            inputs = keras.Input((timesteps, dim))
            wrapped = keras.layers.Bidirectional(rnn(units,
                                                     dropout=0.2,
                                                     recurrent_dropout=0.2),
                                                 merge_mode=merge_mode)
            outputs = _to_list(wrapped(inputs, training=True))
            assert all(not getattr(o, '_uses_learning_phase') for o in outputs)

            inputs = keras.Input((timesteps, dim))
            wrapped = keras.layers.Bidirectional(rnn(units,
                                                     dropout=0.2,
                                                     return_state=True),
                                                 merge_mode=merge_mode)
            outputs = _to_list(wrapped(inputs))
            assert all(o._uses_learning_phase for o in outputs)

            model = keras.Model(inputs, outputs)
            assert model.uses_learning_phase
            y1 = _to_list(model.predict(x))
            y2 = _to_list(model.predict(x))
            for x1, x2 in zip(y1, y2):
                self.assertAllClose(x1, x2, atol=1e-5)
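Example #9 calls a module-level `_to_list` helper that is not shown. Judging from how it is used above, a minimal sketch (an assumption) would be:

def _to_list(x):
  """Wrap a lone tensor/array in a list; pass lists through unchanged."""
  if isinstance(x, list):
    return x
  return [x]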
Example #10
    def test_state_reuse_with_dropout(self):
        layer_class = keras.layers.SimpleRNN
        embedding_dim = 4
        units = 3
        timesteps = 2
        num_samples = 2

        with self.test_session():
            input1 = keras.Input(batch_shape=(num_samples, timesteps,
                                              embedding_dim))
            layer = layer_class(units,
                                return_state=True,
                                return_sequences=True,
                                dropout=0.2)
            state = layer(input1)[1:]

            input2 = keras.Input(batch_shape=(num_samples, timesteps,
                                              embedding_dim))
            output = layer_class(units)(input2, initial_state=state)
            model = keras.Model([input1, input2], output)

            inputs = [
                np.random.random((num_samples, timesteps, embedding_dim)),
                np.random.random((num_samples, timesteps, embedding_dim))
            ]
            model.predict(inputs)
Example #11
    def test_model_methods_with_eager_tensors_single_io(self):
        x = keras.layers.Input(shape=(3, ), name='input')
        y = keras.layers.Dense(4, name='dense')(x)
        model = keras.Model(x, y)

        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer, loss, metrics=metrics)

        inputs = keras.backend.zeros(shape=(10, 3))
        targets = keras.backend.zeros(shape=(10, 4))

        model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=3,
                  verbose=0,
                  shuffle=False)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=4,
                  verbose=0,
                  validation_data=(inputs, targets))
        model.evaluate(inputs, targets, batch_size=2, verbose=0)
        model.predict(inputs, batch_size=2)
        model.train_on_batch(inputs, targets)
        model.test_on_batch(inputs, targets)
Example #12
    def test_activity_regularization_with_model_composition(self):
        def reg(x):
            return keras.backend.sum(x)

        net_a_input = keras.Input((2, ))
        net_a = net_a_input
        net_a = keras.layers.Dense(2,
                                   kernel_initializer='ones',
                                   use_bias=False,
                                   activity_regularizer=reg)(net_a)
        model_a = keras.Model([net_a_input], [net_a])

        net_b_input = keras.Input((2, ))
        net_b = model_a(net_b_input)
        model_b = keras.Model([net_b_input], [net_b])

        model_b.compile(optimizer='sgd', loss=None)
        x = np.ones((1, 2))
        loss = model_b.evaluate(x)
        self.assertEqual(loss, 4.)
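A quick sanity check of the expected loss in Example #12 (reasoning added here, not part of the source): with a ones kernel, no bias, and `loss=None`, the only loss term is the activity regularizer, i.e. the sum of the layer's activations.

import numpy as np

x = np.ones((1, 2))
kernel = np.ones((2, 2))   # kernel_initializer='ones', use_bias=False
activations = x @ kernel   # [[2., 2.]]
print(activations.sum())   # 4.0 -- matches assertEqual(loss, 4.)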
Example #13
  def test_nested_model_with_tensor_input(self):
    gpus = 2
    input_dim = 10
    shape = (input_dim,)
    num_samples = 16
    num_classes = 10

    if not check_if_compatible_devices(gpus=gpus):
      return

    with self.test_session():
      input_shape = (num_samples,) + shape
      x_train = np.random.randint(0, 255, input_shape)
      y_train = np.random.randint(0, num_classes, (input_shape[0],))
      keras.backend.set_learning_phase(True)

      y_train = keras.utils.to_categorical(y_train, num_classes)

      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      dataset = data.Dataset.from_tensor_slices((x_train, y_train))
      dataset = dataset.repeat()
      dataset = dataset.batch(4)
      iterator = dataset.make_one_shot_iterator()

      inputs, targets = iterator.get_next()

      input_tensor = keras.layers.Input(tensor=inputs)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(3,
                                   input_shape=(input_dim,)))
      model.add(keras.layers.Dense(num_classes))

      output = model(input_tensor)
      outer_model = keras.Model(input_tensor, output)
      parallel_model = keras.utils.multi_gpu_model(outer_model, gpus=gpus)

      parallel_model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.RMSprop(lr=0.0001, decay=1e-6),
          metrics=['accuracy'],
          target_tensors=[targets])
      parallel_model.fit(epochs=1, steps_per_epoch=3)
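Example #13 depends on `check_if_compatible_devices` and a `data` alias, neither of which is defined in the snippet. A plausible sketch (assumptions: the module paths, and that the helper simply counts visible GPUs):

# Presumed import and a hypothetical re-implementation of the helper.
from tensorflow.python import data  # provides data.Dataset
from tensorflow.python.client import device_lib


def check_if_compatible_devices(gpus=2):
  """Return True when at least `gpus` GPU devices are visible locally."""
  device_types = [d.device_type for d in device_lib.list_local_devices()]
  return device_types.count('GPU') >= gpus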
Example #14
    def test_saving_model_with_long_layer_names(self):
        if h5py is None:
            return  # Skip test if models cannot be saved.

        with self.test_session():
            # This layer name will make the `layer_names` HDF5 attribute blow
            # out of proportion. Note that it fits into the internal HDF5
            # attribute memory limit on its own, but because h5py converts
            # the list of layer names into a NumPy array, which uses the same
            # amount of memory for every item, it increases the memory
            # requirements substantially.
            x = keras.Input(shape=(2, ), name='input_' + ('x' * (2**15)))
            f = x
            for i in range(4):
                f = keras.layers.Dense(2, name='dense_%d' % (i, ))(f)
            model = keras.Model(inputs=[x], outputs=[f])
            model.compile(loss='mse', optimizer='adam', metrics=['acc'])

            x = np.random.random((1, 2))
            y = np.random.random((1, 2))
            model.train_on_batch(x, y)
            out = model.predict(x)

            fd, fname = tempfile.mkstemp('.h5')
            keras.models.save_model(model, fname)
            model = keras.models.load_model(fname)

            # Check that the HDF5 file contains a chunked array
            # of layer names.
            with h5py.File(fname, 'r') as h5file:
                num_names_arrays = len([
                    attr for attr in h5file['model_weights'].attrs
                    if attr.startswith('layer_names')
                ])
            # The chunking of the layer names array should have happened.
            self.assertGreater(num_names_arrays, 0)
            out2 = model.predict(x)
            self.assertAllClose(out, out2, atol=1e-05)

            # Cleanup
            os.close(fd)
            os.remove(fname)
Example #15
  def test_model_saving_to_pre_created_h5py_file(self):
    if h5py is None:
      self.skipTest('h5py required to run this test')

    with self.test_session():
      inputs = keras.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      outputs = keras.layers.Dense(3)(x)

      model = keras.Model(inputs, outputs)
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.Adam(),
                    metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      fd, fname = tempfile.mkstemp('.h5')
      # `mkstemp` creates an empty file, so open it in write mode ('w');
      # 'r+' would fail since the empty file lacks an HDF5 signature.
      with h5py.File(fname, mode='w') as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Test non-default options in h5: an in-memory 'core'-driver file
      # that is never persisted to disk.
      with h5py.File('_', mode='w', driver='core',
                     backing_store=False) as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Cleanup
      os.close(fd)
      os.remove(fname)
Example #16
    def test_stateful_metrics(self):
        np.random.seed(1334)

        class BinaryTruePositives(keras.layers.Layer):
            """Stateful Metric to count the total true positives over all batches.

      Assumes predictions and targets of shape `(samples, 1)`.

      Arguments:
          threshold: Float, lower limit on prediction value that counts as a
              positive class prediction.
          name: String, name for the metric.
      """
            def __init__(self, name='true_positives', **kwargs):
                super(BinaryTruePositives, self).__init__(name=name, **kwargs)
                self.true_positives = keras.backend.variable(value=0,
                                                             dtype='int32')

            def reset_states(self):
                keras.backend.set_value(self.true_positives, 0)

            def __call__(self, y_true, y_pred):
                """Computes the number of true positives in a batch.

        Args:
            y_true: Tensor, batch_wise labels
            y_pred: Tensor, batch_wise predictions

        Returns:
            The total number of true positives seen this epoch at the
                completion of the batch.
        """
                y_true = keras.backend.cast(y_true, 'int32')
                y_pred = keras.backend.cast(keras.backend.round(y_pred),
                                            'int32')
                correct_preds = keras.backend.cast(
                    keras.backend.equal(y_pred, y_true), 'int32')
                true_pos = keras.backend.cast(
                    keras.backend.sum(correct_preds * y_true), 'int32')
                current_true_pos = self.true_positives * 1
                self.add_update(keras.backend.update_add(
                    self.true_positives, true_pos),
                                inputs=[y_true, y_pred])
                return current_true_pos + true_pos

        metric_fn = BinaryTruePositives()
        config = keras.metrics.serialize(metric_fn)
        metric_fn = keras.metrics.deserialize(
            config,
            custom_objects={'BinaryTruePositives': BinaryTruePositives})

        # Test on simple model
        inputs = keras.Input(shape=(2, ))
        outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
        model = keras.Model(inputs, outputs)
        model.compile(optimizer='sgd',
                      loss='binary_crossentropy',
                      metrics=['acc', metric_fn])

        # Test fit, evaluate
        samples = 1000
        x = np.random.random((samples, 2))
        y = np.random.randint(2, size=(samples, 1))
        model.fit(x, y, epochs=1, batch_size=10)
        outs = model.evaluate(x, y, batch_size=10)
        preds = model.predict(x)

        def ref_true_pos(y_true, y_pred):
            return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

        # Test correctness (e.g. updates should have been run)
        self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)
Example #17
  def test_stateful_metrics(self):
    with self.test_session():
      np.random.seed(1334)

      class BinaryTruePositives(keras.layers.Layer):
        """Stateful Metric to count the total true positives over all batches.

        Assumes predictions and targets of shape `(samples, 1)`.

        Arguments:
            name: String, name for the metric.
        """

        def __init__(self, name='true_positives', **kwargs):
          super(BinaryTruePositives, self).__init__(name=name, **kwargs)
          self.true_positives = keras.backend.variable(value=0, dtype='int32')
          self.stateful = True

        def reset_states(self):
          keras.backend.set_value(self.true_positives, 0)

        def __call__(self, y_true, y_pred):
          """Computes the number of true positives in a batch.

          Args:
              y_true: Tensor, batch-wise labels
              y_pred: Tensor, batch-wise predictions

          Returns:
              The total number of true positives seen this epoch at the
                  completion of the batch.
          """
          y_true = math_ops.cast(y_true, 'int32')
          y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
          correct_preds = math_ops.cast(math_ops.equal(y_pred, y_true), 'int32')
          true_pos = math_ops.cast(
              math_ops.reduce_sum(correct_preds * y_true), 'int32')
          current_true_pos = self.true_positives * 1
          self.add_update(
              state_ops.assign_add(self.true_positives, true_pos),
              inputs=[y_true, y_pred])
          return current_true_pos + true_pos

      metric_fn = BinaryTruePositives()
      config = keras.metrics.serialize(metric_fn)
      metric_fn = keras.metrics.deserialize(
          config, custom_objects={'BinaryTruePositives': BinaryTruePositives})

      # Test on simple model
      inputs = keras.Input(shape=(2,))
      outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(optimizer='sgd',
                    loss='binary_crossentropy',
                    metrics=['acc', metric_fn])

      # Test fit, evaluate
      samples = 100
      x = np.random.random((samples, 2))
      y = np.random.randint(2, size=(samples, 1))
      val_samples = 10
      val_x = np.random.random((val_samples, 2))
      val_y = np.random.randint(2, size=(val_samples, 1))

      history = model.fit(x, y,
                          epochs=1,
                          batch_size=10,
                          validation_data=(val_x, val_y))
      outs = model.evaluate(x, y, batch_size=10)
      preds = model.predict(x)

      def ref_true_pos(y_true, y_pred):
        return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

      # Test correctness (e.g. updates should have been run)
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict(val_x)
      val_outs = model.evaluate(val_x, val_y, batch_size=10)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)

      # Test with generators
      gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
      val_gen = [(np.array([x0]), np.array([y0]))
                 for x0, y0 in zip(val_x, val_y)]
      history = model.fit_generator(iter(gen),
                                    epochs=1,
                                    steps_per_epoch=samples,
                                    validation_data=iter(val_gen),
                                    validation_steps=val_samples)
      outs = model.evaluate_generator(iter(gen), steps=samples)
      preds = model.predict_generator(iter(gen), steps=samples)

      # Test correctness of the metric results
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict_generator(iter(val_gen), steps=val_samples)
      val_outs = model.evaluate_generator(iter(val_gen), steps=val_samples)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)
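Example #17 additionally uses `math_ops` and `state_ops`, which presumably come from the TensorFlow ops package (an assumption consistent with the call sites above):

# Presumed additional imports for Example #17.
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops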